diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 5696edca12..d0cf267dfa 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -1,89 +1,37 @@
steps:
- - label: ":yaml: YAML test suite :ruby: {{ matrix.ruby_source}}:{{ matrix.ruby }} :phone: Transport {{ matrix.transport }}"
+  - label: >-
+      :yaml: YAML test suite :ruby: {{ matrix.ruby_source }}:{{ matrix.ruby }}
+      :phone: Transport {{ matrix.transport }}
agents:
- provider: "gcp"
+ provider: gcp
matrix:
setup:
ruby:
- - "3.4"
- - "3.3"
- - "3.2"
- - "3.1"
+ - '3.4'
+ - '3.3'
+ - '3.2'
ruby_source:
- - "ruby"
+ - ruby
transport:
- - "8.3"
+ - '8.4'
adjustments:
- with: # JRuby tests
- ruby: "9.4"
- ruby_source: "jruby"
- transport: "8.3"
- # Test for different versions of transport
- - with:
- ruby: "3.4"
- ruby_source: "ruby"
- transport: "main"
- - with:
- ruby: "3.4"
- ruby_source: "ruby"
- transport: "8.2"
+      - with: # JRuby tests
+        ruby: '9.4'
+ ruby_source: jruby
+ transport: '8.4'
+ - with: # Test for main branch of transport
+ ruby: '3.4'
+ ruby_source: ruby
+ transport: main
env:
- RUBY_VERSION: "{{ matrix.ruby }}"
- STACK_VERSION: 9.0.0-SNAPSHOT
- ES_YAML_TESTS_BRANCH: main
- TRANSPORT_VERSION: "{{ matrix.transport }}"
- RUBY_SOURCE: "{{ matrix.ruby_source }}"
- TEST_SUITE: "platinum"
+ RUBY_VERSION: '{{ matrix.ruby }}'
+ STACK_VERSION: 9.0.5-SNAPSHOT
+ ES_YAML_TESTS_BRANCH: '9.0'
+ TRANSPORT_VERSION: '{{ matrix.transport }}'
+ RUBY_SOURCE: '{{ matrix.ruby_source }}'
+ TEST_SUITE: platinum
DEBUG: true
command: ./.buildkite/run-yaml-tests.sh
- artifact_paths: "elasticsearch-api/tmp/*"
- - label: "Create :elasticsearch: Serverless projects"
- key: "create-serverless"
- agents:
- image: docker.elastic.co/appex-qa/qaf:latest
- env:
- EC_PROJECT_PREFIX: ruby
- EC_REGISTER_BACKEND: buildkite
- EC_REGION: aws-eu-west-1
- EC_ENV: qa
- commands:
- - mkdir ~/.elastic
- - touch ~/.elastic/cloud.json
- - echo "{\"api_key\":{\"qa\":\"$(vault read -field=qa secret/ci/elastic-elasticsearch-ruby/cloud-access)\"}}" > ~/.elastic/cloud.json
- - ./.buildkite/create-serverless.sh
- - label: "Run :elasticsearch: Serverless :rspec: Tests :ruby:"
- key: run-serverless-tests
- depends_on:
- - step: create-serverless
- soft_fail:
- - exit_status: 1
- agents:
- provider: "gcp"
- env:
- RUBY_VERSION: "3.4"
- RUBY_SOURCE: "ruby"
- TEST_SUITE: serverless
- ES_YAML_TESTS_BRANCH: main
- QUIET: false
- command: ./.buildkite/run-yaml-tests.sh
- artifact_paths: "elasticsearch-api/tmp/*"
- - label: "Destroy :elasticsearch: Serverless projects"
- depends_on:
- - step: run-serverless-tests
- allow_failure: true
- agents:
- image: docker.elastic.co/appex-qa/qaf:latest
- env:
- EC_REGISTER_BACKEND: buildkite
- EC_ENV: qa
- EC_REGION: aws-eu-west-1
- commands:
- - mkdir ~/.elastic
- - touch ~/.elastic/cloud.json
- - export EC_PROJECT_NAME=`buildkite-agent meta-data get "EC_PROJECT_NAME"`
- - echo "{\"api_key\":{\"qa\":\"$(vault read -field=qa secret/ci/elastic-elasticsearch-ruby/cloud-access)\"}}" > ~/.elastic/cloud.json
- - qaf elastic-cloud projects delete
- - wait: ~
- continue_on_failure: true
- - label: "Log Results"
+ artifact_paths: elasticsearch-api/tmp/*
+ - label: Log Results
command: ./.buildkite/log-results.sh
diff --git a/.github/make.sh b/.github/make.sh
index a3ce02170d..b36c4cea5b 100755
--- a/.github/make.sh
+++ b/.github/make.sh
@@ -128,6 +128,7 @@ docker run \
-u "$(id -u)" \
--env "RUBY_VERSION=${RUBY_VERSION}" \
--env "WORKFLOW=${WORKFLOW}" \
+ --env "CLIENTS_GITHUB_TOKEN=${CLIENTS_GITHUB_TOKEN}" \
--name test-runner \
--volume "${repo}:/usr/src/app" \
--rm \
diff --git a/.github/workflows/main.yml b/.github/workflows/9.0.yml
similarity index 85%
rename from .github/workflows/main.yml
rename to .github/workflows/9.0.yml
index c72072a437..23577f5188 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/9.0.yml
@@ -1,20 +1,20 @@
-name: main
+name: 9.0
on:
push:
branches:
- - main
+ - 9.0
pull_request:
branches:
- - main
+ - 9.0
jobs:
- test-main:
+ tests:
env:
TEST_ES_SERVER: http://localhost:9250
PORT: 9250
strategy:
fail-fast: false
matrix:
- ruby: ['3.1', '3.2', '3.3', '3.4', 'jruby-9.3', 'jruby-9.4']
+ ruby: ['3.1', '3.2', '3.3', '3.4', 'jruby-9.3', 'jruby-9.4', 'jruby-10.0']
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -26,7 +26,7 @@ jobs:
sudo sysctl -w vm.max_map_count=262144
- uses: elastic/elastic-github-actions/elasticsearch@master
with:
- stack-version: 9.0.0-SNAPSHOT
+ stack-version: 9.0.5-SNAPSHOT
- uses: ruby/setup-ruby@v1
with:
ruby-version: ${{ matrix.ruby }}
@@ -39,4 +39,4 @@ jobs:
- name: elasticsearch
run: cd elasticsearch && bundle exec rake test:all
- name: elasticsearch-api
- run: rake es:download_artifacts[9.0.0-SNAPSHOT] && cd elasticsearch-api && bundle exec rake test:all
+ run: rake es:download_artifacts[9.0.5-SNAPSHOT] && cd elasticsearch-api && bundle exec rake test:all
diff --git a/.github/workflows/otel.yml b/.github/workflows/otel.yml
index fd17c34acb..6be79e1cd4 100644
--- a/.github/workflows/otel.yml
+++ b/.github/workflows/otel.yml
@@ -2,10 +2,10 @@ name: opentelemetry
on:
push:
branches:
- - main
+ - 9.0
pull_request:
branches:
- - main
+ - 9.0
jobs:
test-otel:
name: 'Test Open Telemetry'
@@ -28,7 +28,7 @@ jobs:
sudo sysctl -w vm.max_map_count=262144
- uses: elastic/elastic-github-actions/elasticsearch@master
with:
- stack-version: 9.0.0-SNAPSHOT
+ stack-version: 9.0.5-SNAPSHOT
- uses: ruby/setup-ruby@v1
with:
ruby-version: ${{ matrix.ruby }}
@@ -41,4 +41,4 @@ jobs:
- name: elasticsearch
run: cd elasticsearch && bundle exec rake test:all
- name: elasticsearch-api
- run: rake es:download_artifacts[9.0.0-SNAPSHOT] && cd elasticsearch-api && bundle exec rake test:all
+ run: rake es:download_artifacts[9.0.5-SNAPSHOT] && cd elasticsearch-api && bundle exec rake test:all
diff --git a/.gitignore b/.gitignore
index eda31fc818..b6ec5bd32e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@ profile/**/data/*.json
parsed_alternative_report.json
.byebug_history
build/
-*.gem
\ No newline at end of file
+*.gem
+elastic-client-generator-ruby
\ No newline at end of file
diff --git a/CHANGELOG-9.x.md b/CHANGELOG-9.x.md
deleted file mode 100644
index ef1d09842f..0000000000
--- a/CHANGELOG-9.x.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# CHANGELOG 9.x
-
-## Gem
-
-The size of both `elasticsearch` and `elasticsearch-api` gems will be smaller, since some unnecessary files that were being included in the gem have been removed.
-
-The required Ruby version is set to `2.6` to keep compatiblity wit JRuby 9.3. However, we only test the code against currently supported Ruby versions.
-
-## Elasticsearch Serverless
-
-The CI build now runs tests to ensure compatibility with Elasticsearch Serverless. You can use this gem for your Serverless deployments.
-
-## Elasticsearch API
-
-### Development
-
-#### Testing
-
-The gem migrated away from the Elasticsearch REST API tests and test runner in CI. We now run the [Elasticsearch Client tests](https://github.com/elastic/elasticsearch-clients-tests/) with the [Elasticsearch Tests Runner](https://github.com/elastic/es-test-runner-ruby). This gives us more control on what we're testing and makes the Buildkite build way faster in Pull Requests and scheduled builds.
-
-#### Rake tasks
-
-* Some old rake tasks that were not being used have been removed. The rest were streamlined, the `es` namespace has been streamlined to make it easier to run Elasticsearch with Docker during development. The `docker` namespace was merged into `es`.
-* Elasticsearch's REST API Spec tests can still be ran with `rake test:deprecated:rest_api` and setting the corresponding value for the environment variable `TEST_SUITE` ('platinum' or 'free').
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ac3416994b..9d7b1b2b49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,778 +1,80 @@
-*See the full release notes on the official documentation website: https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/release_notes.html*
+**See the full release notes on the official documentation website: https://www.elastic.co/docs/release-notes/elasticsearch/clients/ruby**
-## 8.17.0 Release notes
+# 9.0.5
-### Client
-* Tested versions of Ruby for 8.17.0: Ruby (MRI) 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
+- Fixes [2758](https://github.com/elastic/elasticsearch-ruby/issues/2758) - `msearch`, `bulk` and other NDJSON endpoints no longer override the `content-type` and `accept` headers. [Pull Request](https://github.com/elastic/elasticsearch-ruby/pull/2759)
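+
+As a minimal sketch of what this fix means in practice (the index, document and header value below are made up), per-request headers passed to an NDJSON endpoint are now kept rather than replaced by the client's NDJSON defaults:
+
+```ruby
+client.bulk(
+  body: [
+    { index: { _index: 'books' } },
+    { title: 'Snow Crash' }
+  ],
+  # Headers set here are preserved; the client no longer overrides them
+  # with its own 'content-type'/'accept' values for NDJSON requests.
+  headers: { 'accept' => 'application/json' }
+)
+```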
-### API
+# 9.0.4
-#### API changes
-* `async_search.submit` - Removes `keep_alive` parameter. Adds:
- * `ccs_minimize_roundtrips` (Boolean): When doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. However, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.
- * `rest_total_hits_as_int` (Boolean): Indicates whether hits.total should be rendered as an integer or an object in the rest search response.
-* `open_point_in_time` - Adds `allow_partial_search_results` (Boolean) parameter: Specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception (default: false).
+- Source code and documentation have been updated to support the common parameters and common cat parameters (`error_trace`, `filter_path`, `human`, `pretty`) in the APIs that accept them (see the example after this list). The API reference documentation can be generated with `rake doc`.
+- New API: `inference.put_custom`
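+
+As a quick sketch of what this enables (the index name is illustrative), the common parameters can be passed directly to the generated API methods:
+
+```ruby
+# Return only the document IDs from the hits, pretty-printed:
+client.search(index: 'books', filter_path: 'hits.hits._id', pretty: true)
+```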
+# 9.0.3
-## 8.16.0 Release notes
+- Adds a `ccr` alias for `cross_cluster_replication` and an `slm` alias for `snapshot_lifecycle_management` (see the example after this list).
+- Tested for JRuby 10.0.0.
+- General updates in source code docs.
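+
+For example (the index and policy names here are illustrative), the new aliases can be used interchangeably with the full namespace names:
+
+```ruby
+# Equivalent calls through the alias and the full namespace:
+client.ccr.follow_info(index: 'follower-index')
+client.cross_cluster_replication.follow_info(index: 'follower-index')
+
+client.slm.get_lifecycle(policy_id: 'nightly-snapshots')
+client.snapshot_lifecycle_management.get_lifecycle(policy_id: 'nightly-snapshots')
+```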
-### Client
-* Tested versions of Ruby for 8.16.0: Ruby (MRI) 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
+# 9.0.2
-### API
+- Updates how the 'Accept' and 'Content-Type' headers are set so that headers already set by the user are not duplicated or overwritten [#2666](https://github.com/elastic/elasticsearch-ruby/pull/2666).
-#### API changes
+# 9.0.1
-* `capabilities` - Adds `local_only` boolean parameter: True if only the node being called should be considered.
-* `cluster.stats`- Removes `flat_settings` parameter, adds `include_remotes` boolean parameter: Include remote cluster data into the response (default: false)
-* `indices.get_data_stream` - Adds `verbose` boolean parameter: Whether the maximum timestamp for each data stream should be calculated and returned (default: false). Adds `master_timeout` (see below).
-* `query_rules.delete_ruleset` - Accepts `ignore: 404` common parameter.
+- The request headers were updated for Elasticsearch v9: `compatible-with=9` [#2660](https://github.com/elastic/elasticsearch-ruby/pull/2660).
-##### Timeout parameters:
+# 9.0.0
-These parameters have been added to several APIs:
+Ruby 3.2 and up are tested and supported for 9.0. Older versions of Ruby have reached their end of life. We follow Ruby’s own maintenance policy and officially support all currently maintained versions per [Ruby Maintenance Branches](https://www.ruby-lang.org/en/downloads/branches/). The gem's required Ruby version is still set to `2.6` to keep compatibility with JRuby 9.3; however, we only test the code against currently supported Ruby versions.
-* `master_timeout` timeout for processing on master node.
-* `timeout` timeout for acknowledgement of update from all nodes in cluster parameters.
+## Gem
-Added in:
+The size of both the `elasticsearch` and `elasticsearch-api` gems is smaller than in previous versions. Some unnecessary files that were being included in the gems have been removed. A lot of old code has also been cleaned up for the `9.x` branch.
-* `indices.create_data_stream` - both.
-* `indices.delete_data_stream` - `master_timeout`.
-* `indices.get_data_lifecycle` - `master_timeout`.
-* `indices.get_data_stream` - `master_timeout`.
-* `indices.migrate_to_data_stream` - both.
-* `indices.promote_data_stream` - `master_timeout`.
-* `search_shards` - `master_timeout`.
+## Elasticsearch Serverless
-#### APIs Promoted from Experimental to Stable:
+With the release of `9.0`, the separate [Elasticsearch Serverless](https://github.com/elastic/elasticsearch-serverless-ruby) client has been discontinued. You can use this client to build your Elasticsearch Serverless Ruby applications: the Elasticsearch Serverless API is fully supported, and the CI build for Elasticsearch Ruby runs tests to ensure compatibility with Elasticsearch Serverless.
-* `indices.delete_data_lifecycle`
-* `indices.explain_data_lifecycle`
-* `indices.get_data_lifecycle`
-* `indices.put_data_lifecycle`
-* `security.create_cross_cluster_api_key`
-* `security.update_cross_cluster_api_key`
+## Elasticsearch API
-#### New APIs
+* The source code is now generated from [`elasticsearch-specification`](https://github.com/elastic/elasticsearch-specification/), so the API documentation is much more detailed and extensive. The value `Elasticsearch::ES_SPECIFICATION_COMMIT` is updated every time the code is generated with the commit hash of the elasticsearch-specification version the code is based on.
+* The API code has been updated for compatibility with the Elasticsearch API v9.0.
+* `indices.get_field_mapping` - `:fields` is a required parameter.
+* `knn_search` - This API has been removed. It was only ever experimental and was deprecated in `8.4`. It isn't supported in 9.0 and only works when the header `compatible-with=8` is set. The search API should be used for all kNN queries instead (see the sketch after this list).
+* The functions in `utils.rb` whose names started with a double underscore have been renamed to remove it (e.g. `__listify` to `listify`).
+* **Namespaces clean up**: The API namespaces are now generated dynamically based on the elasticsearch-specification. As such, some deprecated namespace files have been removed from the codebase:
+  * The `rollup` namespace was removed. The rollup feature was never generally available and has been deprecated since `8.11.0` in favor of downsampling.
+  * The `data_frame_deprecated` and `remote` namespace files have been removed; no APIs were available in them.
+  * The `shutdown` namespace was removed. It is designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
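+
+As noted above for `knn_search`, kNN searches now go through the regular search API. A minimal sketch (the index, field and vector values are placeholders):
+
+```ruby
+client.search(
+  index: 'my-vectors',
+  body: {
+    knn: {
+      field: 'embedding',
+      query_vector: [0.12, -0.34, 0.56],
+      k: 10,
+      num_candidates: 100
+    }
+  }
+)
+```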
-* `ingest.delete_ip_location_database` - Deletes an ip location database configuration.
-* `ingest.get_ip_location_database` - Returns the specified ip location database configuration.
-* `ingest.put_ip_location_database` - Puts the configuration for a ip location database to be downloaded.
+## Scroll APIs need to send scroll_id in request body
+Sending the `scroll_id` as a parameter has been deprecated since version 7.0.0. It needs to be specified in the request body for `clear_scroll` and `scroll`.
-#### New Experimental APIs
+**Impact**
-* `inference.stream_inference` - Perform streaming inference.
-* `query_rules.test` - Tests a query ruleset to identify the rules that would match input criteria.
+Client code using the `clear_scroll` or `scroll` APIs and passing the deprecated `scroll_id` as a parameter needs to be updated.
+**Action**
-## 8.15.0 Release notes
-
-### Client
-* Tested versions of Ruby for 8.15.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-#### API changes
-
-* `snapshot.delete` - Adds `wait_for_completion` Boolean parameter, should this request wait until the operation has completed before returning.
-* `cluster.allocation_explain` - `body` is no longer a required parameter.
-* `connector.put` - (experimental API) `body` and `connector_id` no longer required parameters.
-* `machine_learning.update_trained_model_deployment` has been promoted to stable from Beta. Adds Integer parameter `number_of_allocations`, updates the model deployment to this number of allocations.
-
-##### `master_timeout` and `timeout` parameters
-
-These parameters have been added to several APIs:
-
-* `master_timeout` timeout for processing on master node.
-* `timeout` timeout for acknowledgement of update from all nodes in cluster parameters.
-
-The APIs:
-
-* `autoscaling.delete_autoscaling_policy` - both.
-* `autoscaling.get_autoscaling_capacity`- `master_timeout`.
-* `get_autoscaling_policy` - `master_timeout`.
-* `put_autoscaling_policy` - both.
-* `enrich.delete_policy` - `master_timeout`.
-* `enrich.execute_policy` - `master_timeout`.
-* `enrich.get_policy` - `master_timeout`.
-* `enrich.put_policy` - `master_timeout`.
-* `enrich.stats` - `master_timeout`.
-* `features.reset_features` - `master_timeout`.
-* `license.delete` - both.
-* `license.post` - both.
-* `license.post_start_basic` - both.
-* `license.post_start_trial` - both.
-* `security.get_settings` - `master_timeout`.
-* `security.update_settings` - both.
-* `shutdown.get_node` - `master_timeout`.
-* `snapshot_lifecycle_management.start` - both.
-* `snapshot_lifecycle_management.stop` - both.
-* `watcher.get_settings` - `master_timeout`.
-* `watcher.start` - `master_timeout`.
-* `watcher.stop` - `master_timeout`.
-* `watcher.update_settings` - both.
-
-##### Inference APIs have been renamed:
-
-* `inference.delete_model` => `inference.delete`. Also adds two new parameters:
- * `dry_run` (Boolean), if true the endpoint will not be deleted and a list of ingest processors which reference this endpoint will be returned.
- * `force` (Boolean), if true the endpoint will be forcefully stopped (regardless of whether or not it is referenced by any ingest processors or semantic text fields).
-* `inference.get_model` => `inference.get`
-* `inference.put_model` => `inference.put`
-
-##### Query Rules parameters consolidated
-
-Changes in `query_ruleset` and `query_rules` APIs, these have been combined into the `query_rules` namespace:
-
-* `query_rules.delete_ruleset` - Renamed from `query_ruleset.delete`, promoted from experimental to stable.
-* `query_rules.delete_rule` - Deletes an individual query rule within a ruleset.
-* `query_rules.get_rule` - Returns the details about an individual query rule within a ruleset.
-* `query_rules.get_ruleset` - Renamed from `query_ruleset.get`, promoted from experimental to stable.
-* `query_rules.list_rulesets` - Renamed from `query_ruleset.list`, promoted from experimental to stable.
-* `query_rules.put_rule` - Creates or updates a query rule within a ruleset.
-* `query_rules.put_ruleset` - Renamed from `query_ruleset.put_ruleset`, promoted from experimental to stable.
-
-#### New APIs:
-
-* `ingest.delete_geoip_database` - Deletes a geoip database configuration.
-* `ingest.get_geoip_database` - Returns geoip database configuration.
-* `ingest.put_geoip_database` - Puts the configuration for a geoip database to be downloaded.
-* `security.bulk_delete_role` - Bulk delete roles in the native realm.
-* `security.bulk_put_role` - Bulk adds and updates roles in the native realm.
-* `security.query_role` - Retrieves information for Roles using a subset of query DSL.
-* `transform.get_node_stats` - Retrieves transform usage information for transform nodes.
-
-#### New Experimental APIs:
-
-* `connector.sync_job_claim` - Claims a connector sync job.
-* `connector.update_features` - Updates the connector features in the connector document.
-
-### Development
-
-- Added a build using [es-test-runner-ruby](https://github.com/elastic/es-test-runner-ruby) and [Elasticsearch Clients Tests](https://github.com/elastic/elasticsearch-clients-tests) which will replace the Elasticsearch YAML test runner.
-
-## 8.14.0 Release notes
-
-### Client
-* Tested versions of Ruby for 8.14.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-### API
-
-API changes:
-
-- All Connector APIs have been migrated to one common namespace `connector`:
- - `connector_secret.delete` -> `connector.secret_delete`
- - `connector_secret.get` -> `connector.secret_get`
- - `connector_secret.post` -> `connector.secret_post`
- - `connector_secret.put` -> `connector.secret_put`
- - `connector_sync_job.cancel` -> `connector.sync_job_cancel`
- - `connector_sync_job.check_in` -> `connector.sync_job_check_in`
- - `connector_sync_job.delete` -> `connector.sync_job_delete`
- - `connector_sync_job.error` -> `connector.sync_job_error`
- - `connector_sync_job.get` -> `connector.sync_job_get`
- - `connector_sync_job.post` -> `connector.sync_job_post`
- - `connector_sync_job.update_stats` -> `connector.sync_job_update_stats`
-
-- `connector.delete` - Adds Boolean parameter `:delete_sync_jobs`: Determines whether associated sync jobs are also deleted.
-- `cross_cluster_replication.delete_auto_follow_pattern`, `cross_cluster_replication.follow`, `cross_cluster_replication.follow_info`, `cross_cluster_replication.get_auto_follow_pattern`, `cross_cluster_replication.pause_auto_follow_pattern`, `cross_cluster_replication.pause_follow`, `cross_cluster_replication.put_auto_follow_pattern`, `cross_cluster_replication.resume_auto_follow_pattern`, `cross_cluster_replication.resume_follow`, `cross_cluster_replication.stats`, `cross_cluster_replication.unfollow` - Add Time parameter `:master_timeout`: Explicit operation timeout for connection to master node.
-- `cross_cluster_replication.follow_stats`, `cross_cluster_replication.forget_follower`, `cross_cluster_replication.stats` - Add Time parameter `:timeout`: Explicit operation timeout.
-- `indices/rollover` - Adds Boolean parameter `:target_failure` If set to true, the rollover action will be applied on the failure store of the data stream.
-- `inference.get_model` - Parameter `inference_id` no longer required.
-- `search_application.search` - Adds Boolean parameter `:typed_keys`: Specify whether aggregation and suggester names should be prefixed by their respective types in the response.
-- `security.get_api_key`, `security.query_api_keys` - Add Boolean parameter `:with_profile_uid`: flag to also retrieve the API Key's owner profile uid, if it exists.
-
-New APIs:
-
-- `profiling.topn_functions` - Extracts a list of topN functions from Universal Profiling.
-- `text_structure.find_field_structure` - Finds the structure of a text field in an index.
-- `text_structure/find_message_structure`- Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
-
-APIs Migrated from experimental to stable:
-
-- `esql.async_query`
-- `esql.query`
-
-New Experimental APIs:
-
-- `connector.update_active_filtering` - Activates the draft filtering rules if they are in a validated state.
-- `connector.update_filtering_validation` - Updates the validation info of the draft filtering rules.
-
-## 8.13.0 Release notes
-
-### Client
-* Tested versions of Ruby for 8.13.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-### Experimental ES|QL Helper
-
-This version provides a new experimental Helper for the ES|QL `query` API. Please check out [the documentation](https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/Helpers.html#esql-helper) and [open an issue](https://github.com/elastic/elasticsearch-ruby/issues/new/choose) if you encounter any problems or have any feedback.
-
-### API
-
-API Changes:
-
-* `async_search.status` - adds Time `:keep_alive` parameter: Specify the time interval in which the results (partial or final) for this search will be available.
-* `bulk` - adds boolean `:require_data_stream` parameter: When true, requires the destination to be a data stream (existing or to-be-created). Default is false.
-* `connector.list` - Adds the following parameters:
- * `:index_name` (List): A comma-separated list of connector index names to fetch connector documents for.
- * `:connector_name` (List): A comma-separated list of connector names to fetch connector documents for.
- * `:service_type` (List): A comma-separated list of connector service types to fetch connector documents for.
- * `:query` (String): A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names.
-* `esql.query` - adds boolean `:drop_null_columns` parameter: Should entirely null columns be removed from the results? Their name and type will be returning in a new `all_columns` section.
-* `field_caps` - Adds `:include_empty_fields` boolean parameter: Include empty fields in result.
-* `index` - adds boolean `:require_data_stream` parameter: When true, requires the destination to be a data stream (existing or to-be-created). Default is false.
-* `indices.rollover` - adds boolean `:lazy` parameter: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams.
-* connector_sync_job.list - adds List `:job_type` parameter: A comma-separated list of job types.
-* `inference.delete_model`, `inference.get_model`, `inference.inference`, `inference.put_model`: renames `:model_id` parameter to `:inference_id`.
-* `termvector` will show a warning since it's been deprecated. Please use the plural version, `termvectors`.
-
-New APIs:
-
-* `indices.resolve_cluster` - Resolves the specified index expressions to return information about each cluster, including the local cluster, if included.
-* `profiling.flamegraph` - Extracts a UI-optimized structure to render flamegraphs from Universal Profiling.
-* `profiling.stacktraces` - Extracts raw stacktrace information from Universal Profiling.
-* `security.query_user` - Retrieves information for Users using a subset of query DSL
-* `text_structure.test_grok_pattern` - Tests a Grok pattern on some text.
-
-APIs Migrated from experimental to stable:
-
-* `synonyms.delete_synonym`
-* `synonyms.delete_synonym_rule`
-* `synonyms.get_synonym`
-* `synonyms.get_synonym_rule`
-* `synonyms.get_synonyms_sets`
-* `synonyms.put_synonym`
-* `synonyms.put_synonym_rule`
-
-New Experimental APIs:
-
-* `connector.update_api_key_id` - Updates the API key id and/or API key secret id fields in the connector document.
-* `connector.update_index_name` - Updates the index name of the connector.
-* `connector.update_native` - Updates the is_native flag of the connector.
-* `connector.update_service_type` - Updates the service type of the connector.
-* `connector.update_status` - Updates the status of the connector.
-* `esql.async_query` - Executes an ESQL request asynchronously
-* `esql.async_query_get` - Retrieves the results of a previously submitted async query request given its ID.
-
-New Experimental namespace `connector_secret`:
-
-* `connector_secret.delete` - Deletes a connector secret.
-* `connector_secret.get` - Retrieves a secret stored by Connectors.
-* `connector_secret.post` - Creates a secret for a Connector.
-* `connector_secret.put` - Creates or updates a secret for a Connector.
-
-### Development
-
-* Migrated from `byebug` to `debug`.
-* Added extra testing for OpenTelemetry.
-
-## 8.12.0 Release notes
-
-### Client
-
-* Tested versions of Ruby for 8.12.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-### API
-
-API Changes:
-
-* `bulk` - Adds boolean `:list_executed_pipelines` parameter: Sets `list_executed_pipelines` for all incoming documents. Defaults to unset (false).
-* `indices.put_settings` - Adds boolean `:reopen` parameter: Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. The default is `false`.
-* `open_point_in_time` - Adds Hash `:body` parameter: an index_filter specified with the Query DSL.
-* `security.get_api_key` - Adds boolean `:active_only` parameter: flag to limit response to only active (not invalidated or expired) API keys.
-
-#### New APIs
-
-New API for [Universal profiling](https://www.elastic.co/guide/en/observability/8.12/universal-profiling.html):
-
-* `profiling.status` - Returns basic information about the status of Universal Profiling.
-
-
-New experimental API:
-
-* `simulate.ingest` - Simulates running ingest with example documents. See: https://www.elastic.co/guide/en/elasticsearch/reference/8.12/simulate-ingest-api.html
-
-##### Connectors API
-
-Version 8.12 introduces the experimental [Connectors API](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/connector-apis.html). Use the following APIs to manage connectors:
-
-* `connector.post` - Creates a connector. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-api.html)
-* `connector.put` - Creates or updates a connector. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-api.html)
-* `connector.delete` - Deletes a connector. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/delete-connector-api.html)
-* `connector.get` - Returns the details about a connector. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/get-connector-api.html)
-* `connector.list` - Lists all connectors. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/list-connector-api.html)
-* `connector.check_in` - Updates the last_seen timestamp in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/check-in-connector-api.html)
-* `connector.update_configuration` - Updates the connector configuration. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-configuration-api.html)
-* `connector.update_error` - Updates the error field in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-error-api.html)
-* `connector.update_filtering` - Updates the filtering field in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-filtering-api.html)
-* `connector.last_sync` - Updates the stats of last sync in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-last-sync-api.html)
-* `connector.update_name` - Updates the name and/or description fields in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-name-description-api.html)
-* `connector.update_pipeline` - Updates the pipeline field in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-pipeline-api.html)
-* `connector.update_scheduling` - Updates the scheduling field in the connector document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-scheduling-api.html)
-
-Use the following APIs to manage sync jobs:
-
-* `connector_sync_job.cancel` - Cancels a connector sync job. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/cancel-connector-sync-job-api.html)
-* `connector_sync_job.check_in` - Checks in a connector sync job (refreshes 'last_seen'). See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/check-in-connector-sync-job-api.html)
-* `connector_sync_job.delete` - Deletes a connector sync job. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/delete-connector-sync-job-api.html)
-* `connector_sync_job.error` - Sets an error for a connector sync job. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/set-connector-sync-job-error-api.html)
-* `connector_sync_job.get` - Returns the details about a connector sync job. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/get-connector-sync-job-api.html)
-* `connector_sync_job.list` - Lists all connector sync jobs. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/list-connector-sync-jobs-api.html)
-* `connector_sync_job.post` - Creates a connector sync job. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-sync-job-api.html)
-* `connector_sync_job.update_stats` - Updates the stats fields in the connector sync job document. See [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.12/set-connector-sync-job-stats-api.html)
-
-
-
-## 8.11.0 Release notes
-
-### Client
-
-* Tested versions of Ruby for 8.11.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-* Adds native support for *Open Telemetry*. See Open Telemetry for documentation.
-* Improved documentation, now you can find more examples in Ruby in the [REST API reference](https://www.elastic.co/guide/en/elasticsearch/reference/8.11/rest-apis.html).
-
-### API
-
-New Experimental APIs:
-- `esql.query` - Executes an ESQL request.
-- `inference.delete_model` - Delete model in the Inference API.
-- `inference.get_model` - Get a model in the Inference API.
-- `inference.inference` - Perform inference on a model.
-- `inference.put_model` - Configure a model for use in the Inference API.
-
-## 8.10.0 Release notes
-
-### Client
-* Tested versions of Ruby for 8.10.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-
-### API
-
-#### New Experimental APIs, for internal use:
-- `fleet.delete_secret`
-- `fleet.get_secret`
-- `fleet.post_secret`
-
-#### New stable APIs:
-- `security.get_settings` - Retrieve settings for the security system indices
-- `security.update_settings` - Update settings for the security system indices
-
-#### New Experimental API:
-- `query_ruleset.list` List query rulesets.
-
-#### API Changes:
-- `indices.reload_search_analyzers` - Adds parameter `resource` changed resource to reload analyzers from if applicable
-
-Promoted from Experimental to Beta:
-- `security.create_cross_cluster_api_key`
-- `security.update_cross_cluster_api_key`
-
-#### Synonyms namespace update:
-
-All synonym related APIs have been moved to the `synonyms` namespace and some of the endpoints have been renamed, as well as their parameters:
-- `synonyms.delete` => `synonyms.delete_synonym` - requires `id`, the id of the synonyms set to be deleted.
-- `synonyms.get` => `synonyms.get_synonym` - requires `id`, the name of the synonyms set to be retrieved.
-- `synonyms_set.get_synonyms_sets` => `synonyms.get_synonyms_sets`
-- `synonyms.put` => `synonyms.put_synonym` - requires `id` of the synonyms set to be created or updated.
-- `synonym_rule.put` => `synonyms.put_synonym_rule` - Parameters changed to `set_id` (the id of the synonym set to be updated with the synonym rule) and `rule_id` (the id of the synonym rule to be updated or created).
-- New Experimental API `synonyms.delete_synonym_rule` - Deletes a synonym rule in a synonym set
-- New Experimental API `synonyms.get_synonym_rule` - Retrieves a synonym rule from a synonym set
-
-## 8.9.0 Release notes
-
-### Client
-
-* Tested versions of Ruby for 8.9.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-* Updated product validation. The code for the product validation was refactored in a few ways:
- * Just check header, does not check the version of the server.
- * Warns only once when there's a general server error.
- * Removes the call to '/' (client.info) when doing the first request, checking on the first actual request from the client.
-* Fixes User-Agent code. In the migration to 8.x, the user agent code was extracted into transport, since we're now using that library in other projects. So for the Elasticsearch Client, the user-agent would be reported as the one defined in elastic-transport. This release fixes the issue and brings back the user agent in the format that was being used in 7.x
-
-### Helpers
-
-This release introduces two new Helpers in the client:
-
-* BulkHelper - This helper provides a better developer experience when using the Bulk API. At its simplest, you can send it a collection of hashes in an array, and it will bulk ingest them into {es}.
-* ScrollHelper - This helper provides an easy way to get results from a Scroll.
-
-See [Helpers](https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/Helpers.html) to read more about them.
-
-### API
-
-#### New APIs
-
-* `cluster.info` - Returns different information about the cluster.
-
-#### New Experimental APIs and namespaces:
-
-This functionality is Experimental and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.
-
-* New namespace: `query_ruleset`
- * `query_ruleset.delete` - Deletes a query ruleset.
- * `query_ruleset.get` - Returns the details about a query ruleset.
- * `query_ruleset.put` - Creates or updates a query ruleset.
-* New API: `search_application.render_query` Renders a query for given search application search parameters.
-* New API: `security.create_cross_cluster_api_key` - Creates a cross-cluster API key for API key based remote cluster access.
-* New API: `security.upate_cross_cluster_api_key` - Updates attributes of an existing cross-cluster API key.
-* New namespace: `synonyms`
- * `synonyms.delete`- Deletes a synonym set
- * `synonyms.get` - Retrieves a synonym set
- * `synonyms.put` - Creates or updates a synonyms set
-* New namespace: `synonym_rule`
- * `synonym_rule.put` - Creates or updates a synonym rule in a synonym set
-* New namespace: `synonyms`
- * `synonyms_set.get` - Retrieves a summary of all defined synonym sets
-
-## 8.8.0 Release notes
-
-- Tested versions of Ruby for 8.8.0: Ruby (MRI) 3.0, 3.1 and **3.2**. JRuby 9.3 and JRuby 9.4.
-
-### API
-
-- Updates development dependency `minitest-reporters` to `>= 1.6` to include showing failures at the end of the test run.
-
-#### New APIs
-
-- `watcher.get_settings` - Retrieve settings for the watcher system index.
-- `watcher.update_settings` - Update settings for the watcher system index.
-
-#### New Experimental APIs
-
-- `indices.delete_data_lifecycle`- Deletes the data lifecycle of the selected data streams
-- `indices.explain_data_lifecycle` - Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc.
-- `indices.get_data_lifecycle` - Returns the data lifecycle of the selected data streams.
-- `indices.put_data_lifecycle` - Updates the data lifecycle of the selected data streams.
-- `search_application.delete` - Deletes a search application.
-- `search_application.delete_behavioral_analytics` - Delete a behavioral analytics collection.
-- `search_application.get` - Returns the details about a search application.
-- `search_application.get_behavioral_analytics` - Returns the existing behavioral analytics collections.
-- `search_application.list` - Returns the existing search applications.
-- `search_application.post_behavioral_analytics_event` - Creates a behavioral analytics event for existing collection.
-- `search_application.put` - Creates or updates a search application.
-- `search_application.put_behavioral_analytics` - Creates a behavioral analytics collection.
-- `search_application.search` - Perform a search against a search application.
-
-#### API Changes
-
-- `clear_scroll` now works with the argument `ignore: 404`. [Issue on GitHub](https://github.com/elastic/elasticsearch-ruby/issues/2067).
-- The code generator was updated to fix a bug for `ignore: 404`. APIs that were supposed to support this wouldn't parse the parameters correctly. The support it now: `security.get_role`, `watcher.delete_watch`
-- `cluster.get_component_template`, `indices.get_data_stream`, `indices.get_index_template`, `indices.simulate_index_template`, `indices.simulate_template` - Add `include_defaults` (Boolean) parameter: Return all default configurations for the component template (default: false).
-- `machine_learning.put_trained_model` - Adds `wait_for_completion` (Boolean) parameter: Whether to wait for all child operations(e.g. model download) to complete, before returning or not (default: false).
-- `machine_learning.start_trained_model_deployment` - Adds `deployiment_id` (String) parameter: The Id of the new deployment. Defaults to the model_id if not set.
-- `search` - Adds `include_named_queries_score` (Boolean) parameter: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false).
-- `transform.delete_transform` - Adds `delete_dest_index` (Boolean) parameter: When `true`, the destination index is deleted together with the transform. The default value is `false`, meaning that the destination index will not be deleted.
-
-## 8.7.1 Release notes
-
-### API Bugfix
-
-- Updates `logstash.get_pipeline`, fixed in the specification `id` is not a required parameter, so removes raising `ArgumentError` when id is not present.
-
-## 8.7.0 Release notes
-
-- Tested versions of Ruby for 8.7.0: Ruby (MRI) 2.7, 3.0, 3.1 and **3.2**. JRuby 9.3 and JRuby 9.4. Ruby 2.7's end of life is coming in a few days, so this'll probably be the last release to test for Ruby 2.7.
-
-### New APIs
-
-- `health_report` - Returns the health of the cluster.
-- `transform.schedule_now_transform` - Schedules now a transform.
-
-### API Changes
-
-- `transform.get_transform_stats` - Adds `timeout` (Time) parameter. Controls the time to wait for the stats.
-- `transform.start_transform` - Adds `from` (String) parameter. Restricts the set of transformed entities to those changed after this time.
-- `ml.delete_job`, `ml.reset_job` - Add `delete_user_annotations` (Boolean) parameter. Should annotations added by the user be deleted.
-- `ml.clear_trained_model_deployment_cache`, `ml.infer_trained_model`, `ml.put_trained_model_definition_part`, `ml.put_trained_model_vocabulary`, `ml.start_trained_model_deployment`, `ml.stop_trained_model_deployment` - These APIs are no longer in Beta.
-
-## 8.6.0 Release notes
-
-- Tested versions of Ruby for 8.6.0: Ruby (MRI) 2.7, 3.0, 3.1 and **3.2**. JRuby 9.3 and **JRuby 9.4**.
-
-### New APIs
-
-- `update_trained_model_deployment` - Updates certain properties of trained model deployment (This functionality is in Beta and is subject to change).
-
-### API Changes
-
-- `cluster.reroute` - `:metric` parameter adds `none` as an option.
-- `ml.start_trained_model_deployment` - New parameter `:priority` (String), the deployment priority
-
-
-## 8.5.2 Release notes
-
-### API Bugfix
-
-Fixes `security.create_service_token` API, uses `POST` when token name isn't present.
-Thanks [@carlosdelest](https://github.com/carlosdelest) for reporting in [#1961](https://github.com/elastic/elasticsearch-ruby/pull/1961).
-
-## 8.5.1 Release notes
-
-### Bugfix
-
-Fixes bug when instantiating client with `api_key`: When passing in `api_key` and `transport_options` that don't include headers to the client, the `api_key` code would overwrite the arguments passed in for `transport_options`. This was fixed in [this Pull Request](https://github.com/elastic/elasticsearch-ruby/pull/1941/files).
-Thanks [svdasein](https://github.com/svdasein) for reporting in [#1940](https://github.com/elastic/elasticsearch-ruby/issues/1940).
-
-## 8.5.0 Release notes
-
-- Tested versions of Ruby for 8.5.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### Client
-
-With the latest release of `elastic-transport` - `v8.1.0` - this gem now supports Faraday v2. Elasticsearch Ruby has an open dependency on `elastic-transport` (`'elastic-transport', '~> 8'`), so when you upgrade your gems, `8.1.0` will be installed. This supports both Faraday v1 and Faraday v2. The main change on dependencies when using Faraday v2 is all adapters, except for the default `net_http` one, have been moved out of Faraday into separate gems. This means if you're not using the default adapter and you migrate to Faraday v2, you'll need to add the adapter gems to your Gemfile.
-
-These are the gems required for the different adapters with Faraday 2, instead of the libraries on which they were based:
-```
-# HTTPCLient
-gem 'faraday-httpclient'
-
-# NetHTTPPersistent
-gem 'faraday-net_http_persistent'
-
-# Patron
-gem 'faraday-patron'
-
-# Typhoeus
-gem 'faraday-typhoeus'
+If you are using the `clear_scroll` or `scroll` APIs, and sending the `scroll_id` as a parameter, you need to update your code to send the `scroll_id` as part of the request body:
+```ruby
+# Before:
+client.clear_scroll(scroll_id: scroll_id)
+# Now:
+client.clear_scroll(body: { scroll_id: scroll_id })
+
+# Before:
+client.scroll(scroll_id: scroll_id)
+# Now:
+client.scroll(body: { scroll_id: scroll_id })
```
-Things should work fine if you migrate to Faraday 2 as long as you include the adapter (unless you're using the default one `net-http`), but worst case scenario, you can always lock the version of Faraday in your project to 1.x:
-`gem 'faraday', '~> 1'`
-
-Be aware if migrating to Faraday v2 that it requires at least Ruby `2.6`, unlike Faraday v1 which requires `2.4`.
-
-*Troubleshooting*
-
-If you see a message like:
-`:adapter is not registered on Faraday::Adapter (Faraday::Error)`
-Then you probably need to include the adapter library in your gemfile and require it.
-
-Please [submit an issue](https://github.com/elastic/elasticsearch-ruby/issues) if you encounter any problems.
-
-### API
-
-#### New APIs
-
-- `machine_learning.clear_trained_model_deployment_cache` - Clear the cached results from a trained model deployment (Beta).
-- `security.bulk_update_api_keys` - Updates the attributes of multiple existing API keys.
-
-#### API Changes
-
-- `rollup.rollup` renamed to `indices.downsample`. The method now receives the `index` to downsample (Required) and instead of `rollup_index`, use target_index as the index to store downsampled data.
-
-- `security.get_api_key` and `security.query_api_keys` add `:with_limited_by` flag to show the limited-by role descriptors of API Keys.
-- `security.get_user` adds `:with_profile_uid` flag to retrieve profile uid (if exists) associated to the user.
-- `security.get_user_profile` now retrieves user profiles for given unique ID(s). `:uid` is now a list of comma-separated list of unique identifier for user profiles.
-- `text_structure.find_structure` adds `:ecs_compatibility`, optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'.
-
-Machine learning APIs promoted from *Experimental* to *Beta*:
-
-- `machine_learning.clear_trained_model_deployment_cache.rb`
-- `machine_learning.infer_trained_model.rb`
-- `machine_learning.put_trained_model_definition_part.rb`
-- `machine_learning.put_trained_model_vocabulary.rb`
-- `machine_learning.start_trained_model_deployment.rb`
-- `machine_learning.stop_trained_model_deployment.rb`
-
-Security usef profile APIs promoted from *Experimental* to *Stable*:
-
-- `security/activate_user_profile`
-- `security/disable_user_profile`
-- `security/enable_user_profile`
-- `security/get_user_profile`
-- `security/has_privileges_user_profile`
-- `security/suggest_user_profile`
-- `security/update_user_profile_data`
-
-
-## 8.4.0 Release Notes
-
-- Tested versions of Ruby for 8.4.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### API
-
-#### New APIs
-
-* `security.update_api_key` - Updates attributes of an existing API key. [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.4/security-api-update-api-key.html).
-
-#### API Changes
+## Testing
-* `get` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index.
-* `machine_learning.start_trained_model_deployment` - Adds new parameter `cache_size` (String) A byte-size value for configuring the inference cache size. For example, 20mb.
-* `mget` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index.
-* `search` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index.
-* `snapshot.get` - Adds new parameters:
- * `sort` (String) Allows setting a sort order for the result. Defaults to start_time (options: start_time, duration, name, repository, index_count, shard_count, failed_shard_count).
- * `size` (Integer) Maximum number of snapshots to return. Defaults to 0 which means return all that match without limit.
- * `order` (String) Sort order (options: asc, desc).
- * `from_sort_value` (String) Value of the current sort column at which to start retrieval.
- * `after` (String) Offset identifier to start pagination from as returned by the 'next' field in the response body.
- * `offset` (Integer) Numeric offset to start pagination based on the snapshots matching the request. Defaults to 0.
- * `slm_policy_filter` (String) Filter snapshots by a comma-separated list of SLM policy names that snapshots belong to. Accepts wildcards. Use the special pattern '_none' to match snapshots without an SLM policy.
+The `elasticsearch-api` gem migrated away from the Elasticsearch REST API tests and test runner in CI. We now run the [Elasticsearch Client tests](https://github.com/elastic/elasticsearch-clients-tests/) with the [Elasticsearch Tests Runner](https://github.com/elastic/es-test-runner-ruby). This gives us more control over what we're testing and makes the Buildkite build much faster in Pull Requests and scheduled builds.
-## 8.3.0 Release Notes
-
-- Tested versions of Ruby for 8.3.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### API
-
-- Added build hash to auto generated code. The code generator obtains the git hash from the Elasticsearch specification and adds it as a comment in the code. This allows us to track the version for each generated class.
-- Updated for compatibility with Elasticsearch 8.3's API.
-
-#### API Changes
-
-* `cluster.delete_voting_config_exclusions`, `cluster.post_voting_config_exclusions` - Add new parameter `master_timeout` (Time) Timeout for submitting request to master.
-* `machine_learning.infer_trained_model_deployment` is renamed to `machine_learning.infer_trained_model`. The url `/_ml/trained_models/{model_id}/deployment/_infer` is deprecated since 8.3, use `/_ml/trained_models/{model_id}/_infer` instead.
-* `machine_learning.preview_datafeed` - Adds new parameters:
- * `start` (String) The start time from where the datafeed preview should begin
- * `end` (String) The end time when the datafeed preview should stop
-* `machine_learning.start_trained_model_deployment` - Adds new parameters:
- * `number_of_allocations` (Integer) The number of model allocations on each node where the model is deployed.
- * `threads_per_allocation` (Integer) The number of threads used by each model allocation during inference.
- * `queue_capacity` (Integer) Controls how many inference requests are allowed in the queue at a time.
-* `search_mvt` - Adds new parameter: `with_labels` (Boolean) If true, the hits and aggs layers will contain additional point features with suggested label positions for the original features
-* `snapshot.get` - Adds new parameter: `index_names` (Boolean) Whether to include the name of each index in the snapshot. Defaults to true.
-
-#### New Experimental APIs
-* `security.has_privileges_user_profile` Determines whether the users associated with the specified profile IDs have all the requested privileges
-
-## 8.2.2 Release notes
-
-- Updates dependency on `elastic-transport` to `~> 8.0`
-
-## 8.2.1 Release notes
-
-No release, no changes on the client.
-
-## 8.2.0
-
-- Tested versions of Ruby for 8.2.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### API
-
-Updated for compatibility with Elasticsearch 8.2's API.
-
-#### New parameters:
-
-* `field_caps`
- - `filters` An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent
- - `types` Only return results for fields that have one of the types in the list
-
-#### New APIs:
-
-- `cat.component_templates` - Returns information about existing component_templates templates.
-- `ml.get_memory_stats` - Returns information on how ML is using memory.
-
-#### New Experimental APIs:
-
-- `security.activate_user_profile` - Creates or updates the user profile on behalf of another user.
-- `security.disable_user_profile` - Disables a user profile so it's not visible in user profile searches.
-- `security.enable_user_profile` - Enables a user profile so it's visible in user profile searches.
-- `security.get_user_profile` - Retrieves a user profile for the given unique ID.
-- `security.suggest_user_profiles` - Get suggestions for user profiles that match specified search criteria.
-- `security.update_user_profile_data` - Update application specific data for the user profile of the given unique ID.
-
-## 8.1.2, 8.0.1
-
-### API
-
-- Fixes an issue with the generated API code. When updating the code generator for 8.x, the order of `arguments.clone` in the generated code was changed. This would make it so that we would modify the parameters passed in before cloning them, which is undesired. Issue: [#1727](https://github.com/elastic/elasticsearch-ruby/issues/1727).
-
-## 8.1.1
-
-No release, no changes on the client.
-
-## 8.1.0
-
-- Tested versions of Ruby for 8.1.0: Ruby (MRI) 2.6, 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### API
-
-Updated for compatibility with Elasticsearch 8.1's API.
-
-#### New parameters:
-- `indices.forcemerge` - `wait_for_completion` Should the request wait until the force merge is completed.
-- `indices.get` - `features` Return only information on specified index features (options: aliases, mappings, settings).
-- `ingest.put_pipeline` `if_version` (Integer), required version for optimistic concurrency control for pipeline updates.
-- `ml.delete_trained_model` - `timeout` controls the amount of time to wait for the model to be deleted. `force` (Boolean) true if the model should be forcefully deleted.
-- `ml.stop_trained_model_deployment` - `allow_no_match` whether to ignore if a wildcard expression matches no deployments. (This includes `_all` string or when no deployments have been specified). `force` true if the deployment should be forcefully stopped. Adds `body` parameter, the stop deployment parameters.
-- `nodes.hot_threads` - `sort` the sort order for 'cpu' type (default: total) (options: cpu, total)
-
-#### Updated parameters:
-- `indices.get_index_template` - `name` is now a String, a pattern that returned template names must match.
-- `knn_search` - `index` removes option to use empty string to perform the operation on all indices.
-- `ml.close_job`, `ml.get_job_stats`, `ml.get_jobs`, `ml.get_overall_buckets` - Remove `allow_no_jobs` parameter.
-- `ml.get_datafeed_stats`, `ml.get_datafeeds` - Remove `allow_no_datafeeds` parameter.
-- `nodes.hot_threads` - `type` parameter adds `mem` option.
-- `nodes.info` - `metric` updated to use `_all` to retrieve all metrics and `_none` to retrieve the node identity without any additional metrics. (options: settings, os, process, jvm, thread_pool, transport, http, plugins, ingest, indices, aggregations, _all, _none). `index_metric` option `shards` changes to `shard_stats`.
-- `open_point_in_time` - `keep_alive` is now a required parameter.
-- `search_mvt` - `grid_type` parameter adds `centroid` option in addition to `grid` and `point`.
-
-- New experimental APIs, designed for internal use by the fleet server project: `fleet.search`, `fleet.msearch`.
-
-#### New APIs
-- OpenID Connect Authentication: `security.oidc_authenticate`, `security.oidc_logout`, `security.oidc_prepare_authentication`.
-- `transform.reset_transform`.
-
-
-## 8.0.0
-
-First release for the `8.x` branch with a few major changes.
-
-- Tested versions of Ruby for 8.0.0: Ruby (MRI) 2.6, 2.7, 3.0 and 3.1, JRuby 9.3.
-
-### Client
-
-#### Elastic Transport
-
-The code for the dependency `elasticsearch-transport` has been promoted to [its own repository](https://github.com/elastic/elastic-transport-ruby/) and the project and gem have been renamed to [`elastic-transport`](https://rubygems.org/gems/elastic-transport). This gem now powers [`elasticsearch`](https://rubygems.org/gems/elasticsearch) and [`elastic-enterprise-search`](https://rubygems.org/gems/elastic-enterprise-search). The `elasticsearch-transport` gem won't be maintained after the last release in the `7.x` branch, in favour of `elastic-transport`.
-
-This will allow us to better address maintainance in both clients and the library itself.
-
-### API
-
-The `elasticsearch-api` library has been generated based on the Elasticsearch 8.0.0 REST specification.
-
-#### X-Pack Deprecation
-
-X-Pack has been deprecated. The `elasticsearch-xpack` gem will no longer be maintained after the last release in the `7.x` branch. The "X-Pack" integration library codebase was merged into `elasticsearch-api`. All the functionality is available from `elasticsearch-api`. The `xpack` namespace was removed for accessing any APIs other than `_xpack` (`client.xpack.info`) and `_xpack/usage` (`client.xpack.usage`). But APIs which were previously available through the `xpack` namespace e.g.: `client.xpack.machine_learning` are now only available directly: `client.machine_learning`.
-
-#### Parameter checking was removed
-
-The code in `elasticsearch-api` will no longer validate all the parameters sent. It will only validate the required parameters such as those needed to build the path for the request. But other API parameters are going to be validated by Elasticsearch. This provides better forwards and backwards compatibility in the client.
-
-#### Response object
-
-In previous versions of the client, calling an API endpoint would return the JSON body of the response. With `8.0`, we are returning a new Response object `Elasticsearch::API::Response`. It still behaves like a Hash to maintain backwards compatibility, but adds the `status` and `headers` methods from the `Elastic::Transport:Transport::Response` object:
-
-```ruby
-elastic_ruby(main)> response = client.info
-=> #<Elasticsearch::API::Response:0x... @response=#<Elastic::Transport::Transport::Response:0x... @body={"name"=>"instance",
- "cluster_name"=>"elasticsearch-8-0-0-SNAPSHOT-rest-test",
- "cluster_uuid"=>"oIfRARuYRGuVYybjxQJ87w",
- "version"=>
- {"number"=>"8.0.0-SNAPSHOT",
- "build_flavor"=>"default",
- "build_type"=>"docker",
- "build_hash"=>"7e23c54eb31cc101d1a4811b9ab9c4fd33ed6a8d",
- "build_date"=>"2021-11-04T00:21:32.464485627Z",
- "build_snapshot"=>true,
- "lucene_version"=>"9.0.0",
- "minimum_wire_compatibility_version"=>"7.16.0",
- "minimum_index_compatibility_version"=>"7.0.0"},
- "tagline"=>"You Know, for Search"},
- @headers={"X-elastic-product"=>"Elasticsearch", "content-type"=>"application/json", "content-length"=>"567"},
- @status=200>>
-elastic_ruby(main)> response.status
-=> 200
-elastic_ruby(main)> response.headers
-=> {"X-elastic-product"=>"Elasticsearch", "content-type"=>"application/json", "content-length"=>"567"}
-elastic_ruby(main)> response['name']
-=> "instance"
-elastic_ruby(main)> response['tagline']
-=> "You Know, for Search"
-```
+## Fixes
-Please [let us know if you find any issues](https://github.com/elastic/elasticsearch-ruby/issues).
+* Some old rake tasks that were not being used have been removed. The remaining tasks were streamlined, and the `es` namespace now makes it easier to run Elasticsearch with Docker during development. The `docker` task namespace was merged into `es`.
+* Elasticsearch's REST API Spec tests can still be run with `rake test:deprecated:rest_api`, setting the environment variable `TEST_SUITE` to the corresponding value ('platinum' or 'free'), as in the sketch below.
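+
+A minimal sketch of such an invocation, assuming a test cluster is already running (the task and variable names are the ones listed in the entry above):
+
+```bash
+# Run the deprecated REST API spec tests against the free suite
+TEST_SUITE=free rake test:deprecated:rest_api
+```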
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4c7bbf1cc6..aeaa7b0b62 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,12 +16,12 @@ This will run `bundle install` in all subprojects.
You can run the client code right away in a Interactive Ruby Shell by running the following command from the project's root directory:
```
-$ ./elasticsearch/bin/elastic_ruby_console
+$ rake console # calls ./elasticsearch/bin/elastic_ruby_console
[1] elastic_ruby(main)> client = Elasticsearch::Client.new(host: 'http://elastic:changeme@localhost:9200/', log: true)
[2] elastic_ruby(main)> client.info
```
-This will use either `irb` or `pry` and load the `elasticsearch` and `elasticsearch-api` gems into the shell.
+This will use either `irb` or `pry` and load the `elasticsearch` and `elasticsearch-api` gems into the shell.
# Tests
@@ -31,8 +31,8 @@ To run the tests, you need to start a testing cluster on port 9200. We suggest u
rake docker:start[VERSION]
```
-E.g.: `rake docker:start[8.0-SNAPSHOT]`.
-To start the container with Platinum, pass it in as a parameter: `rake docker:start[7.x-SNAPSHOT,platinum]`.
+E.g.: `rake docker:start[9.0.0-SNAPSHOT]`.
+To start the container with Platinum, pass it in as a parameter: `rake docker:start[8.x-SNAPSHOT,platinum]`.
There's another rake task that will read the STACK_VERSION value from `.buildkite/pipeline.yml` and run that version of Elasticsearch: `rake es:up`.
@@ -42,6 +42,12 @@ max virtual memory areas vm.max_map_count [65530] likely too low, increase to at
```
Check [this link](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_set_vm_max_map_count_to_at_least_262144) for instructions on how to fix it.
+You can also use [start-local](https://github.com/elastic/start-local), which runs Elasticsearch and Kibana locally for development and testing. For client development you only need Elasticsearch (`-esonly`), and you can specify a version with the `-v` parameter:
+
+```bash
+curl -fsSL https://elastic.co/start-local | sh -s -- -esonly -v 9.0.0
+```
+
As mentioned, the tests will attempt to run against `http://localhost:9200` by default. We provide the Docker task for the test cluster and recommend using it, but you can provide a different test server of your own. If you're using a different host or port, set the `TEST_ES_SERVER` environment variable with the server information. E.g.:
```
@@ -51,12 +57,20 @@ $ TEST_ES_SERVER='http://localhost:9250/' be rake test:client
To run all the tests in all the subprojects, use the Rake task:
```
-time rake test:client
+rake test:client
```
-# Elasticsearch Rest API YAML Test Runner
+# Elasticsearch REST API Tests
+
+The integration tests in this project run the [Elasticsearch Client tests](https://github.com/elastic/elasticsearch-clients-tests/) with the [Elasticsearch Tests Runner](https://github.com/elastic/es-test-runner-ruby/) library. They run in CI against an Elasticsearch cluster in Docker. [Elasticsearch's REST API Spec tests](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#test-suite) can still be run by following [these instructions](https://github.com/elastic/elasticsearch-ruby/tree/main/elasticsearch-api/api-spec-testing#readme).
+
+You can run the YAML API tests with:
+
+```
+rake test:yaml
+```
-See the API Spec tests [README](https://github.com/elastic/elasticsearch-ruby/tree/main/elasticsearch-api/api-spec-testing#readme).
+Check `rake -T` for more test tasks.
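+
+For example, a minimal sketch that runs the YAML tests against a cluster on a non-default port, assuming the `TEST_ES_SERVER` variable shown earlier is honored by this task as well:
+
+```bash
+# Point the YAML test runner at a custom test server (hypothetical host/port)
+TEST_ES_SERVER='http://localhost:9250/' rake test:yaml
+```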
# Contributing
diff --git a/Gemfile b/Gemfile
index c618282910..a2263210df 100644
--- a/Gemfile
+++ b/Gemfile
@@ -38,5 +38,5 @@ end
group :development, :test do
gem 'debug' unless defined?(JRUBY_VERSION)
gem 'rspec'
- gem 'rubocop', '>= 1.51' unless defined?(JRUBY_VERSION) && Gem::Version.new(JRUBY_VERSION) <= Gem::Version.new('9.4')
+ gem 'rubocop', '>= 1.51' unless defined?(JRUBY_VERSION)
end
diff --git a/README.md b/README.md
index 8e87f0340e..939ecf6e0a 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# Elasticsearch
-[](https://github.com/elastic/elasticsearch-ruby/actions/workflows/7.17.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/8.16.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/8.17.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/main.yml) [](https://buildkite.com/elastic/elasticsearch-ruby)
+[](https://github.com/elastic/elasticsearch-ruby/actions/workflows/8.17.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/8.18.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/9.0.yml) [](https://github.com/elastic/elasticsearch-ruby/actions/workflows/main.yml) [](https://buildkite.com/elastic/elasticsearch-ruby)
**[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)**
or
@@ -79,8 +79,8 @@ Elasticsearch language clients are only backwards compatible with default distri
| Gem Version | | Elasticsearch Version | Supported |
|-------------|---|------------------------|-----------|
-| 7.x | → | 7.x | 7.17 |
| 8.x | → | 8.x | 8.x |
+| 9.x | → | 9.x | 9.x |
| main | → | main | |
## Try Elasticsearch and Kibana locally
diff --git a/docs/basic-config.asciidoc b/docs/basic-config.asciidoc
deleted file mode 100644
index d0edd7ec76..0000000000
--- a/docs/basic-config.asciidoc
+++ /dev/null
@@ -1,37 +0,0 @@
-[[basic-config]]
-=== Basic configuration
-
-The table below contains the most important initialization parameters that you
-can use.
-
-
-[cols="<,<,<"]
-|===
-
-| **Parameter** | **Data type** | **Description**
-| `adapter` | Symbol | A specific adapter for Faraday (for example, `:patron`).
-| `api_key` | String, Hash | For API key Authentication. Either the base64 encoding of `id` and `api_key` joined by a colon as a string, or a hash with the `id` and `api_key` values.
-| `compression` | Boolean | Whether to compress requests. Gzip compression is used. Defaults to `false`. Responses are automatically inflated if they are compressed. If a custom transport object is used, it must handle the request compression and response inflation.
-| `enable_meta_header` | Boolean | Whether to enable sending the meta data header to Cloud. Defaults to `true`.
-| `hosts` | String, Array | Single host passed as a string or hash, or multiple hosts passed as an array; `host` or `url` keys are also valid.
-| `log` | Boolean | Whether to use the default logger. Disabled by default.
-| `logger` | Object | An instance of a Logger-compatible object.
-| `opaque_id_prefix` | String | Sets a prefix for X-Opaque-Id when initializing the client. This is prepended to the id you set before each request if you're using X-Opaque-Id.
-| `opentelemetry_tracer_provider` | `OpenTelemetry::Trace::TracerProvider` | An explicit TracerProvider to use instead of the global one with OpenTelemetry. This enables better dependency injection and simplifies testing.
-| `randomize_hosts` | Boolean | Whether to shuffle connections on initialization and reload. Defaults to `false`.
-| `reload_connections` | Boolean, Number | Whether to reload connections after X requests. Defaults to `false`.
-| `reload_on_failure` | Boolean | Whether to reload connections after failure. Defaults to `false`.
-| `request_timeout` | Integer | The request timeout to be passed to transport in options.
-| `resurrect_after` | Integer | Specifies after how many seconds a dead connection should be tried again.
-| `retry_on_failure` | Boolean, Number | Whether to retry X times when request fails before raising and exception. Defaults to `false`.
-| `retry_on_status` | Array, Number | Specifies which status code needs to be returned to retry.
-| `selector` | Constant | An instance of selector strategy implemented with {Elastic::Transport::Transport::Connections::Selector::Base}.
-| `send_get_body_as` | String | Specifies the HTTP method to use for GET requests with a body. Defaults to `GET`.
-| `serializer_class` | Constant | Specifies a serializer class to use. It is initialized by the transport and passed the transport instance.
-| `sniffer_timeout` | Integer | Specifies the timeout for reloading connections in seconds. Defaults to `1`.
-| `trace` | Boolean | Whether to use the default tracer. Disabled by default.
-| `tracer` | Object | Specifies an instance of a Logger-compatible object.
-| `transport` | Object | Specifies a transport instance.
-| `transport_class` | Constant | Specifies a transport class to use. It is initialized by the client and passed hosts and all arguments.
-| `transport_options` | Hash | Specifies the options to be passed to the `Faraday::Connection` constructor.
-|===
diff --git a/docs/config.asciidoc b/docs/config.asciidoc
deleted file mode 100644
index c9897f8313..0000000000
--- a/docs/config.asciidoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[[ruby-config]]
-== Configuration
-
-This page contains information about how to configure the Ruby client tailored
-to your needs. Almost every aspect of the client is configurable. However, in
-most cases you only need to set a couple of parameters.
-
-* <>
-* <>
\ No newline at end of file
diff --git a/docs/docset.yml b/docs/docset.yml
new file mode 100644
index 0000000000..fb1b1471b7
--- /dev/null
+++ b/docs/docset.yml
@@ -0,0 +1,490 @@
+project: 'Ruby client'
+exclude:
+ - examples/**
+cross_links:
+ - docs-content
+ - ecs
+ - ecs-logging-ruby
+toc:
+ - toc: reference
+ - toc: release-notes
+subs:
+ ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current"
+ ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference"
+ ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1"
+ ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0"
+ ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17"
+ ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0"
+ ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0"
+ ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4"
+ xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2"
+ logstash-ref: "https://www.elastic.co/guide/en/logstash/current"
+ kibana-ref: "https://www.elastic.co/guide/en/kibana/current"
+ kibana-ref-all: "https://www.elastic.co/guide/en/kibana"
+ beats-ref-root: "https://www.elastic.co/guide/en/beats"
+ beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current"
+ beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0"
+ beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3"
+ beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current"
+ auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current"
+ packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current"
+ metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current"
+ filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current"
+ functionbeat-ref: "https://www.elastic.co/guide/en/beats/functionbeat/current"
+ winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current"
+ heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current"
+ journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current"
+ ingest-guide: "https://www.elastic.co/guide/en/ingest/current"
+ fleet-guide: "https://www.elastic.co/guide/en/fleet/current"
+ apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current"
+ apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17"
+ apm-app-ref: "https://www.elastic.co/guide/en/kibana/current"
+ apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent"
+ apm-android-ref: "https://www.elastic.co/guide/en/apm/agent/android/current"
+ apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x"
+ apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs"
+ apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x"
+ apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current"
+ apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current"
+ docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current"
+ esf-ref: "https://www.elastic.co/guide/en/esf/current"
+ kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/%7B%7Bkinesis_version%7D%7D"
+ estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome-all: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions"
+ hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current"
+ stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current"
+ stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7"
+ stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8"
+ stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0"
+ stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0"
+ stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current"
+ stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current"
+ java-api-client: "https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current"
+ java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current"
+ jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current"
+ es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current"
+ es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current"
+ es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current"
+ defguide: "https://www.elastic.co/guide/en/elasticsearch/guide/2.x"
+ painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current"
+ plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current"
+ plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1"
+ plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17"
+ plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8"
+ glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current"
+ upgrade_guide: "https://www.elastic.co/products/upgrade_guide"
+ blog-ref: "https://www.elastic.co/blog/"
+ curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ metrics-ref: "https://www.elastic.co/guide/en/metrics/current"
+ metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current"
+ logs-ref: "https://www.elastic.co/guide/en/logs/current"
+ logs-guide: "https://www.elastic.co/guide/en/logs/guide/current"
+ uptime-guide: "https://www.elastic.co/guide/en/uptime/current"
+ observability-guide: "https://www.elastic.co/guide/en/observability/current"
+ observability-guide-all: "https://www.elastic.co/guide/en/observability"
+ siem-guide: "https://www.elastic.co/guide/en/siem/guide/current"
+ security-guide: "https://www.elastic.co/guide/en/security/current"
+ security-guide-all: "https://www.elastic.co/guide/en/security"
+ endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current"
+ sql-odbc: "https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current"
+ ecs-ref: "https://www.elastic.co/guide/en/ecs/current"
+ ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current"
+ ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current"
+ ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current"
+ ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current"
+ ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current"
+ ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current"
+ ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current"
+ ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current"
+ ecs-logging-python-ref: "https://www.elastic.co/guide/en/ecs-logging/python/current"
+ ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current"
+ ml-docs: "https://www.elastic.co/guide/en/machine-learning/current"
+ eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current"
+ eql-ref: "https://eql.readthedocs.io/en/latest/query-guide"
+ extendtrial: "https://www.elastic.co/trialextension"
+ wikipedia: "https://en.wikipedia.org/wiki"
+ forum: "https://discuss.elastic.co/"
+ xpack-forum: "https://discuss.elastic.co/c/50-x-pack"
+ security-forum: "https://discuss.elastic.co/c/x-pack/shield"
+ watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher"
+ monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel"
+ graph-forum: "https://discuss.elastic.co/c/x-pack/graph"
+ apm-forum: "https://discuss.elastic.co/c/apm"
+ enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current"
+ app-search-ref: "https://www.elastic.co/guide/en/app-search/current"
+ workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current"
+ enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current"
+ enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current"
+ enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current"
+ enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current"
+ elastic-maps-service: "https://maps.elastic.co/"
+ integrations-docs: "https://docs.elastic.co/en/integrations"
+ integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current"
+ time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units"
+ byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units"
+ apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref-v: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ ecloud: "Elastic Cloud"
+ esf: "Elastic Serverless Forwarder"
+ ess: "Elasticsearch Service"
+ ece: "Elastic Cloud Enterprise"
+ eck: "Elastic Cloud on Kubernetes"
+ serverless-full: "Elastic Cloud Serverless"
+ serverless-short: "Serverless"
+ es-serverless: "Elasticsearch Serverless"
+ es3: "Elasticsearch Serverless"
+ obs-serverless: "Elastic Observability Serverless"
+ sec-serverless: "Elastic Security Serverless"
+ serverless-docs: "https://docs.elastic.co/serverless"
+ cloud: "https://www.elastic.co/guide/en/cloud/current"
+ ess-utm-params: "?page=docs&placement=docs-body"
+ ess-baymax: "?page=docs&placement=docs-body"
+ ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body"
+ ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body"
+ ess-console: "https://cloud.elastic.co/?page=docs&placement=docs-body"
+ ess-console-name: "Elasticsearch Service Console"
+ ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body"
+ ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current"
+ eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current"
+ ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]."
+ ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]."
+ ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elasticsearch Service\"]"
+ ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elastic Cloud Enterprise\"]"
+ cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported."
+ ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service."
+ ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you."
+ api-cloud: "https://www.elastic.co/docs/api/doc/cloud"
+ api-ece: "https://www.elastic.co/docs/api/doc/cloud-enterprise"
+ api-kibana-serverless: "https://www.elastic.co/docs/api/doc/serverless"
+ es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only."
+ es-ref-dir: "'{{elasticsearch-root}}/docs/reference'"
+ apm-app: "APM app"
+ uptime-app: "Uptime app"
+ synthetics-app: "Synthetics app"
+ logs-app: "Logs app"
+ metrics-app: "Metrics app"
+ infrastructure-app: "Infrastructure app"
+ siem-app: "SIEM app"
+ security-app: "Elastic Security app"
+ ml-app: "Machine Learning"
+ dev-tools-app: "Dev Tools"
+ ingest-manager-app: "Ingest Manager"
+ stack-manage-app: "Stack Management"
+ stack-monitor-app: "Stack Monitoring"
+ alerts-ui: "Alerts and Actions"
+ rules-ui: "Rules"
+ rac-ui: "Rules and Connectors"
+ connectors-ui: "Connectors"
+ connectors-feature: "Actions and Connectors"
+ stack-rules-feature: "Stack Rules"
+ user-experience: "User Experience"
+ ems: "Elastic Maps Service"
+ ems-init: "EMS"
+ hosted-ems: "Elastic Maps Server"
+ ipm-app: "Index Pattern Management"
+ ingest-pipelines: "ingest pipelines"
+ ingest-pipelines-app: "Ingest Pipelines"
+ ingest-pipelines-cap: "Ingest pipelines"
+ ls-pipelines: "Logstash pipelines"
+ ls-pipelines-app: "Logstash Pipelines"
+ maint-windows: "maintenance windows"
+ maint-windows-app: "Maintenance Windows"
+ maint-windows-cap: "Maintenance windows"
+ custom-roles-app: "Custom Roles"
+ data-source: "data view"
+ data-sources: "data views"
+ data-source-caps: "Data View"
+ data-sources-caps: "Data Views"
+ data-source-cap: "Data view"
+ data-sources-cap: "Data views"
+ project-settings: "Project settings"
+ manage-app: "Management"
+ index-manage-app: "Index Management"
+ data-views-app: "Data Views"
+ rules-app: "Rules"
+ saved-objects-app: "Saved Objects"
+ tags-app: "Tags"
+ api-keys-app: "API keys"
+ transforms-app: "Transforms"
+ connectors-app: "Connectors"
+ files-app: "Files"
+ reports-app: "Reports"
+ maps-app: "Maps"
+ alerts-app: "Alerts"
+ crawler: "Enterprise Search web crawler"
+ ents: "Enterprise Search"
+ app-search-crawler: "App Search web crawler"
+ agent: "Elastic Agent"
+ agents: "Elastic Agents"
+ fleet: "Fleet"
+ fleet-server: "Fleet Server"
+ integrations-server: "Integrations Server"
+ ingest-manager: "Ingest Manager"
+ ingest-management: "ingest management"
+ package-manager: "Elastic Package Manager"
+ integrations: "Integrations"
+ package-registry: "Elastic Package Registry"
+ artifact-registry: "Elastic Artifact Registry"
+ aws: "AWS"
+ stack: "Elastic Stack"
+ xpack: "X-Pack"
+ es: "Elasticsearch"
+ kib: "Kibana"
+ esms: "Elastic Stack Monitoring Service"
+ esms-init: "ESMS"
+ ls: "Logstash"
+ beats: "Beats"
+ auditbeat: "Auditbeat"
+ filebeat: "Filebeat"
+ heartbeat: "Heartbeat"
+ metricbeat: "Metricbeat"
+ packetbeat: "Packetbeat"
+ winlogbeat: "Winlogbeat"
+ functionbeat: "Functionbeat"
+ journalbeat: "Journalbeat"
+ es-sql: "Elasticsearch SQL"
+ esql: "ES|QL"
+ elastic-agent: "Elastic Agent"
+ k8s: "Kubernetes"
+ log-driver-long: "Elastic Logging Plugin for Docker"
+ security: "X-Pack security"
+ security-features: "security features"
+ operator-feature: "operator privileges feature"
+ es-security-features: "Elasticsearch security features"
+ stack-security-features: "Elastic Stack security features"
+ endpoint-sec: "Endpoint Security"
+ endpoint-cloud-sec: "Endpoint and Cloud Security"
+ elastic-defend: "Elastic Defend"
+ elastic-sec: "Elastic Security"
+ elastic-endpoint: "Elastic Endpoint"
+ swimlane: "Swimlane"
+ sn: "ServiceNow"
+ sn-itsm: "ServiceNow ITSM"
+ sn-itom: "ServiceNow ITOM"
+ sn-sir: "ServiceNow SecOps"
+ jira: "Jira"
+ ibm-r: "IBM Resilient"
+ webhook: "Webhook"
+ webhook-cm: "Webhook - Case Management"
+ opsgenie: "Opsgenie"
+ bedrock: "Amazon Bedrock"
+ gemini: "Google Gemini"
+ hive: "TheHive"
+ monitoring: "X-Pack monitoring"
+ monitor-features: "monitoring features"
+ stack-monitor-features: "Elastic Stack monitoring features"
+ watcher: "Watcher"
+ alert-features: "alerting features"
+ reporting: "X-Pack reporting"
+ report-features: "reporting features"
+ graph: "X-Pack graph"
+ graph-features: "graph analytics features"
+ searchprofiler: "Search Profiler"
+ xpackml: "X-Pack machine learning"
+ ml: "machine learning"
+ ml-cap: "Machine learning"
+ ml-init: "ML"
+ ml-features: "machine learning features"
+ stack-ml-features: "Elastic Stack machine learning features"
+ ccr: "cross-cluster replication"
+ ccr-cap: "Cross-cluster replication"
+ ccr-init: "CCR"
+ ccs: "cross-cluster search"
+ ccs-cap: "Cross-cluster search"
+ ccs-init: "CCS"
+ ilm: "index lifecycle management"
+ ilm-cap: "Index lifecycle management"
+ ilm-init: "ILM"
+ dlm: "data lifecycle management"
+ dlm-cap: "Data lifecycle management"
+ dlm-init: "DLM"
+ search-snap: "searchable snapshot"
+ search-snaps: "searchable snapshots"
+ search-snaps-cap: "Searchable snapshots"
+ slm: "snapshot lifecycle management"
+ slm-cap: "Snapshot lifecycle management"
+ slm-init: "SLM"
+ rollup-features: "data rollup features"
+ ipm: "index pattern management"
+ ipm-cap: "Index pattern"
+ rollup: "rollup"
+ rollup-cap: "Rollup"
+ rollups: "rollups"
+ rollups-cap: "Rollups"
+ rollup-job: "rollup job"
+ rollup-jobs: "rollup jobs"
+ rollup-jobs-cap: "Rollup jobs"
+ dfeed: "datafeed"
+ dfeeds: "datafeeds"
+ dfeed-cap: "Datafeed"
+ dfeeds-cap: "Datafeeds"
+ ml-jobs: "machine learning jobs"
+ ml-jobs-cap: "Machine learning jobs"
+ anomaly-detect: "anomaly detection"
+ anomaly-detect-cap: "Anomaly detection"
+ anomaly-job: "anomaly detection job"
+ anomaly-jobs: "anomaly detection jobs"
+ anomaly-jobs-cap: "Anomaly detection jobs"
+ dataframe: "data frame"
+ dataframes: "data frames"
+ dataframe-cap: "Data frame"
+ dataframes-cap: "Data frames"
+ watcher-transform: "payload transform"
+ watcher-transforms: "payload transforms"
+ watcher-transform-cap: "Payload transform"
+ watcher-transforms-cap: "Payload transforms"
+ transform: "transform"
+ transforms: "transforms"
+ transform-cap: "Transform"
+ transforms-cap: "Transforms"
+ dataframe-transform: "transform"
+ dataframe-transform-cap: "Transform"
+ dataframe-transforms: "transforms"
+ dataframe-transforms-cap: "Transforms"
+ dfanalytics-cap: "Data frame analytics"
+ dfanalytics: "data frame analytics"
+ dataframe-analytics-config: "'{dataframe} analytics config'"
+ dfanalytics-job: "'{dataframe} analytics job'"
+ dfanalytics-jobs: "'{dataframe} analytics jobs'"
+ dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'"
+ cdataframe: "continuous data frame"
+ cdataframes: "continuous data frames"
+ cdataframe-cap: "Continuous data frame"
+ cdataframes-cap: "Continuous data frames"
+ cdataframe-transform: "continuous transform"
+ cdataframe-transforms: "continuous transforms"
+ cdataframe-transforms-cap: "Continuous transforms"
+ ctransform: "continuous transform"
+ ctransform-cap: "Continuous transform"
+ ctransforms: "continuous transforms"
+ ctransforms-cap: "Continuous transforms"
+ oldetection: "outlier detection"
+ oldetection-cap: "Outlier detection"
+ olscore: "outlier score"
+ olscores: "outlier scores"
+ fiscore: "feature influence score"
+ evaluatedf-api: "evaluate {dataframe} analytics API"
+ evaluatedf-api-cap: "Evaluate {dataframe} analytics API"
+ binarysc: "binary soft classification"
+ binarysc-cap: "Binary soft classification"
+ regression: "regression"
+ regression-cap: "Regression"
+ reganalysis: "regression analysis"
+ reganalysis-cap: "Regression analysis"
+ depvar: "dependent variable"
+ feature-var: "feature variable"
+ feature-vars: "feature variables"
+ feature-vars-cap: "Feature variables"
+ classification: "classification"
+ classification-cap: "Classification"
+ classanalysis: "classification analysis"
+ classanalysis-cap: "Classification analysis"
+ infer-cap: "Inference"
+ infer: "inference"
+ lang-ident-cap: "Language identification"
+ lang-ident: "language identification"
+ data-viz: "Data Visualizer"
+ file-data-viz: "File Data Visualizer"
+ feat-imp: "feature importance"
+ feat-imp-cap: "Feature importance"
+ nlp: "natural language processing"
+ nlp-cap: "Natural language processing"
+ apm-agent: "APM agent"
+ apm-go-agent: "Elastic APM Go agent"
+ apm-go-agents: "Elastic APM Go agents"
+ apm-ios-agent: "Elastic APM iOS agent"
+ apm-ios-agents: "Elastic APM iOS agents"
+ apm-java-agent: "Elastic APM Java agent"
+ apm-java-agents: "Elastic APM Java agents"
+ apm-dotnet-agent: "Elastic APM .NET agent"
+ apm-dotnet-agents: "Elastic APM .NET agents"
+ apm-node-agent: "Elastic APM Node.js agent"
+ apm-node-agents: "Elastic APM Node.js agents"
+ apm-php-agent: "Elastic APM PHP agent"
+ apm-php-agents: "Elastic APM PHP agents"
+ apm-py-agent: "Elastic APM Python agent"
+ apm-py-agents: "Elastic APM Python agents"
+ apm-ruby-agent: "Elastic APM Ruby agent"
+ apm-ruby-agents: "Elastic APM Ruby agents"
+ apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent"
+ apm-rum-agents: "Elastic APM RUM JavaScript agents"
+ apm-lambda-ext: "Elastic APM AWS Lambda extension"
+ project-monitors: "project monitors"
+ project-monitors-cap: "Project monitors"
+ private-location: "Private Location"
+ private-locations: "Private Locations"
+ pwd: "YOUR_PASSWORD"
+ esh: "ES-Hadoop"
+ default-dist: "default distribution"
+ oss-dist: "OSS-only distribution"
+ observability: "Observability"
+ api-request-title: "Request"
+ api-prereq-title: "Prerequisites"
+ api-description-title: "Description"
+ api-path-parms-title: "Path parameters"
+ api-query-parms-title: "Query parameters"
+ api-request-body-title: "Request body"
+ api-response-codes-title: "Response codes"
+ api-response-body-title: "Response body"
+ api-example-title: "Example"
+ api-examples-title: "Examples"
+ api-definitions-title: "Properties"
+ multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]"
+ multi-arg-ref: "†footnoteref:[multi-arg]"
+ yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]"
+ no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]"
+ es-repo: "https://github.com/elastic/elasticsearch/"
+ es-issue: "https://github.com/elastic/elasticsearch/issues/"
+ es-pull: "https://github.com/elastic/elasticsearch/pull/"
+ es-commit: "https://github.com/elastic/elasticsearch/commit/"
+ kib-repo: "https://github.com/elastic/kibana/"
+ kib-issue: "https://github.com/elastic/kibana/issues/"
+ kibana-issue: "'{kib-repo}issues/'"
+ kib-pull: "https://github.com/elastic/kibana/pull/"
+ kibana-pull: "'{kib-repo}pull/'"
+ kib-commit: "https://github.com/elastic/kibana/commit/"
+ ml-repo: "https://github.com/elastic/ml-cpp/"
+ ml-issue: "https://github.com/elastic/ml-cpp/issues/"
+ ml-pull: "https://github.com/elastic/ml-cpp/pull/"
+ ml-commit: "https://github.com/elastic/ml-cpp/commit/"
+ apm-repo: "https://github.com/elastic/apm-server/"
+ apm-issue: "https://github.com/elastic/apm-server/issues/"
+ apm-pull: "https://github.com/elastic/apm-server/pull/"
+ kibana-blob: "https://github.com/elastic/kibana/blob/current/"
+ apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master"
+ apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2"
+ apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4"
+ apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0"
+ apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0"
+ apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master"
+ infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current"
+ a-data-source: "a data view"
+ icon-bug: "pass:[]"
+ icon-checkInCircleFilled: "pass:[]"
+ icon-warningFilled: "pass:[]"
diff --git a/docs/dsl.asciidoc b/docs/dsl.asciidoc
deleted file mode 100644
index 922624a4fa..0000000000
--- a/docs/dsl.asciidoc
+++ /dev/null
@@ -1,6 +0,0 @@
-[[dsl]]
-=== Elasticsearch DSL
-
-The https://github.com/elastic/elasticsearch-dsl-ruby[elasticsearch-dsl] gem provides a Ruby API for the https://www.elasticsearch.com/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch Query DSL]. The library allows to programmatically build complex search definitions for {es} in Ruby, which are translated to Hashes, and ultimately, JSON, the language of {es}.
-
-See https://github.com/elastic/elasticsearch-dsl-ruby#elasticsearchdsl[the README] for more information.
diff --git a/docs/ecs.asciidoc b/docs/ecs.asciidoc
deleted file mode 100644
index 218708038b..0000000000
--- a/docs/ecs.asciidoc
+++ /dev/null
@@ -1,34 +0,0 @@
-[[ecs]]
-=== Elastic Common Schema (ECS)
-
-The https://www.elastic.co/guide/en/ecs/current/ecs-reference.html[Elastic Common Schema (ECS)] is an open source format that defines a common set of fields to be used when storing event data like logs in Elasticsearch.
-
-You can use the library https://github.com/elastic/ecs-logging-ruby[ecs-logging] which is a set of libraries that enables you to transform your application logs to structured logs that comply with the ECS format.
-
-Add this line to your application's Gemfile:
-
-[source,ruby]
-------------------------------------
-gem 'ecs-logging'
-------------------------------------
-
-Then execute `bundle install`. Or install from the command line yourself:
-
-[source,ruby]
-------------------------------------
-$ gem install ecs-logging
-------------------------------------
-
-Then configure the client to use the logger:
-[source,ruby]
-------------------------------------
-require 'ecs_logging/logger'
-require 'elasticsearch'
-
-logger = EcsLogging::Logger.new($stdout)
-client = Elasticsearch::Client.new(logger: logger)
-> client.info
-{"@timestamp":"2022-07-12T05:31:18.590Z","log.level":"INFO","message":"GET http://localhost:9200/ [status:200, request:0.009s, query:n/a]","ecs.version":"1.4.0"}...
-------------------------------------
-
-See https://www.elastic.co/guide/en/ecs-logging/ruby/current/index.html[ECS Logging Ruby Reference] for more information on how to configure the logger.
diff --git a/docs/examples/apm/screenshot.jpg b/docs/examples/apm/screenshot.jpg
deleted file mode 100644
index 6ba778dfb5..0000000000
Binary files a/docs/examples/apm/screenshot.jpg and /dev/null differ
diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc
deleted file mode 100644
index a4500847b4..0000000000
--- a/docs/getting-started.asciidoc
+++ /dev/null
@@ -1,141 +0,0 @@
-[[getting-started-ruby]]
-== Getting started
-
-This page guides you through the installation process of the Ruby client, shows
-you how to instantiate the client, and how to perform basic Elasticsearch
-operations with it.
-
-[discrete]
-=== Requirements
-
-A currently maintained version of Ruby (3.0+) or JRuby (9.3+).
-
-[discrete]
-=== Installation
-
-To install the latest version of the client, run the following command:
-
-[source,shell]
---------------------------
-gem install elasticsearch
---------------------------
-
-Refer to the <> page to learn more.
-
-
-[discrete]
-=== Connecting
-
-You can connect to the Elastic Cloud using an API key and the Elasticsearch
-endpoint.
-
-[source,rb]
-----
-client = Elasticsearch::Client.new(
- cloud_id: '',
- api_key: ''
-)
-----
-
-Your Elasticsearch endpoint can be found on the **My deployment** page of your
-deployment:
-
-image::images/es_endpoint.jpg[alt="Finding Elasticsearch endpoint",align="center"]
-
-You can generate an API key on the **Management** page under Security.
-
-image::images/create_api_key.png[alt="Create API key",align="center"]
-
-For other connection options, refer to the <> section.
-
-
-[discrete]
-=== Operations
-
-Time to use Elasticsearch! This section walks you through the basic, and most
-important, operations of Elasticsearch. For more operations and more advanced
-examples, refer to the <> page.
-
-
-[discrete]
-==== Creating an index
-
-This is how you create the `my_index` index:
-
-[source,rb]
-----
-client.indices.create(index: 'my_index')
-----
-
-
-[discrete]
-==== Indexing documents
-
-This is a simple way of indexing a document:
-
-[source,rb]
-----
-document = { name: 'elasticsearch-ruby' }
-response = client.index(index: 'my_index', body: document)
-# You can get the indexed document id with:
-response['_id']
-=> "PlgIDYkBWS9Ngdx5IMy-"
-id = response['_id']
-----
-
-
-[discrete]
-==== Getting documents
-
-You can get documents by using the following code:
-
-[source,rb]
-----
-client.get(index: 'my_index', id: id)
-----
-
-
-[discrete]
-==== Searching documents
-
-This is how you can create a single match query with the Ruby client:
-
-[source,rb]
-----
-client.search(index: 'my_index', body: { query: { match_all: {} } })
-----
-
-
-[discrete]
-==== Updating documents
-
-This is how you can update a document, for example to add a new field:
-
-[source,rb]
-----
-client.update(index: 'my_index', id: id, body: { doc: { language: 'Ruby' } })
-----
-
-
-[discrete]
-==== Deleting documents
-
-[source,rb]
-----
-client.delete(index: 'my_index', id: id)
-----
-
-
-[discrete]
-==== Deleting an index
-
-[source,rb]
-----
-client.indices.delete(index: 'my_index')
-----
-
-
-[discrete]
-== Further reading
-
-* Use <> for a more confortable experience with the APIs.
\ No newline at end of file
diff --git a/docs/helpers/index.asciidoc b/docs/helpers/index.asciidoc
deleted file mode 100644
index 1e475886cd..0000000000
--- a/docs/helpers/index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-[[client-helpers]]
-== Client helpers
-
-The Ruby client includes the following helpers:
-
-* <>
-* <>
-
-include::bulk-scroll.asciidoc[]
-include::esql.asciidoc[]
\ No newline at end of file
diff --git a/docs/index.asciidoc b/docs/index.asciidoc
deleted file mode 100644
index b83dd47058..0000000000
--- a/docs/index.asciidoc
+++ /dev/null
@@ -1,32 +0,0 @@
-= Elasticsearch Ruby Client
-
-:doctype: book
-
-include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
-include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
-
-:es-docs: https://www.elastic.co/guide/en/elasticsearch/reference/{branch}
-
-include::overview.asciidoc[]
-
-include::getting-started.asciidoc[]
-
-include::installation.asciidoc[]
-
-include::connecting.asciidoc[]
-
-include::config.asciidoc[]
-
-include::basic-config.asciidoc[]
-
-include::advanced-config.asciidoc[]
-
-include::integrations.asciidoc[]
-
-include::examples.asciidoc[]
-
-include::troubleshooting.asciidoc[]
-
-include::helpers/index.asciidoc[]
-
-include::release_notes/index.asciidoc[]
diff --git a/docs/installation.asciidoc b/docs/installation.asciidoc
deleted file mode 100644
index 15ce2f60a8..0000000000
--- a/docs/installation.asciidoc
+++ /dev/null
@@ -1,61 +0,0 @@
-[[ruby-install]]
-== Installation
-
-Install the Rubygem for the latest {es} version by using the following command:
-
-[source,sh]
-------------------------------------
-gem install elasticsearch
-------------------------------------
-
-
-Or add the `elasticsearch` Ruby gem to your Gemfile:
-
-[source,ruby]
-------------------------------------
-gem 'elasticsearch'
-------------------------------------
-
-
-
-You can install the Ruby gem for a specific {es} version by using the following
-command:
-
-[source,sh]
-------------------------------------
-gem install elasticsearch -v 7.0.0
-------------------------------------
-
-
-Or you can add a specific version of {es} to your Gemfile:
-
-[source,ruby]
-------------------------------------
-gem 'elasticsearch', '~> 7.0'
-------------------------------------
-
-
-[discrete]
-=== {es} and Ruby Version Compatibility
-
-The {es} client is compatible with currently maintained Ruby versions. We follow
-Ruby’s own maintenance policy and officially support all currently maintained
-versions per
-https://www.ruby-lang.org/en/downloads/branches/[Ruby Maintenance Branches].
-
-Language clients are forward compatible; meaning that clients support
-communicating with greater or equal minor versions of {es} without breaking. It
-does not mean that the client automatically supports new features of newer {es}
-versions; it is only possible after a release of a new client version. For
-example, a 8.12 client version won't automatically support the new features of
-the 8.13 version of {es}, the 8.13 client version is required for that.
-{es} language clients are only backwards compatible with default distributions
-and without guarantees made.
-
-|===
-| Gem Version | | {es} Version | Supported
-
-| 7.x | → | 7.x | 7.17
-| 8.x | → | 8.x | 8.x
-| main | → | main |
-|===
\ No newline at end of file
diff --git a/docs/integrations.asciidoc b/docs/integrations.asciidoc
deleted file mode 100644
index c758d17201..0000000000
--- a/docs/integrations.asciidoc
+++ /dev/null
@@ -1,30 +0,0 @@
-[[integrations]]
-== Integrations
-
-The Rubygems listed on this page make it easier to operate with {es} by using
-the Ruby client.
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-include::transport.asciidoc[]
-
-include::api.asciidoc[]
-
-include::open-telemetry.asciidoc[]
-
-include::ecs.asciidoc[]
-
-include::model.asciidoc[]
-
-include::rails.asciidoc[]
-
-include::persistence.asciidoc[]
-
-include::dsl.asciidoc[]
diff --git a/docs/open-telemetry.asciidoc b/docs/open-telemetry.asciidoc
deleted file mode 100644
index 87fd86206b..0000000000
--- a/docs/open-telemetry.asciidoc
+++ /dev/null
@@ -1,94 +0,0 @@
-[[opentelemetry]]
-=== Using OpenTelemetry
-
-You can use https://opentelemetry.io/[OpenTelemetry] to monitor the performance and behavior of your {es} requests through the Ruby Client.
-The Ruby Client comes with built-in OpenTelemetry instrumentation that emits https://www.elastic.co/guide/en/apm/guide/current/apm-distributed-tracing.html[distributed tracing spans] by default.
-With that, applications https://opentelemetry.io/docs/instrumentation/ruby/manual/[instrumented with OpenTelemetry] or using the https://opentelemetry.io/docs/instrumentation/ruby/automatic/[OpenTelemetry Ruby SDK] are inherently enriched with additional spans that contain insightful information about the execution of the {es} requests.
-
-The native instrumentation in the Ruby Client follows the https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/[OpenTelemetry Semantic Conventions for {es}]. In particular, the instrumentation in the client covers the logical layer of {es} requests. A single span per request is created that is processed by the service through the Ruby Client. The following image shows a trace that records the handling of two different {es} requests: a `ping` request and a `search` request.
-
-[role="screenshot"]
-image::images/otel-waterfall-without-http.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-Usually, OpenTelemetry auto-instrumentation modules come with instrumentation support for HTTP-level communication. In this case, in addition to the logical {es} client requests, spans will be captured for the physical HTTP requests emitted by the client. The following image shows a trace with both, {es} spans (in blue) and the corresponding HTTP-level spans (in red):
-
-[role="screenshot"]
-image::images/otel-waterfall-with-http.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-Advanced Ruby Client behavior such as nodes round-robin and request retries are revealed through the combination of logical {es} spans and the physical HTTP spans. The following example shows a `search` request in a scenario with two nodes:
-
-[role="screenshot"]
-image::images/otel-waterfall-retry.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-The first node is unavailable and results in an HTTP error, while the retry to the second node succeeds. Both HTTP requests are subsumed by the logical {es} request span (in blue).
-
-[discrete]
-==== Setup the OpenTelemetry instrumentation
-
-When using the https://opentelemetry.io/docs/instrumentation/ruby/manual[OpenTelemetry Ruby SDK manually] or using the https://opentelemetry.io/docs/instrumentation/ruby/automatic/[OpenTelemetry Ruby Auto-Instrumentations], the Ruby Client's OpenTelemetry instrumentation is enabled by default and uses the global OpenTelemetry SDK with the global tracer provider. You can provide a tracer provider via the Ruby Client configuration option `opentelemetry_tracer_provider` when instantiating the client. This is sometimes useful for testing or other specific use cases.
-
-[source,ruby]
-------------------------------------
-client = Elasticsearch::Client.new(
- cloud_id: '',
- api_key: '',
- opentelemetry_tracer_provider: tracer_provider
-)
-------------------------------------
-
-[discrete]
-==== Configuring the OpenTelemetry instrumentation
-
-You can configure the OpenTelemetry instrumentation through Environment Variables.
-The following configuration options are available.
-
-[discrete]
-[[opentelemetry-config-enable]]
-===== Enable / Disable the OpenTelemetry instrumentation
-
-With this configuration option you can enable (default) or disable the built-in OpenTelemetry instrumentation.
-
-**Default:** `true`
-
-|============
-| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_ENABLED`
-|============
-
-[discrete]
-===== Capture search request bodies
-
-Per default, the built-in OpenTelemetry instrumentation does not capture request bodies due to data privacy considerations. You can use this option to enable capturing of search queries from the request bodies of {es} search requests in case you wish to gather this information regardless. The options are to capture the raw search query, sanitize the query with a default list of sensitive keys, or not capture it at all.
-
-**Default:** `omit`
-
-**Valid Options:** `omit`, `sanitize`, `raw`
-
-|============
-| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY`
-|============
-
-[discrete]
-===== Sanitize the {es} search request body
-
-You can configure the list of keys whose values are redacted when the search query is captured. Values must be comma-separated.
-Note in v8.3.0 and v8.3.1, the environment variable `OTEL_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY` was available
-but is now deprecated in favor of the environment variable including `RUBY`.
-
-**Default:** `nil`
-
-|============
-| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_SEARCH_QUERY_SANITIZE_KEYS`
-|============
-
-Example:
-
-```bash
-OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_SEARCH_QUERY_SANITIZE_KEYS='sensitive-key,other-sensitive-key'
-```
-
-[discrete]
-==== Overhead
-
-The OpenTelemetry instrumentation (as any other monitoring approach) may come with a slight overhead on CPU, memory, and/or latency. The overhead may only occur when the instrumentation is enabled (default) and an OpenTelemetry SDK is active in the target application. When the instrumentation is disabled or no OpenTelemetry SDK is active within the target application, monitoring overhead is not expected when using the client.
-
-Even in cases where the instrumentation is enabled and is actively used (by an OpenTelemetry SDK), the overhead is minimal and negligible in the vast majority of cases. In edge cases where there is a noticeable overhead, the <> to eliminate any potential impact on performance.
\ No newline at end of file
diff --git a/docs/overview.asciidoc b/docs/overview.asciidoc
deleted file mode 100644
index 108094e23b..0000000000
--- a/docs/overview.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[[ruby_client]]
-== Overview
-
-The `elasticsearch` http://rubygems.org/gems/elasticsearch[Rubygem] provides a low-level client for communicating with an {es} cluster, fully compatible with other official clients.
-
-More documentation is hosted in https://github.com/elastic/elasticsearch-ruby[Github] and http://rubydoc.info/gems/elasticsearch[RubyDoc].
-
-Refer to the <> page for a step-by-step quick start with
-the Ruby client.
-
-[discrete]
-=== Features
-
-* Pluggable logging and tracing
-* Pluggable connection selection strategies (round-robin, random, custom)
-* Pluggable transport implementation, customizable and extendable
-* Pluggable serializer implementation
-* Request retries and dead connections handling
-* Node reloading (based on cluster state) on errors or on demand
-* Modular API implementation
-* 100% REST API coverage
-
-
-[discrete]
-[[transport-api]]
-=== Transport and API
-
-The `elasticsearch` gem combines two separate Rubygems:
-
-* https://github.com/elastic/elastic-transport-ruby/[`elastic-transport`] - provides an HTTP Ruby client for connecting to the {es} cluster. Refer to the documentation: <>
-
-* https://github.com/elastic/elasticsearch-ruby/tree/main/elasticsearch-api[`elasticsearch-api`] - provides a Ruby API for the {es} RESTful API.
-
-Please consult their respective documentation for configuration options and technical details.
-
-Notably, the documentation and comprehensive examples for all the API methods are contained in the source, and available online at http://rubydoc.info/gems/elasticsearch-api/Elasticsearch/API/Actions[Rubydoc].
-
-Keep in mind, that for optimal performance, you should use an HTTP library which supports persistent ("keep-alive") HTTP connections.
diff --git a/docs/rails.asciidoc b/docs/rails.asciidoc
deleted file mode 100644
index d1c84e1b86..0000000000
--- a/docs/rails.asciidoc
+++ /dev/null
@@ -1,24 +0,0 @@
-[[ruby_on_rails]]
-=== Ruby On Rails
-
-The `elasticsearch-rails` http://rubygems.org/gems/elasticsearch-rails[Rubygem]
-provides features suitable for Ruby on Rails applications.
-
-
-[discrete]
-==== Features
-
-* Rake tasks for importing data from application models
-* Integration with Rails' instrumentation framework
-* Templates for generating example Rails application
-
-
-[discrete]
-==== Example applications
-
-You can generate a fully working example Ruby on Rails application with
-templates provides.
-
-Please refer to the
-https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-rails[documentation]
-for more information.
diff --git a/docs/helpers/bulk-scroll.asciidoc b/docs/reference/Helpers.md
similarity index 78%
rename from docs/helpers/bulk-scroll.asciidoc
rename to docs/reference/Helpers.md
index 8d638bab65..67c7be3393 100644
--- a/docs/helpers/bulk-scroll.asciidoc
+++ b/docs/reference/Helpers.md
@@ -1,98 +1,93 @@
-[[Helpers]]
-=== Bulk and Scroll helpers
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/Helpers.html
+---
-The {es} Ruby client includes Bulk and Scroll helpers for working with results more efficiently.
+# Bulk and Scroll helpers [Helpers]
-[discrete]
-==== Bulk helper
+The {{es}} Ruby client includes Bulk and Scroll helpers for working with results more efficiently.
-The Bulk API in Elasticsearch allows you to perform multiple indexing or deletion operations through a single API call, resulting in reduced overhead and improved indexing speed. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure. In the Elasticsearch Ruby client, the `bulk` method supports several data structures as a parameter. You can use the Bulk API in an idiomatic way without concerns about payload formatting. Refer to <> for more information.
+## Bulk helper [_bulk_helper]
-The BulkHelper provides a better developer experience when using the Bulk API. At its simplest, you can send it a collection of hashes in an array, and it will bulk ingest them into {es}.
+The Bulk API in Elasticsearch allows you to perform multiple indexing or deletion operations through a single API call, resulting in reduced overhead and improved indexing speed. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure. In the Elasticsearch Ruby client, the `bulk` method supports several data structures as a parameter. You can use the Bulk API in an idiomatic way without concerns about payload formatting. Refer to [Bulk requests](/reference/examples.md#ex-bulk) for more information.
+
+The BulkHelper provides a better developer experience when using the Bulk API. At its simplest, you can send it a collection of hashes in an array, and it will bulk ingest them into {{es}}.
To use the BulkHelper, require it in your code:
-[source,ruby]
-----
+```ruby
require 'elasticsearch/helpers/bulk_helper'
-----
+```
Instantiate a BulkHelper with a client, and an index:
-[source,ruby]
-----
+
+```ruby
client = Elasticsearch::Client.new
bulk_helper = Elasticsearch::Helpers::BulkHelper.new(client, index)
-----
+```
This helper works on the index you pass in during initialization, but you can change the index at any time in your code:
-[source,ruby]
-----
+```ruby
bulk_helper.index = 'new_index'
-----
+```
If you want to index a collection of documents, use the `ingest` method:
-[source,ruby]
-----
+```ruby
documents = [
{ name: 'document1', date: '2024-05-16' },
{ name: 'document2', date: '2023-12-19' },
{ name: 'document3', date: '2024-07-07' }
]
bulk_helper.ingest(documents)
-----
+```
-If you're ingesting a large set of data and want to separate the documents into smaller pieces before sending them to {es}, use the `slice` parameter.
+If you’re ingesting a large set of data and want to separate the documents into smaller pieces before sending them to {{es}}, use the `slice` parameter.
-[source,ruby]
-----
+```ruby
bulk_helper.ingest(documents, { slice: 2 })
-----
+```
This way the data will be sent in two different bulk requests.
You can also include the parameters you would send to the Bulk API either in the query parameters or in the request body. The method signature is `ingest(docs, params = {}, body = {}, &block)`. Additionally, the method can be called with a block, that will provide access to the response object received from calling the Bulk API and the documents sent in the request:
-[source,ruby]
-----
+```ruby
helper.ingest(documents) { |_, docs| puts "Ingested #{docs.count} documents" }
-----
+```
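+
+For example, to have the bulk request wait for a refresh before returning, you could pass the Bulk API's `refresh` parameter (an illustrative choice; any Bulk API query parameter can be passed the same way):
+
+```ruby
+bulk_helper.ingest(documents, { refresh: 'wait_for' })
+```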
You can update and delete documents with the BulkHelper too. To delete a set of documents, you can send an array of document ids:
-[source,ruby]
-----
+```ruby
ids = ['shm0I4gB6LpJd9ljO9mY', 'sxm0I4gB6LpJd9ljO9mY', 'tBm0I4gB6LpJd9ljO9mY', 'tRm0I4gB6LpJd9ljO9mY', 'thm0I4gB6LpJd9ljO9mY', 'txm0I4gB6LpJd9ljO9mY', 'uBm0I4gB6LpJd9ljO9mY', 'uRm0I4gB6LpJd9ljO9mY', 'uhm0I4gB6LpJd9ljO9mY', 'uxm0I4gB6LpJd9ljO9mY']
helper.delete(ids)
-----
+```
To update documents, you can send the array of documents with their respective ids:
-[source,ruby]
-----
+
+```ruby
documents = [
{name: 'updated name 1', id: 'AxkFJYgB6LpJd9ljOtr7'},
{name: 'updated name 2', id: 'BBkFJYgB6LpJd9ljOtr7'}
]
helper.update(documents)
-----
+```
-[discrete]
-===== Ingest a JSON file
+
+### Ingest a JSON file [_ingest_a_json_file]
`BulkHelper` also provides a helper to ingest data straight from a JSON file. By giving a file path as an input, the helper will parse and ingest the documents in the file:
-[source,ruby]
-----
+```ruby
file_path = './data.json'
helper.ingest_json(file_path)
-----
+```
In cases where the array of data you want to ingest is not necessarily in the root of the JSON file, you can provide the keys to access the data, for example given the following JSON file:
-[source,json]
-----
+```json
{
"field": "value",
"status": 200,
@@ -107,56 +102,50 @@ In cases where the array of data you want to ingest is not necessarily in the ro
]
}
}
-----
+```
The following is an example of the Ruby code to ingest the documents in the JSON above:
-[source,ruby]
-----
+```ruby
bulk_helper.ingest_json(file_path, { keys: ['data', 'items'] })
-----
+```
+
-[discrete]
-==== Scroll helper
+## Scroll helper [_scroll_helper]
This helper provides an easy way to get results from a Scroll.
To use the ScrollHelper, require it in your code:
-[source,ruby]
-----
+```ruby
require 'elasticsearch/helpers/scroll_helper'
-----
+```
Instantiate a ScrollHelper with a client, an index, and a body (with the scroll API parameters) which will be used in every following scroll request:
-[source,ruby]
-----
+```ruby
client = Elasticsearch::Client.new
scroll_helper = Elasticsearch::Helpers::ScrollHelper.new(client, index, body)
-----
+```
There are two ways to get the results from a scroll using the helper.
1. You can iterate over a scroll using the methods in `Enumerable` such as `each` and `map`:
-+
---
-[source,ruby]
-----
-scroll_helper.each do |item|
- puts item
-end
-----
---
+
+ ```ruby
+ scroll_helper.each do |item|
+ puts item
+ end
+ ```
+
2. You can fetch results by page, with the `results` function:
-+
---
-[source,ruby]
-----
-my_documents = []
-while !(documents = scroll_helper.results).empty?
- my_documents << documents
-end
-scroll_helper.clear
-----
---
+
+ ```ruby
+ my_documents = []
+ while !(documents = scroll_helper.results).empty?
+ my_documents << documents
+ end
+ scroll_helper.clear
+ ```
+
+
diff --git a/docs/model.asciidoc b/docs/reference/activemodel_activerecord.md
similarity index 55%
rename from docs/model.asciidoc
rename to docs/reference/activemodel_activerecord.md
index 8f4da31f7c..8a143def32 100644
--- a/docs/model.asciidoc
+++ b/docs/reference/activemodel_activerecord.md
@@ -1,16 +1,16 @@
-[[activemodel_activerecord]]
-=== ActiveModel / ActiveRecord
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/activemodel_activerecord.html
+---
-The `elasticsearch-model` http://rubygems.org/gems/elasticsearch-model[Rubygem]
-provides integration with Ruby domain objects ("models"), commonly found for
-example, in Ruby on Rails applications.
+# ActiveModel / ActiveRecord [activemodel_activerecord]
-It uses the `elasticsearch` Rubygem as the client communicating with the {es}
-cluster.
+The `elasticsearch-model` [Rubygem](http://rubygems.org/gems/elasticsearch-model) provides integration with Ruby domain objects ("models"), commonly found, for example, in Ruby on Rails applications.
+It uses the `elasticsearch` Rubygem as the client communicating with the {{es}} cluster.
-[discrete]
-==== Features
+
+## Features [_features_2]
* ActiveModel integration with adapters for ActiveRecord and Mongoid
* Enumerable-based wrapper for search results
@@ -18,44 +18,38 @@ cluster.
* Convenience model methods such as `search`, `mapping`, `import`, etc
* Support for Kaminari and WillPaginate pagination
* Extension implemented via proxy object to shield model namespace from collisions
-* Convenience methods for (re)creating the index, setting up mappings, indexing documents, ...
+* Convenience methods for (re)creating the index, setting up mappings, indexing documents, …
-[discrete]
-==== Usage
+## Usage [_usage]
Add the library to your Gemfile:
-[source,ruby]
-------------------------------------
+```ruby
gem 'elasticsearch-rails'
-------------------------------------
+```
Include the extension module in your model class:
-[source,ruby]
-------------------------------------
+```ruby
class Article < ActiveRecord::Base
include Elasticsearch::Model
end
-------------------------------------
+```
Import some data and perform a search:
-[source,ruby]
-------------------------------------
+```ruby
Article.import
response = Article.search 'fox dog'
response.took
# => 3
-------------------------------------
+```
-It is possible to either return results as model instances, or decorated
-documents from {es}, with the `records` and `results` methods, respectively:
+It is possible to either return results as model instances, or decorated documents from {{es}}, with the `records` and `results` methods, respectively:
-[source,ruby]
-------------------------------------
+```ruby
response.records.first
# Article Load (0.4ms) SELECT "articles".* FROM "articles" WHERE ...
=> #
@@ -65,8 +59,7 @@ response.results.first._score
response.results.first._source.title
# => "Quick brown fox"
-------------------------------------
+```
+
+Consult the [documentation](https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-model) for more information.
-Consult the
-https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-model[documentation]
-for more information.
diff --git a/docs/advanced-config.asciidoc b/docs/reference/advanced-config.md
similarity index 55%
rename from docs/advanced-config.asciidoc
rename to docs/reference/advanced-config.md
index 916a8f84c3..a05d51340c 100644
--- a/docs/advanced-config.asciidoc
+++ b/docs/reference/advanced-config.md
@@ -1,14 +1,16 @@
-[[advanced-config]]
-=== Advanced configuration
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/advanced-config.html
+---
-The client supports many configurations options for setting up and managing
-connections, configuring logging, customizing the transport library, and so on.
+# Advanced configuration [advanced-config]
-[discrete]
-[[setting-hosts]]
-==== Setting hosts
+The client supports many configuration options for setting up and managing connections, configuring logging, customizing the transport library, and so on.
-To connect to a specific {es} host:
+
+## Setting hosts [setting-hosts]
+
+To connect to a specific {{es}} host:
```ruby
Elasticsearch::Client.new(host: 'search.myserver.com')
@@ -32,12 +34,12 @@ Instead of strings, you can pass host information as an array of Hashes:
Elasticsearch::Client.new(hosts: [ { host: 'myhost1', port: 8080 }, { host: 'myhost2', port: 8080 } ])
```
-NOTE: When specifying multiple hosts, you might want to enable the
-`retry_on_failure` or `retry_on_status` options to perform a failed request on
-another node (refer to <>).
+::::{note}
+When specifying multiple hosts, you might want to enable the `retry_on_failure` or `retry_on_status` options to perform a failed request on another node (refer to [Retrying on Failures](#retry-failures)).
+::::
+
-Common URL parts – scheme, HTTP authentication credentials, URL prefixes, and so
-on – are handled automatically:
+Common URL parts – scheme, HTTP authentication credentials, URL prefixes, and so on – are handled automatically:
```ruby
Elasticsearch::Client.new(url: 'https://username:password@api.server.org:4430/search')
@@ -51,37 +53,25 @@ Elasticsearch::Client.new(urls: 'http://localhost:9200,http://localhost:9201')
Another way to configure URLs is to export the `ELASTICSEARCH_URL` variable.
-The client is automatically going to use a round-robin algorithm across the
-hosts (unless you select or implement a different <>).
+The client is automatically going to use a round-robin algorithm across the hosts (unless you select or implement a different [Connection Selector](#connection-selector)).
-[discrete]
-[[default-port]]
-==== Default port
+## Default port [default-port]
-The default port is `9200`. Specify a port for your host(s) if they differ from
-this default.
+The default port is `9200`. Specify a port for your host(s) if they differ from this default.
-If you are using Elastic Cloud, the default port is port `9243`. You must supply
-your username and password separately, and optionally a port. Refer to
-<>.
+If you are using Elastic Cloud, the default port is port `9243`. You must supply your username and password separately, and optionally a port. Refer to [Elastic Cloud](/reference/connecting.md#auth-ec).
-[discrete]
-[[logging]]
-==== Logging
+## Logging [logging]
-To log requests and responses to standard output with the default logger (an
-instance of Ruby's `::Logger` class), set the log argument to true:
+To log requests and responses to standard output with the default logger (an instance of Ruby’s `::Logger` class), set the log argument to true:
```ruby
Elasticsearch::Client.new(log: true)
```
-You can also use https://github.com/elastic/ecs-logging-ruby[`ecs-logging`]
-which is a set of libraries that enables you to transform your application logs
-to structured logs that comply with the
-https://www.elastic.co/guide/en/ecs/current/ecs-reference.html[Elastic Common Schema]. See <>.
+You can also use [`ecs-logging`](https://github.com/elastic/ecs-logging-ruby) which is a set of libraries that enables you to transform your application logs to structured logs that comply with the [Elastic Common Schema](ecs://docs/reference/index.md). See [Elastic Common Schema (ECS)](/reference/ecs.md).
To trace requests and responses in the Curl format, set the `trace` argument:
@@ -91,11 +81,10 @@ Elasticsearch::Client.new(trace: true)
You can customize the default logger or tracer:
-[source,ruby]
-------------------------------------
+```ruby
client.transport.logger.formatter = proc { |s, d, p, m| "#{s}: #{m}\n" }
client.transport.logger.level = Logger::INFO
-------------------------------------
+```
Or, you can use a custom `::Logger` instance:
@@ -105,8 +94,7 @@ Elasticsearch::Client.new(logger: Logger.new(STDERR))
You can pass the client any conforming logger implementation:
-[source,ruby]
-------------------------------------
+```ruby
require 'logging' # https://github.com/TwP/logging/
log = Logging.logger['elasticsearch']
@@ -114,60 +102,42 @@ log.add_appenders Logging.appenders.stdout
log.level = :info
client = Elasticsearch::Client.new(logger: log)
-------------------------------------
+```
-[discrete]
-[[apm-integration]]
-==== APM integration
+## APM integration [apm-integration]
-This client integrates seamlessly with Elastic APM via the Elastic APM Agent. It
-automatically captures client requests if you are using the agent on your code.
-If you're using `elastic-apm` v3.8.0 or up, you can set
-`capture_elasticsearch_queries` to `true` in `config/elastic_apm.yml` to also
-capture the body from requests in {es}. Refer to
-https://github.com/elastic/elasticsearch-ruby/tree/main/docs/examples/apm[this example].
+This client integrates seamlessly with Elastic APM via the Elastic APM Agent. It automatically captures client requests if you are using the agent in your code. If you’re using `elastic-apm` v3.8.0 or up, you can set `capture_elasticsearch_queries` to `true` in `config/elastic_apm.yml` to also capture the body from requests in {{es}}. Refer to [this example](https://github.com/elastic/elasticsearch-ruby/tree/main/docs/examples/apm).
-[discrete]
-[[custom-http-headers]]
-==== Custom HTTP Headers
+## Custom HTTP Headers [custom-http-headers]
-You can set a custom HTTP header on the client's initializer:
+You can set a custom HTTP header on the client’s initializer:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(
transport_options: {
headers:
{user_agent: "My App"}
}
)
-------------------------------------
+```
-You can also pass in `headers` as a parameter to any of the API Endpoints to set
-custom headers for the request:
+You can also pass in `headers` as a parameter to any of the API Endpoints to set custom headers for the request:
```ruby
client.search(index: 'myindex', q: 'title:test', headers: {user_agent: "My App"})
```
-[discrete]
-[[x-opaque-id]]
-==== Identifying running tasks with X-Opaque-Id
+## Identifying running tasks with X-Opaque-Id [x-opaque-id]
-The X-Opaque-Id header allows to track certain calls, or associate certain tasks
-with the client that started them (refer to
-https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks[the documentation]).
-To use this feature, you need to set an id for `opaque_id` on the client on each
-request. Example:
+The X-Opaque-Id header allows you to track certain calls, or associate certain tasks with the client that started them (refer to [the documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)). To use this feature, you need to set an id for `opaque_id` on the client on each request. Example:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new
client.search(index: 'myindex', q: 'title:test', opaque_id: '123456')
-------------------------------------
+```
The search request includes the following HTTP Header:
@@ -175,15 +145,12 @@ The search request includes the following HTTP Header:
X-Opaque-Id: 123456
```
-You can also set a prefix for X-Opaque-Id when initializing the client. This is
-prepended to the id you set before each request if you're using X-Opaque-Id.
-Example:
+You can also set a prefix for X-Opaque-Id when initializing the client. This is prepended to the id you set before each request if you’re using X-Opaque-Id. Example:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(opaque_id_prefix: 'eu-west1_')
client.search(index: 'myindex', q: 'title:test', opaque_id: '123456')
-------------------------------------
+```
The request includes the following HTTP Header:
@@ -192,12 +159,9 @@ X-Opaque-Id: eu-west1_123456
```
-[discrete]
-[[setting-timeouts]]
-==== Setting Timeouts
+## Setting Timeouts [setting-timeouts]
-For many operations in {es}, the default timeouts of HTTP libraries are too low.
-To increase the timeout, you can use the `request_timeout` parameter:
+For many operations in {{es}}, the default timeouts of HTTP libraries are too low. To increase the timeout, you can use the `request_timeout` parameter:
```ruby
Elasticsearch::Client.new(request_timeout: 5*60)
@@ -206,42 +170,30 @@ Elasticsearch::Client.new(request_timeout: 5*60)
You can also use the `transport_options` argument documented below.
-[discrete]
-[[randomizing-hosts]]
-==== Randomizing Hosts
+## Randomizing Hosts [randomizing-hosts]
-If you pass multiple hosts to the client, it rotates across them in a
-round-robin fashion by default. When the same client would be running in
-multiple processes (for example, in a Ruby web server such as Thin), it might
-keep connecting to the same nodes "at once". To prevent this, you can randomize
-the hosts collection on initialization and reloading:
+If you pass multiple hosts to the client, it rotates across them in a round-robin fashion by default. When the same client would be running in multiple processes (for example, in a Ruby web server such as Thin), it might keep connecting to the same nodes "at once". To prevent this, you can randomize the hosts collection on initialization and reloading:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], randomize_hosts: true)
```
-[discrete]
-[[retry-failures]]
-==== Retrying on Failures
+## Retrying on Failures [retry-failures]
-When the client is initialized with multiple hosts, it makes sense to retry a
-failed request on a different host:
+When the client is initialized with multiple hosts, it makes sense to retry a failed request on a different host:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], retry_on_failure: true)
```
-By default, the client does not retry the request. You can specify how many
-times to retry before it raises an exception by passing a number to
-`retry_on_failure`:
+By default, the client does not retry the request. You can specify how many times to retry before it raises an exception by passing a number to `retry_on_failure`:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], retry_on_failure: 5)
```
-You can also use `retry_on_status` to retry when specific status codes are
-returned:
+You can also use `retry_on_status` to retry when specific status codes are returned:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], retry_on_status: [502, 503])
@@ -259,23 +211,18 @@ You can also set a `delay_on_retry` value in milliseconds. This will add a delay
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], retry_on_failure: 5, delay_on_retry: 1000)
```
-[discrete]
-[[reload-hosts]]
-==== Reloading Hosts
-{es} dynamically discovers new nodes in the cluster by default. You can leverage
-this in the client, and periodically check for new nodes to spread the load.
+## Reloading Hosts [reload-hosts]
-To retrieve and use the information from the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html[Nodes Info API]
-on every 10,000th request:
+{{es}} dynamically discovers new nodes in the cluster by default. You can leverage this in the client, and periodically check for new nodes to spread the load.
+
+To retrieve and use the information from the [Nodes Info API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info) on every 10,000th request:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], reload_connections: true)
```
-You can pass a specific number of requests after which reloading should be
-performed:
+You can pass a specific number of requests after which reloading should be performed:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], reload_connections: 1_000)
@@ -287,38 +234,34 @@ To reload connections on failures, use:
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], reload_on_failure: true)
```
-The reloading timeouts if not finished under 1 second by default. To change the
-setting:
+The reloading times out if it is not finished within 1 second by default. To change this setting:
```ruby
Elasticsearch::Client.new(hosts: ['localhost:9200', 'localhost:9201'], sniffer_timeout: 3)
```
-NOTE: When using reloading hosts ("sniffing") together with authentication, pass
-the scheme, user and password with the host info – or, for more clarity, in the
-`http` options:
+::::{note}
+When using reloading hosts ("sniffing") together with authentication, pass the scheme, user and password with the host info – or, for more clarity, in the `http` options:
+::::
+
-[source,ruby]
-------------------------------------
+```ruby
Elasticsearch::Client.new(
host: 'localhost:9200',
http: { scheme: 'https', user: 'U', password: 'P' },
reload_connections: true,
reload_on_failure: true
)
-------------------------------------
+```
-[discrete]
-[[connection-selector]]
-==== Connection Selector
+## Connection Selector [connection-selector]
By default, the client rotates the connections in a round-robin fashion, using the `Elastic::Transport::Transport::Connections::Selector::RoundRobin` strategy.
-You can implement your own strategy to customize the behaviour. For example, let's have a "rack aware" strategy, which prefers the nodes with a specific attribute. The strategy uses the other nodes, only when these are unavailable:
+You can implement your own strategy to customize the behaviour. For example, let’s have a "rack aware" strategy, which prefers the nodes with a specific attribute. The strategy uses the other nodes, only when these are unavailable:
-[source,ruby]
-------------------------------------
+```ruby
class RackIdSelector
include Elastic::Transport::Transport::Connections::Selector::Base
@@ -331,24 +274,19 @@ class RackIdSelector
end
Elasticsearch::Client.new hosts: ['x1.search.org', 'x2.search.org'], selector_class: RackIdSelector
-------------------------------------
+```
-[discrete]
-[[serializer-implementations]]
-==== Serializer Implementations
+## Serializer Implementations [serializer-implementations]
-By default, the https://rubygems.org/gems/multi_json[MultiJSON] library is used as the serializer implementation, and it picks up the "right" adapter based on gems available.
+By default, the [MultiJSON](https://rubygems.org/gems/multi_json) library is used as the serializer implementation, and it picks up the "right" adapter based on gems available.
The serialization component is pluggable, though, so you can write your own by including the `Elastic::Transport::Transport::Serializer::Base` module, implementing the required contract, and passing it to the client as the `serializer_class` or `serializer` parameter.
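+
+As a minimal sketch (assuming the contract mirrors the bundled MultiJSON serializer, that is, `dump` and `load` methods; the class name below is hypothetical), a custom serializer based on Ruby's standard library JSON could look like this:
+
+```ruby
+require 'json'
+require 'elasticsearch'
+
+# Hypothetical serializer using Ruby's stdlib JSON instead of MultiJSON.
+class StdlibJsonSerializer
+  include Elastic::Transport::Transport::Serializer::Base
+
+  # Serialize a Ruby object into a JSON string
+  def dump(object, options = {})
+    JSON.generate(object)
+  end
+
+  # Parse a JSON string into Ruby data (nil or empty bodies are passed through)
+  def load(string, options = {})
+    JSON.parse(string) unless string.nil? || string.empty?
+  end
+end
+
+client = Elasticsearch::Client.new(serializer_class: StdlibJsonSerializer)
+```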
-[discrete]
-[[exception-handling]]
-==== Exception Handling
+## Exception Handling [exception-handling]
-The library defines a
-https://github.com/elastic/elastic-transport-ruby/blob/main/lib/elastic/transport/transport/errors.rb[number of exception classes] for various client and server errors, as well as unsuccessful HTTP responses, making it possible to rescue specific exceptions with desired granularity.
+The library defines a [number of exception classes](https://github.com/elastic/elastic-transport-ruby/blob/main/lib/elastic/transport/transport/errors.rb) for various client and server errors, as well as unsuccessful HTTP responses, making it possible to rescue specific exceptions with desired granularity.
The highest-level exception is `Elastic::Transport::Transport::Error` and is raised for any generic client or server errors.
@@ -357,3 +295,4 @@ The highest-level exception is `Elastic::Transport::Transport::Error` and is rai
As an example for response-specific errors, a 404 response status raises an `Elastic::Transport::Transport::Errors::NotFound` exception.
Finally, `Elastic::Transport::Transport::SnifferTimeoutError` is raised when connection reloading ("sniffing") times out.
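+
+For example, given a `client` created as in the earlier examples, you can rescue a missing document specifically while still catching any other transport-level error:
+
+```ruby
+begin
+  client.get(index: 'myindex', id: 'missing-id')
+rescue Elastic::Transport::Transport::Errors::NotFound
+  # Raised for 404 responses
+  puts 'Document not found'
+rescue Elastic::Transport::Transport::Error => e
+  # Raised for any other client or server error
+  puts "Request failed: #{e.message}"
+end
+```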
+
diff --git a/docs/api.asciidoc b/docs/reference/api.md
similarity index 68%
rename from docs/api.asciidoc
rename to docs/reference/api.md
index 85386c8122..261413b45c 100644
--- a/docs/api.asciidoc
+++ b/docs/reference/api.md
@@ -1,52 +1,49 @@
-[[api]]
-=== Elasticsearch API
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/api.html
+---
-The `elasticsearch-api` library provides a Ruby implementation of the https://www.elastic.co/elastic-stack/[Elasticsearch] REST API.
+# Elasticsearch API [api]
-[discrete]
-[[api-install]]
-==== Installation
+The `elasticsearch-api` library provides a Ruby implementation of the [Elasticsearch](https://www.elastic.co/elastic-stack/) REST API.
-Install the package from https://rubygems.org[Rubygems]:
-[source,bash]
-----------------------------
+## Installation [api-install]
+
+Install the package from [Rubygems](https://rubygems.org):
+
+```bash
gem install elasticsearch-api
-----------------------------
+```
-To use an unreleased version, either add it to your `Gemfile` for https://bundler.io/[Bundler]:
+To use an unreleased version, either add it to your `Gemfile` for [Bundler](https://bundler.io/):
-[source,bash]
-----------------------------
+```bash
gem 'elasticsearch-api', git: 'git://github.com/elasticsearch/elasticsearch-ruby.git'
-----------------------------
+```
or install it from a source code checkout:
-[source,bash]
-----------------------------
+```bash
git clone https://github.com/elasticsearch/elasticsearch-ruby.git
cd elasticsearch-ruby/elasticsearch-api
bundle install
rake install
-----------------------------
+```
+
+
+## Example usage [api-example-usage]
-[discrete]
-[[api-example-usage]]
-==== Example usage
+The library is designed as a group of standalone Ruby modules, which can be mixed into a class providing connection to Elasticsearch — an Elasticsearch client.
-The library is designed as a group of standalone Ruby modules, which can be mixed into a class
-providing connection to Elasticsearch -- an Elasticsearch client.
-[discrete]
-===== Usage with the `elasticsearch` gem
+### Usage with the `elasticsearch` gem [_usage_with_the_elasticsearch_gem]
-**When you use the client from the https://github.com/elasticsearch/elasticsearch-ruby[`elasticsearch-ruby`] client, the library modules have been already included**, so you just call the API methods.
+**When you use the client from the [`elasticsearch-ruby`](https://github.com/elasticsearch/elasticsearch-ruby) client, the library modules have already been included**, so you just call the API methods.
The response will be an `Elasticsearch::API::Response` object which wraps an `Elasticsearch::Transport::Transport::Response` object. It provides `body`, `status` and `headers` methods, but you can treat it as a hash and access the keys directly.
-[source,rb]
-----------------------------
+```rb
require 'elasticsearch'
client = Elasticsearch::Client.new
@@ -91,22 +88,21 @@ client.search(index: 'myindex', body: { query: { match: { title: 'test' } } })
"content-encoding"=>"gzip",
"content-length"=>"188"},
@status=200>>
-----------------------------
+```
-Full documentation and examples are included as RDoc annotations in the source code and available online at .
+Full documentation and examples are included as RDoc annotations in the source code and available online at [http://rubydoc.info/gems/elasticsearch-api](http://rubydoc.info/gems/elasticsearch-api).
-[discrete]
-===== Usage with a custom client
-When you want to mix the library with your own client, it must conform to the following _contract_:
+### Usage with a custom client [_usage_with_a_custom_client]
+
+When you want to mix the library with your own client, it must conform to the following *contract*:
* It responds to a `perform_request(method, path, params, body, headers)` method,
* the method returns an object with `status`, `body` and `headers` methods.
-A simple client could look like this (_with a dependency on `active_support` to parse the query params_):
+A simple client could look like this (*with a dependency on `active_support` to parse the query params*):
-[source,rb]
-----------------------------
+```rb
require 'multi_json'
require 'faraday'
require 'elasticsearch/api'
@@ -151,17 +147,14 @@ p client.cluster.health
p client.index(index: 'myindex', id: 'custom', body: { title: "Indexing from my client" })
# --> PUT myindex/mytype/custom {} {:title=>"Indexing from my client"}
# => "{"ok":true, ... }"
-----------------------------
+```
+
-[discrete]
-===== Using JSON Builders
+### Using JSON Builders [_using_json_builders]
-Instead of passing the `:body` argument as a Ruby _Hash_, you can pass it as a _String_, potentially
-taking advantage of JSON builders such as https://github.com/rails/jbuilder[JBuilder] or
-https://github.com/bsiggelkow/jsonify[Jsonify]:
+Instead of passing the `:body` argument as a Ruby *Hash*, you can pass it as a *String*, potentially taking advantage of JSON builders such as [JBuilder](https://github.com/rails/jbuilder) or [Jsonify](https://github.com/bsiggelkow/jsonify):
-[source,rb]
-----------------------------
+```rb
require 'jbuilder'
query = Jbuilder.encode do |json|
@@ -181,16 +174,14 @@ client.search(index: 'myindex', body: query)
# 2013-06-25 09:56:05 +0200: > {"query":{"match":{"title":{"query":"test 1","operator":"and"}}}}
# ...
# => {"took"=>21, ..., "hits"=>{"total"=>1, "hits"=>[{ "_source"=>{"title"=>"Test 1", ...}}]}}
-----------------------------
+```
-[discrete]
-===== Using Hash Wrappers
-For a more comfortable access to response properties, you may wrap it in one of the _Hash_ "object access"
-wrappers, such as https://github.com/intridea/hashie[`Hashie::Mash`]:
+### Using Hash Wrappers [_using_hash_wrappers]
-[source,rb]
-----------------------------
+For more comfortable access to response properties, you may wrap it in one of the *Hash* "object access" wrappers, such as [`Hashie::Mash`](https://github.com/intridea/hashie):
+
+```rb
require 'hashie'
response = client.search(
@@ -205,16 +196,15 @@ mash = Hashie::Mash.new(response)
mash.hits.hits.first._source.title
# => 'Test'
-----------------------------
+```
+
-[discrete]
-===== Using a Custom JSON Serializer
+### Using a Custom JSON Serializer [_using_a_custom_json_serializer]
-The library uses the https://rubygems.org/gems/multi_json/[MultiJson] gem by default but allows you to set a custom JSON library, provided it uses the standard `load/dump` interface:
+The library uses the [MultiJson](https://rubygems.org/gems/multi_json/) gem by default but allows you to set a custom JSON library, provided it uses the standard `load/dump` interface:
-[source,rb]
-----------------------------
+```rb
Elasticsearch::API.settings[:serializer] = JrJackson::Json
Elasticsearch::API.serializer.dump({foo: 'bar'})
# => {"foo":"bar"}
-----------------------------
+```
diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md
new file mode 100644
index 0000000000..f7bd47df7a
--- /dev/null
+++ b/docs/reference/basic-config.md
@@ -0,0 +1,37 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/basic-config.html
+---
+
+# Basic configuration [basic-config]
+
+The table below contains the most important initialization parameters that you can use.
+
+| Parameter | Data type | Description |
+| --- | --- | --- |
+| `adapter` | Symbol | A specific adapter for Faraday (for example, `:patron`). |
+| `api_key` | String, Hash | For API key Authentication. Either the base64 encoding of `id` and `api_key` joined by a colon as a string, or a hash with the `id` and `api_key` values. |
+| `compression` | Boolean | Whether to compress requests. Gzip compression is used. Defaults to `false`. Responses are automatically inflated if they are compressed. If a custom transport object is used, it must handle the request compression and response inflation. |
+| `enable_meta_header` | Boolean | Whether to enable sending the meta data header to Cloud. Defaults to `true`. |
+| `hosts` | String, Array | Single host passed as a string or hash, or multiple hosts passed as an array; `host` or `url` keys are also valid. |
+| `log` | Boolean | Whether to use the default logger. Disabled by default. |
+| `logger` | Object | An instance of a Logger-compatible object. |
+| `opaque_id_prefix` | String | Sets a prefix for X-Opaque-Id when initializing the client. This is prepended to the id you set before each request if you’re using X-Opaque-Id. |
+| `opentelemetry_tracer_provider` | `OpenTelemetry::Trace::TracerProvider` | An explicit TracerProvider to use instead of the global one with OpenTelemetry. This enables better dependency injection and simplifies testing. |
+| `randomize_hosts` | Boolean | Whether to shuffle connections on initialization and reload. Defaults to `false`. |
+| `reload_connections` | Boolean, Number | Whether to reload connections after X requests. Defaults to `false`. |
+| `reload_on_failure` | Boolean | Whether to reload connections after failure. Defaults to `false`. |
+| `request_timeout` | Integer | The request timeout to be passed to transport in options. |
+| `resurrect_after` | Integer | Specifies after how many seconds a dead connection should be tried again. |
+| `retry_on_failure` | Boolean, Number | Whether to retry X times when a request fails before raising an exception. Defaults to `false`. |
+| `retry_on_status` | Array, Number | Specifies which status codes need to be returned for a request to be retried. |
+| `selector` | Constant | An instance of a selector strategy implemented with `Elastic::Transport::Transport::Connections::Selector::Base`. |
+| `send_get_body_as` | String | Specifies the HTTP method to use for GET requests with a body. Defaults to `GET`. |
+| `serializer_class` | Constant | Specifies a serializer class to use. It is initialized by the transport and passed the transport instance. |
+| `sniffer_timeout` | Integer | Specifies the timeout for reloading connections in seconds. Defaults to `1`. |
+| `trace` | Boolean | Whether to use the default tracer. Disabled by default. |
+| `tracer` | Object | Specifies an instance of a Logger-compatible object. |
+| `transport` | Object | Specifies a transport instance. |
+| `transport_class` | Constant | Specifies a transport class to use. It is initialized by the client and passed hosts and all arguments. |
+| `transport_options` | Hash | Specifies the options to be passed to the `Faraday::Connection` constructor. |
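+
+As an illustration, a client combining a few of the parameters above might be initialized like this (hosts and credentials are placeholder values):
+
+```ruby
+require 'elasticsearch'
+
+client = Elasticsearch::Client.new(
+  hosts: ['https://localhost:9200/'],
+  api_key: { id: 'my_id', api_key: 'my_api_key' },
+  request_timeout: 60,
+  retry_on_failure: 3,
+  log: true
+)
+```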
diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md
new file mode 100644
index 0000000000..72220c37b7
--- /dev/null
+++ b/docs/reference/client-helpers.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/client-helpers.html
+---
+
+# Client helpers [client-helpers]
+
+The Ruby client includes the following helpers:
+
+* [Bulk and Scroll helpers](/reference/Helpers.md)
+* [ES|QL](/reference/esql.md)
+
+
+
diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md
new file mode 100644
index 0000000000..fdebc05c75
--- /dev/null
+++ b/docs/reference/configuration.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/ruby-config.html
+---
+
+# Configuration [ruby-config]
+
+This page contains information about how to configure the Ruby client tailored to your needs. Almost every aspect of the client is configurable. However, in most cases you only need to set a couple of parameters.
+
+* [Basic configuration](/reference/basic-config.md)
+* [Advanced configuration](/reference/advanced-config.md)
+
+
+
diff --git a/docs/connecting.asciidoc b/docs/reference/connecting.md
similarity index 57%
rename from docs/connecting.asciidoc
rename to docs/reference/connecting.md
index 80e2acba63..8a75793639 100644
--- a/docs/connecting.asciidoc
+++ b/docs/reference/connecting.md
@@ -1,34 +1,28 @@
-[[connecting]]
-== Connecting
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/connecting.html
+---
-This page contains the information you need to connect and use the Client with
-{es}.
+# Connecting [connecting]
-**On this page**
+This page contains the information you need to connect and use the Client with {{es}}.
-* <>
-* <>
-* <>
-* <>
+## Authentication [client-auth]
-[discrete]
-[[client-auth]]
-=== Authentication
+This document contains code snippets to show you how to connect to various {{es}} providers.
-This document contains code snippets to show you how to connect to various {es} providers.
-[discrete]
-[[auth-ec]]
-==== Elastic Cloud
+### Elastic Cloud [auth-ec]
-If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers an easy way to connect to it. You need the Cloud ID that you can find in the cloud console, then your username and password.
+If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offers an easy way to connect to it. You need the Cloud ID that you can find in the cloud console, then your username and password.
-image::images/cloud_id.png["Cloud ID"]
+:::{image} ../images/cloud_id.png
+:alt: Cloud ID
+:::
Once you have collected the Cloud ID you can use the client to connect to your Elastic Cloud instance, as follows:
-[source,ruby]
-------------------------------------
+```ruby
require 'elasticsearch'
client = Elasticsearch::Client.new(
@@ -36,55 +30,56 @@ client = Elasticsearch::Client.new(
user: '',
password: '',
)
-------------------------------------
+```
You can also connect to the Cloud by using API Key authentication. You can generate an `API key` in the `Management` page under the section `Security`.
-image::images/cloud_api_key.png["API key"]
+:::{image} ../images/cloud_api_key.png
+:alt: API key
+:::
When you click on `Create API key` you can choose a name and set the other options (eg. restrict privileges, expire after time, etc).
-image::images/api_key_name.png["Choose an API name"]
+:::{image} ../images/api_key_name.png
+:alt: Choose an API name
+:::
-After this step you will get the `API key` in the API keys page.
+After this step you will get the `API key` in the API keys page.
-image::images/cloud_api_key.png["API key"]
+:::{image} ../images/cloud_api_key.png
+:alt: API key
+:::
**IMPORTANT**: you need to copy and store the `API key` in a secure place, since you will not be able to view it again in Elastic Cloud.
-Once you have collected the `Cloud ID` and the `API key` you can use the client
-to connect to your Elastic Cloud instance, as follows:
+Once you have collected the `Cloud ID` and the `API key` you can use the client to connect to your Elastic Cloud instance, as follows:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(
cloud_id: '',
api_key: ''
)
-------------------------------------
+```
If you create the API Key through the dev console or the REST API, you may get instead a pair of `id` and `APIKey` values. The client also accepts a Hash for the `api_key` parameter, so you can pass in these values and it will encode the API Key internally:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(
cloud_id: '',
api_key: {id: '', api_key: ''}
)
-------------------------------------
+```
-[discrete]
-[[connect-self-managed]]
-=== Connecting to a self-managed cluster
-{es} 8.0 offers security by default, that means authentication and TLS are enabled.
+## Connecting to a self-managed cluster [connect-self-managed]
-To connect to the {es} cluster you’ll need to configure the Ruby {es} client to use HTTPS with the generated CA certificate in order to make requests successfully.
+{{es}} 8.0 offers security by default, that means authentication and TLS are enabled.
-If you’re just getting started with {es} we recommend reading the documentation on configuring and starting {es} to ensure your cluster is running as expected.
+To connect to the {{es}} cluster you’ll need to configure the Ruby {{es}} client to use HTTPS with the generated CA certificate in order to make requests successfully.
-When you start {es} for the first time you’ll see a distinct block like the one below in the output from {es} (you may have to scroll up if it’s been a while):
+If you’re just getting started with {{es}} we recommend reading the documentation on configuring and starting {{es}} to ensure your cluster is running as expected.
+When you start {{es}} for the first time you’ll see a distinct block like the one below in the output from {{es}} (you may have to scroll up if it’s been a while):
```sh
----------------------------------------------------------------
@@ -105,31 +100,25 @@ Note down the `elastic` user password and HTTP CA fingerprint for the next secti
Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint.
-[discrete]
-[[ca-certificates]]
-==== Verifying HTTPS with CA certificates
+### Verifying HTTPS with CA certificates [ca-certificates]
-The generated root CA certificate can be found in the `certs` directory in your {es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} in Docker there is https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html[additional documentation for retrieving the CA certificate].
+The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md).
Once you have the `http_ca.crt` file somewhere accessible pass the path to the client via `ca_certs`:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(
host: "https://elastic/#{ELASTIC_PASSWORD}@localhost:9200",
transport_options: { ssl: { ca_path: CERT_DIR } }
)
-------------------------------------
+```
-[discrete]
-[[ca-fingerprint]]
-==== Verifying HTTPS with certificate fingerprints
+### Verifying HTTPS with certificate fingerprints [ca-fingerprint]
-This method of verifying the HTTPS connection takes advantage of the certificate fingerprint value noted down earlier. Take this SHA256 fingerprint value and pass it to the Ruby {es} client via `ca_fingerprint`:
+This method of verifying the HTTPS connection takes advantage of the certificate fingerprint value noted down earlier. Take this SHA256 fingerprint value and pass it to the Ruby {{es}} client via `ca_fingerprint`:
-[source,ruby]
-------------------------------------
+```ruby
# Colons and uppercase/lowercase don't matter when using
# the 'ca_fingerprint' parameter
CERT_FINGERPRINT = '64F2593F...'
@@ -142,77 +131,65 @@ client = Elasticsearch::Client.new(
transport_options: { ssl: { verify: false } },
ca_fingerprint: CERT_FINGERPRINT
)
-------------------------------------
+```
The verification will be run once per connection.
-
The certificate fingerprint can be calculated using `openssl x509` with the certificate file:
-[source,sh]
-----
+```sh
openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt
-----
+```
-If you don't have access to the generated CA file from {es} you can use the following script to output the root CA fingerprint of the {es} instance with `openssl s_client`:
+If you don’t have access to the generated CA file from {{es}} you can use the following script to output the root CA fingerprint of the {{es}} instance with `openssl s_client`:
-[source,sh]
-----
+```sh
# Replace the values of 'localhost' and '9200' to the
# corresponding host and port values for the cluster.
openssl s_client -connect localhost:9200 -servername localhost -showcerts /dev/null \
| openssl x509 -fingerprint -sha256 -noout -in /dev/stdin
-----
+```
The output of `openssl x509` will look something like this:
-[source,sh]
-----
+```sh
SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28
-----
-
+```
+### API Key authentication [auth-api-key]
-[discrete]
-[[auth-api-key]]
-==== API Key authentication
+You can also use [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication.
-You can also use https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[ApiKey] authentication.
+::::{note}
+If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence.
+::::
-NOTE: If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence.
-[source,ruby]
-------------------------------------
+```ruby
Elasticsearch::Client.new(
host: host,
transport_options: transport_options,
api_key: credentials
)
-------------------------------------
+```
-Where credentials is either the base64 encoding of `id` and `api_key` joined by
-a colon or a hash with the `id` and `api_key`:
+Where credentials is either the base64 encoding of `id` and `api_key` joined by a colon or a hash with the `id` and `api_key`:
-[source,ruby]
-------------------------------------
+```ruby
Elasticsearch::Client.new(
host: host,
transport_options: transport_options,
api_key: {id: 'my_id', api_key: 'my_api_key'}
)
-------------------------------------
+```
-[discrete]
-[[auth-basic]]
-==== Basic authentication
+### Basic authentication [auth-basic]
-You can pass the authentication credentials, scheme and port in the host
-configuration hash:
+You can pass the authentication credentials, scheme and port in the host configuration hash:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(
hosts:
[
@@ -225,36 +202,31 @@ client = Elasticsearch::Client.new(
}
]
)
-------------------------------------
+```
Or use the common URL format:
-[source,ruby]
-------------------------------------
+```ruby
client = Elasticsearch::Client.new(url: 'https://username:password@localhost:9200/')
-------------------------------------
+```
-To pass a custom certificate for SSL peer verification to Faraday-based clients,
-use the `transport_options` option:
+To pass a custom certificate for SSL peer verification to Faraday-based clients, use the `transport_options` option:
-[source,ruby]
-------------------------------------
+```ruby
Elasticsearch::Client.new(
url: 'https://username:password@localhost:9200/',
transport_options: {
ssl: { ca_file: '/path/to/http_ca.crt' }
}
)
-------------------------------------
+```
+
-[discrete]
-[[client-usage]]
-=== Usage
+## Usage [client-usage]
The following snippet shows an example of using the Ruby client:
-[source,ruby]
-------------------------------------
+```ruby
require 'elasticsearch'
client = Elasticsearch::Client.new log: true
@@ -266,20 +238,17 @@ client.index(index: 'my-index', id: 1, body: { title: 'Test' })
client.indices.refresh(index: 'my-index')
client.search(index: 'my-index', body: { query: { match: { title: 'test' } } })
-------------------------------------
+```
+
+## Using the Client in a Function-as-a-Service Environment [client-faas]
-[discrete]
-[[client-faas]]
-=== Using the Client in a Function-as-a-Service Environment
+This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, in the global scope. This practice not only improves performance but also enables background functionality such as sniffing. The following examples provide a skeleton for the best practices.
-This section illustrates the best practices for leveraging the {es} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, the global scope. This practice does not only improve performance but also enables background functionality as – for example – sniffing. The following examples provide a skeleton for the best practices.
-[discrete]
-==== GCP Cloud Functions
+### GCP Cloud Functions [_gcp_cloud_functions]
-[source,ruby]
-------------------------------------
+```ruby
require 'functions_framework'
require 'elasticsearch'
@@ -304,13 +273,12 @@ FunctionsFramework.http "hello_world" do |request|
}
)
end
-------------------------------------
+```
-[discrete]
-==== AWS Lambda
-[source,ruby]
-------------------------------------
+### AWS Lambda [_aws_lambda]
+
+```ruby
require 'elasticsearch'
def client
@@ -336,17 +304,16 @@ def lambda_handler(event:, context:)
}
)
end
-------------------------------------
+```
Resources used to assess these recommendations:
-* https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations[GCP Cloud Functions: Tips & Tricks]
-* https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html[Best practices for working with AWS Lambda functions]
+* [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations)
+* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html)
+
-[discrete]
-[[client-comp]]
-=== Enabling the Compatibility Mode
+## Enabling the Compatibility Mode [client-comp]
The Elasticsearch server version 8.0 is introducing a new compatibility mode that allows you a smoother upgrade experience from 7 to 8. In a nutshell, you can use the latest 7.x Elasticsearch client with an 8.x Elasticsearch server, giving more room to coordinate the upgrade of your codebase to the next major version.
-If you want to leverage this functionality, please make sure that you are using the latest 7.x client and set the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`. The client is handling the rest internally. For every 8.0 and beyond client, you're all set! The compatibility mode is enabled by default.
+If you want to leverage this functionality, please make sure that you are using the latest 7.x client and set the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`. The client handles the rest internally. For every 8.0 and beyond client, you’re all set! The compatibility mode is enabled by default.
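+
+For example, with a 7.x client you could set the variable from Ruby before the client is created (a sketch; exporting the variable in your shell environment works equally well):
+
+```ruby
+# Assumption: the variable is read when the client is instantiated,
+# so set it before creating the client.
+ENV['ELASTIC_CLIENT_APIVERSIONING'] = 'true'
+
+require 'elasticsearch'
+client = Elasticsearch::Client.new
+```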
diff --git a/docs/reference/dsl.md b/docs/reference/dsl.md
new file mode 100644
index 0000000000..af686223d1
--- /dev/null
+++ b/docs/reference/dsl.md
@@ -0,0 +1,11 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/dsl.html
+---
+
+# Elasticsearch DSL [dsl]
+
+The [elasticsearch-dsl](https://github.com/elastic/elasticsearch-dsl-ruby) gem provides a Ruby API for the [Elasticsearch Query DSL](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html). The library allows you to programmatically build complex search definitions for {{es}} in Ruby, which are translated to Hashes, and ultimately, JSON, the language of {{es}}.
+
+See [the README](https://github.com/elastic/elasticsearch-dsl-ruby#elasticsearchdsl) for more information.
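+
+A short sketch of the kind of search definition the gem lets you build (refer to the README linked above for the full API):
+
+```ruby
+require 'elasticsearch/dsl'
+include Elasticsearch::DSL
+
+# Build a search definition with the Ruby DSL...
+definition = search do
+  query do
+    match title: 'test'
+  end
+end
+
+# ...and translate it to a Hash, ready to be passed to the client
+definition.to_hash
+# => { query: { match: { title: "test" } } }
+```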
+
diff --git a/docs/reference/ecs.md b/docs/reference/ecs.md
new file mode 100644
index 0000000000..65100ebe4a
--- /dev/null
+++ b/docs/reference/ecs.md
@@ -0,0 +1,37 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/ecs.html
+---
+
+# Elastic Common Schema (ECS) [ecs]
+
+The [Elastic Common Schema (ECS)](ecs://docs/reference/index.md) is an open source format that defines a common set of fields to be used when storing event data like logs in Elasticsearch.
+
+You can use the library [ecs-logging](https://github.com/elastic/ecs-logging-ruby) which is a set of libraries that enables you to transform your application logs to structured logs that comply with the ECS format.
+
+Add this line to your application’s Gemfile:
+
+```ruby
+gem 'ecs-logging'
+```
+
+Then execute `bundle install`. Or install from the command line yourself:
+
+```sh
+$ gem install ecs-logging
+```
+
+Then configure the client to use the logger:
+
+```ruby
+require 'ecs_logging/logger'
+require 'elasticsearch'
+
+logger = EcsLogging::Logger.new($stdout)
+client = Elasticsearch::Client.new(logger: logger)
+> client.info
+{"@timestamp":"2022-07-12T05:31:18.590Z","log.level":"INFO","message":"GET http://localhost:9200/ [status:200, request:0.009s, query:n/a]","ecs.version":"1.4.0"}...
+```
+
+See [ECS Logging Ruby Reference](ecs-logging-ruby://docs/reference/index.md) for more information on how to configure the logger.
+
diff --git a/docs/helpers/esql.asciidoc b/docs/reference/esql.md
similarity index 72%
rename from docs/helpers/esql.asciidoc
rename to docs/reference/esql.md
index 9a310911b5..6fea4bcdd9 100644
--- a/docs/helpers/esql.asciidoc
+++ b/docs/reference/esql.md
@@ -1,36 +1,29 @@
-[[esql]]
-=== ES|QL in the Ruby client
-++++
-ES|QL
-++++
+---
+navigation_title: "ES|QL"
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/esql.html
+---
-This page helps you understand and use {ref}/esql.html[ES|QL] in the
-Ruby client.
+# ES|QL in the Ruby client [esql]
+
+
+This page helps you understand and use [ES|QL](docs-content://explore-analyze/query-filter/languages/esql.md) in the Ruby client.
There are two ways to use ES|QL in the Ruby client:
-* Use the Elasticsearch {es-docs}/esql-apis.html[ES|QL API] directly: This
-is the most flexible approach, but it's also the most complex because you must handle
-results in their raw form. You can choose the precise format of results,
-such as JSON, CSV, or text.
-* Use the Ruby ES|QL helper: The helper maps the raw response to an object that's
-more readily usable by your application.
+* Use the Elasticsearch [ES|QL API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-esql) directly: This is the most flexible approach, but it’s also the most complex because you must handle results in their raw form. You can choose the precise format of results, such as JSON, CSV, or text.
+* Use the Ruby ES|QL helper: The helper maps the raw response to an object that’s more readily usable by your application.
-[discrete]
-[[esql-how-to]]
-==== ES|QL API
+## ES|QL API [esql-how-to]
-The {es-docs}/esql-query-api.html[ES|QL query API] allows you to specify how
-results should be returned. You can choose a
-{es-docs}/esql-rest.html#esql-rest-format[response format] such as CSV, text, or
-JSON, then fine-tune it with parameters like column separators and locale.
+The [ES|QL query API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-esql) allows you to specify how results should be returned. You can choose a [response format](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-format) such as CSV, text, or JSON, then fine-tune it with parameters like column separators and locale.
By default, the `query` API returns a Hash response with `columns` and `values`:
-[[esql-query]]
-[source,ruby]
-----
+$$$esql-query$$$
+
+```ruby
query = <>, the helper returns
-the following:
+The helper returns an array of hashes with the columns as keys and the respective values. Using the [preceding example](#esql-query), the helper returns the following:
-[source,ruby]
-----
+```ruby
response = Elasticsearch::Helpers::ESQLHelper.query(client, query)
puts response
@@ -87,15 +73,11 @@ puts response
{"duration_ms"=>1.2, "message"=>"Disconnected", "event.duration"=>1232382, "client.ip"=>"172.21.0.5", "@timestamp"=>"2023-10-23T13:33:34.937Z"}
{"duration_ms"=>0.7, "message"=>"Connection error", "event.duration"=>725448, "client.ip"=>"172.21.3.15", "@timestamp"=>"2023-10-23T13:51:54.732Z"}
{"duration_ms"=>8.3, "message"=>"Connection error", "event.duration"=>8268153, "client.ip"=>"172.21.3.15", "@timestamp"=>"2023-10-23T13:52:55.015Z"}
-----
+```
-Additionally, you can transform the data in the response by passing in a Hash
-of `column => Proc` values. You could use this for example to convert
-'@timestamp' into a DateTime object. Pass in a Hash to `query` as a `parser`
-defining a `Proc` for each value you'd like to parse:
+Additionally, you can transform the data in the response by passing in a Hash of `column => Proc` values. You could use this, for example, to convert `@timestamp` into a `DateTime` object. Pass in a Hash to `query` as a `parser` defining a `Proc` for each value you’d like to parse:
-[source,ruby]
-----
+```ruby
require 'elasticsearch/helpers/esql_helper'
parser = {
@@ -104,12 +86,11 @@ parser = {
response = Elasticsearch::Helpers::ESQLHelper.query(client, query, parser: parser)
response.first['@timestamp']
#
-----
+```
You can pass in as many Procs as there are columns in the response. For example:
-[source,ruby]
-----
+```ruby
parser = {
'@timestamp' => Proc.new { |t| DateTime.parse(t) },
'client.ip' => Proc.new { |i| IPAddr.new(i) },
@@ -125,4 +106,5 @@ puts response
{"duration_ms"=>1.2, "message"=>"Disconnected", "event.duration"=>"1232382", "client.ip"=>#, "@timestamp"=>#}
{"duration_ms"=>0.7, "message"=>"Connection error", "event.duration"=>"725448", "client.ip"=>#, "@timestamp"=>#}
{"duration_ms"=>8.3, "message"=>"Connection error", "event.duration"=>"8268153", "client.ip"=>#, "@timestamp"=>#}
-----
+```
+
diff --git a/docs/examples.asciidoc b/docs/reference/examples.md
similarity index 82%
rename from docs/examples.asciidoc
rename to docs/reference/examples.md
index 22e9619505..75e850cfbc 100644
--- a/docs/examples.asciidoc
+++ b/docs/reference/examples.md
@@ -1,26 +1,26 @@
-[[examples]]
-== Examples
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/examples.html
+---
-Below you can find examples of how to use the most frequently called APIs with
-the Ruby client.
+# Examples [examples]
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
+Below you can find examples of how to use the most frequently called APIs with the Ruby client.
+* [Indexing a document](#ex-index)
+* [Getting a document](#ex-get)
+* [Updating a document](#ex-update)
+* [Deleting a document](#ex-delete)
+* [Bulk requests](#ex-bulk)
+* [Searching for a document](#ex-search)
+* [Multi search](#ex-multisearch)
+* [Scrolling](#ex-scroll)
+* [Reindexing](#ex-reindex)
-[discrete]
-[[ex-index]]
-=== Indexing a document
-Let's index a document with the following fields: `name`, `author`,
-`release_date`, and `page_count`:
+## Indexing a document [ex-index]
+
+Let’s index a document with the following fields: `name`, `author`, `release_date`, and `page_count`:
```ruby
body = {
@@ -34,9 +34,7 @@ client.index(index: 'books', body: body)
```
-[discrete]
-[[ex-get]]
-=== Getting a document
+## Getting a document [ex-get]
You can get a document by ID:
@@ -46,9 +44,7 @@ client.get(index: 'books', id: id)
```
-[discrete]
-[[ex-update]]
-=== Updating a document
+## Updating a document [ex-update]
Assume you have the following document:
@@ -65,9 +61,7 @@ client.update(index: 'books', id: id, body: { doc: { page_count: 225 } })
```
-[discrete]
-[[ex-delete]]
-=== Deleting a document
+## Deleting a document [ex-delete]
You can delete a document by ID:
@@ -77,13 +71,9 @@ client.delete(index: 'books', id: id)
```
-[discrete]
-[[ex-bulk]]
-=== Bulk requests
+## Bulk requests [ex-bulk]
-The `bulk` operation of the client supports various different formats of the
-payload: array of strings, header/data pairs, or the combined format where data
-is passed along with the header in a single item in a custom `:data` key.
+The `bulk` operation of the client supports various formats of the payload: an array of strings, header/data pairs, or the combined format where data is passed along with the header in a single item in a custom `:data` key.
Index several documents in one request:
@@ -109,8 +99,7 @@ body = [
client.bulk(body: body)
```
-As mentioned, you can perform several operations in a single request with the
-combined format passing data in the `:data` option:
+As mentioned, you can perform several operations in a single request with the combined format passing data in the `:data` option:
```ruby
body = [
@@ -122,17 +111,13 @@ client.bulk(body: body)
```
-[discrete]
-[[ex-search]]
-=== Searching for a document
+## Searching for a document [ex-search]
-This example uses the same data that is used in the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html#find-structure-example-nld-json[Find structure API documentation].
+This example uses the same data that is used in the [Find structure API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure).
First, bulk index it:
-[source,ruby]
-----
+```ruby
body = [
{ index: { _index: 'books', data: { name: 'Leviathan Wakes', author: 'James S.A. Corey', release_date: '2011-06-02', page_count: 561 } } },
{ index: { _index: 'books', data: { name: 'Hyperion', author: 'Dan Simmons', release_date: '1989-05-26', page_count: 482 } } },
@@ -160,10 +145,9 @@ body = [
{ index: { _index: 'books', data: { name: 'The Moon is a Harsh Mistress', author: 'Robert A. Heinlein', release_date: '1966-04-01', page_count: 288 } } }
]
client.bulk(body: body)
-----
+```
-The `field` parameter is a common parameter, so it can be passed in directly in
-the following way:
+The `field` parameter is a common parameter, so it can be passed in directly in the following way:
```ruby
client.search(index: 'books', q: 'dune')
@@ -178,10 +162,10 @@ response['hits']['hits'].count # => 15
```
-[discrete]
-[[ex-multisearch]]
-=== Multi search
+## Multi search [ex-multisearch]
+
The following example shows how to perform a multisearch API call on `books` index:
+
```ruby
body = [
{},
@@ -192,12 +176,10 @@ body = [
client.msearch(index:'books', body: body)
```
-[discrete]
-[[ex-scroll]]
-=== Scrolling
-Submit a search API request that includes an argument for the scroll query
-parameter, save the search ID, then print out the book names you found:
+## Scrolling [ex-scroll]
+
+Submit a search API request that includes an argument for the scroll query parameter, save the search ID, then print out the book names you found:
```ruby
# Search request with a scroll argument:
@@ -214,13 +196,11 @@ end
```
-[discrete]
-[[ex-reindex]]
-=== Reindexing
+## Reindexing [ex-reindex]
-The following example shows how to reindex the `books` index into a new index
-called `books-reindexed`:
+The following example shows how to reindex the `books` index into a new index called `books-reindexed`:
```ruby
client.reindex(body: {source: { index: 'books'}, dest: {index: 'books-reindexed' } })
```
+
diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md
new file mode 100644
index 0000000000..41920656aa
--- /dev/null
+++ b/docs/reference/getting-started.md
@@ -0,0 +1,125 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/getting-started-ruby.html
+ - https://www.elastic.co/guide/en/serverless/current/elasticsearch-ruby-client-getting-started.html
+---
+
+# Getting started [getting-started-ruby]
+
+This page guides you through the installation process of the Ruby client, shows you how to instantiate the client, and how to perform basic Elasticsearch operations with it.
+
+
+### Requirements [_requirements]
+
+A currently maintained version of Ruby (3.0+) or JRuby (9.3+).
+
+
+### Installation [_installation]
+
+To install the latest version of the client, run the following command:
+
+```shell
+gem install elasticsearch
+```
+
+Refer to the [*Installation*](/reference/installation.md) page to learn more.
+
+
+### Connecting [_connecting]
+
+You can connect to the Elastic Cloud using an API key and the Elasticsearch endpoint.
+
+```rb
+client = Elasticsearch::Client.new(
+ cloud_id: '',
+ api_key: ''
+)
+```
+
+Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment:
+
+:::{image} ../images/es_endpoint.jpg
+:alt: Finding Elasticsearch endpoint
+:::
+
+You can generate an API key on the **Management** page under Security.
+
+:::{image} ../images/create_api_key.png
+:alt: Create API key
+:::
+
+For other connection options, refer to the [*Connecting*](/reference/connecting.md) section.
+
+
+### Operations [_operations]
+
+Time to use Elasticsearch! This section walks you through the basic, and most important, operations of Elasticsearch. For more operations and more advanced examples, refer to the [*Examples*](/reference/examples.md) page.
+
+
+#### Creating an index [_creating_an_index]
+
+This is how you create the `my_index` index:
+
+```rb
+client.indices.create(index: 'my_index')
+```
+
+
+#### Indexing documents [_indexing_documents]
+
+This is a simple way of indexing a document:
+
+```rb
+document = { name: 'elasticsearch-ruby' }
+response = client.index(index: 'my_index', body: document)
+# You can get the indexed document id with:
+response['_id']
+=> "PlgIDYkBWS9Ngdx5IMy-"
+id = response['_id']
+```
+
+
+#### Getting documents [_getting_documents]
+
+You can get documents by using the following code:
+
+```rb
+client.get(index: 'my_index', id: id)
+```
+
+
+#### Searching documents [_searching_documents]
+
+This is how you can create a single match query with the Ruby client:
+
+```rb
+client.search(index: 'my_index', body: { query: { match_all: {} } })
+```
+
+
+#### Updating documents [_updating_documents]
+
+This is how you can update a document, for example to add a new field:
+
+```rb
+client.update(index: 'my_index', id: id, body: { doc: { language: 'Ruby' } })
+```
+
+
+#### Deleting documents [_deleting_documents]
+
+```rb
+client.delete(index: 'my_index', id: id)
+```
+
+
+#### Deleting an index [_deleting_an_index]
+
+```rb
+client.indices.delete(index: 'my_index')
+```
+
+
+## Further reading [_further_reading]
+
+* Use [Bulk and Scroll helpers](/reference/Helpers.md) for a more comfortable experience with the APIs, as sketched below.
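+
+The following is a minimal sketch of the bulk helper (assuming the `Elasticsearch::Helpers::BulkHelper` interface described on that page; a similar scroll helper is also available):
+
+```rb
+require 'elasticsearch/helpers/bulk_helper'
+
+docs = [
+  { name: 'Leviathan Wakes', author: 'James S.A. Corey' },
+  { name: 'Hyperion', author: 'Dan Simmons' }
+]
+
+# Bind the helper to the client and a target index, then ingest the documents in bulk.
+bulk_helper = Elasticsearch::Helpers::BulkHelper.new(client, 'books')
+bulk_helper.ingest(docs)
+```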
diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 0000000000..088cd04b9b
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,40 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/index.html
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/ruby_client.html
+---
+
+# Ruby [ruby_client]
+
+The `elasticsearch` [Rubygem](http://rubygems.org/gems/elasticsearch) provides a low-level client for communicating with an {{es}} cluster, fully compatible with other official clients.
+
+More documentation is hosted on [GitHub](https://github.com/elastic/elasticsearch-ruby) and [RubyDoc](http://rubydoc.info/gems/elasticsearch).
+
+Refer to the [*Getting started*](/reference/getting-started.md) page for a step-by-step quick start with the Ruby client.
+
+
+## Features [_features]
+
+* Pluggable logging and tracing
+* Pluggable connection selection strategies (round-robin, random, custom)
+* Pluggable transport implementation, customizable and extendable
+* Pluggable serializer implementation
+* Request retries and dead connections handling
+* Node reloading (based on cluster state) on errors or on demand
+* Modular API implementation
+* 100% REST API coverage
+
+
+## Transport and API [transport-api]
+
+The `elasticsearch` gem combines two separate Rubygems:
+
+* [`elastic-transport`](https://github.com/elastic/elastic-transport-ruby/) - provides an HTTP Ruby client for connecting to the {{es}} cluster. Refer to the documentation: [Transport](/reference/transport.md)
+* [`elasticsearch-api`](https://github.com/elastic/elasticsearch-ruby/tree/main/elasticsearch-api) - provides a Ruby API for the {{es}} RESTful API.
+
+Please consult their respective documentation for configuration options and technical details.
+
+Notably, the documentation and comprehensive examples for all the API methods are contained in the source, and available online at [Rubydoc](http://rubydoc.info/gems/elasticsearch-api/Elasticsearch/API/Actions).
+
+Keep in mind that for optimal performance, you should use an HTTP library which supports persistent ("keep-alive") HTTP connections.
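+
+For example, with Faraday 2.x you add an adapter gem such as `faraday-patron` to your Gemfile and require it before instantiating the client, and the client picks it up automatically (a sketch following the Transport documentation below):
+
+```ruby
+# Gemfile: gem 'faraday-patron'
+require 'faraday'
+require 'faraday/patron'
+require 'elasticsearch'
+
+# The loaded adapter is auto-detected, so requests reuse persistent connections.
+client = Elasticsearch::Client.new
+```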
+
diff --git a/docs/reference/installation.md b/docs/reference/installation.md
new file mode 100644
index 0000000000..73048cfa0d
--- /dev/null
+++ b/docs/reference/installation.md
@@ -0,0 +1,44 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/ruby-install.html
+---
+
+# Installation [ruby-install]
+
+Install the Rubygem for the latest {{es}} version by using the following command:
+
+```sh
+gem install elasticsearch
+```
+
+Or add the `elasticsearch` Ruby gem to your Gemfile:
+
+```ruby
+gem 'elasticsearch'
+```
+
+You can install the Ruby gem for a specific {{es}} version by using the following command:
+
+```sh
+gem install elasticsearch -v 7.0.0
+```
+
+Or you can add a specific version of {{es}} to your Gemfile:
+
+```ruby
+gem 'elasticsearch', '~> 7.0'
+```
+
+
+## {{es}} and Ruby version compatibility [_es_and_ruby_version_compatibility]
+
+The {{es}} client is compatible with currently maintained Ruby versions. We follow Ruby’s own maintenance policy and officially support all currently maintained versions per [Ruby Maintenance Branches](https://www.ruby-lang.org/en/downloads/branches/).
+
+Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of {{es}} without breaking. This does not mean that the client automatically supports new features of newer {{es}} versions; that is only possible after a release of a new client version. For example, an 8.12 client version won’t automatically support the new features of the 8.13 version of {{es}}; the 8.13 client version is required for that. {{es}} language clients are only backwards compatible with default distributions and without guarantees made.
+
+| Gem Version | | {{es}} Version | Supported |
+| --- | --- | --- | --- |
+| 7.x | → | 7.x | 7.17 |
+| 8.x | → | 8.x | 8.x |
+| main | → | main | |
+
diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md
new file mode 100644
index 0000000000..2b54bab453
--- /dev/null
+++ b/docs/reference/integrations.md
@@ -0,0 +1,26 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/integrations.html
+---
+
+# Integrations [integrations]
+
+The Rubygems listed on this page make it easier to operate with {{es}} by using the Ruby client.
+
+* [Transport](/reference/transport.md)
+* [Elasticsearch API](/reference/api.md)
+* [Using OpenTelemetry](/reference/opentelemetry.md)
+* [Elastic Common Schema (ECS)](/reference/ecs.md)
+* [ActiveModel / ActiveRecord](/reference/activemodel_activerecord.md)
+* [Ruby On Rails](/reference/ruby_on_rails.md)
+* [Persistence](/reference/persistence.md)
+* [Elasticsearch DSL](/reference/dsl.md)
+
diff --git a/docs/reference/opentelemetry.md b/docs/reference/opentelemetry.md
new file mode 100644
index 0000000000..ccc4863a8c
--- /dev/null
+++ b/docs/reference/opentelemetry.md
@@ -0,0 +1,97 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/opentelemetry.html
+---
+
+# Using OpenTelemetry [opentelemetry]
+
+You can use [OpenTelemetry](https://opentelemetry.io/) to monitor the performance and behavior of your {{es}} requests through the Ruby Client. The Ruby Client comes with built-in OpenTelemetry instrumentation that emits [distributed tracing spans](docs-content://solutions/observability/apps/traces-2.md) by default. With that, applications [instrumented with OpenTelemetry](https://opentelemetry.io/docs/instrumentation/ruby/manual/) or using the [OpenTelemetry Ruby SDK](https://opentelemetry.io/docs/instrumentation/ruby/automatic/) are inherently enriched with additional spans that contain insightful information about the execution of the {{es}} requests.
+
+The native instrumentation in the Ruby Client follows the [OpenTelemetry Semantic Conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/). In particular, the instrumentation in the client covers the logical layer of {{es}} requests. A single span per request is created that is processed by the service through the Ruby Client. The following image shows a trace that records the handling of two different {{es}} requests: a `ping` request and a `search` request.
+
+:::{image} ../images/otel-waterfall-without-http.png
+:alt: Distributed trace with Elasticsearch spans
+:class: screenshot
+:::
+
+Usually, OpenTelemetry auto-instrumentation modules come with instrumentation support for HTTP-level communication. In this case, in addition to the logical {{es}} client requests, spans will be captured for the physical HTTP requests emitted by the client. The following image shows a trace with both {{es}} spans (in blue) and the corresponding HTTP-level spans (in red):
+
+:::{image} ../images/otel-waterfall-with-http.png
+:alt: Distributed trace with Elasticsearch spans
+:class: screenshot
+:::
+
+Advanced Ruby Client behavior, such as node round-robin and request retries, is revealed through the combination of logical {{es}} spans and the physical HTTP spans. The following example shows a `search` request in a scenario with two nodes:
+
+:::{image} ../images/otel-waterfall-retry.png
+:alt: Distributed trace with Elasticsearch spans
+:class: screenshot
+:::
+
+The first node is unavailable and results in an HTTP error, while the retry to the second node succeeds. Both HTTP requests are subsumed by the logical {{es}} request span (in blue).
+
+
+## Setup the OpenTelemetry instrumentation [_setup_the_opentelemetry_instrumentation]
+
+When using the [OpenTelemetry Ruby SDK manually](https://opentelemetry.io/docs/instrumentation/ruby/manual) or using the [OpenTelemetry Ruby Auto-Instrumentations](https://opentelemetry.io/docs/instrumentation/ruby/automatic/), the Ruby Client’s OpenTelemetry instrumentation is enabled by default and uses the global OpenTelemetry SDK with the global tracer provider. You can provide a tracer provider via the Ruby Client configuration option `opentelemetry_tracer_provider` when instantiating the client. This is sometimes useful for testing or other specific use cases.
+
+```ruby
+client = Elasticsearch::Client.new(
+ cloud_id: '',
+ api_key: '',
+ opentelemetry_tracer_provider: tracer_provider
+)
+```
+
+
+## Configuring the OpenTelemetry instrumentation [_configuring_the_opentelemetry_instrumentation]
+
+You can configure the OpenTelemetry instrumentation through Environment Variables. The following configuration options are available.
+
+
+### Enable / Disable the OpenTelemetry instrumentation [opentelemetry-config-enable]
+
+With this configuration option you can enable (default) or disable the built-in OpenTelemetry instrumentation.
+
+**Default:** `true`
+
+| | |
+| --- | --- |
+| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_ENABLED` |
+
+
+### Capture search request bodies [_capture_search_request_bodies]
+
+By default, the built-in OpenTelemetry instrumentation does not capture request bodies due to data privacy considerations. You can use this option to enable capturing of search queries from the request bodies of {{es}} search requests in case you wish to gather this information regardless. The options are to capture the raw search query, sanitize the query with a default list of sensitive keys, or not capture it at all.
+
+**Default:** `omit`
+
+**Valid Options:** `omit`, `sanitize`, `raw`
+
+| | |
+| --- | --- |
+| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY` |
+
+
+### Sanitize the {{es}} search request body [_sanitize_the_es_search_request_body]
+
+You can configure the list of keys whose values are redacted when the search query is captured. Values must be comma-separated. Note that in v8.3.0 and v8.3.1, the environment variable `OTEL_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY` was available, but it is now deprecated in favor of the environment variable that includes `RUBY`.
+
+**Default:** `nil`
+
+| | |
+| --- | --- |
+| Environment Variable | `OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_SEARCH_QUERY_SANITIZE_KEYS` |
+
+Example:
+
+```bash
+OTEL_RUBY_INSTRUMENTATION_ELASTICSEARCH_SEARCH_QUERY_SANITIZE_KEYS='sensitive-key,other-sensitive-key'
+```
+
+
+## Overhead [_overhead]
+
+The OpenTelemetry instrumentation (as any other monitoring approach) may come with a slight overhead on CPU, memory, and/or latency. The overhead may only occur when the instrumentation is enabled (default) and an OpenTelemetry SDK is active in the target application. When the instrumentation is disabled or no OpenTelemetry SDK is active within the target application, monitoring overhead is not expected when using the client.
+
+Even in cases where the instrumentation is enabled and is actively used (by an OpenTelemetry SDK), the overhead is minimal and negligible in the vast majority of cases. In edge cases where there is a noticeable overhead, the [instrumentation can be explicitly disabled](#opentelemetry-config-enable) to eliminate any potential impact on performance.
diff --git a/docs/persistence.asciidoc b/docs/reference/persistence.md
similarity index 56%
rename from docs/persistence.asciidoc
rename to docs/reference/persistence.md
index 8567a723cf..2bb497a96b 100644
--- a/docs/persistence.asciidoc
+++ b/docs/reference/persistence.md
@@ -1,42 +1,37 @@
-[[persistence]]
-=== Persistence
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/persistence.html
+---
-The `elasticsearch-persistence`
-http://rubygems.org/gems/elasticsearch-persistence[Rubygem] provides persistence
-layer for Ruby domain objects.
+# Persistence [persistence]
-It supports the repository design patterns. Versions before 6.0 also supported
-the _active record_ design pattern.
+The `elasticsearch-persistence` [Rubygem](http://rubygems.org/gems/elasticsearch-persistence) provides a persistence layer for Ruby domain objects.
+It supports the repository design pattern. Versions before 6.0 also supported the *active record* design pattern.
-[discrete]
-==== Repository
-The `Elasticsearch::Persistence::Repository` module provides an implementation
-of the repository pattern and allows to save, delete, find and search objects
-stored in {es}, as well as configure mappings and settings for the index.
+## Repository [_repository]
+The `Elasticsearch::Persistence::Repository` module provides an implementation of the repository pattern and allows you to save, delete, find, and search objects stored in {{es}}, as well as configure mappings and settings for the index.
-[discrete]
-===== Features
-* Access to the {es} client
+### Features [_features_4]
+
+* Access to the {{es}} client
* Setting the index name, document type, and object class for deserialization
* Composing mappings and settings for the index
* Creating, deleting or refreshing the index
* Finding or searching for documents
* Providing access both to domain objects and hits for search results
-* Providing access to the {es} response for search results
+* Providing access to the {{es}} response for search results
* Defining the methods for serialization and deserialization
-[discrete]
-===== Usage
+### Usage [_usage_2]
-Let's have a simple plain old Ruby object (PORO):
+Let’s have a simple plain old Ruby object (PORO):
-[source,ruby]
-------------------------------------
+```ruby
class Note
attr_reader :attributes
@@ -48,69 +43,58 @@ class Note
@attributes
end
end
-------------------------------------
+```
Create a default, "dumb" repository, as a first step:
-[source,ruby]
-------------------------------------
+```ruby
require 'elasticsearch/persistence'
class MyRepository; include Elasticsearch::Persistence::Repository; end
repository = MyRepository.new
-------------------------------------
+```
Save a `Note` instance into the repository:
-[source,ruby]
-------------------------------------
+```ruby
note = Note.new id: 1, text: 'Test'
repository.save(note)
# PUT http://localhost:9200/repository/_doc/1 [status:201, request:0.210s, query:n/a]
# > {"id":1,"text":"Test"}
# < {"_index":"repository","_type":"note","_id":"1","_version":1,"created":true}
-------------------------------------
+```
Find it:
-[source,ruby]
-------------------------------------
+```ruby
n = repository.find(1)
# GET http://localhost:9200/repository/_doc/1 [status:200, request:0.003s, query:n/a]
# < {"_index":"repository","_type":"note","_id":"1","_version":2,"found":true, "_source" : {"id":1,"text":"Test"}}
=> 1, "text"=>"Test"}>
-------------------------------------
+```
Search for it:
-[source,ruby]
-------------------------------------
+```ruby
repository.search(query: { match: { text: 'test' } }).first
# GET http://localhost:9200/repository/_search [status:200, request:0.005s, query:0.002s]
# > {"query":{"match":{"text":"test"}}}
# < {"took":2, ... "hits":{"total":1, ... "hits":[{ ... "_source" : {"id":1,"text":"Test"}}]}}
=> 1, "text"=>"Test"}>
-------------------------------------
+```
Delete it:
-[source,ruby]
-------------------------------------
+```ruby
repository.delete(note)
# DELETE http://localhost:9200/repository/_doc/1 [status:200, request:0.014s, query:n/a]
# < {"found":true,"_index":"repository","_type":"note","_id":"1","_version":3}
=> {"found"=>true, "_index"=>"repository", "_type"=>"note", "_id"=>"1", "_version"=>2}
-------------------------------------
+```
+
+The repository module provides a number of features and facilities to configure and customize the behaviour, as well as support for extending your own, custom repository class.
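+
+For example, a customized repository class might set its own index name, target class, and mapping (a sketch assuming the class-level DSL, `Elasticsearch::Persistence::Repository::DSL`, described in the gem's README):
+
+```ruby
+require 'elasticsearch/persistence'
+
+class NoteRepository
+  include Elasticsearch::Persistence::Repository
+  include Elasticsearch::Persistence::Repository::DSL
+
+  # Store documents in the 'notes' index and deserialize them into Note objects.
+  index_name 'notes'
+  klass Note
+
+  # Configure the mapping used when the index is created.
+  mapping do
+    indexes :text, analyzer: 'snowball'
+  end
+end
+
+repository = NoteRepository.new
+repository.create_index!
+```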
-The repository module provides a number of features and facilities to configure
-and customize the behaviour, as well as support for extending your own, custom
-repository class.
+Please refer to the [documentation](https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-persistence#the-repository-pattern) for more information.
-Please refer to the
-https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-persistence#the-repository-pattern[documentation]
-for more information.
+Also, check out the [example application](https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-persistence#example-application) which demonstrates the usage patterns of the *repository* approach to persistence.
-Also, check out the
-https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-persistence#example-application[example application]
-which demonstrates the usage patterns of the _repository_ approach to
-persistence.
diff --git a/docs/reference/ruby_on_rails.md b/docs/reference/ruby_on_rails.md
new file mode 100644
index 0000000000..500fcbb762
--- /dev/null
+++ b/docs/reference/ruby_on_rails.md
@@ -0,0 +1,23 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/ruby_on_rails.html
+---
+
+# Ruby On Rails [ruby_on_rails]
+
+The `elasticsearch-rails` [Rubygem](http://rubygems.org/gems/elasticsearch-rails) provides features suitable for Ruby on Rails applications.
+
+
+## Features [_features_3]
+
+* Rake tasks for importing data from application models
+* Integration with Rails' instrumentation framework
+* Templates for generating example Rails application
+
+
+## Example applications [_example_applications]
+
+You can generate a fully working example Ruby on Rails application using the provided templates.
+
+Please refer to the [documentation](https://github.com/elastic/elasticsearch-rails/tree/master/elasticsearch-rails) for more information.
+
diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml
new file mode 100644
index 0000000000..be3fc7f854
--- /dev/null
+++ b/docs/reference/toc.yml
@@ -0,0 +1,24 @@
+toc:
+ - file: index.md
+ - file: getting-started.md
+ - file: installation.md
+ - file: connecting.md
+ - file: configuration.md
+ children:
+ - file: basic-config.md
+ - file: advanced-config.md
+ - file: integrations.md
+ children:
+ - file: transport.md
+ - file: api.md
+ - file: opentelemetry.md
+ - file: ecs.md
+ - file: activemodel_activerecord.md
+ - file: ruby_on_rails.md
+ - file: persistence.md
+ - file: dsl.md
+ - file: examples.md
+ - file: client-helpers.md
+ children:
+ - file: Helpers.md
+ - file: esql.md
\ No newline at end of file
diff --git a/docs/transport.asciidoc b/docs/reference/transport.md
similarity index 57%
rename from docs/transport.asciidoc
rename to docs/reference/transport.md
index b9199b19ed..24cecadde4 100644
--- a/docs/transport.asciidoc
+++ b/docs/reference/transport.md
@@ -1,26 +1,35 @@
-[[transport]]
-=== Transport
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/transport.html
+---
-The `elastic-transport` library provides a low-level Ruby client for connecting to an {es} cluster. It currently powers the https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/index.html[Elasticsearch Ruby] and the https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current/index.html[Enterprise Search Ruby] clients.
+# Transport [transport]
+
+The `elastic-transport` library provides a low-level Ruby client for connecting to an {{es}} cluster. It powers the [Elasticsearch Ruby](/reference/index.md) client.
When available, it handles connecting to multiple nodes in the cluster, rotating across connections, logging and tracing requests and responses, maintaining failed connections, discovering nodes in the cluster, and provides an abstraction for data serialization and transport.
-It does not handle calling the {es} or Enterprise Search APIs.
+It does not handle calling the {{es}} APIs.
-This library uses https://github.com/lostisland/faraday[Faraday] by default as the HTTP transport implementation. We test it with Faraday versions 1.x and 2.x.
+This library uses [Faraday](https://github.com/lostisland/faraday) by default as the HTTP transport implementation. We test it with Faraday versions 1.x and 2.x.
-For optimal performance, use a HTTP library which supports persistent ("keep-alive") connections, such as https://github.com/toland/patron[patron] or https://github.com/typhoeus/typhoeus[Typhoeus]. Require the library (`require 'patron'`) in your code for Faraday 1.x or the adapter (`require 'faraday/patron'`) for Faraday 2.x, and it will be automatically used.
+For optimal performance, use an HTTP library which supports persistent ("keep-alive") connections, such as [patron](https://github.com/toland/patron) or [Typhoeus](https://github.com/typhoeus/typhoeus). Require the library (`require 'patron'`) in your code for Faraday 1.x or the adapter (`require 'faraday/patron'`) for Faraday 2.x, and it will be automatically used.
Currently these libraries are supported:
-* https://github.com/toland/patron[Patron]
-* https://github.com/typhoeus/typhoeus[Typhoeus]
-* https://rubygems.org/gems/httpclient[HTTPClient]
-* https://rubygems.org/gems/net-http-persistent[Net::HTTP::Persistent]
+* [Patron](https://github.com/toland/patron)
+* [Typhoeus](https://github.com/typhoeus/typhoeus)
+* [HTTPClient](https://rubygems.org/gems/httpclient)
+* [Net::HTTP::Persistent](https://rubygems.org/gems/net-http-persistent)
+* [Excon](https://github.com/excon/faraday-excon)
+* [Async::HTTP](https://github.com/socketry/async-http-faraday)
+
+::::{note}
+If using [Typhoeus](https://github.com/typhoeus/typhoeus), v1.4.0 or up is needed, since older versions are not compatible with Faraday 1.0.
+::::
-NOTE: Use https://github.com/typhoeus/typhoeus[Typhoeus] v1.4.0 or up since older versions are not compatible with Faraday 1.0.
-You can customize Faraday and implement your own HTTP transport. For detailed information, see the example configurations and more information <>.
+You can customize Faraday and implement your own HTTP transport. For detailed information, see the example configurations and more information [below](#transport-implementations).
Features overview:
@@ -31,87 +40,74 @@ Features overview:
* Request retries and dead connections handling
* Node reloading (based on cluster state) on errors or on demand
-Refer to <> to read about more configuration options.
+Refer to [Advanced Configuration](/reference/advanced-config.md) to read about more configuration options.
+
-[discrete]
-[[transport-install]]
-==== Installation
+## Installation [transport-install]
-Install the package from https://rubygems.org/[Rubygems]:
+Install the package from [Rubygems](https://rubygems.org/):
-[source,bash]
-----------------------------
+```bash
gem install elastic-transport
-----------------------------
+```
-To use an unreleased version, either add it to your `Gemfile` for
-http://gembundler.com/[Bundler]:
+To use an unreleased version, either add it to your `Gemfile` for [Bundler](http://gembundler.com/):
-[source,bash]
-----------------------------
+```bash
gem 'elastic-transport', git: 'git@github.com:elastic/elastic-transport-ruby.git'
-----------------------------
+```
or install it from a source code checkout:
-[source,bash]
-----------------------------
+```bash
git clone https://github.com/elastic/elastic-transport-ruby.git
cd elastic-transport
bundle install
rake install
-----------------------------
+```
-[discrete]
-[[transport-example-usage]]
-==== Example usage
-In the simplest form, connect to {es} running on http://localhost:9200 without any configuration:
+## Example usage [transport-example-usage]
-[source,rb]
-----------------------------
+In the simplest form, connect to {{es}} running on [http://localhost:9200](http://localhost:9200) without any configuration:
+
+```rb
require 'elastic/transport'
client = Elastic::Transport::Client.new
response = client.perform_request('GET', '_cluster/health')
# => #
-----------------------------
+```
-Documentation is included as RDoc annotations in the source code and available online at http://rubydoc.info/gems/elastic-transport[RubyDoc].
+Documentation is included as RDoc annotations in the source code and available online at [RubyDoc](http://rubydoc.info/gems/elastic-transport).
-[discrete]
-[[transport-implementations]]
-==== Transport implementations
-By default, the client uses the https://rubygems.org/gems/faraday[Faraday] HTTP library as a transport implementation.
+## Transport implementations [transport-implementations]
-The Client auto-detects and uses an _adapter_ for _Faraday_ based on gems loaded in your code, preferring HTTP clients with support for persistent connections. Faraday 2 changed the way adapters are used (https://github.com/lostisland/faraday/blob/main/UPGRADING.md#adapters-have-moved[read more here]). If you're using Faraday 1.x, you can require the HTTP library. To use the https://github.com/toland/patron[_Patron_] HTTP, for example, require it:
+By default, the client uses the [Faraday](https://rubygems.org/gems/faraday) HTTP library as a transport implementation.
+The Client auto-detects and uses an *adapter* for *Faraday* based on gems loaded in your code, preferring HTTP clients with support for persistent connections. Faraday 2 changed the way adapters are used ([read more here](https://github.com/lostisland/faraday/blob/main/UPGRADING.md#adapters-have-moved)). If you’re using Faraday 1.x, you can require the HTTP library. To use the [*Patron*](https://github.com/toland/patron) HTTP library, for example, require it:
-To use the https://github.com/toland/patron[Patron] HTTP, for example, require it:
-[source,rb]
-----------------------------
+```rb
require 'patron'
-----------------------------
+```
-If you're using Faraday 2.x, you need to add the corresponding adapter gem to your Gemfile and require it after you require `faraday`:
+If you’re using Faraday 2.x, you need to add the corresponding adapter gem to your Gemfile and require it after you require `faraday`:
-[source,rb]
-----------------------------
+```rb
# Gemfile
gem 'faraday-patron'
# Code
require 'faraday'
require 'faraday/patron'
-----------------------------
-
+```
Then, create a new client, and the Patron gem will be used as the "driver":
-[source,rb]
-----------------------------
+```rb
client = Elastic::Transport::Client.new
client.transport.connections.first.connection.builder.adapter
@@ -127,43 +123,39 @@ end
# => Stiletoo : 24
# => Stiletoo : 24
# => ...
-----------------------------
+```
To use a specific adapter for Faraday, pass it as the `adapter` argument:
-[source,rb]
-----------------------------
+```rb
client = Elastic::Client.new(adapter: :net_http_persistent)
client.transport.connections.first.connection.builder.handlers
# => [Faraday::Adapter::NetHttpPersistent]
-----------------------------
+```
If you see this error:
-[source,rb]
-----------------------------
+```rb
Faraday::Error: :net_http_persistent is not registered on Faraday::Adapter
-----------------------------
-When you're using Faraday 2, you need to require the adapter before instantiating the client:
+```
-[source,rb]
-----------------------------
+When you’re using Faraday 2, you need to require the adapter before instantiating the client:
+
+```rb
> client = Elasticsearch::Client.new(adapter: :net_http_persistent)
Faraday::Error: :net_http_persistent is not registered on Faraday::Adapter
> require 'faraday/net_http_persistent'
=> true
> client = Elasticsearch::Client.new(adapter: :net_http_persistent)
=> # #
-----------------------------
+```
-It's possible to customize the Curb instance by passing a block to the constructor as well (in this case, as an inline block):
+It’s possible to customize the Curb instance by passing a block to the constructor as well (in this case, as an inline block):
-[source,rb]
-----------------------------
+```rb
transport = Elastic::Transport::Transport::HTTP::Curb.new(
hosts: [ { host: 'localhost', port: '9200' } ],
& lambda { |c| c.verbose = true }
)
client = Elastic::Client.new(transport: transport)
-----------------------------
+```
You can write your own transport implementation by including the {Elastic::Transport::Transport::Base} module, implementing the required contract, and passing it to the client as the `transport_class` parameter – or by injecting it directly.
-[discrete]
-[[transport-architecture]]
-==== Transport architecture
+
+## Transport architecture [transport-architecture]
* `Elastic::Transport::Client` is composed of `Elastic::Transport::Transport`.
* `Elastic::Transport::Transport` is composed of `Elastic::Transport::Transport::Connections`, and an instance of logger, tracer, serializer and sniffer.
-* Logger and tracer can be any object conforming to Ruby logging interface, for example, an instance of https://ruby-doc.org/stdlib-1.9.3/libdoc/logger/rdoc/Logger.html[`Logger`], https://rubygems.org/gems/log4r[log4r], https://github.com/TwP/logging/[logging], and so on.
-* The `Elastic::Transport::Transport::Serializer::Base` implementations handle converting data for {es} (for example, to JSON). You can implement your own serializer.
+* Logger and tracer can be any object conforming to the Ruby logging interface, for example, an instance of [`Logger`](https://ruby-doc.org/stdlib-1.9.3/libdoc/logger/rdoc/Logger.html), [log4r](https://rubygems.org/gems/log4r), [logging](https://github.com/TwP/logging/), and so on.
+* The `Elastic::Transport::Transport::Serializer::Base` implementations handle converting data for {{es}} (for example, to JSON). You can implement your own serializer.
* `Elastic::Transport::Transport::Sniffer` allows to discover nodes in the cluster and use them as connections.
* `Elastic::Transport::Transport::Connections::Collection` is composed of `Elastic::Transport::Transport::Connections::Connection` instances and a selector instance.
* `Elastic::Transport::Transport::Connections::Connection` contains the connection attributes such as hostname and port, as well as the concrete persistent "session" connected to a specific node.
* The `Elastic::Transport::Transport::Connections::Selector::Base` implementations allow to choose connections from the pool, for example, in a round-robin or random fashion. You can implement your own selector strategy.
* The `Elastic::Transport::Transport::Response` object wraps the Elasticsearch JSON response. It provides `body`, `status`, and `headers` methods but you can treat it as a hash and access the keys directly.
+
diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md
new file mode 100644
index 0000000000..29746c6949
--- /dev/null
+++ b/docs/release-notes/breaking-changes.md
@@ -0,0 +1,44 @@
+---
+navigation_title: "Elasticsearch Ruby Client"
+---
+
+# Elasticsearch Ruby Client breaking changes [elasticsearch-ruby-client-breaking-changes]
+Before you upgrade, carefully review the Elasticsearch Ruby Client breaking changes and take the necessary steps to mitigate any issues.
+
+To learn how to upgrade, check out .
+
+% ## Next version [elasticsearch-ruby-client-nextversion-breaking-changes]
+% **Release date:** Month day, year
+
+% ::::{dropdown} Title of breaking change
+% Description of the breaking change.
+% For more information, check [PR #](PR link).
+% **Impact**
+% Impact of the breaking change.
+% **Action**
+% Steps for mitigating deprecation impact.
+% ::::
+
+## 9.0.0 [elasticsearch-ruby-client-900-breaking-changes]
+
+### Scroll APIs need to send scroll_id in request body
+
+Sending the `scroll_id` as a parameter has been deprecated since version 7.0.0. It needs to be specified in the request body for `clear_scroll` and `scroll`.
+
+**Impact**
+
+Client code using `clear_scroll` or `scroll` APIs and the deprecated `scroll_id` as a parameter needs to be updated.
+
+**Action**
+
+If you are using the `clear_scroll` or `scroll` APIs, and sending the `scroll_id` as a parameter, you need to update your code to send the `scroll_id` as part of the request body:
+```ruby
+# Before:
+client.clear_scroll(scroll_id: scroll_id)
+# Now:
+client.clear_scroll(body: { scroll_id: scroll_id })
+
+# Before:
+client.scroll(scroll_id: scroll_id)
+# Now:
+client.scroll(body: { scroll_id: scroll_id })
+```
diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md
new file mode 100644
index 0000000000..04c3e83766
--- /dev/null
+++ b/docs/release-notes/deprecations.md
@@ -0,0 +1,32 @@
+---
+navigation_title: "Elasticsearch Ruby Client"
+---
+
+# Elasticsearch Ruby Client deprecations [elasticsearch-ruby-client-deprecations]
+Review the deprecated functionality for your Elasticsearch Ruby Client version. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade.
+
+To learn how to upgrade, check out .
+
+## 9.0.0 [elasticsearch-ruby-client-900-deprecations]
+
+_No deprecations_
+
+% ## Next version [elasticsearch-ruby-client-versionnext-deprecations]
+% **Release date:** Month day, year
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**
+% Impact of deprecation.
+% **Action**
+% Steps for mitigating deprecation impact.
+% ::::
+
+% ## 9.0.0 [elasticsearch-ruby-client-900-deprecations]
+% **Release date:** March 25, 2025
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**
+% Impact of deprecation.
+% **Action**
+% Steps for mitigating deprecation impact.
+% ::::
diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md
new file mode 100644
index 0000000000..949f173eec
--- /dev/null
+++ b/docs/release-notes/index.md
@@ -0,0 +1,56 @@
+---
+navigation_title: "Elasticsearch Ruby Client"
+---
+
+# Elasticsearch Ruby Client release notes [elasticsearch-ruby-client-release-notes]
+
+Review the changes, fixes, and more in each version of Elasticsearch Ruby Client.
+
+To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31).
+
+% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections.
+
+% ## version.next [elasticsearch-ruby-client-next-release-notes]
+% **Release date:** Month day, year
+
+% ### Features and enhancements [elasticsearch-ruby-client-next-features-enhancements]
+% *
+
+% ### Fixes [elasticsearch-ruby-client-next-fixes]
+% *
+
+## 9.0.0 [elasticsearch-ruby-client-900-release-notes]
+**Release date:** March 25, 2025
+
+### Features and enhancements [elasticsearch-ruby-client-900-features-enhancements]
+
+Ruby 3.2 and up are tested and supported for 9.0. Older versions of Ruby have reached their end of life. We follow Ruby’s own maintenance policy and officially support all currently maintained versions per [Ruby Maintenance Branches](https://www.ruby-lang.org/en/downloads/branches/). The required Ruby version is set to `2.6` to keep compatibility with JRuby 9.3. However, we only test the code against currently supported Ruby versions.
+
+#### Gem
+
+The size of both `elasticsearch` and `elasticsearch-api` gems is smaller than in previous versions. Some unnecessary files that were being included in the gem have now been removed. There has also been a lot of old code cleanup for the `9.x` branch.
+
+#### Elasticsearch Serverless
+
+With the release of `9.0`, the [Elasticsearch Serverless](https://github.com/elastic/elasticsearch-serverless-ruby) client has been discontinued. You can use the `elasticsearch` client instead to build your Elasticsearch Serverless Ruby applications. The Elasticsearch Serverless API is fully supported. The CI build for Elasticsearch Ruby runs tests to ensure compatibility with Elasticsearch Serverless.
+
+#### Elasticsearch API
+
+* The source code is now generated from [`elasticsearch-specification`](https://github.com/elastic/elasticsearch-specification/), so the API documentation is much more detailed and extensive. The value `Elasticsearch::ES_SPECIFICATION_COMMIT` is updated with the commit hash of `elasticsearch-specification` on which the generated code is based every time the code is generated.
+* The API code has been updated for compatibility with the Elasticsearch API v9.0.
+* `indices.get_field_mapping` - `:fields` is a required parameter.
+* `knn_search` - This API has been removed. It was only ever experimental and was deprecated in `8.4`. It isn't supported in 9.0, and only works when the header `compatible-with=8` is set. The search API should be used for all knn queries.
+* The functions in `utils.rb` that had names starting with double underscore have been renamed to remove these (e.g. `__listify` to `listify`).
+* **Namespaces clean up**: The API namespaces are now generated dynamically based on the elasticsearch-specification. As such, some deprecated namespace files have been removed from the codebase:
+ * The `rollup` namespace was removed. The rollup feature was never GA-ed; it has been deprecated since `8.11.0` in favor of downsampling.
+ * The `data_frame_deprecated` and `remote` namespace files have been removed; no APIs were available in them.
+ * The `shutdown` namespace was removed. It is designed for indirect use by ECE/ESS and ECK. Direct use is not supported.
+
+##### Testing
+
+The gem `elasticsearch-api` migrated away from the Elasticsearch REST API tests and test runner in CI. We now run the [Elasticsearch Client tests](https://github.com/elastic/elasticsearch-clients-tests/) with the [Elasticsearch Tests Runner](https://github.com/elastic/es-test-runner-ruby). This gives us more control over what we're testing and makes the Buildkite build much faster in pull requests and scheduled builds.
+
+### Fixes [elasticsearch-ruby-client-900-fixes]
+
+* Some old rake tasks that were not being used have been removed. The rest were streamlined: the `es` namespace now makes it easier to run Elasticsearch with Docker during development, and the `docker` task namespace was merged into `es`.
+* Elasticsearch's REST API Spec tests can still be run with `rake test:deprecated:rest_api` by setting the corresponding value for the environment variable `TEST_SUITE` ('platinum' or 'free').
diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md
new file mode 100644
index 0000000000..5784c8adc7
--- /dev/null
+++ b/docs/release-notes/known-issues.md
@@ -0,0 +1,24 @@
+---
+navigation_title: "Elasticsearch Ruby Client"
+
+---
+
+# Elasticsearch Ruby Client known issues [elasticsearch-ruby-client-known-issues]
+
+## 9.0.0
+
+_No known issues_
+
+% Use the following template to add entries to this page.
+
+% :::{dropdown} Title of known issue
+% **Details**
+% On [Month/Day/Year], a known issue was discovered that [description of known issue].
+
+% **Workaround**
+% Workaround description.
+
+% **Resolved**
+% On [Month/Day/Year], this issue was resolved.
+
+% :::
diff --git a/docs/release-notes/toc.yml b/docs/release-notes/toc.yml
new file mode 100644
index 0000000000..a410067947
--- /dev/null
+++ b/docs/release-notes/toc.yml
@@ -0,0 +1,5 @@
+toc:
+ - file: index.md
+ - file: known-issues.md
+ - file: breaking-changes.md
+ - file: deprecations.md
\ No newline at end of file
diff --git a/docs/release_notes/70.asciidoc b/docs/release_notes/70.asciidoc
deleted file mode 100644
index d620366ed9..0000000000
--- a/docs/release_notes/70.asciidoc
+++ /dev/null
@@ -1,195 +0,0 @@
-[[release_notes_70]]
-=== 7.0 Release notes
-
-This version contains the following changes:
-
-* Added `elastic_ruby_console` executable. It opens a console with the elasticsearch gems you have installed required.
-* Added macro benchmarking framework, available when developing. Use `rake -T` to view all available benchmarking tasks.
-
-
-[discrete]
-==== Client
-
-* Fixed failing integration test
-* Updated the Manticore development dependency
-* Fixed a failing Manticore unit test
-* Removed "turn" and switched the tests to Minitest
-* Fixed integration tests for Patron
-* Allow passing request headers in `perform_request`
-* Added integration test for passing request headers in `perform_request`
-* Added, that request headers are printed in trace output, if set
-* Fix typos in elasticsearch-transport/README.md
-* Assert that connection count is at least previous count when reloaded
-* Adjust test for change in default number of shards on ES 7
-* Abstract logging functionality into a Loggable Module (#556)
-* Convert client integration tests to rspec
-* Add flexible configuration in spec helper
-* Use helper methods in spec_helper
-* Remove minitest client integration tests in favor of rspec test
-* Convert tests to rspec and refactor client
-* minor changes to the client specs
-* Use pry-nav in development for JRuby
-* Keep arguments variable name for now
-* Skip round-robin test for now
-* Mark test as pending until there is a better way to detect rotating nodes
-* Remove client unit test in favor of rspec test
-* Comment-out round-robin test as it occasionally passes and pending is ineffective
-* Document the default host and port constant
-* Add documentation to spec_helper methods
-* Redacted password if host info is printed in error message
-* Adds tests for not including password in logged error message
-* The redacted string change will be in 6.1.1
-* Add more tests for different ways to specify client host argument
-* Do not duplicate connections in connection pool after rebuild (#591)
-* Ensure that the spec rake task is run as part of integration tests
-* Use constant to define Elasticsearch hosts and avoid yellow status when number of nodes is 1
-* Update handling of publish_address in _nodes/http response
-* Add another test for hostname/ipv6:port format
-
-
-[discrete]
-==== API
-
-* Added the `wait_for_active_shards` parameter to the "Indices Open" API
-* Added the "Indices Split" API
-* Added the `wait_for_no_initializing_shards` argument to the "Cluster Health" API
-* Added the "Cluster Remote Info" API
-* Remove the dependency on "turn"
-* Clear cluster transient settings in test setups
-* Use `YAML.load_documents` in the REST tests runner
-* Removed pinning dependency for Minitest
-* Replaced the testing framework from Test::Unit to Minites and improved test output
-* Added, that trace logs are printed when the `TRACE` environment variable is set
-* Removed the "turn" dependency from generated test_helper.rb
-* Update the "Delete By Query" API to support :slices
-* Speed up `Elasticsearch::API::Utils.__listify`
-* Speed up `Elasticsearch::API::Utils.__pathify`
-* Use "String#strip" and "String.empty?" in `Utils.__pathify`
-* Updated the inline documentation for using scripts in the "Update" API
-* Updated the "Scroll" API inline example with passing the scroll ID in the body
-* Marked the `percolate` method as deprecated and added an example for current percolator
-* Fixed, that `Utils.__report_unsupported_parameters` and `Utils.__report_unsupported_method` use `Kernel.warn` so they can be suppressed
-* Fixed the "greedy" regex in the `Utils.__rescue_from_not_found` method
-* Fixed the incorrect `create` method
-* Allow passing headers in `perform_request`
-* Set application/x-ndjson content type on Bulk and Msearch requests
-* Update the Reindex API to support :slices
-* Fixed and improved the YAML tests runner
-* Added the `include_type_name` parameter to APIs
-* Fixed the helper for unit tests
-* Removed the requirement for passing the `type` parameter to APIs
-* Removed dead code from the YAML tests runner
-* Fixed the `api:code:generate` Thor task
-* Add copy_settings as valid param to split API
-* Port api/actions tests to rspec (#543)
-* Update tests to not require type
-* Account for escape_utils not being available for JRuby
-* Add nodes/reload_secure_settings endpoint support (#546)
-* Add new params for search and msearch API
-* Retrieve stashed variable if referenced in test
-* Convert cat API tests to rspec
-* Convert cluster API tests to rspec
-* Convert indices tests to rspec
-* Fix documentation of #indices.analyze
-* Avoid instantiating an array of valid params for each request, each time it is called (#550)
-* Add headers to custom client documentation (#527)
-* Fix typos in README
-* Minor update to scroll documentation example
-* Convert snapshot, ingest, tasks, nodes api tests to rspec
-* Update source_includes and source_excludes params names for mget
-* Update source_includes and source_excludes params names for get, search, bulk, explain
-* Update source_includes and source_excludes params names for get_source
-* Mark _search endpoint as deprecated
-* Link to 6.0 documentation explicitly for _suggest deprecation
-* Update documentation for msearch
-* Update documentation for scroll_id to be in body of scroll endpoint
-* Remove reference to deprecated format option for _analyze endpoint
-* Correct endpoints used for get and put search template
-* Fix minor typo
-* Note that a non-empty body argument is required for the bulk api
-* Add note about empty body in yard documentation
-* Support if_primary_term param on index API
-* Delete test2 template in between tests in case a test does not clean up properly
-* Support ignore_throttled option on search API
-* Updates for types removal changes
-* Add missing update param
-* Add missing params to methods
-* Support if_primary_term param for delete
-* Delete an index and index template not cleaned up after in rest api tests
-* Update supported params for cat API endpoints
-* Update supported params for cluster API endpoints
-* Update supported params for indices API endpoints
-* Update supported params for ingest API endpoints
-* Update supported params for nodes API endpoints
-* Update supported params for snapshot API endpoints
-* Update missed node API endpoints
-* Update missed tasks API endpoints
-* Update top-level api endpoints
-* Adjust specs and code after test failures
-* Fix accidental overwrite of index code
-* Add missing param in cat/thread_pool
-* The type argument is not required in the index method
-* Delete 'nomatch' template to account for lack of test cleanup
-* Ensure that the :index param is supported for cat.segments
-* Ensure that the :name param is passed to the templates API
-
-[discrete]
-==== DSL
-
-* Add inner_hits option support for has_parent query
-* Add inner_hits option support for has_child query
-* Add inner_hits option support for has_parent filter
-* Add inner_hits option support for has_child filter
-* adds query support for nested queries in filter context (#531)
-* Convert aggregations/pipeline tests to rspec (#564)
-* Convert aggregations tests to rspec (#566)
-* Convert filters tests to rspec (#567)
-* Fix bug in applying no_match_filter to indices filter
-* Update test for current elasticsearch version
-* Fix integration tests for join field syntax
-* Update agg scripted metric test for deprecation in ES issue #29328
-* Fix script in update for #29328
-* minor: fix spacing
-* Convert queries tests to rspec (#569)
-* Add inner_hits test after cherry-picking rspec conversion
-* Remove tests already converted to rspec
-* spec directory structure should mirror code directory structure
-* Support query_string type option
-* Ensure that filters are registered when called on bool queries (#609)
-* Don't specify a type when creating mappings in tests
-
-
-[discrete]
-==== X-Pack
-
-* Embedded the source code for the `elasticsearch-xpack` Rubygem
-* Fixed the `setup` for YAML integration tests
-* Added missing X-Pack APIs
-* Improved the YAML integration test runner
-* Updated the Rakefile for running integration tests
-* Added, that password for Elasticsearch is generated
-* Fixed the Watcher example
-* Updated the README
-* Added gitignore for the `elasticsearch-xpack` Rubygem
-* Add ruby-prof as a development dependency
-* Handle multiple roles passed to get_role_mapping
-* Minor updates to xpack api methods (#586)
-* Support freeze and unfreeze APIs
-* Rewrite xpack rest api yaml test handler (#585)
-* Updates to take into account SSL settings
-* Fix mistake in testing version range so test can be skipped
-* Support set_upgrade_mode machine learning API
-* Support typed_keys and rest_total_hits_as_int params for rollup_search
-* Improve string output for xpack rest api tests
-* Fix logic in version checking
-* Support if_seq_no and if_primary_term in put_watch
-* Don't test execute_watch/60_http_input because of possible Docker issue
-* Support api key methods
-* Fix minor typo in test description
-* Fix issue with replacing argument value with an Integer value
-* Support transform_and_set in yaml tests
-* Skip two more tests
-* Run security tests against elasticsearch 7.0.0-rc2
-* Account for error when forecast_id is not provided and legacy path is used
-* Blacklist specific tests, not the whole file
-* Fix version check for skipping test
\ No newline at end of file
diff --git a/docs/release_notes/710.asciidoc b/docs/release_notes/710.asciidoc
deleted file mode 100644
index 9751bf2026..0000000000
--- a/docs/release_notes/710.asciidoc
+++ /dev/null
@@ -1,62 +0,0 @@
-[[release_notes_710]]
-=== 7.10 Release notes
-
-[discrete]
-[[release_notes_7101]]
-=== 7.10.1 Release notes
-[discrete]
-==== Client
-
-- Updates for connecting with Cloud.
-
-[discrete]
-[[release_notes_7100]]
-=== 7.10 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.10.0`.
-- Fixes a bug where the complete endpoint URL could end up with duplicate slashes `//`.
-- Fixes a bug where the complete endpoint URL built with a cloud id could end up with duplicate ports https://github.com/elastic/elasticsearch-ruby/issues/1081[#1081].
-
-[discrete]
-==== API
-
-- Fix in RubyDoc comments, some parameters were being duplicated.
-- Deprecation notice: Synced flush (`indices.flush_synced`) is deprecated and will be removed in 8.0. Use flush instead.
-
-[discrete]
-===== New API Endpoints
-
-- `snapshot.clone`
-
-
-[discrete]
-===== API Changes
-
-- `bulk`, `index`, `update`: new parameter `require_alias` (boolean): When true, requires destination to be an alias (default: false) for `index` and `update`. For `bulk` it sets `require_alias` for all incoming documents. Defaults to unset (false).
-
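-A minimal sketch of the new parameter, assuming an alias named `my-alias` already points at a write index (the alias name, document id and body below are hypothetical):
-
-[source,ruby]
-------------------------------------
-client.index(
-  index: 'my-alias',
-  id: 1,
-  body: { title: 'Test' },
-  require_alias: true # the request fails if 'my-alias' is not an alias
-)
-------------------------------------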
-
-[discrete]
-==== X-Pack
-
-Deprecation notice: `searchable_snapshots.repository_stats` is deprecated and is replaced by the Repositories Metering API.
-
-[discrete]
-===== New API Endpoints
-
-- `close_point_in_time`
-- `open_point_in_time`
-- `security.clear_api_key_cache`
-- `security.grant_api_key`
-
-[discrete]
-===== API Changes
-
-- `cat.ml_datafeeds`, `cat.ml_jobs`, `machine_learning.close_job`, `machine_learning.get_datafeed_stats`, `machine_learning.get_datafeeds`, `machine_learning.get_job_stats`, `machine_learning.get_jobs`, `machine_learning.get_overall_buckets`, `machine_learning.stop_datafeed`: new parameter `allow_no_match` (boolean): Whether to ignore if a wildcard expression matches no datafeeds (this includes `_all` string or when no datafeeds have been specified).
-- `machine_learning.get_data_frame_analytics`: new parameter `verbose` (boolean), whether the stats response should be verbose.
-- `machine_learning.get_trained_models`: new parameter `include` (string), a comma-separated list of fields to optionally include. Valid options are 'definition' and 'total_feature_importance'. Default is none.
-- `machine_learning.stop_datafeed`: endpoint now accepts a `body`: the URL params optionally sent in the body.
-- `security.get_role`, `security.get_role_mapping`: The name parameter is now a comma-separated list of role-mapping names.
-- `machine_learning.delete_trained_model`, `machine_learning.get_trained_models`, `machine_learning.get_trained_models_stats`, `machine_learning.put_trained_model`: Internal change, url changed from `_ml/inference` to `_ml/trained_models`.
diff --git a/docs/release_notes/711.asciidoc b/docs/release_notes/711.asciidoc
deleted file mode 100644
index 9df6f1ba1f..0000000000
--- a/docs/release_notes/711.asciidoc
+++ /dev/null
@@ -1,49 +0,0 @@
-[[release_notes_711]]
-=== 7.11 Release notes
-
-[discrete]
-[[release_notes_7112]]
-=== 7.11.2 Release notes
-
-[discrete]
-==== Client
-
-* Bug fix in meta header, fixes fail when http adapter library hasn't been loaded yet: https://github.com/elastic/elasticsearch-ruby/issues/1224[Issue].
-
-[discrete]
-[[release_notes_7111]]
-=== 7.11.1 Release notes
-
-[discrete]
-==== Client
-
-* Bug fix in meta header, adds support for unknown Faraday adapters. https://github.com/elastic/elasticsearch-ruby/pull/1204[Pull Request].
-
-[discrete]
-[[release_notes_7110]]
-=== 7.11.0 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.11.0`.
-- Fixes a bug with headers in our default Faraday class. https://github.com/elastic/elasticsearch-ruby/commit/9c4afc452467cc6344359b54b98bbe5af1469219[Commit].
-- Adds the `X-Elastic-Client-Meta` HTTP header which is used by Elastic Cloud and can be disabled with the `enable_meta_header` parameter set to `false`.
-
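-A minimal sketch of opting out of the meta header (the host below is hypothetical):
-
-[source,ruby]
-------------------------------------
-client = Elasticsearch::Client.new(
-  host: 'https://localhost:9200/',
-  enable_meta_header: false # do not send the x-elastic-client-meta header
-)
-------------------------------------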
-
-[discrete]
-==== API
-
-- `cat.tasks` - Parameter `node_id` changes name to `nodes`, a comma-separated list of node IDs or names. Parameter `parent_task` changes name to `parent_task_id`.
-- APIs that are no longer experimental: `cluster.delete_component_template`, `cluster.exists_component_template`, `cluster.get_component_template`, `cluster.put_component_template`, `indices.delete_index_template`, `indices.exists_index_template`, `indices.get_index_template`, `indices.put_index_template`, `indices.simulate_index_template`, `indices.simulate_template`.
-- Deprecation notice: The _upgrade API is no longer useful and will be removed. Instead, see `_reindex API`. Deprecated since version 8.0.0. Endpoints: `indices.get_upgrade`, `indices.upgrade`
-
-[discrete]
-==== X-Pack
-
-- New endpoints:`async_search.status`, `autoscaling.get_autoscaling_capacity` (experimental), `indices.migrate_to_data_stream`, `indices.promote_data_stream`, `machine_learning.upgrade_job_snapshot`, `rollup.rollup`, `watcher.query_watches`.
-- APIs that are no longer experimental: `eql.delete`, `eql.get`, `eql.search`.
-- APIs promoted from experimental to beta: `machine_learning.delete_data_frame_analytics`, `ml.delete_trained_model`, `machine_learning.evaluate_data_frame`, `machine_learning.explain_data_frame_analytics`, `machine_learning.get_data_frame_analytics`, `machine_learning.get_datafeed_stats`, `machine_learning.get_trained_models`, `machine_learning.get_trained_models_stats`, `machine_learning.put_data_frame_analytics`, `machine_learning.put_trained_model`, `machine_learning.start_data_frame_analytics`, `machine_learning.stop_data_frame_analytics`, `machine_learning.update_data_frame_analytics`
-- `indices.delete_data_stream`, `indices.get_data_stream` add parameter `expand_wildcards`, whether wildcard expressions should get expanded to open or closed indices (default: open). Options: open, closed, hidden, none, all.
-- `machine_learning.get_data_frame_analytics`, `machine_learning.get_datafeeds`, `machine_learning.get_jobs`, `machine_learning.get_trained_models`, `transform.get_transform` add parameter `exclude_generated` - omits fields that are illegal to set on PUT.
-- `data_frame_transform_deprecated.get_transform` (_data_frame/transforms/ is deprecated, use _transform/ in the future) adds parameter `exclude_generated` - omits generated files.
diff --git a/docs/release_notes/712.asciidoc b/docs/release_notes/712.asciidoc
deleted file mode 100644
index c23f8c560c..0000000000
--- a/docs/release_notes/712.asciidoc
+++ /dev/null
@@ -1,28 +0,0 @@
-[[release_notes_712]]
-=== 7.12 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version 7.12.0
-- Ruby 3 is now tested, it was added to the entire test suite.
-- New official documentation pages for configuration: https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/basic-config.html[Basic Configuration] and https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/advanced-config.html[Advanced Configuration].
-- Integration tests runner refactored to keep skipped tests in a yaml file.
-
-[discrete]
-==== API
-
-- New API namespace: `features` and endpoints `features.get_features` and `snapshot.get_features`.
-- `cat.plugins` adds parameter `include_bootstrap`: Include bootstrap plugins in the response.
-- Update in `indices.close` parameter `wait_for_active_shards`: Sets the number of active shards to wait for before the operation returns. Set to `index-setting` to wait according to the index setting `index.write.wait_for_active_shards`, or `all` to wait for all shards, or an integer. Defaults to `0`.
-- `actions.search` adds parameter `min_compatible_shard_node`: The minimum compatible version that all shards involved in search should have for this request to be successful.
-
-[discrete]
-==== X-Pack
-
-- New API namespace: `text_structure` and endpoints `text_structure.find_structure`.
-- New API namespace: `logstash` and endpoints `logstash.delete_pipeline`, `logstash.get_pipeline`, `logstash.put_pipeline`.
-- New API: `eql.get_status`.
-- APIs migrated from experimental to stable: `autoscaling.delete_autoscaling_policy`, `autoscaling.get_autoscaling_capacity`, `autoscaling.get_autoscaling_policy`, `autoscaling.put_autoscaling_policy`.
-- `searchable_snapshots.mount` adds parameter `storage`: Selects the kind of local storage used to accelerate searches. Experimental, and defaults to `full_copy`.
-- `searchable_snapshots.stats` adds parameter `level`: Return stats aggregated at cluster, index or shard level (options: cluster, indices, shards).
diff --git a/docs/release_notes/713.asciidoc b/docs/release_notes/713.asciidoc
deleted file mode 100644
index 47594bdd12..0000000000
--- a/docs/release_notes/713.asciidoc
+++ /dev/null
@@ -1,53 +0,0 @@
-[[release_notes_713]]
-=== 7.13 Release notes
-
-[discrete]
-[[release_notes_7133]]
-=== 7.13.3 Release notes
-
-- API Support for Elasticsearch version 7.13.3
-
-[discrete]
-[[release_notes_7132]]
-=== 7.13.2 Release notes
-
-- Mute release, yanked from RubyGems.
-
-[discrete]
-[[release_notes_7131]]
-=== 7.13.1 Release notes
-
-[discrete]
-==== Client
-- Fixes thread safety issue in `get_connection` - https://github.com/elastic/elasticsearch-ruby/pull/1325[Pull Request].
-
-[discrete]
-[[release_notes_7130]]
-=== 7.13.0 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version 7.13.0
-- Adds support for compatibility header for Elasticsearch. If the environment variable 'ELASTIC_CLIENT_APIVERSIONING' is set to `true` or `1`, the client will send the headers `Accept` and `Content-Type` with the following value: `application/vnd.elasticsearch+json;compatible-with=7`.
-- Better detection of Elasticsearch and Enterprise Search clients in the meta header used by cloud.
-
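-A minimal sketch of enabling compatibility mode before the client is instantiated (the host below is hypothetical):
-
-[source,ruby]
-------------------------------------
-ENV['ELASTIC_CLIENT_APIVERSIONING'] = 'true'
-
-client = Elasticsearch::Client.new(host: 'https://localhost:9200/')
-# Requests now send the Accept and Content-Type headers with the value
-# application/vnd.elasticsearch+json;compatible-with=7
-------------------------------------
-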
-[discrete]
-==== API
-
-- The REST API tests now use an artifact downloaded from the Elastic servers instead of depending on cloning `elasticsearch` locally. Check the README for more information.
-- New parameter `include_unloaded_segments` in `cat.nodes`, `nodes.stats`: If set to true, segment stats will include stats for segments that are not currently loaded into memory.
-- New parameter `summary` in `ingest.get_pipeline`: Return pipelines without their definitions (default: false).
-- New parameter `index_details` in `snapshot.get`: Whether to include details of each index in the snapshot, if those details are available. Defaults to false.
-- New endpoints: `features.reset_features`, `ingest.geo_ip_stats`.
-- New experimental endpoints: `shutdown.delete_node`, `shutdown.get_node`, `shutdown.put_node`.
-
-[discrete]
-==== X-Pack
-
-- Refactored test tasks, made it easier to run the tests by default.
-- New experimental endpoints: `fleet.global_checkpoints`, `searchable_snapshots.cache_stats`.
-- New beta endpoints: `security.clear_cached_service_tokens`, `security.create_service_token`, `security.delete_service_token`, `security.get_service_accounts`, `security.get_service_credentials`
-- New endpoints: `machine_learning.delete_trained_model_alias`, `machine_learning.preview_data_frame_analytics`, `machine_learning.put_trained_model_alias`.
-- APIs migrated from experimental or beta to stable: `machine_learning.delete_data_frame_analytics`, `machine_learning.delete_trained_model`, `machine_learning.estimate_model_memory`, `machine_learning.explain_data_frame_analytics`, `machine_learning.get_data_frame_analytics`, `machine_learning.get_data_frame_analytics_stats`, `machine_learning.get_trained_models`, `machine_learning.get_trained_models_stats`, `machine_learning.put_data_frame_analytics`, `machine_learning.put_trained_model`, `machine_learning.start_data_frame_analytics`, `machine_learning.stop_data_frame_analytics`, `machine_learning.update_data_frame_analytics`
-- New parameter `body` in `machine_learning.preview_datafeed`: The datafeed config and job config with which to execute the preview.
diff --git a/docs/release_notes/714.asciidoc b/docs/release_notes/714.asciidoc
deleted file mode 100644
index 5cdf6092cd..0000000000
--- a/docs/release_notes/714.asciidoc
+++ /dev/null
@@ -1,93 +0,0 @@
-[[release_notes_714]]
-=== 7.14 Release notes
-
-[discrete]
-[[release_notes_7141]]
-=== 7.14.1 Release notes
-
-[discrete]
-==== Client
-
-- Fixes for the Manticore implementation: Addresses custom headers on initialization (https://github.com/elastic/elasticsearch-ruby/commit/3732dd4f6de75365460fa99c1cd89668b107ef1c[3732dd4]) and fixes tracing (https://github.com/elastic/elasticsearch-ruby/commit/3c48ebd9a783988d1f71bfb9940459832ccd63e4[3c48ebd]). Related to https://github.com/elastic/elasticsearch-ruby/issues/1426[#1426] and https://github.com/elastic/elasticsearch-ruby/issues/1428[#1428].
-
-[discrete]
-[[release_notes_7140]]
-=== 7.14.0 Release notes
-
-[discrete]
-==== Client
-
-Added check that client is connected to an Elasticsearch cluster. If the client isn't connected to a supported Elasticsearch cluster the `UnsupportedProductError` exception will be raised.
-
-This release changes the way in which the transport layer and the client interact. Previously, when using `elasticsearch-transport`, `Elasticsearch::Transport::Client` had a convenient wrapper, so it could be used as `Elasticsearch::Client`. Now, we are decoupling the transport layer from the Elasticsearch client. If you're using the `elasticsearch` gem, not much will change. It will instantiate a new `Elasticsearch::Transport::Client` when you instantiate `Elasticsearch::Client` and the endpoints from `elasticsearch-api` will be available.
-
-`Elasticsearch::Client` has an `attr_accessor` for the transport instance:
-
-[source,ruby]
-------------------------------------
-> client = Elasticsearch::Client.new
-> client.transport.class
-=> Elasticsearch::Transport::Client
-> client.transport.transport.class
-=> Elasticsearch::Transport::Transport::HTTP::Faraday
-------------------------------------
-
-The interaction with `elasticsearch-api` remains unchanged. You can use the API endpoints just like before:
-
-[source,ruby]
-------------------------------------
-> client.info
-=> {"name"=>"instance",
- "cluster_name"=>"elasticsearch",
- "cluster_uuid"=>"id",
- "version"=>
- {"number"=>"7.14.0",
- ...
-},
- "tagline"=>"You Know, for Search"}
-------------------------------------
-
-Or perform a request directly from the client, which will return an `Elasticsearch::Transport::Response` object:
-
-[source,ruby]
-------------------------------------
-> client.perform_request('GET', '/')
-# This is the same as doing client.transport.perform_request('GET', '/')
-=> #"instance",
- "cluster_name"=>"elasticsearch",
- "cluster_uuid"=>"id",
- "version"=>
- {"number"=>"7.14.0-SNAPSHOT",
- ...
- },
- "tagline"=>"You Know, for Search"},
- @headers=
- {"content-type"=>"application/json; charset=UTF-8",
- "content-length"=>"571",
- ...
- },
- @status=200>
-------------------------------------
-
-If you have any problems, please report them in https://github.com/elastic/elasticsearch-ruby/issues/1344[this issue].
-
-[discrete]
-==== API
-
-Code is now generated from Elastic artifacts instead of checked out code of Elasticsearch. See https://github.com/elastic/elasticsearch-ruby/blob/7.14/elasticsearch-api/utils/README.md#generate[the Generator README] for more info.
-
-- Endpoints `msearch`, `msearch_template` and `search_template` remove `query_and_fetch` and `dfs_query_and_fetch` options from the `search_type` parameter.
-- New parameter `include_repository` in `snapshot.get`: (boolean) Whether to include the repository name in the snapshot info. Defaults to true.
-
-[discrete]
-==== X-Pack
-
-X-Pack is being deprecated. The first time using `xpack` on the client, a warning will be triggered. Please check https://github.com/elastic/elasticsearch-ruby/issues/1274[this issue] for more information.
-
-
-- New endpoints: `index_lifecycle_management.migrate_to_data_tiers`, `machine_learning.reset_job`, `security.saml_authenticate`, `security.saml_complete_logout`, `security.saml_invalidate`, `security.saml_logout`, `security.saml_prepare_authentication`, `security.saml_service_provider_metadata`, `sql.delete_async`, `sql.get_async`, `sql.get_async_status`, `terms_enum`.
-- New experimental endpoints: `machine_learning.infer_trained_model_deployment`, `machine_learning.start_trained_model_deployment`, `machine_learning.stop_trained_model_deployment`.
-- Deprecation: `indices.freeze` and `indices.unfreeze`: Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release.
-
diff --git a/docs/release_notes/715.asciidoc b/docs/release_notes/715.asciidoc
deleted file mode 100644
index 5962394afe..0000000000
--- a/docs/release_notes/715.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-[[release_notes_715]]
-=== 7.15 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch v7.15.0 APIs.
-- We've tested and added documentation on best practices for leveraging the client in a Function-as-a-Service (FaaS) environment to the https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/connecting.html#client-faas[official docs].
-
-[discrete]
-==== API
-- New experimental endpoints: `indices.disk_usage`, `indices.field_usage_stats`, `nodes.clear_repositories_metering_archive`, `get_repositories_metering_info`, https://www.elastic.co/guide/en/elasticsearch/reference/7.15/search-vector-tile-api.html[`search_mvt`].
-- The `index` parameter is now required for `open_point_in_time`.
-- The `index_metric` parameter in `nodes.stats` adds the `shards` option.
-
-[discrete]
-==== X-Pack
-
-- New parameters for `ml.put_job`: `ignore_unavailable`, `allow_no_indices`, `ignore_throttled`, `expand_wildcards`.
-- New endpoint: https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-api-query-api-key.html[`security.query_api_keys`].
diff --git a/docs/release_notes/716.asciidoc b/docs/release_notes/716.asciidoc
deleted file mode 100644
index 11c1b68ad7..0000000000
--- a/docs/release_notes/716.asciidoc
+++ /dev/null
@@ -1,96 +0,0 @@
-[[release_notes_716]]
-=== 7.16 Release notes
-
-[discrete]
-[[release_notes_7163]]
-=== 7.16.3 Release notes
-
-==== API
-
-Bugfix for https://github.com/elastic/elasticsearch-ruby/issues/1475[#1475], an issue where if you indexed a document with an id such as `an id`, it would get escaped to `an+id` instead of `an%20id` when using `index` or `create`. This would result in the document id being `an+id` instead of the intended value `an id`.
-
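-For illustration, indexing a document whose id contains a space (the index name and body below are hypothetical) now encodes the id as `an%20id` in the request path:
-
-[source,ruby]
-------------------------------------
-client.index(index: 'my-index', id: 'an id', body: { title: 'Test' })
-# => PUT /my-index/_doc/an%20id
-------------------------------------
-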
-[discrete]
-[[release_notes_7162]]
-=== 7.16.2 Release notes
-
-No release.
-
-[discrete]
-[[release_notes_7161]]
-=== 7.16.1 Release notes
-
-Patch release corresponding with Elastic Stack version 7.16.1 that addresses the Apache Log4j2 vulnerability, https://discuss.elastic.co/t/apache-log4j2-remote-code-execution-rce-vulnerability-cve-2021-44228-esa-2021-31/291476[more information].
-
-==== Client
-
-The only changes in the client since 7.16.0 are a few minor updates for the https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current/connecting.html#client-comp[Compatibility mode with 8.0]. We added the compatibility header in `7.13.0`, but now we have integration tests and compatibility tests for version `7.x` of the client with Elasticsearch `8.0`.
-
-[discrete]
-[[release_notes_7160]]
-=== 7.16.0 Release notes
-
-[discrete]
-==== Client
-
-- Adds the `delay_on_retry` parameter, a value in milliseconds to wait between each failed connection, thanks https://github.com/DinoPullerUqido[DinoPullerUqido]! https://github.com/elastic/elasticsearch-ruby/pull/1521[Pull Request] and https://github.com/elastic/elasticsearch-ruby/pull/1523[backport].
-- Adds *CA fingerprinting*. You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `ca_fingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. The verification will be run once per connection. Code example:
-
-[source,ruby]
-------------------------------------
-ca_fingerprint = '64F2593F...'
-client = Elasticsearch::Client.new(
- host: 'https://elastic:changeme@localhost:9200/',
- transport_options: { ssl: { verify: false } },
- ca_fingerprint: ca_fingerprint
-)
-------------------------------------
-
-- Fixes compression. When `compression` is set to `true`, the client will now gzip the request body properly and use the appropriate headers. Thanks https://github.com/johnnyshields[johnnyshields]! https://github.com/elastic/elasticsearch-ruby/pull/1478[Pull Request] and https://github.com/elastic/elasticsearch-ruby/pull/1526[backport].
-- Warnings emitted by Elasticsearch are now logged via `log_warn` through the Loggable interface in the client, instead of using `Kernel.warn`. https://github.com/elastic/elasticsearch-ruby/pull/1517[Pull Request].
-
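-A minimal sketch combining the retry delay and compression options described above (the host and values below are hypothetical; `delay_on_retry` is used together with `retry_on_failure`):
-
-[source,ruby]
-------------------------------------
-client = Elasticsearch::Client.new(
-  host: 'https://elastic:changeme@localhost:9200/',
-  retry_on_failure: 3,   # retry failed connections up to 3 times
-  delay_on_retry: 2_000, # wait 2000 ms between retries
-  compression: true      # gzip request bodies and send the matching headers
-)
-------------------------------------
-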
-[discrete]
-==== API
-
-- Cleaned up some deprecated code.
-- `count` - The API is documented as using `GET`, but it supports both GET and POST on the Elasticsearch side. So it was updated to only use `POST` when there's a body present, or else use `GET`. Elasticsearch would still accept a body with `GET`, but to be more semantically correct in the clients we use `POST` when there's a body.
-- `delete_index_template` was updated to support the `ignore_404` parameter to ignore 404 errors when attempting to delete a non-existing template.
-- `ingest.put_pipeline` adds new parameter `if_version`: Required version for optimistic concurrency control for pipeline updates.
-- `ml.put_trained_model`: adds new parameter `defer_definition_decompression`: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations.
-- `nodes.hot_threads` adds new parameter `sort`: The sort order for 'cpu' type (default: total) (options: cpu, total).
-- `open_point_in_time`: `keep_alive` is now a required parameter.
-- `search_mvt`: adds new parameter `track_total_hits`: Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number.
-- `transform.preview_transform`: adds new parameter `transform_id`. Body is now optional and the API will use `GET` or `POST` depending on the presence of a body.
-
-*APIs promoted from experimental to stable since last version:*
-
-- `fleet.global_checkpoints`
-- `get_script_context`
-- `get_script_language`
-- `indices.resolve_index`
-- `monitoring.bulk`
-- `rank_eval`
-- `searchable_snapshots.mount`
-- `searchable_snapshots.stats`
-- `security.clear_cached_service_tokens`
-- `security.create_service_token`
-- `security.delete_service_token`
-- `security.get_service_accounts`
-- `security.get_service_credentials`
-- `shutdown.delete_node`
-- `shutdown.get_node`
-- `shutdown.put_node`
-- `terms_enum`
-
-*New APIs*
-
-- `fleet.msearch`
-- `fleet.search`
-- `indices.modify_data_stream`
-- `ml.infer_trained_model_deployment`
-- `ml.start_trained_model_deployment`
-- `ml.stop_trained_model_deployment`
-- `migration.get_feature_upgrade_status`
-- `migration.post_feature_upgrade_status`
-- `security.enroll_kibana`
-- `security.enroll_node`
-- `transform.upgrade_transforms`
diff --git a/docs/release_notes/717.asciidoc b/docs/release_notes/717.asciidoc
deleted file mode 100644
index 4413dd225a..0000000000
--- a/docs/release_notes/717.asciidoc
+++ /dev/null
@@ -1,29 +0,0 @@
-[[release_notes_717]]
-=== 7.17 Release notes
-
-[discrete]
-[[release_notes_7177]]
-=== 7.17.7 Release notes
-
-- Compatibility with Elasticsearch v7.17.7 APIs.
-- Tested versions of Ruby for 7.17.7: Ruby (MRI) 2.6, 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-[[release_notes_7172]]
-=== 7.17.2, 7.17.3, 7.17.4, 7.17.5, 7.17.6 Release notes
-
-No release.
-
-[discrete]
-[[release_notes_7171]]
-=== 7.17.1 Release notes
-
-- Improves handling of YAML parsing, uses `safe_load` instead of `load` when doing the product verification (should only affect Ruby < 3.0).
-- Updates headers setup when using the Manticore adapter. This fixes an issue where the user-agent header was being overridden even when it was being set on initialization via the transport options. https://github.com/elastic/elasticsearch-ruby/pull/1685[Pull Request], https://github.com/elastic/elasticsearch-ruby/issues/1684[issue].
-
-[discrete]
-[[release_notes_7170]]
-=== 7.17.0 Release notes
-
-- Drops Ruby 2.5 from the test matrix. Support for Ruby 2.5 was dropped in March 2021.
-- Updates the product verification when the response is a `413` error.
diff --git a/docs/release_notes/75.asciidoc b/docs/release_notes/75.asciidoc
deleted file mode 100644
index ac874d0cfb..0000000000
--- a/docs/release_notes/75.asciidoc
+++ /dev/null
@@ -1,59 +0,0 @@
-[[release_notes_75]]
-=== 7.5 Release notes
-
-- Support for Elasticsearch 7.5.
-- Update API spec generator: The code for Elasticsearch OSS and X-Pack APIs is being generated from the rest api spec.
-- Specs have been updated to address new/deprecated parameters.
-- Ruby versions tested: 2.3.8, 2.4.9, 2.5.7, 2.6.5 and 2.7.0 (new).
-
-
-[discrete]
-==== API
-
-Endpoints that changed:
-
-- `_bulk`: body is now required as an argument.
-- `cat`: `local` and `master_timeout` parameters are gone.
- - `health`: New parameter `health`.
- - `indices`: Adds `time` and `include_unloaded_segments` parameters.
- - `nodes`: Adds `bytes`, `time` parameters.
- - `pending_tasks`: Adds `time` parameter.
- - `recovery`: Adds `active_only`, `detailed`, `index`, `time` parameters.
- - `segments`: Removes `index` parameter and it's now a url part.
- - `shards`: Adds `time` parameter.
- - `snapshots`: Adds `time` parameter.
- - `tasks`: Adds `time` parameter.
- - `templates`: The `name` parameter is now passed in as a part, not as a parameter.
- - `thread_pool`: The `thread_pool_patterns` parameter is now passed in as a part, not as a parameter.
-- `cluster`
- - `put_settings`: body is required.
- - `state`: `index_templates` is gone.
- - `node_id` is now a url part.
-- `delete` - `parent` parameter is gone.
-- `delete_by_query`: `analyzer` parameters are gone, `max_docs` is a new parameter, `body` is now a required parameter.
-- `delete_by_query_rethrottle` new endpoint.
-- `delete_by_rethrottle` - uses `delete_by_query_rethrottle` and hasn't changed.
-- `exists`, `exists_source`, `explain`: `parent` parameter is gone.
-- `field_caps`: `fields` param is no longer required.
-- `get`: `parent` parameter is gone
-- `get_source`: `parent` parameter is gone
-- `index`: `body` parameter is required, `wait_for_shard` is a new parameter, `consistency`, `include_type_name`, `parent`, `percolate`, `replication`, `timestamp`, `ttl` parameters are gone
-- `indices`
- - `get`: the `feature` parameter was deprecated and is gone.
- - `delete_aliases`, `put_alias`: URL changed internally to 'aliases' instead of 'alias' but shouldn't affect the client's API.
-- `render_search_template`: `id` is now a part not a parameter
-- `search`: `fielddata_fields`, `include_type_name`, `fields`, `ignore_indices`, `lowercase_expanded_terms`, `query_cache`, `source` parameters are gone, `ccs_minimize_roundtrips`, `track_scores` are new parameters.
-- `tasks` - `list`: task_id is not supported anymore, it's in get now.
-- `termvectors`: `parent` parameter is gone.
-- `update`: `version` parameter is not supported anymore.
-
-
-[discrete]
-==== X-Pack
-
-Some urls changed internally to remove `_xpack`, but it shouldn't affect the client's API.
-
-- `explore`: `index` is now required.
-- `info`: `human` parameter is gone.
-- `migration`: some endpoints are gone: `get_assistance`, `get_assistance_test` and `upgrade_test`.
-- `watcher`: `restart` endpoint is gone.
diff --git a/docs/release_notes/76.asciidoc b/docs/release_notes/76.asciidoc
deleted file mode 100644
index bd546df82f..0000000000
--- a/docs/release_notes/76.asciidoc
+++ /dev/null
@@ -1,86 +0,0 @@
-[[release_notes_76]]
-=== 7.6 Release notes
-
-
-[discrete]
-==== Client
-
-* Support for Elasticsearch version 7.6.
-* Last release supporting Ruby 2.4. Ruby 2.4 has reached its end of life and no more security updates will be provided; users are advised to update to a newer version of Ruby.
-
-
-[discrete]
-===== API Key Support
-
-The client now supports API Key Authentication, check "Authentication" on the https://github.com/elastic/elasticsearch-ruby/tree/7.x/elasticsearch-transport#authentication[transport README] for information on how to use it.
-
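-A minimal sketch, assuming an API key id/secret pair already created through the Elasticsearch security APIs (the host and credential values below are placeholders):
-
-[source,ruby]
-------------------------------------
-client = Elasticsearch::Client.new(
-  host: 'https://localhost:9200/',
-  api_key: { id: 'api-key-id', api_key: 'api-key-secret' }
-)
-# A Base64-encoded "id:api_key" string can also be passed directly:
-# api_key: 'BASE64_ENCODED_CREDENTIALS'
-------------------------------------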
-
-[discrete]
-===== X-Opaque-Id Support
-
-The client now supports identifying running tasks with X-Opaque-Id. Check https://github.com/elastic/elasticsearch-ruby/tree/7.x/elasticsearch-transport#identifying-running-tasks-with-x-opaque-id[transport README] for information on how to use X-Opaque-Id.
-
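-A minimal sketch of tagging a request with an opaque id (the index and id values below are hypothetical):
-
-[source,ruby]
-------------------------------------
-client = Elasticsearch::Client.new(host: 'https://localhost:9200/')
-client.search(index: 'my-index', q: 'title:test', opaque_id: 'task-123')
-# Elasticsearch echoes the value back in the X-Opaque-Id response header,
-# which helps correlate tasks and slow logs with this request.
-------------------------------------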
-
-[discrete]
-===== Faraday migrated to 1.0
-
-We're now using version 1.0 of Faraday:
-
-* The client initializer was modified but this should not disrupt final users at all, check this commit for more information.
-* Migrated error checking to remove the deprecated Faraday::Error namespace.
-* *This change is not compatible with https://github.com/typhoeus/typhoeus[Typhoeus]*. The latest release is 1.3.1, but it's https://github.com/typhoeus/typhoeus/blob/v1.3.1/lib/typhoeus/adapters/faraday.rb#L100[still using the deprecated `Faraday::Error` namespace]. This has been fixed on master, but the last release was November 6, 2018. Version 1.4.0 should be ok once it's released.
-* Note: Faraday 1.0 drops official support for JRuby. It installs fine on the tests we run with JRuby in this repo, but it's something we should pay attention to.
-
-Reference: https://github.com/lostisland/faraday/blob/master/UPGRADING.md[Upgrading - Faraday 1.0]
-
-https://github.com/elastic/elasticsearch-ruby/pull/808[Pull Request]
-
-
-[discrete]
-==== API
-
-
-[discrete]
-===== API Changes:
-
-- `cat.indices`: argument `bytes` options were: `b,k,m,g` and are now `b,k,kb,m,mb,g,gb,t,tb,p,pb`.
-- `delete_by_query`: New parameter `analyzer` - The analyzer to use for the query string.
-- `indices.put_template`: Removed parameters: `timeout`, `flat_settings`.
-- `msearch_template`: New Parameter `ccs_minimize_roundtrips` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution.
-- `rank_eval`: New parameter `search_type` - Search operation type (options: `query_then_fetch,dfs_query_then_fetch`).
-- `search_template`: New parameter `ccs_minimize_roundtrips` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution.
-
-
-[discrete]
-===== New API endpoints:
-
-- `get_script_context`
-- `get_script_languages`
-
-
-[discrete]
-===== Warnings:
-
-Synced flush is deprecated and will be removed in 8.0.
-
-
-[discrete]
-==== X-Pack
-
-
-[discrete]
-===== New API endpoints:
-
-- `ml/delete_trained_model`
-- `ml/explain_data_frame_analytics`
-- `ml/get_trained_models`
-- `ml/get_trained_models_stats`
-- `ml/put_trained_model`
-
-
-[discrete]
-===== API changes:
-
-- `license/get`: Added parameter `accept_enterprise`.
-- `ml/delete_data_frame_analytics` Added parameter `force`.
-- `monitoring/bulk` - Removed parameter `system_version`.
\ No newline at end of file
diff --git a/docs/release_notes/77.asciidoc b/docs/release_notes/77.asciidoc
deleted file mode 100644
index 87f5e1bdbb..0000000000
--- a/docs/release_notes/77.asciidoc
+++ /dev/null
@@ -1,77 +0,0 @@
-[[release_notes_77]]
-=== 7.7 Release notes
-
-This version drops support for Ruby 2.4 since it has reached its end of life.
-
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.7`
-
-
-[discrete]
-===== Custom Headers
-
-You can set custom HTTP headers on the client's initializer or pass them as a parameter to any API endpoint. https://github.com/elastic/elasticsearch-ruby/tree/7.x/elasticsearch-transport#custom-http-headers[More info and code examples].
-
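-A minimal sketch of both approaches (the header names and values below are hypothetical):
-
-[source,ruby]
-------------------------------------
-# On the initializer, sent with every request:
-client = Elasticsearch::Client.new(
-  host: 'https://localhost:9200/',
-  transport_options: { headers: { 'X-App-Name' => 'my-app' } }
-)
-
-# Or per request, on any API endpoint:
-client.search(index: 'my-index', headers: { 'X-Request-Source' => 'cron' })
-------------------------------------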
-
-[discrete]
-==== API
-
-
-[discrete]
-===== API Changes
-
-- Cleanup: Removes some deprecated endpoints: `abort_benchmark`, `benchmark`, `delete_by_rethrottle`, `nodes.shutdown`, `remote.info`.
-- `expand_wildcards` Whether to expand wildcard expressions to concrete indices that are open, closed or both. Options: open, closed, hidden, none, all. `hidden` option is new. It was also added to the following endpoints: `cat.aliases`, `cat.indices`.
-- `delete_by_query`: Parameter `slices` can now be set to `auto`.
-- `reindex`: Parameter `slices` can now be set to `auto`.
-- `update_by_query`: Parameter `slices` can now be set to `auto`.
-- `snapshot.cleanup_repository`: Parameter `body` is removed.
-
-
-[discrete]
-===== New API Endpoints
-
-- `cluster.delete_component_template`
-- `cluster.get_component_template`
-- `cluster.put_component_template`
-- `indices.create_data_stream` (experimental)
-- `indices.delete_data_stream` (experimental)
-- `indices.get_data_stream` (experimental)
-
-
-[discrete]
-==== X-Pack
-
-
-[discrete]
-===== API Changes
-
-- `machine_learning.get_trained_models`: New parameter `tags`
-- `machine_learning.put_datafeed`, `machine_learning.update_datafeed`: Added parameters `ignore_unavailable`, `allow_no_indices`, `ignore_throttled`, `expand_wildcards`
-- `reload_secure_settings`: New parameter `body`, an object containing the password for the keystore.
-
-
-[discrete]
-===== New API Endpoints
-
-- `async_search.delete`
-- `async_search.get`
-- `async_search.submit`
-- `cat.ml_data_frame_analytics`
-- `cat.ml_datafeeds`
-- `cat.ml_jobs`
-- `cat.ml_trained_models`
-- `cat.transform`
-- `cat.transforms`
-- `machine_learning.estimate_model_memory`
-- `transform.delete_transform`
-- `transform.get_transform`
-- `transform.get_transform_stats`
-- `transform.preview_transform`
-- `transform.put_transform`
-- `transform.start_transform`
-- `transform.stop_transform`
-- `transform.update_transform`
\ No newline at end of file
diff --git a/docs/release_notes/78.asciidoc b/docs/release_notes/78.asciidoc
deleted file mode 100644
index a5d59e985e..0000000000
--- a/docs/release_notes/78.asciidoc
+++ /dev/null
@@ -1,103 +0,0 @@
-[[release_notes_78]]
-=== 7.8 Release notes
-
-[discrete]
-[[release_notes_781]]
-=== 7.8.1 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.8.1`.
-- Bug fix: Fixed a bug on the API endpoints documentation for RubyDocs: there was an unnecessary empty new line in the documentation for parameters that have options. So the parameters before that empty newline were not being documented in RubyDocs.
-
-
-[discrete]
-==== X-Pack
-
-
-[discrete]
-===== API Changes
-
-- Update to `info` endpoint. New parameter `accept_enterprise` (boolean): If an enterprise license is installed, return the type and mode as 'enterprise' (default: false).
-
-
-[discrete]
-[[release_notes_780]]
-=== 7.8.0 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.8`.
-- Surface deprecation headers from Elasticsearch. When there's a `warning` response header in Elasticsearch's response, the client will emit a warning with `warn`.
-- Typhoeus is supported again (version 1.4+) and has been added back to the docs.
-- Adds documentation and example for integrating with Elastic APM.
-
-
-[discrete]
-==== API
-
-
-[discrete]
-===== New API Endpoints
-
-- `abort_benchmark`
-- `benchmark`
-- `cluster.delete_voting_config_exclusions`
-- `cluster.post_voting_config_exclusions`
-- `delete_by_rethrottle`
-- `nodes.shutdown`
-- `remote.info`
-
-Experimental endpoints:
-
-- `cluster.delete_component_template`
-- `cluster.exists_component_template`
-- `cluster.get_component_template`
-- `cluster.put_component_template`
-
-- `indices.delete_index_template`
-- `indices.exists_index_template`
-- `indices.get_index_template`
-- `indices.put_index_template`
-- `indices.simulate_index_template`
-
-
-[discrete]
-===== API Changes
-
-- `cat/thread_pool`: `size` is deprecated.
-- `indices.get_data_streams`: `name` is now a string instead of list, the name or wildcard expression of the requested data streams.
-- `indices.put_index_template`: new parameter: `cause` (string), user defined reason for creating/updating the index template.
-- `indices.simulate_index_template`: Two new parameters: `create`, whether the index template optionally defined in the body should only be dry-run added if it is new, or whether it can also replace an existing one; `cause`, a user-defined reason for dry-run creating the new template for simulation purposes.
-- `snapshot.delete_repository`: New parameter `repository`, name of the snapshot repository, wildcard (`*`) patterns are now supported.
-- `task.cancel`: new parameter `wait_for_completion` (boolean) Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false.
-
-
-[discrete]
-==== X-Pack
-
-
-[discrete]
-===== New API Endpoints
-
-New namespace: `indices`
-
-- `indices.freeze`
-- `indices.reload_search_analyzers`
-- `indices.unfreeze`
-
-New namespace: `searchable_snapshots`
-
-- `clear_cache`
-- `mount`
-- `repository_stats`
-- `stats`
-
-
-[discrete]
-===== API Changes
-
-- `machine_learning.delete_expired_data` new param `body`: deleting expired data parameters.
-- `machine_learning.delete_data_frame_analytics` new param `timeout`: controls the time to wait until a job is deleted. Defaults to 1 minute.
diff --git a/docs/release_notes/79.asciidoc b/docs/release_notes/79.asciidoc
deleted file mode 100644
index cd780239fb..0000000000
--- a/docs/release_notes/79.asciidoc
+++ /dev/null
@@ -1,60 +0,0 @@
-[[release_notes_79]]
-=== 7.9 Release notes
-
-[discrete]
-==== Client
-
-- Support for Elasticsearch version `7.9.0`.
-- Transport/Connection: Considers attributes values for equality - https://github.com/elastic/elasticsearch-ruby/commit/06ffd03bf51f5f33a0d87e9914e66b39357d40af[Commit].
-- When an API endpoint accepts both `GET` and `POST`, the client will always use `POST` when a request body is present.
-
-[discrete]
-==== API
-
-- Documentation for API endpoints will point out when an API is experimental, beta or unstable.
-
-[discrete]
-===== New API Endpoints
-
-- New namespace: `dangling_indices`
-- `dangling_indices.delete_dangling_index`
-- `dangling_indices.import_dangling_index`
-- `dangling_indices.list_dangling_indices`
-- `indices.add_block`
-
-Experimental endpoints:
-- `indices.resolve_index`
-- `simulate_template`
-
-[discrete]
-===== API Changes
-
-- `field_caps`: adds body parameter allowing to filter indices if `index_filter` is provided.
-- `eql.search`: new parameters `wait_for_completion`, `keep_on_completion` and `keep_alive`.
-- `info`: New parameter `accept_enterprise`: If an enterprise license is installed, return the type and mode as 'enterprise' (default: false).
-- `indices.put_mapping`: new parameter `write_index_only`.
-
-[discrete]
-==== X-Pack
-
-[discrete]
-===== New API Endpoints
-
-The Ruby client now supports all the X-Pack API endpoints.
-
-- New namespace `autoscaling`: `autoscaling.delete_autoscaling_policy`, `autoscaling.get_autoscaling_decision`, `autoscaling.get_autoscaling_policy`, `autoscaling.put_autoscaling_policy`
-- New namespace `enrich`: `enrich.delete_policy`, `enrich.execute_policy`, `enrich.get_policy`, `enrich.put_policy`, `enrich.stats`
-- New namespace `eql`: `eql.delete`, `eql.get`, `eql.search`
-- New namespace `cross_cluster_replication`: `cross_cluster_replication.delete_auto_follow_pattern`, `cross_cluster_replication.follow`, `cross_cluster_replication.follow_info`, `cross_cluster_replication.follow_stats`, `cross_cluster_replication.forget_follower`, `cross_cluster_replication.get_auto_follow_pattern`, `cross_cluster_replication.pause_auto_follow_pattern`, `cross_cluster_replication.pause_follow`, `cross_cluster_replication.put_auto_follow_pattern`, `cross_cluster_replication.resume_auto_follow_pattern`, `cross_cluster_replication.resume_follow`, `cross_cluster_replication.stats`, `cross_cluster_replication.unfollow`
-- New namespace `snapshot_lifecycle_management`: `snapshot_lifecycle_management.delete_lifecycle`, `snapshot_lifecycle_management.execute_lifecycle`, `snapshot_lifecycle_management.execute_retention`, `snapshot_lifecycle_management.get_lifecycle`, `snapshot_lifecycle_management.get_stats`, `snapshot_lifecycle_management.get_status`, `snapshot_lifecycle_management.put_lifecycle`, `snapshot_lifecycle_management.start`, `snapshot_lifecycle_management.stop`
-- `indices.create_data_stream`
-- `indices.data_streams_stats`
-- `indices.delete_data_stream`
-- `indices.get_data_stream`
-- `security.clear_cached_privileges`
-- `machine_learning.update_data_frame_analytics`
-
-[discrete]
-===== API Changes
-
-- `machine_learning.delete_expired_data`: new parameters `job_id`, `requests_per_second` and `timeout`
\ No newline at end of file
diff --git a/docs/release_notes/80.asciidoc b/docs/release_notes/80.asciidoc
deleted file mode 100644
index c97e91cc7b..0000000000
--- a/docs/release_notes/80.asciidoc
+++ /dev/null
@@ -1,74 +0,0 @@
-[[release_notes_80]]
-=== 8.0 Release notes
-
-[discrete]
-[[release_notes_801]]
-=== 8.0.1 Release notes
-
-- Fixes an issue with the generated API code. When updating the code generator for 8.x, the order of `arguments.clone` in the generated code was changed. This would make it so that we would modify the parameters passed in before cloning them, which is undesired. Issue: https://github.com/elastic/elasticsearch-ruby/issues/1727[#1727].
-
-[discrete]
-[[release_notes_800]]
-=== 8.0.0 Release notes
-
-First release for the 8.x branch with a few major changes.
-
-- Tested versions of Ruby for 8.0.0: Ruby (MRI) 2.6, 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== Client
-
-The code for the dependency `elasticsearch-transport` has been promoted to https://github.com/elastic/elastic-transport-ruby[its own repository] and the project and gem have been renamed to https://rubygems.org/gems/elastic-transport[`elastic-transport`]. This gem now powers https://rubygems.org/gems/elasticsearch[`elasticsearch`] and https://rubygems.org/gems/elastic-enterprise-search[`elastic-enterprise-search`]. The `elasticsearch-transport` gem won't be maintained after the last release in the `7.x` branch, in favour of `elastic-transport`.
-
-This will allow us to better address maintenance in both clients and the library itself.
-
-[discrete]
-==== API
-
-The `elasticsearch-api` library has been generated based on the {es} 8.0.0 REST specification.
-
-[discrete]
-==== X-Pack Deprecation
-
-X-Pack has been deprecated. The `elasticsearch-xpack` gem will no longer be maintained after the last release in the `7.x` branch. The "X-Pack" integration library codebase was merged into `elasticsearch-api`, so all of its functionality is available from `elasticsearch-api`. The `xpack` namespace has been removed, except for the `_xpack` (`client.xpack.info`) and `_xpack/usage` (`client.xpack.usage`) APIs. APIs that were previously available through the `xpack` namespace, e.g. `client.xpack.machine_learning`, are now only available directly: `client.machine_learning`.
-
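-For illustration, a hypothetical machine learning call before and after the namespace change:
-
-```ruby
-# elasticsearch-xpack on 7.x:
-# client.xpack.machine_learning.get_jobs
-
-# elasticsearch-api on 8.x:
-client.machine_learning.get_jobs
-```
-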
-[discrete]
-==== Parameter checking was removed
-
-The code in `elasticsearch-api` will no longer validate all the parameters sent. It will only validate the required parameters such as those needed to build the path for the request. But other API parameters are going to be validated by {es}. This provides better forwards and backwards compatibility in the client.
-
-[discrete]
-==== Response object
-
-In previous versions of the client, calling an API endpoint would return the JSON body of the response. With `8.0`, we are returning a new Response object `Elasticsearch::API::Response`. It still behaves like a Hash to maintain backwards compatibility, but adds the `status` and `headers` methods from the `Elastic::Transport::Transport::Response` object:
-
-```ruby
-elastic_ruby(main)> response = client.info
-=> #"instance",
- "cluster_name"=>"elasticsearch-8-0-0-SNAPSHOT-rest-test",
- "cluster_uuid"=>"oIfRARuYRGuVYybjxQJ87w",
- "version"=>
- {"number"=>"8.0.0-SNAPSHOT",
- "build_flavor"=>"default",
- "build_type"=>"docker",
- "build_hash"=>"7e23c54eb31cc101d1a4811b9ab9c4fd33ed6a8d",
- "build_date"=>"2021-11-04T00:21:32.464485627Z",
- "build_snapshot"=>true,
- "lucene_version"=>"9.0.0",
- "minimum_wire_compatibility_version"=>"7.16.0",
- "minimum_index_compatibility_version"=>"7.0.0"},
- "tagline"=>"You Know, for Search"},
- @headers={"X-elastic-product"=>"Elasticsearch", "content-type"=>"application/json", "content-length"=>"567"},
- @status=200>>
-elastic_ruby(main)> response.status
-=> 200
-elastic_ruby(main)> response.headers
-=> {"X-elastic-product"=>"Elasticsearch", "content-type"=>"application/json", "content-length"=>"567"}
-elastic_ruby(main)> response['name']
-=> "instance"
-elastic_ruby(main)> response['tagline']
-=> "You Know, for Search"
-```
-
-Please https://github.com/elastic/elasticsearch-ruby/issues[let us know if you find any issues].
diff --git a/docs/release_notes/81.asciidoc b/docs/release_notes/81.asciidoc
deleted file mode 100644
index 871486ead8..0000000000
--- a/docs/release_notes/81.asciidoc
+++ /dev/null
@@ -1,55 +0,0 @@
-[[release_notes_81]]
-=== 8.1 Release notes
-
-[discrete]
-[[release_notes_812]]
-=== 8.1.2 Release notes
-
-[discrete]
-==== API
-
-- Fixes an issue with the generated API code. When updating the code generator for 8.x, the order of `arguments.clone` in the generated code was changed. This would make it so that we would modify the parameters passed in before cloning them, which is undesired. Issue: https://github.com/elastic/elasticsearch-ruby/issues/1727[#1727].
-
-[discrete]
-[[release_notes_811]]
-=== 8.1.1 Release notes
-
-No release, no changes on the client.
-
-[discrete]
-[[release_notes_810]]
-=== 8.1.0 Release notes
-
-- Tested versions of Ruby for 8.1.0: Ruby (MRI) 2.6, 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== API
-
-Updated for compatibility with Elasticsearch 8.1's API.
-
-[discrete]
-===== New parameters:
-- `indices.forcemerge` - `wait_for_completion` Should the request wait until the force merge is completed.
-- `indices.get` - `features` Return only information on specified index features (options: aliases, mappings, settings).
-- `ingest.put_pipeline` `if_version` (Integer), required version for optimistic concurrency control for pipeline updates.
-- `ml.delete_trained_model` - `timeout` controls the amount of time to wait for the model to be deleted. `force` (Boolean) true if the model should be forcefully deleted.
-- `ml.stop_trained_model_deployment` - `allow_no_match` whether to ignore if a wildcard expression matches no deployments. (This includes `_all` string or when no deployments have been specified). `force` true if the deployment should be forcefully stopped. Adds `body` parameter, the stop deployment parameters.
-- `nodes.hot_threads` - `sort` the sort order for 'cpu' type (default: total) (options: cpu, total)
-
-[discrete]
-===== Updated parameters:
-- `indices.get_index_template` - `name` is now a String, a pattern that returned template names must match.
-- `knn_search` - `index` removes option to use empty string to perform the operation on all indices.
-- `ml.close_job`, `ml.get_job_stats`, `ml.get_jobs`, `ml.get_overall_buckets` - Remove `allow_no_jobs` parameter.
-- `ml.get_datafeed_stats`, `ml.get_datafeeds` - Remove `allow_no_datafeeds` parameter.
-- `nodes.hot_threads` - `type` parameter adds `mem` option.
-- `nodes.info` - `metric` updated to use `_all` to retrieve all metrics and `_none` to retrieve the node identity without any additional metrics. (options: settings, os, process, jvm, thread_pool, transport, http, plugins, ingest, indices, aggregations, _all, _none). `index_metric` option `shards` changes to `shard_stats`.
-- `open_point_in_time` - `keep_alive` is now a required parameter.
-- `search_mvt` - `grid_type` parameter adds `centroid` option in addition to `grid` and `point`.
-
-- New experimental APIs, designed for internal use by the fleet server project: `fleet.search`, `fleet.msearch`.
-
-[discrete]
-===== New APIs
-- OpenID Connect Authentication: `security.oidc_authenticate`, `security.oidc_logout`, `security.oidc_prepare_authentication`.
-- `transform.reset_transform`.
diff --git a/docs/release_notes/810.asciidoc b/docs/release_notes/810.asciidoc
deleted file mode 100644
index c58f777d8e..0000000000
--- a/docs/release_notes/810.asciidoc
+++ /dev/null
@@ -1,51 +0,0 @@
-[[release_notes_8100]]
-=== 8.10 Release notes
-
-[discrete]
-[[release_notes_810_0]]
-=== 8.10.0 Release notes
-
-[discrete]
-=== Client
-* Tested versions of Ruby for 8.10.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-=== API
-
-[discrete]
-==== New Experimental APIs, for internal use:
-- `fleet.delete_secret`
-- `fleet.get_secret`
-- `fleet.post_secret`
-
-[discrete]
-==== New stable APIs:
-- `security.get_settings` - Retrieve settings for the security system indices
-- `security.update_settings` - Update settings for the security system indices
-
-[discrete]
-==== New Experimental API:
-- `query_ruleset.list` List query rulesets.
-
-[discrete]
-==== API Changes:
-- `indices.reload_search_analyzers` - Adds parameter `resource`: changed resource to reload analyzers from, if applicable.
-
-Promoted from Experimental to Beta:
-
-- `security.create_cross_cluster_api_key`
-- `security.update_cross_cluster_api_key`
-
-[discrete]
-==== Synonyms namespace update:
-
-All synonym-related APIs have been moved to the `synonyms` namespace, and some of the endpoints have been renamed, as well as their parameters:
-
-- `synonyms.delete` => `synonyms.delete_synonym` - requires `id`, the id of the synonyms set to be deleted.
-- `synonyms.get` => `synonyms.get_synonym` - requires `id`, the name of the synonyms set to be retrieved.
-- `synonyms_set.get_synonyms_sets` => `synonyms.get_synonyms_sets`
-- `synonyms.put` => `synonyms.put_synonym` - requires `id` of the synonyms set to be created or updated.
-- `synonym_rule.put` => `synonyms.put_synonym_rule` - Parameters changed to `set_id` (the id of the synonym set to be updated with the synonym rule) and `rule_id` (the id of the synonym rule to be updated or created).
-- New Experimental API `synonyms.delete_synonym_rule` - Deletes a synonym rule in a synonym set
-- New Experimental API `synonyms.get_synonym_rule` - Retrieves a synonym rule from a synonym set
-
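-A minimal sketch of the renamed endpoints (the set id, rule id and synonym definitions below are hypothetical):
-
-[source,ruby]
-------------------------------------
-client.synonyms.put_synonym(
-  id: 'my-synonyms-set',
-  body: { synonyms_set: [{ id: 'rule-1', synonyms: 'hello, hi' }] }
-)
-
-client.synonyms.put_synonym_rule(
-  set_id: 'my-synonyms-set',
-  rule_id: 'rule-1',
-  body: { synonyms: 'hello, hi, howdy' }
-)
-------------------------------------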
diff --git a/docs/release_notes/811.asciidoc b/docs/release_notes/811.asciidoc
deleted file mode 100644
index 43c43427de..0000000000
--- a/docs/release_notes/811.asciidoc
+++ /dev/null
@@ -1,24 +0,0 @@
-[[release_notes_8110]]
-=== 8.11 Release notes
-
-[discrete]
-[[release_notes_811_0]]
-=== 8.11.0 Release notes
-
-[discrete]
-=== Client
-
-* Tested versions of Ruby for 8.11.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-* Adds native support for *Open Telemetry*. See <> for documentation.
-* Improved documentation, now you can find more examples in Ruby in the https://www.elastic.co/guide/en/elasticsearch/reference/8.11/rest-apis.html[REST API reference].
-
-[discrete]
-=== API
-
-New Experimental APIs:
-
-* `esql.query` - Executes an ESQL request.
-* `inference.delete_model` - Delete model in the Inference API
-* `inference.get_model` - Get a model in the Inference API
-* `inference.inference` - Perform inference on a model
-* `inference.put_model` - Configure a model for use in the Inference API
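-
-A minimal sketch of the new ES|QL endpoint (the query below is hypothetical):
-
-[source,ruby]
-------------------------------------
-response = client.esql.query(
-  body: { query: 'FROM my-index | LIMIT 10' }
-)
-------------------------------------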
diff --git a/docs/release_notes/812.asciidoc b/docs/release_notes/812.asciidoc
deleted file mode 100644
index 7fb1c52f6e..0000000000
--- a/docs/release_notes/812.asciidoc
+++ /dev/null
@@ -1,65 +0,0 @@
-[[release_notes_8_12]]
-=== 8.12 Release notes
-
-[discrete]
-[[release_notes_8_12_0]]
-=== 8.12.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.12.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-API Changes:
-
-* `bulk` - Adds boolean `:list_executed_pipelines` parameter: Sets `list_executed_pipelines` for all incoming documents. Defaults to unset (false).
-* `indices.put_settings` - Adds boolean `:reopen` parameter: Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. The default is `false`.
-* `open_point_in_time` - Adds Hash `:body` parameter: an index_filter specified with the Query DSL.
-* `security.get_api_key` - Adds boolean `:active_only` parameter: flag to limit response to only active (not invalidated or expired) API keys.
-
-[discrete]
-===== Connectors
-
-Version 8.12 introduces the experimental https://www.elastic.co/guide/en/elasticsearch/reference/8.12/connector-apis.html[Connectors API].
-
-Use the following APIs to manage connectors:
-
-* `connector.post` - Creates a connector. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-api.html[documentation].
-* `connector.put` - Creates or updates a connector. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-api.html[documentation].
-* `connector.delete` - Deletes a connector. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/delete-connector-api.html[documentation].
-* `connector.get` - Returns the details about a connector. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/get-connector-api.html[documentation].
-* `connector.list` - Lists all connectors. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/list-connector-api.html[documentation].
-* `connector.check_in` - Updates the last_seen timestamp in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/check-in-connector-api.html[documentation].
-* `connector.update_configuration` - Updates the connector configuration. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-configuration-api.html[documentation].
-* `connector.update_error` - Updates the error field in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-error-api.html[documentation].
-* `connector.update_filtering` - Updates the filtering field in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-filtering-api.html[documentation].
-* `connector.last_sync` - Updates the stats of last sync in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-last-sync-api.html[documentation].
-* `connector.update_name` - Updates the name and/or description fields in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-name-description-api.html[documentation].
-* `connector.update_pipeline` - Updates the pipeline field in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-pipeline-api.html[documentation].
-* `connector.update_scheduling` - Updates the scheduling field in the connector document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/update-connector-scheduling-api.html[documentation].
-
-Use the following APIs to manage sync jobs:
-
-* `connector_sync_job.cancel` - Cancels a connector sync job. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/cancel-connector-sync-job-api.html[documentation].
-* `connector_sync_job.check_in` - Checks in a connector sync job (refreshes 'last_seen'). See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/check-in-connector-sync-job-api.html[documentation].
-* `connector_sync_job.delete` - Deletes a connector sync job. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/delete-connector-sync-job-api.html[documentation].
-* `connector_sync_job.error` - Sets an error for a connector sync job. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/set-connector-sync-job-error-api.html[documentation].
-* `connector_sync_job.get` - Returns the details about a connector sync job. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/get-connector-sync-job-api.html[documentation].
-* `connector_sync_job.list` - Lists all connector sync jobs. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/list-connector-sync-jobs-api.html[documentation].
-* `connector_sync_job.post` - Creates a connector sync job. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/create-connector-sync-job-api.html[documentation].
-* `connector_sync_job.update_stats` - Updates the stats fields in the connector sync job document. See https://www.elastic.co/guide/en/elasticsearch/reference/8.12/set-connector-sync-job-stats-api.html[documentation].
-
-[discrete]
-===== Profiling
-New API for https://www.elastic.co/guide/en/observability/8.12/universal-profiling.html[Universal profiling].
-
-* `profiling.status` - Returns basic information about the status of Universal Profiling.
-
-
-[discrete]
-===== Simulate
-New experimental API:
-
-* `simulate.ingest` - Simulates running ingest with example documents. See: https://www.elastic.co/guide/en/elasticsearch/reference/8.12/simulate-ingest-api.html
diff --git a/docs/release_notes/813.asciidoc b/docs/release_notes/813.asciidoc
deleted file mode 100644
index 0c3066d3e4..0000000000
--- a/docs/release_notes/813.asciidoc
+++ /dev/null
@@ -1,76 +0,0 @@
-[[release_notes_8_13]]
-=== 8.13 Release notes
-
-[discrete]
-[[release_notes_8_13_0]]
-=== 8.13.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.13.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== Experimental ES|QL Helper
-
-This version provides a new experimental Helper for the ES|QL `query` API. Please check out https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/8.13/Helpers.html#esql-helper[the documentation] and https://github.com/elastic/elasticsearch-ruby/issues/new/choose[open an issue] if you encounter any problems or have any feedback.
-
-[discrete]
-==== API
-
-API Changes:
-
-* `async_search.status` - adds Time `:keep_alive` parameter: Specify the time interval in which the results (partial or final) for this search will be available.
-* `bulk` - adds boolean `:require_data_stream` parameter: When true, requires the destination to be a data stream (existing or to-be-created). Default is false.
-* `connector.list` - Adds the following parameters:
- * `:index_name` (List): A comma-separated list of connector index names to fetch connector documents for.
- * `:connector_name` (List): A comma-separated list of connector names to fetch connector documents for.
- * `:service_type` (List): A comma-separated list of connector service types to fetch connector documents for.
- * `:query` (String): A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names.
-* `esql.query` - adds boolean `:drop_null_columns` parameter: Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.
-* `field_caps` - Adds `:include_empty_fields` boolean parameter: Include empty fields in result.
-* `index` - adds boolean `:require_data_stream` parameter: When true, requires the destination to be a data stream (existing or to-be-created). Default is false.
-* `indices.rollover` - adds boolean `:lazy` parameter: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams.
-* `connector_sync_job.list` - adds List `:job_type` parameter: A comma-separated list of job types.
-* `inference.delete_model`, `inference.get_model`, `inference.inference`, `inference.put_model`: renames `:model_id` parameter to `:inference_id`.
-* `termvector` will show a warning since it's been deprecated. Please use the plural version, `termvectors`.
-
-New APIs:
-
-* `indices.resolve_cluster` - Resolves the specified index expressions to return information about each cluster, including the local cluster, if included.
-* `profiling.flamegraph` - Extracts a UI-optimized structure to render flamegraphs from Universal Profiling.
-* `profiling.stacktraces` - Extracts raw stacktrace information from Universal Profiling.
-* `security.query_user` - Retrieves information for Users using a subset of query DSL
-* `text_structure.test_grok_pattern` - Tests a Grok pattern on some text.
-
-APIs Migrated from experimental to stable:
-
-* `synonyms.delete_synonym`
-* `synonyms.delete_synonym_rule`
-* `synonyms.get_synonym`
-* `synonyms.get_synonym_rule`
-* `synonyms.get_synonyms_sets`
-* `synonyms.put_synonym`
-* `synonyms.put_synonym_rule`
-
-New Experimental APIs
-
-* `connector.update_api_key_id` - Updates the API key id and/or API key secret id fields in the connector document.
-* `connector.update_index_name` - Updates the index name of the connector.
-* `connector.update_native` - Updates the is_native flag of the connector.
-* `connector.update_service_type` - Updates the service type of the connector.
-* `connector.update_status` - Updates the status of the connector.
-* `esql.async_query` - Executes an ESQL request asynchronously
-* `esql.async_query_get` - Retrieves the results of a previously submitted async query request given its ID.
-
-New Experimental namespace `connector_secret`:
-
-* `connector_secret.delete` - Deletes a connector secret.
-* `connector_secret.get` - Retrieves a secret stored by Connectors.
-* `connector_secret.post` - Creates a secret for a Connector.
-* `connector_secret.put` - Creates or updates a secret for a Connector.
-
-[discrete]
-==== Development
-
-* Migrated from `byebug` to `debug`.
-* Added extra testing for OpenTelemetry.
diff --git a/docs/release_notes/814.asciidoc b/docs/release_notes/814.asciidoc
deleted file mode 100644
index 6bb42142f9..0000000000
--- a/docs/release_notes/814.asciidoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[[release_notes_8_14]]
-=== 8.14 Release notes
-
-[discrete]
-[[release_notes_8_14_0]]
-=== 8.14.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.14.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-API changes:
-
-* All Connector APIs have been migrated to one common namespace `connector`:
-** `connector_secret.delete` -> `connector.secret_delete`
-** `connector_secret.get` -> `connector.secret_get`
-** `connector_secret.post` -> `connector.secret_post`
-** `connector_secret.put` -> `connector.secret_put`
-** `connector_sync_job.cancel` -> `connector.sync_job_cancel`
-** `connector_sync_job.check_in` -> `connector.sync_job_check_in`
-** `connector_sync_job.delete` -> `connector.sync_job_delete`
-** `connector_sync_job.error` -> `connector.sync_job_error`
-** `connector_sync_job.get` -> `connector.sync_job_get`
-** `connector_sync_job.post` -> `connector.sync_job_post`
-** `connector_sync_job.update_stats` -> `connector.sync_job_update_stats`
-
-* `connector.delete` - Adds Boolean parameter `:delete_sync_jobs`: Determines whether associated sync jobs are also deleted.
-* `cross_cluster_replication.delete_auto_follow_pattern`, `cross_cluster_replication.follow`, `cross_cluster_replication.follow_info`, `cross_cluster_replication.get_auto_follow_pattern`, `cross_cluster_replication.pause_auto_follow_pattern`, `cross_cluster_replication.pause_follow`, `cross_cluster_replication.put_auto_follow_pattern`, `cross_cluster_replication.resume_auto_follow_pattern`, `cross_cluster_replication.resume_follow`, `cross_cluster_replication.stats`, `cross_cluster_replication.unfollow` - Add Time parameter `:master_timeout`: Explicit operation timeout for connection to master node.
-* `cross_cluster_replication.follow_stats`, `cross_cluster_replication.forget_follower`, `cross_cluster_replication.stats` - Add Time parameter `:timeout`: Explicit operation timeout.
-* `indices.rollover` - Adds Boolean parameter `:target_failure`: If set to true, the rollover action will be applied on the failure store of the data stream.
-* `inference.get_model` - Parameter `inference_id` no longer required.
-* `search_application.search` - Adds Boolean parameter `:typed_keys`: Specify whether aggregation and suggester names should be prefixed by their respective types in the response.
-* `security.get_api_key`, `security.query_api_keys` - Add Boolean parameter `:with_profile_uid`: flag to also retrieve the API Key's owner profile uid, if it exists.
-
-New APIs:
-
-- `profiling.topn_functions` - Extracts a list of topN functions from Universal Profiling.
-- `text_structure.find_field_structure` - Finds the structure of a text field in an index.
-- `text_structure.find_message_structure` - Finds the structure of a list of messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
-
-APIs Migrated from experimental to stable:
-
-- `esql.async_query`
-- `esql.query`
-
-New Experimental APIs:
-
-- `connector.update_active_filtering` - Activates the draft filtering rules if they are in a validated state.
-- `connector.update_filtering_validation` - Updates the validation info of the draft filtering rules.
diff --git a/docs/release_notes/815.asciidoc b/docs/release_notes/815.asciidoc
deleted file mode 100644
index 38312a5922..0000000000
--- a/docs/release_notes/815.asciidoc
+++ /dev/null
@@ -1,99 +0,0 @@
-[[release_notes_8_15]]
-=== 8.15 Release notes
-
-[discrete]
-[[release_notes_8_15_0]]
-=== 8.15.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.15.0: Ruby (MRI) 3.0, 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-[discrete]
-===== API changes
-
-* `cluster.allocation_explain` - `body` is no longer a required parameter.
-* `connector.put` - (experimental API) `body` and `connector_id` are no longer required parameters.
-* `machine_learning.update_trained_model_deployment` has been promoted from Beta to stable. Adds Integer parameter `number_of_allocations`: updates the model deployment to this number of allocations.
-* `snapshot.delete` - Adds `wait_for_completion` Boolean parameter: whether the request should wait until the operation has completed before returning.
-
-[discrete]
-====== `master_timeout` and `timeout` parameters
-
-These parameters have been added to several APIs:
-
-* `master_timeout` - timeout for processing on the master node.
-* `timeout` - timeout for acknowledgement of the update from all nodes in the cluster.
-
-The APIs:
-
-* `autoscaling.delete_autoscaling_policy` - both.
-* `autoscaling.get_autoscaling_capacity`- `master_timeout`.
-* `autoscaling.get_autoscaling_policy` - `master_timeout`.
-* `autoscaling.put_autoscaling_policy` - both.
-* `enrich.delete_policy` - `master_timeout`.
-* `enrich.execute_policy` - `master_timeout`.
-* `enrich.get_policy` - `master_timeout`.
-* `enrich.put_policy` - `master_timeout`.
-* `enrich.stats` - `master_timeout`.
-* `features.reset_features` - `master_timeout`.
-* `license.delete` - both.
-* `license.post` - both.
-* `license.post_start_basic` - both.
-* `license.post_start_trial` - both.
-* `security.get_settings` - `master_timeout`.
-* `security.update_settings` - both.
-* `shutdown.get_node` - `master_timeout`.
-* `snapshot_lifecycle_management.start` - both.
-* `snapshot_lifecycle_management.stop` - both.
-* `watcher.get_settings` - `master_timeout`.
-* `watcher.start` - `master_timeout`.
-* `watcher.stop` - `master_timeout`.
-* `watcher.update_settings` - both.
-
-[discrete]
-====== Inference APIs have been renamed:
-
-* `inference.delete_model` => `inference.delete`. Also adds two new parameters:
-** `dry_run` (Boolean), if true the endpoint will not be deleted and a list of ingest processors which reference this endpoint will be returned.
-** `force` (Boolean), if true the endpoint will be forcefully stopped (regardless of whether or not it is referenced by any ingest processors or semantic text fields).
-* `inference.get_model` => `inference.get`
-* `inference.put_model` => `inference.put`
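-
-A minimal sketch of the renamed calls (the inference id is a placeholder; `dry_run` is one of the new parameters described above):
-
-[source,ruby]
-------------------------------------
-client.inference.get(inference_id: 'my-endpoint')
-# Check which ingest processors still reference the endpoint without deleting it
-client.inference.delete(inference_id: 'my-endpoint', dry_run: true)
-------------------------------------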
-
-[discrete]
-====== Query Rules parameters consolidated
-
-Changes in the `query_ruleset` and `query_rules` APIs; these have been combined into the `query_rules` namespace:
-
-* `query_rules.delete_ruleset` - Renamed from `query_ruleset.delete`, promoted from experimental to stable.
-* `query_rules.delete_rule` - Deletes an individual query rule within a ruleset.
-* `query_rules.get_rule` - Returns the details about an individual query rule within a ruleset.
-* `query_rules.get_ruleset` - Renamed from `query_ruleset.get`, promoted from experimental to stable.
-* `query_rules.list_rulesets` - Renamed from `query_ruleset.list`, promoted from experimental to stable.
-* `query_rules.put_rule` - Creates or updates a query rule within a ruleset.
-* `query_rules.put_ruleset` - Renamed from `query_ruleset.put`, promoted from experimental to stable.
-
-[discrete]
-===== New APIs:
-
-* `ingest.delete_geoip_database` - Deletes a geoip database configuration.
-* `ingest.get_geoip_database` - Returns geoip database configuration.
-* `ingest.put_geoip_database` - Puts the configuration for a geoip database to be downloaded.
-* `security.bulk_delete_role` - Bulk delete roles in the native realm.
-* `security.bulk_put_role` - Bulk adds and updates roles in the native realm.
-* `security.query_role` - Retrieves information for Roles using a subset of query DSL.
-* `transform.get_node_stats` - Retrieves transform usage information for transform nodes.
-
-[discrete]
-===== New Experimental APIs:
-
-* `connector.sync_job_claim` - Claims a connector sync job.
-* `connector.update_features` - Updates the connector features in the connector document.
-
-[discrete]
-==== Development
-
-- Added a build using https://github.com/elastic/es-test-runner-ruby[es-test-runner-ruby] and https://github.com/elastic/elasticsearch-clients-tests[Elasticsearch Clients Tests] which will replace the Elasticsearch YAML test runner.
diff --git a/docs/release_notes/816.asciidoc b/docs/release_notes/816.asciidoc
deleted file mode 100644
index c32e719f57..0000000000
--- a/docs/release_notes/816.asciidoc
+++ /dev/null
@@ -1,57 +0,0 @@
-[[release_notes_8_16]]
-=== 8.16 Release notes
-
-[discrete]
-[[release_notes_8_16_0]]
-=== 8.16.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.16.0: Ruby (MRI) 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-[discrete]
-===== API changes
-
-* `capabilities` - Adds `local_only` boolean parameter: True if only the node being called should be considered.
-* `cluster.stats` - Removes `flat_settings` parameter, adds `include_remotes` boolean parameter: Include remote cluster data into the response (default: false).
-* `indices.get_data_stream` - Adds `verbose` boolean parameter: Whether the maximum timestamp for each data stream should be calculated and returned (default: false). Adds `master_timeout` (see below).
-* `query_rules.delete_ruleset` - Accepts `ignore: 404` common parameter.
-
-These parameters have been added to several APIs:
-
-* `master_timeout` - timeout for processing on the master node.
-* `timeout` - timeout for acknowledgement of the update from all nodes in the cluster.
-
-Added in:
-
-* `indices.create_data_stream` - both.
-* `indices.delete_data_stream` - `master_timeout`.
-* `indices.get_data_lifecycle` - `master_timeout`.
-* `indices.get_data_stream` - `master_timeout`.
-* `indices.migrate_to_data_stream` - both.
-* `indices.promote_data_stream` - `master_timeout`.
-* `search_shards` - `master_timeout`.
-
-**APIs Promoted from Experimental to Stable:**
-
-* `indices.delete_data_lifecycle`
-* `indices.explain_data_lifecycle`
-* `indices.get_data_lifecycle`
-* `indices.put_data_lifecycle`
-* `security.create_cross_cluster_api_key`
-* `security.update_cross_cluster_api_key`
-
-**New APIs**
-
-* `ingest.delete_ip_location_database` - Deletes an ip location database configuration.
-* `ingest.get_ip_location_database` - Returns the specified ip location database configuration.
-* `ingest.put_ip_location_database` - Puts the configuration for an ip location database to be downloaded.
-
-
-**New Experimental APIs**
-
-* `inference.stream_inference` - Perform streaming inference.
-* `query_rules.test` - Tests a query ruleset to identify the rules that would match input criteria.
diff --git a/docs/release_notes/817.asciidoc b/docs/release_notes/817.asciidoc
deleted file mode 100644
index 2963eeb0d1..0000000000
--- a/docs/release_notes/817.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-[[release_notes_8_17]]
-=== 8.17 Release notes
-
-[discrete]
-[[release_notes_8_17_0]]
-=== 8.17.0 Release notes
-
-[discrete]
-==== Client
-* Tested versions of Ruby for 8.17.0: Ruby (MRI) 3.1, 3.2 and 3.3. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-[discrete]
-===== API changes
-* `async_search.submit` - Removes `keep_alive` parameter. Adds:
-** `ccs_minimize_roundtrips` (Boolean): When doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. However, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.
-** `rest_total_hits_as_int` (Boolean): Indicates whether hits.total should be rendered as an integer or an object in the rest search response.
-* `open_point_in_time` - Adds `allow_partial_search_results` (Boolean) parameter: Specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception (default: false).
diff --git a/docs/release_notes/82.asciidoc b/docs/release_notes/82.asciidoc
deleted file mode 100644
index 401958b6c7..0000000000
--- a/docs/release_notes/82.asciidoc
+++ /dev/null
@@ -1,49 +0,0 @@
-[[release_notes_82]]
-=== 8.2 Release notes
-
-[discrete]
-[[release_notes_822]]
-=== 8.2.2 Release notes
-
-- Updates dependency on `elastic-transport` to `~> 8.0`
-
-
-[discrete]
-[[release_notes_821]]
-=== 8.2.1 Release notes
-
-No release, no changes on the client.
-
-[discrete]
-[[release_notes_820]]
-=== 8.2.0 Release notes
-
-
-- Tested versions of Ruby for 8.2.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== API
-
-Updated for compatibility with Elasticsearch 8.2's API.
-
-[discrete]
-===== New parameters:
-
-* `field_caps`
-** `filters` An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent
-** `types` Only return results for fields that have one of the types in the list
-
-[discrete]
-===== New APIs
-
-- `cat.component_templates` - Returns information about existing component templates.
-- `ml.get_memory_stats` - Returns information on how ML is using memory.
-
-[discrete]
-===== New Experimental APIs
-- `security.activate_user_profile` - Creates or updates the user profile on behalf of another user.
-- `security.disable_user_profile` - Disables a user profile so it's not visible in user profile searches.
-- `security.enable_user_profile` - Enables a user profile so it's visible in user profile searches.
-- `security.get_user_profile` - Retrieves a user profile for the given unique ID.
-- `security.suggest_user_profiles` - Get suggestions for user profiles that match specified search criteria.
-- `security.update_user_profile_data` - Update application specific data for the user profile of the given unique ID.
diff --git a/docs/release_notes/83.asciidoc b/docs/release_notes/83.asciidoc
deleted file mode 100644
index aa0b2d8d53..0000000000
--- a/docs/release_notes/83.asciidoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[[release_notes_83]]
-=== 8.3 Release notes
-
-[discrete]
-[[release_notes_830]]
-=== 8.3.0 Release notes
-
-- Tested versions of Ruby for 8.3.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== API
-
-- Added build hash to auto generated code. The code generator obtains the git hash from the Elasticsearch specification and adds it as a comment in the code. This allows us to track the version for each generated class.
-- Updated for compatibility with Elasticsearch 8.3's API.
-
-[discrete]
-===== API Changes
-
-* `cluster.delete_voting_config_exclusions`, `cluster.post_voting_config_exclusions` - Add new parameter `master_timeout` (Time) Timeout for submitting request to master.
-* `machine_learning.infer_trained_model_deployment` is renamed to `machine_learning.infer_trained_model`. The url `/_ml/trained_models/{model_id}/deployment/_infer` is deprecated since 8.3, use `/_ml/trained_models/{model_id}/_infer` instead.
-* `machine_learning.preview_datafeed` - Adds new parameters:
-** `start` (String) The start time from where the datafeed preview should begin
-** `end` (String) The end time when the datafeed preview should stop
-* `machine_learning.start_trained_model_deployment` - Adds new parameters:
-** `number_of_allocations` (Integer) The number of model allocations on each node where the model is deployed.
-** `threads_per_allocation` (Integer) The number of threads used by each model allocation during inference.
-** `queue_capacity` (Integer) Controls how many inference requests are allowed in the queue at a time.
-* `search_mvt` - Adds new parameter: `with_labels` (Boolean) If true, the hits and aggs layers will contain additional point features with suggested label positions for the original features
-* `snapshot.get` - Adds new parameter: `index_names` (Boolean) Whether to include the name of each index in the snapshot. Defaults to true.
-
-[discrete]
-===== New Experimental APIs
-* `security.has_privileges_user_profile` Determines whether the users associated with the specified profile IDs have all the requested privileges
diff --git a/docs/release_notes/84.asciidoc b/docs/release_notes/84.asciidoc
deleted file mode 100644
index 19901f675e..0000000000
--- a/docs/release_notes/84.asciidoc
+++ /dev/null
@@ -1,31 +0,0 @@
-[[release_notes_84]]
-=== 8.4 Release notes
-
-[discrete]
-[[release_notes_840]]
-=== 8.4.0 Release notes
-
-- Tested versions of Ruby for 8.4.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== API
-
-[discrete]
-===== New APIs
-
-* `security.update_api_key` - Updates attributes of an existing API key. https://www.elastic.co/guide/en/elasticsearch/reference/8.4/security-api-update-api-key.html[Documentation].
-
-[discrete]
-===== API Changes
-* `get` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
-* `machine_learning.start_trained_model_deployment` - Adds new parameter `cache_size` (String) A byte-size value for configuring the inference cache size. For example, 20mb.
-* `mget` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
-* `search` - Adds new parameter `force_synthetic_source` (Boolean) Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
-* `snapshot.get` - Adds new parameters:
-** `sort` (String) Allows setting a sort order for the result. Defaults to start_time (options: start_time, duration, name, repository, index_count, shard_count, failed_shard_count).
-** `size` (Integer) Maximum number of snapshots to return. Defaults to 0 which means return all that match without limit.
-** `order` (String) Sort order (options: asc, desc).
-** `from_sort_value` (String) Value of the current sort column at which to start retrieval.
-** `after` (String) Offset identifier to start pagination from as returned by the 'next' field in the response body.
-** `offset` (Integer) Numeric offset to start pagination based on the snapshots matching the request. Defaults to 0.
-** `slm_policy_filter` (String) Filter snapshots by a comma-separated list of SLM policy names that snapshots belong to. Accepts wildcards. Use the special pattern '_none' to match snapshots without an SLM policy.
diff --git a/docs/release_notes/85.asciidoc b/docs/release_notes/85.asciidoc
deleted file mode 100644
index 4ba0e56497..0000000000
--- a/docs/release_notes/85.asciidoc
+++ /dev/null
@@ -1,105 +0,0 @@
-[[release_notes_85]]
-=== 8.5 Release notes
-
-[discrete]
-[[release_notes_852]]
-=== 8.5.2 Release notes
-
-[discrete]
-==== API Bugfix
-
-Fixes the `security.create_service_token` API: it now uses `POST` when a token name isn't present.
-Thanks https://github.com/carlosdelest[@carlosdelest] for reporting in https://github.com/elastic/elasticsearch-ruby/pull/1961[#1961].
-
-[discrete]
-[[release_notes_851]]
-=== 8.5.1 Release notes
-
-[discrete]
-==== Bugfixes
-
-Fixes bug when instantiating client with `api_key`: When passing in `api_key` and `transport_options` that don't include headers to the client, the `api_key` code would overwrite the arguments passed in for `transport_options`. This was fixed in https://github.com/elastic/elasticsearch-ruby/pull/1941/files[this Pull Request].
-Thanks @svdasein for reporting in https://github.com/elastic/elasticsearch-ruby/issues/1940[#1940].
-
-[discrete]
-[[release_notes_850]]
-=== 8.5.0 Release notes
-
-- Tested versions of Ruby for 8.5.0: Ruby (MRI) 2.7, 3.0 and 3.1, JRuby 9.3.
-
-[discrete]
-==== Client
-
-With the latest release of `elastic-transport` - `v8.1.0` - this gem now supports Faraday v2. Elasticsearch Ruby has an open dependency on `elastic-transport` (`'elastic-transport', '~> 8'`), so when you upgrade your gems, `8.1.0` will be installed; it supports both Faraday v1 and Faraday v2. The main dependency change when using Faraday v2 is that all adapters, except for the default `net_http` one, have been moved out of Faraday into separate gems. This means that if you're not using the default adapter and you migrate to Faraday v2, you'll need to add the adapter gems to your Gemfile.
-
-These are the gems required for the different adapters with Faraday 2, instead of the libraries on which they were based:
-
-[source,ruby]
-------------------------------------
-# HTTPClient
-gem 'faraday-httpclient'
-
-# NetHTTPPersistent
-gem 'faraday-net_http_persistent'
-
-# Patron
-gem 'faraday-patron'
-
-# Typhoeus
-gem 'faraday-typhoeus'
-------------------------------------
-
-Things should work fine if you migrate to Faraday 2 as long as you include the adapter (unless you're using the default one `net-http`), but worst case scenario, you can always lock the version of Faraday in your project to 1.x:
-`gem 'faraday', '~> 1'`
-
-Be aware if migrating to Faraday v2 that it requires at least Ruby `2.6`, unlike Faraday v1 which requires `2.4`.
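-
-As a sketch, after adding the adapter gem, a client using Faraday v2 with Typhoeus could be set up like this (the host is a placeholder; the `adapter:` option is passed through to `elastic-transport`):
-
-[source,ruby]
-------------------------------------
-# Gemfile: gem 'faraday-typhoeus'
-require 'elasticsearch'
-require 'faraday/typhoeus'
-
-client = Elasticsearch::Client.new(
-  host: 'http://localhost:9200',
-  adapter: :typhoeus
-)
-client.info
-------------------------------------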
-
-*Troubleshooting*
-
-If you see a message like:
-
-[source,ruby]
-------------------------------------
-:adapter is not registered on Faraday::Adapter (Faraday::Error)
-------------------------------------
-Then you probably need to include the adapter library in your Gemfile and require it.
-
-Please https://github.com/elastic/elasticsearch-ruby/issues[submit an issue] if you encounter any problems.
-
-[discrete]
-==== API
-
-[discrete]
-===== New APIs
-
-- `machine_learning.clear_trained_model_deployment_cache` - Clear the cached results from a trained model deployment (Beta).
-- `security.bulk_update_api_keys` - Updates the attributes of multiple existing API keys.
-
-[discrete]
-===== API Changes
-
-- `rollup.rollup` renamed to `indices.downsample`. The method now receives the `index` to downsample (required) and, instead of `rollup_index`, uses `target_index` as the index to store the downsampled data.
-
-- `security.get_api_key` and `security.query_api_keys` add `:with_limited_by` flag to show the limited-by role descriptors of API Keys.
-- `security.get_user` adds `:with_profile_uid` flag to retrieve profile uid (if exists) associated to the user.
-- `security.get_user_profile` now retrieves user profiles for given unique ID(s). `:uid` is now a comma-separated list of unique identifiers for user profiles.
-- `text_structure.find_structure` adds `:ecs_compatibility`, optional parameter to specify the compatibility mode with ECS Grok patterns - may be either 'v1' or 'disabled'.
-
-Machine learning APIs promoted from *Experimental* to *Beta*:
-
-- `machine_learning.clear_trained_model_deployment_cache`
-- `machine_learning.infer_trained_model`
-- `machine_learning.put_trained_model_definition_part`
-- `machine_learning.put_trained_model_vocabulary`
-- `machine_learning.start_trained_model_deployment`
-- `machine_learning.stop_trained_model_deployment`
-
-Security user profile APIs promoted from *Experimental* to *Stable*:
-
-- `security.activate_user_profile`
-- `security.disable_user_profile`
-- `security.enable_user_profile`
-- `security.get_user_profile`
-- `security.has_privileges_user_profile`
-- `security.suggest_user_profiles`
-- `security.update_user_profile_data`
diff --git a/docs/release_notes/86.asciidoc b/docs/release_notes/86.asciidoc
deleted file mode 100644
index 88a7ad47d2..0000000000
--- a/docs/release_notes/86.asciidoc
+++ /dev/null
@@ -1,23 +0,0 @@
-[[release_notes_86]]
-=== 8.6 Release notes
-
-[discrete]
-[[release_notes_860]]
-=== 8.6.0 Release notes
-
-- Tested versions of Ruby for 8.6.0: Ruby (MRI) 2.7, 3.0, 3.1 and **3.2**. JRuby 9.3 and **JRuby 9.4**.
-
-[discrete]
-==== API
-
-[discrete]
-===== New APIs
-
-- `update_trained_model_deployment` - Updates certain properties of a trained model deployment (this functionality is in Beta and is subject to change).
-
-[discrete]
-===== API Changes
-
-- `cluster.reroute` - `:metric` parameter adds `none` as an option.
-- `ml.start_trained_model_deployment` - New parameter `:priority` (String), the deployment priority
-
diff --git a/docs/release_notes/87.asciidoc b/docs/release_notes/87.asciidoc
deleted file mode 100644
index f58e0856c6..0000000000
--- a/docs/release_notes/87.asciidoc
+++ /dev/null
@@ -1,35 +0,0 @@
-[[release_notes_87]]
-=== 8.7 Release notes
-
-[discrete]
-[[release_notes_871]]
-=== 8.7.1 Release notes
-
-[discrete]
-==== API Bugfix
-
-- Updates `logstash.get_pipeline`: as fixed in the specification, `id` is not a required parameter, so the client no longer raises `ArgumentError` when `id` is not present.
-
-
-[discrete]
-[[release_notes_870]]
-=== 8.7.0 Release notes
-
-- Tested versions of Ruby for 8.7.0: Ruby (MRI) 2.7, 3.0, 3.1 and **3.2**. JRuby 9.3 and JRuby 9.4. Ruby 2.7's end of life is coming in a few days, so this'll probably be the last release to test for Ruby 2.7.
-
-[discrete]
-==== API
-
-[discrete]
-===== New APIs
-
-- `health_report` - Returns the health of the cluster.
-- `transform.schedule_now_transform` - Schedules a transform to run now.
-
-[discrete]
-===== API Changes
-
-- `transform.get_transform_stats` - Adds `timeout` (Time) parameter. Controls the time to wait for the stats.
-- `transform.start_transform` - Adds `from` (String) parameter. Restricts the set of transformed entities to those changed after this time.
-- `ml.delete_job`, `ml.reset_job` - Add `delete_user_annotations` (Boolean) parameter: whether annotations added by the user should be deleted.
-- `ml.clear_trained_model_deployment_cache`, `ml.infer_trained_model`, `ml.put_trained_model_definition_part`, `ml.put_trained_model_vocabulary`, `ml.start_trained_model_deployment`, `ml.stop_trained_model_deployment` - These APIs are no longer in Beta.
diff --git a/docs/release_notes/88.asciidoc b/docs/release_notes/88.asciidoc
deleted file mode 100644
index 13f093fd07..0000000000
--- a/docs/release_notes/88.asciidoc
+++ /dev/null
@@ -1,47 +0,0 @@
-[[release_notes_88]]
-=== 8.8 Release notes
-
-[discrete]
-[[release_notes_880]]
-=== 8.8.0 Release notes
-
-- Tested versions of Ruby for 8.8.0: Ruby (MRI) 3.0, 3.1 and **3.2**. JRuby 9.3 and JRuby 9.4.
-
-[discrete]
-==== API
-
-- Updates development dependency `minitest-reporters` to `>= 1.6` to show failures at the end of the test run.
-
-[discrete]
-===== New APIs
-
-- `watcher.get_settings` - Retrieve settings for the watcher system index.
-- `watcher.update_settings` - Update settings for the watcher system index.
-
-[discrete]
-===== New Experimental APIs
-
-- `indices.delete_data_lifecycle`- Deletes the data lifecycle of the selected data streams
-- `indices.explain_data_lifecycle` - Retrieves information about the index's current DLM lifecycle, such as any potential errors encountered, time since creation, etc.
-- `indices.get_data_lifecycle` - Returns the data lifecycle of the selected data streams.
-- `indices.put_data_lifecycle` - Updates the data lifecycle of the selected data streams.
-- `search_application.delete` - Deletes a search application.
-- `search_application.delete_behavioral_analytics` - Delete a behavioral analytics collection.
-- `search_application.get` - Returns the details about a search application.
-- `search_application.get_behavioral_analytics` - Returns the existing behavioral analytics collections.
-- `search_application.list` - Returns the existing search applications.
-- `search_application.post_behavioral_analytics_event` - Creates a behavioral analytics event for existing collection.
-- `search_application.put` - Creates or updates a search application.
-- `search_application.put_behavioral_analytics` - Creates a behavioral analytics collection.
-- `search_application.search` - Perform a search against a search application.
-
-[discrete]
-===== API Changes
-
-- `clear_scroll` now works with the argument `ignore: 404`. https://github.com/elastic/elasticsearch-ruby/issues/2067[Issue on GitHub].
-- The code generator was updated to fix a bug for `ignore: 404`. APIs that were supposed to support this wouldn't parse the parameters correctly. They support it now: `security.get_role`, `watcher.delete_watch`.
-- `cluster.get_component_template`, `indices.get_data_stream`, `indices.get_index_template`, `indices.simulate_index_template`, `indices.simulate_template` - Add `include_defaults` (Boolean) parameter: Return all default configurations for the component template (default: false).
-- `machine_learning.put_trained_model` - Adds `wait_for_completion` (Boolean) parameter: Whether to wait for all child operations (e.g. model download) to complete before returning (default: false).
-- `machine_learning.start_trained_model_deployment` - Adds `deployment_id` (String) parameter: The Id of the new deployment. Defaults to the model_id if not set.
-- `search` - Adds `include_named_queries_score` (Boolean) parameter: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false).
-- `transform.delete_transform` - Adds `delete_dest_index` (Boolean) parameter: When `true`, the destination index is deleted together with the transform. The default value is `false`, meaning that the destination index will not be deleted.
diff --git a/docs/release_notes/89.asciidoc b/docs/release_notes/89.asciidoc
deleted file mode 100644
index de0f431856..0000000000
--- a/docs/release_notes/89.asciidoc
+++ /dev/null
@@ -1,54 +0,0 @@
-[[release_notes_89]]
-=== 8.9 Release notes
-
-[discrete]
-[[release_notes_890]]
-=== 8.9.0 Release notes
-
-[discrete]
-=== Client
-* Tested versions of Ruby for 8.9.0: Ruby (MRI) 3.0, 3.1 and 3.2. JRuby 9.3 and JRuby 9.4.
-* Updated product validation. The code for the product validation was refactored in a few ways:
-** Just check header, does not check the version of the server.
-** Warns only once when there's a general server error.
-** Removes the call to '/' (`client.info`) before the first request; the check now happens on the first actual request from the client.
-* Fixes the User-Agent code. In the migration to 8.x, the user agent code was extracted into `elastic-transport`, since we're now using that library in other projects. So for the Elasticsearch client, the user agent would be reported as the one defined in `elastic-transport`. This release fixes the issue and brings back the user agent in the format that was used in 7.x.
-
-[discrete]
-=== Helpers
-This release introduces two new Helpers in the client:
-
-* BulkHelper - This helper provides a better developer experience when using the Bulk API. At its simplest, you can send it a collection of hashes in an array, and it will bulk ingest them into {es}.
-* ScrollHelper - This helper provides an easy way to get results from a Scroll.
-
-See <> to read more about them.
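-
-As a minimal sketch of the BulkHelper (assuming the `ingest` method described in the Helpers documentation; the index name and documents are placeholders):
-
-[source,ruby]
-------------------------------------
-require 'elasticsearch'
-require 'elasticsearch/helpers/bulk_helper'
-
-client = Elasticsearch::Client.new
-helper = Elasticsearch::Helpers::BulkHelper.new(client, 'books')
-
-# Bulk ingest an array of hashes into the 'books' index
-helper.ingest([
-  { name: 'Leviathan Wakes', author: 'James S.A. Corey' },
-  { name: 'Snow Crash', author: 'Neal Stephenson' }
-])
-------------------------------------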
-
-[discrete]
-=== API
-
-[discrete]
-==== New APIs
-
-* `cluster.info` - Returns different information about the cluster.
-
-[discrete]
-==== New Experimental APIs and namespaces:
-
-This functionality is Experimental and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.
-
-* New namespace: `query_ruleset`
-** `query_ruleset.delete` - Deletes a query ruleset.
-** `query_ruleset.get` - Returns the details about a query ruleset.
-** `query_ruleset.put` - Creates or updates a query ruleset.
-* New API: `search_application.render_query` - Renders a query for the given search application search parameters.
-* New API: `security.create_cross_cluster_api_key` - Creates a cross-cluster API key for API key based remote cluster access.
-* New API: `security.update_cross_cluster_api_key` - Updates attributes of an existing cross-cluster API key.
-* New namespace: `synonyms`
-** `synonyms.delete`- Deletes a synonym set
-** `synonyms.get` - Retrieves a synonym set
-** `synonyms.put` - Creates or updates a synonyms set
-* New namespace: `synonym_rule`
-** `synonym_rule.put` - Creates or updates a synonym rule in a synonym set
-* New namespace: `synonyms_set`
-** `synonyms_set.get` - Retrieves a summary of all defined synonym sets
-
diff --git a/docs/release_notes/index.asciidoc b/docs/release_notes/index.asciidoc
deleted file mode 100644
index 3b2ef35788..0000000000
--- a/docs/release_notes/index.asciidoc
+++ /dev/null
@@ -1,74 +0,0 @@
-[[release_notes]]
-== Release Notes
-
-[discrete]
-=== 8.x
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-[discrete]
-=== 7.x
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-include::817.asciidoc[]
-include::816.asciidoc[]
-include::815.asciidoc[]
-include::814.asciidoc[]
-include::813.asciidoc[]
-include::812.asciidoc[]
-include::811.asciidoc[]
-include::810.asciidoc[]
-include::89.asciidoc[]
-include::88.asciidoc[]
-include::87.asciidoc[]
-include::86.asciidoc[]
-include::85.asciidoc[]
-include::84.asciidoc[]
-include::83.asciidoc[]
-include::82.asciidoc[]
-include::81.asciidoc[]
-include::80.asciidoc[]
-include::717.asciidoc[]
-include::716.asciidoc[]
-include::715.asciidoc[]
-include::714.asciidoc[]
-include::713.asciidoc[]
-include::712.asciidoc[]
-include::711.asciidoc[]
-include::710.asciidoc[]
-include::79.asciidoc[]
-include::78.asciidoc[]
-include::77.asciidoc[]
-include::76.asciidoc[]
-include::75.asciidoc[]
-include::70.asciidoc[]
diff --git a/docs/troubleshooting.asciidoc b/docs/troubleshooting.asciidoc
deleted file mode 100644
index a2e6a19e05..0000000000
--- a/docs/troubleshooting.asciidoc
+++ /dev/null
@@ -1,97 +0,0 @@
-[[troubleshooting]]
-== Troubleshooting
-
-Use the information in this section to troubleshoot common problems and find
-answers for frequently asked questions.
-
-
-[discrete]
-[[ruby-ts-logging]]
-=== Logging
-
-The client provides several options for logging that can help when things go
-wrong. Check out the extensive documentation on <>.
-
-If you are having trouble sending a request to {es} with the client, we suggest
-enabling `tracing` on the client and testing the cURL command that appears in
-your terminal:
-
-[source,rb]
-----------------------------
-client = Elasticsearch::Client.new(trace: true)
-client.info
-curl -X GET -H 'x-elastic-client-meta: es=8.9.0,rb=3.2.2,t=8.2.1,fd=2.7.4,nh=0.3.2, User-Agent: elastic-t
-ransport-ruby/8.2.1 (RUBY_VERSION: 3.2.2; linux x86_64; Faraday v2.7.4), Content-Type: application/json' 'http://localhost:9200//?pretty'
-----------------------------
-
-Testing the cURL command can help find out if there's a connection issue or if
-the issue is in the client code.
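-
-If you prefer structured logs to the trace output, the client also accepts a standard logger (a sketch; `logger:` and `log: true` are the documented logging options):
-
-[source,rb]
-----------------------------
-require 'elasticsearch'
-require 'logger'
-
-logger = Logger.new($stdout)
-logger.level = Logger::DEBUG
-
-client = Elasticsearch::Client.new(logger: logger)
-client.info
-----------------------------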
-
-
-[discrete]
-[[ruby-ts-connection]]
-=== Troubleshooting connection issues
-
-When working with multiple hosts, you might want to enable the
-`retry_on_failure` or `retry_on_status` options to perform a failed request on
-another node (refer to <>).
-
-For optimal performance, use an HTTP library which supports persistent
-("keep-alive") connections, such as https://github.com/toland/patron[patron] or
-https://github.com/typhoeus/typhoeus[Typhoeus]. Require the library
-(`require 'patron'`) in your code for Faraday 1.x or the adapter
-(`require 'faraday/patron'`) for Faraday 2.x, and it will be automatically used.
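-
-As a sketch, retries and a persistent-connection adapter can be combined like this (hosts are placeholders; with Faraday 1.x require the underlying library instead of the `faraday/*` adapter):
-
-[source,rb]
-----------------------------
-require 'elasticsearch'
-require 'faraday/typhoeus'
-
-client = Elasticsearch::Client.new(
-  hosts: ['http://localhost:9200', 'http://localhost:9201'],
-  retry_on_failure: 3,
-  retry_on_status: [502, 503],
-  adapter: :typhoeus
-)
-----------------------------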
-
-
-[discrete]
-[[ruby-ts-adapter]]
-=== Adapter is not registered on Faraday
-
-If you see a message like:
-```
-:adapter is not registered on Faraday::Adapter (Faraday::Error)
-```
-
-Then you might need to include the adapter library in your Gemfile and require
-it. You might get this error when migrating from Faraday v1 to Faraday v2. The
-main change when using Faraday v2 is all adapters, except for the default
-`net_http` one, have been moved out into separate gems. This means if you're not
-using the default adapter and you migrate to Faraday v2, you'll need to add the
-adapter gems to your Gemfile.
-
-These are the gems required for the different adapters with Faraday 2, instead
-of the libraries on which they were based:
-
-[source,ruby]
-------------------------------------
-# HTTPClient
-gem 'faraday-httpclient'
-
-# NetHTTPPersistent
-gem 'faraday-net_http_persistent'
-
-# Patron
-gem 'faraday-patron'
-
-# Typhoeus
-gem 'faraday-typhoeus'
-------------------------------------
-
-Migrating to Faraday 2 solves the issue as long as the adapter is included
-(unless you're using the default one `net-http`). Alternatively, you can lock
-the version of Faraday in your project to 1.x:
-`gem 'faraday', '~> 1'`
-
-IMPORTANT: Migrating to Faraday v2 requires at least Ruby `2.6`. Faraday v1
-requires `2.4`.
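-
-As a sketch, with Faraday v2 and the Patron adapter, the fix is the gem plus an explicit require before the client is created (the host is a placeholder):
-
-[source,ruby]
-------------------------------------
-# Gemfile: gem 'faraday-patron'
-require 'elasticsearch'
-require 'faraday/patron'
-
-client = Elasticsearch::Client.new(host: 'http://localhost:9200', adapter: :patron)
-------------------------------------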
-
-[discrete]
-=== More Help
-
-If you need more help, visit the
-https://discuss.elastic.co/[Elastic community forums] and get answers from the
-experts in the community, including people from Elastic.
-
-If you find a bug, have feedback, or find any other issue using the client,
-https://github.com/elastic/elasticsearch-ruby/issues/new/choose[submit an issue]
-on GitHub.
\ No newline at end of file
diff --git a/elasticsearch-api/Rakefile b/elasticsearch-api/Rakefile
index 6c6fa4fcf7..957016d205 100644
--- a/elasticsearch-api/Rakefile
+++ b/elasticsearch-api/Rakefile
@@ -35,6 +35,7 @@ namespace :test do
task :unit
RSpec::Core::RakeTask.new(:unit) do |t|
t.pattern = 'spec/unit/**/*_spec.rb'
+ t.exclude_pattern = 'spec/unit/perform_request_spec.rb' unless ENV['TEST_WITH_OTEL']
end
desc 'Run unit and integration tests'
diff --git a/elasticsearch-api/lib/elasticsearch/api.rb b/elasticsearch-api/lib/elasticsearch/api.rb
index 1a8471002e..eddd6bcd1a 100644
--- a/elasticsearch-api/lib/elasticsearch/api.rb
+++ b/elasticsearch-api/lib/elasticsearch/api.rb
@@ -15,21 +15,19 @@
# specific language governing permissions and limitations
# under the License.
-require "cgi"
-require "multi_json"
-
-require "elasticsearch/api/version"
-require "elasticsearch/api/namespace/common"
-require "elasticsearch/api/utils"
+require 'cgi'
+require 'multi_json'
+require 'elasticsearch/api/version'
+require 'elasticsearch/api/utils'
require 'elasticsearch/api/response'
-Dir[ File.expand_path('../api/actions/**/*.rb', __FILE__) ].each { |f| require f }
-Dir[ File.expand_path('../api/namespace/**/*.rb', __FILE__) ].each { |f| require f }
+Dir[File.expand_path('api/actions/**/*.rb', __dir__)].each { |f| require f }
module Elasticsearch
# This is the main module for including all API endpoint functions
# It includes the namespace modules from ./api/actions
module API
+ include Elasticsearch::API::Actions
DEFAULT_SERIALIZER = MultiJson
HTTP_GET = 'GET'.freeze
@@ -37,58 +35,84 @@ module API
HTTP_POST = 'POST'.freeze
HTTP_PUT = 'PUT'.freeze
HTTP_DELETE = 'DELETE'.freeze
- UNDERSCORE_SEARCH = '_search'.freeze
- UNDERSCORE_ALL = '_all'.freeze
- DEFAULT_DOC = '_doc'.freeze
- # Auto-include all namespaces in the receiver
+ module CommonClient
+ attr_reader :client
+
+ def initialize(client)
+ @client = client
+ end
+
+ def perform_request(method, path, params = {}, body = nil, headers = nil, request_opts = {})
+ client.perform_request(method, path, params, body, headers, request_opts)
+ end
+ end
+
+ # Add new namespaces to this constant
#
- def self.included(base)
- base.send :include,
- Elasticsearch::API::Common,
- Elasticsearch::API::Actions,
- Elasticsearch::API::Cluster,
- Elasticsearch::API::Nodes,
- Elasticsearch::API::Indices,
- Elasticsearch::API::Ingest,
- Elasticsearch::API::Snapshot,
- Elasticsearch::API::Tasks,
- Elasticsearch::API::Cat,
- Elasticsearch::API::Remote,
- Elasticsearch::API::DanglingIndices,
- Elasticsearch::API::Features,
- Elasticsearch::API::AsyncSearch,
- Elasticsearch::API::Autoscaling,
- Elasticsearch::API::CrossClusterReplication,
- Elasticsearch::API::DataFrameTransformDeprecated,
- Elasticsearch::API::Enrich,
- Elasticsearch::API::Eql,
- Elasticsearch::API::Fleet,
- Elasticsearch::API::Graph,
- Elasticsearch::API::IndexLifecycleManagement,
- Elasticsearch::API::License,
- Elasticsearch::API::Logstash,
- Elasticsearch::API::Migration,
- Elasticsearch::API::MachineLearning,
- Elasticsearch::API::Rollup,
- Elasticsearch::API::SearchableSnapshots,
- Elasticsearch::API::Security,
- Elasticsearch::API::SnapshotLifecycleManagement,
- Elasticsearch::API::SQL,
- Elasticsearch::API::SSL,
- Elasticsearch::API::TextStructure,
- Elasticsearch::API::Transform,
- Elasticsearch::API::Watcher,
- Elasticsearch::API::XPack,
- Elasticsearch::API::SearchApplication,
- Elasticsearch::API::Synonyms,
- Elasticsearch::API::Esql,
- Elasticsearch::API::Inference,
- Elasticsearch::API::Simulate,
- Elasticsearch::API::Connector,
- Elasticsearch::API::QueryRules
+ API_NAMESPACES = [:async_search,
+ :cat,
+ :cross_cluster_replication,
+ :cluster,
+ :connector,
+ :dangling_indices,
+ :enrich,
+ :eql,
+ :esql,
+ :features,
+ :fleet,
+ :graph,
+ :index_lifecycle_management,
+ :indices,
+ :inference,
+ :ingest,
+ :license,
+ :logstash,
+ :migration,
+ :machine_learning,
+ :nodes,
+ :query_rules,
+ :search_application,
+ :searchable_snapshots,
+ :security,
+ :simulate,
+ :snapshot_lifecycle_management,
+ :snapshot,
+ :sql,
+ :ssl,
+ :synonyms,
+ :tasks,
+ :text_structure,
+ :transform,
+ :watcher,
+ :xpack].freeze
+
+ UPPERCASE_APIS = ['sql', 'ssl'].freeze
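+  # For each namespace, build a `<Namespace>Client` class that mixes in
+  # CommonClient and that namespace's Actions module, then define an accessor
+  # (e.g. `client.cat`, `client.indices`) that returns an instance bound to
+  # the calling client.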
+ API_NAMESPACES.each do |namespace|
+ name = namespace.to_s
+ module_name = if UPPERCASE_APIS.include?(name)
+ name.upcase
+ elsif name == 'xpack'
+ 'XPack'
+ else
+ name.split('_').map(&:capitalize).join
+ end
+ class_name = "#{module_name}Client"
+
+ klass = Class.new(Object) do
+ include CommonClient, Object.const_get("Elasticsearch::API::#{module_name}::Actions")
+ end
+ Object.const_set(class_name, klass)
+ define_method(name) do
+ instance_variable_set("@#{name}", klass.new(self))
+ end
end
+ alias ml machine_learning
+ alias ilm index_lifecycle_management
+ alias ccr cross_cluster_replication
+ alias slm snapshot_lifecycle_management
+
# The serializer class
#
def self.serializer
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb
index 21fefe4527..03a15ee6eb 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/delete.rb
@@ -15,19 +15,33 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module AsyncSearch
module Actions
- # Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.
+ # Delete an async search.
+ # If the asynchronous search is still running, it is cancelled.
+ # Otherwise, the saved search results are deleted.
+ # If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.
#
- # @option arguments [String] :id The async search ID
+ # @option arguments [String] :id A unique identifier for the async search. (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit
#
def delete(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'async_search.delete' }
@@ -47,8 +61,8 @@ def delete(arguments = {})
_id = arguments.delete(:id)
method = Elasticsearch::API::HTTP_DELETE
- path = "_async_search/#{Utils.__listify(_id)}"
- params = {}
+ path = "_async_search/#{Utils.listify(_id)}"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb
index 92c26c5887..bba9f18e2a 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/get.rb
@@ -15,22 +15,41 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module AsyncSearch
module Actions
- # Retrieves the results of a previously submitted async search request given its ID.
+ # Get async search results.
+ # Retrieve the results of a previously submitted asynchronous search request.
+ # If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
#
- # @option arguments [String] :id The async search ID
- # @option arguments [Time] :wait_for_completion_timeout Specify the time that the request should block waiting for the final response
- # @option arguments [Time] :keep_alive Specify the time interval in which the results (partial or final) for this search will be available
+ # @option arguments [String] :id A unique identifier for the async search. (*Required*)
+ # @option arguments [Time] :keep_alive The length of time that the async search should be available in the cluster.
+ # When not specified, the `keep_alive` set with the corresponding submit async request will be used.
+ # Otherwise, it is possible to override the value and extend the validity of the request.
+ # When this period expires, the search, if still running, is cancelled.
+ # If the search is completed, its saved results are deleted.
# @option arguments [Boolean] :typed_keys Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+ # @option arguments [Time] :wait_for_completion_timeout Specifies to wait for the search to be completed up until the provided timeout.
+ # Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
+ # By default no timeout is set meaning that the currently available results will be returned without any additional wait.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit
#
def get(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'async_search.get' }
@@ -50,7 +69,7 @@ def get(arguments = {})
_id = arguments.delete(:id)
method = Elasticsearch::API::HTTP_GET
- path = "_async_search/#{Utils.__listify(_id)}"
+ path = "_async_search/#{Utils.listify(_id)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
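A usage sketch for `get`, reusing the `client` from the earlier sketch; the id and the timeout values are illustrative:

```ruby
# Fetch the results of a previously submitted async search, waiting up to
# two seconds for completion and extending how long the results are kept.
response = client.async_search.get(
  id: 'my-async-search-id',
  wait_for_completion_timeout: '2s',
  keep_alive: '10m'
)
puts response['is_running'] # true while the search is still executing
```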
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb
index 4b2008d03d..2137b0e974 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/status.rb
@@ -15,20 +15,36 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module AsyncSearch
module Actions
- # Retrieves the status of a previously submitted async search request given its ID.
+ # Get the async search status.
+ # Get the status of a previously submitted async search request given its identifier, without retrieving search results.
+ # If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:
+ # * The user or API key that submitted the original async search request.
+ # * Users that have the `monitor` cluster privilege or greater privileges.
#
- # @option arguments [String] :id The async search ID
- # @option arguments [Time] :keep_alive Specify the time interval in which the results (partial or final) for this search will be available
+ # @option arguments [String] :id A unique identifier for the async search. (*Required*)
+ # @option arguments [Time] :keep_alive The length of time that the async search needs to be available.
+ # Ongoing async searches and any saved search results are deleted after this period. Server default: 5d.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit
#
def status(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'async_search.status' }
@@ -48,7 +64,7 @@ def status(arguments = {})
_id = arguments.delete(:id)
method = Elasticsearch::API::HTTP_GET
- path = "_async_search/status/#{Utils.__listify(_id)}"
+ path = "_async_search/status/#{Utils.listify(_id)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
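A usage sketch for `status`, again with an illustrative id; useful for polling without pulling hits over the wire:

```ruby
# Check progress without retrieving the search results themselves.
status = client.async_search.status(id: 'my-async-search-id')
puts status['is_running']
puts status['completion_status'] # HTTP-style status code, present once the search has finished
```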
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb
index 34c9b80ae1..6e7b4327e0 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/async_search/submit.rb
@@ -15,63 +15,81 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module AsyncSearch
module Actions
- # Executes a search request asynchronously.
+ # Run an async search.
+ # When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested.
+ # Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.
+ # By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error.
+ # The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.
#
- # @option arguments [List] :index A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices
- # @option arguments [Time] :wait_for_completion_timeout Specify the time that the request should block waiting for the final response
- # @option arguments [Boolean] :keep_on_completion Control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false)
- # @option arguments [Time] :keep_alive Update the time interval in which the results (partial or final) for this search will be available
- # @option arguments [Number] :batched_reduce_size The number of shard results that should be reduced at once on the coordinating node. This value should be used as the granularity at which progress results will be made available.
- # @option arguments [Boolean] :request_cache Specify if request cache should be used for this request or not, defaults to true
+ # @option arguments [String, Array] :index A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices
+ # @option arguments [Time] :wait_for_completion_timeout Blocks and waits until the search is completed up to a certain timeout.
+ # When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. Server default: 1s.
+ # @option arguments [Time] :keep_alive Specifies how long the async search needs to be available.
+ # Ongoing async searches and any saved search results are deleted after this period. Server default: 5d.
+ # @option arguments [Boolean] :keep_on_completion If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
+ # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+ # @option arguments [Boolean] :allow_partial_search_results Indicate if an error should be returned if there is a partial search failure or timeout
# @option arguments [String] :analyzer The analyzer to use for the query string
# @option arguments [Boolean] :analyze_wildcard Specify whether wildcard and prefix queries should be analyzed (default: false)
- # @option arguments [Boolean] :ccs_minimize_roundtrips When doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. However, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters.
- # @option arguments [String] :default_operator The default operator for query string query (AND or OR) (options: AND, OR)
+ # @option arguments [Integer] :batched_reduce_size Affects how often partial results become available, which happens whenever shard results are reduced.
+ # A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). Server default: 5.
+ # @option arguments [Boolean] :ccs_minimize_roundtrips The default value is the only supported value.
+ # @option arguments [String] :default_operator The default operator for query string query (AND or OR)
# @option arguments [String] :df The field to use as default where no field prefix is given in the query string
+ # @option arguments [String, Array] :docvalue_fields A comma-separated list of fields to return as the docvalue representation of a field for each hit
+ # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both.
# @option arguments [Boolean] :explain Specify whether to return detailed information about score computation as part of a hit
- # @option arguments [List] :stored_fields A comma-separated list of stored fields to return as part of a hit
- # @option arguments [List] :docvalue_fields A comma-separated list of fields to return as the docvalue representation of a field for each hit
- # @option arguments [Number] :from Starting offset (default: 0)
- # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed)
# @option arguments [Boolean] :ignore_throttled Whether specified concrete, expanded or aliased indices should be ignored when throttled
- # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
- # @option arguments [String] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. (options: open, closed, hidden, none, all)
+ # @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed)
# @option arguments [Boolean] :lenient Specify whether format-based query failures (such as providing text to a numeric field) should be ignored
+ # @option arguments [Integer] :max_concurrent_shard_requests The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
# @option arguments [String] :preference Specify the node or shard the operation should be performed on (default: random)
- # @option arguments [Boolean] :rest_total_hits_as_int Indicates whether hits.total should be rendered as an integer or an object in the rest search response
- # @option arguments [String] :q Query in the Lucene query string syntax
- # @option arguments [List] :routing A comma-separated list of specific routing values
- # @option arguments [String] :search_type Search operation type (options: query_then_fetch, dfs_query_then_fetch)
- # @option arguments [Number] :size Number of hits to return (default: 10)
- # @option arguments [List] :sort A comma-separated list of : pairs
- # @option arguments [List] :_source True or false to return the _source field or not, or a list of fields to return
- # @option arguments [List] :_source_excludes A list of fields to exclude from the returned _source field
- # @option arguments [List] :_source_includes A list of fields to extract and return from the _source field
- # @option arguments [Number] :terminate_after The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
- # @option arguments [List] :stats Specific 'tag' of the request for logging and statistical purposes
- # @option arguments [String] :suggest_field Specify which field to use for suggestions
- # @option arguments [String] :suggest_mode Specify suggest mode (options: missing, popular, always)
- # @option arguments [Number] :suggest_size How many suggestions to return in response
- # @option arguments [String] :suggest_text The source text for which the suggestions should be returned
+ # @option arguments [Boolean] :request_cache Specify if request cache should be used for this request or not. Server default: true.
+ # @option arguments [String] :routing A comma-separated list of specific routing values
+ # @option arguments [String] :search_type Search operation type
+ # @option arguments [Array] :stats Specific 'tag' of the request for logging and statistical purposes
+ # @option arguments [String, Array] :stored_fields A comma-separated list of stored fields to return as part of a hit
+ # @option arguments [String] :suggest_field Specifies which field to use for suggestions.
+ # @option arguments [String] :suggest_mode Specify suggest mode
+ # @option arguments [Integer] :suggest_size How many suggestions to return in response
+ # @option arguments [String] :suggest_text The source text for which the suggestions should be returned.
+ # @option arguments [Integer] :terminate_after The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early.
# @option arguments [Time] :timeout Explicit operation timeout
+ # @option arguments [Boolean, Integer] :track_total_hits Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number.
# @option arguments [Boolean] :track_scores Whether to calculate and return scores even if they are not used for sorting
- # @option arguments [Boolean|long] :track_total_hits Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number.
- # @option arguments [Boolean] :allow_partial_search_results Indicate if an error should be returned if there is a partial search failure or timeout
# @option arguments [Boolean] :typed_keys Specify whether aggregation and suggester names should be prefixed by their respective types in the response
+ # @option arguments [Boolean] :rest_total_hits_as_int Indicates whether hits.total should be rendered as an integer or an object in the rest search response
# @option arguments [Boolean] :version Specify whether to return document version as part of a hit
+ # @option arguments [Boolean, String, Array] :_source True or false to return the _source field or not, or a list of fields to return
+ # @option arguments [String, Array] :_source_excludes A list of fields to exclude from the returned _source field
+ # @option arguments [String, Array] :_source_includes A list of fields to extract and return from the _source field
# @option arguments [Boolean] :seq_no_primary_term Specify whether to return sequence number and primary term of the last modification of each hit
- # @option arguments [Number] :max_concurrent_shard_requests The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests
+ # @option arguments [String] :q Query in the Lucene query string syntax
+ # @option arguments [Integer] :size Number of hits to return (default: 10)
+ # @option arguments [Integer] :from Starting offset (default: 0)
+ # @option arguments [String, Array] :sort A comma-separated list of `<field>:<direction>` pairs
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The search definition using the Query DSL
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit
#
def submit(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'async_search.submit' }
@@ -84,13 +102,13 @@ def submit(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body)
+ body = arguments.delete(:body)
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_POST
path = if _index
- "#{Utils.__listify(_index)}/_async_search"
+ "#{Utils.listify(_index)}/_async_search"
else
'_async_search'
end
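A sketch of the `submit` flow the documentation describes, assuming a hypothetical `logs` index; the query, aggregation, and timeout values are illustrative:

```ruby
# Start an async search. If it finishes within the wait window the response
# carries the hits directly; otherwise it returns an id to poll with get/status.
response = client.async_search.submit(
  index: 'logs',
  wait_for_completion_timeout: '1s',
  keep_on_completion: true,
  keep_alive: '5m',
  body: {
    query: { range: { '@timestamp' => { gte: 'now-1d/d' } } },
    aggregations: { hosts: { terms: { field: 'host.keyword' } } }
  }
)
search_id = response['id'] # nil when the search completed within the wait window
```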
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb b/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb
index c1a0491414..7dd50e21a2 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/bulk.rb
@@ -15,32 +15,146 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Actions
- # Allows to perform multiple index/update/delete operations in a single request.
+ # Bulk index or delete documents.
+ # Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.
+ # This reduces overhead and can greatly increase indexing speed.
+ # If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+ # * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.
+ # * To use the `index` action, you must have the `create`, `index`, or `write` index privilege.
+ # * To use the `delete` action, you must have the `delete` or `write` index privilege.
+ # * To use the `update` action, you must have the `index` or `write` index privilege.
+ # * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+ # * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.
+ # Automatic data stream creation requires a matching index template with data stream enabled.
+ # The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:
+ #
+ # ```
+ # action_and_meta_data
+ #
+ # optional_source
+ #
+ # action_and_meta_data
+ #
+ # optional_source
+ #
+ # ....
+ # action_and_meta_data
+ #
+ # optional_source
+ #
+ # ```
+ #
+ # The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.
+ # A `create` action fails if a document with the same ID already exists in the target.
+ # An `index` action adds or replaces a document as necessary.
+ # NOTE: Data streams support only the `create` action.
+ # To update or delete a document in a data stream, you must target the backing index containing the document.
+ # An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.
+ # A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.
+ # NOTE: The final line of data must end with a newline character (`\n`).
+ # Each newline character may be preceded by a carriage return (`\r`).
+ # When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.
+ # Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.
+ # If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.
+ # A note on the format: the idea here is to make processing as fast as possible.
+ # As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.
+ # Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.
+ # There is no "correct" number of actions to perform in a single bulk request.
+ # Experiment with different settings to find the optimal size for your particular workload.
+ # Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.
+ # It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
+ # For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
+ # **Client support for bulk requests**
+ # Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:
+ # * Go: Check out `esutil.BulkIndexer`
+ # * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`
+ # * Python: Check out `elasticsearch.helpers.*`
+ # * JavaScript: Check out `client.helpers.*`
+ # * .NET: Check out `BulkAllObservable`
+ # * PHP: Check out bulk indexing.
+ # * Ruby: Check out `Elasticsearch::Helpers::BulkHelper`
+ # **Submitting bulk requests with cURL**
+ # If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.
+ # The latter doesn't preserve newlines. For example:
+ #
+ # ```
+ # $ cat requests
+ # { "index" : { "_index" : "test", "_id" : "1" } }
+ # { "field1" : "value1" }
+ # $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+ # {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+ # ```
+ #
+ # **Optimistic concurrency control**
+ # Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
+ # The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
+ # **Versioning**
+ # Each bulk item can include the version value using the `version` field.
+ # It automatically follows the behavior of the index or delete operation based on the `_version` mapping.
+ # It also supports the `version_type`.
+ # **Routing**
+ # Each bulk item can include the routing value using the `routing` field.
+ # It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
+ # NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+ # **Wait for active shards**
+ # When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
+ # **Refresh**
+ # Control when the changes made by this request are visible to search.
+ # NOTE: Only the shards that receive the bulk request will be affected by refresh.
+ # Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+ # The request will only wait for those three shards to refresh.
+ # The other two shards that make up the index do not participate in the `_bulk` request at all.
+ # You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests.
+ # Refer to the linked documentation for step-by-step instructions using the index settings API.
#
- # @option arguments [String] :index Default index for items which don't provide one
- # @option arguments [String] :wait_for_active_shards Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
- # @option arguments [String] :refresh If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. (options: true, false, wait_for)
- # @option arguments [String] :routing Specific routing value
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [List] :_source True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request
- # @option arguments [List] :_source_excludes Default list of fields to exclude from the returned _source field, can be overridden on each sub-request
- # @option arguments [List] :_source_includes Default list of fields to extract and return from the _source field, can be overridden on each sub-request
- # @option arguments [String] :pipeline The pipeline id to preprocess incoming documents with
- # @option arguments [Boolean] :require_alias If true, the request’s actions must target an index alias. Defaults to false.
- # @option arguments [Boolean] :require_data_stream If true, the request's actions must target a data stream (existing or to-be-created). Default to false
- # @option arguments [Boolean] :list_executed_pipelines Sets list_executed_pipelines for all incoming documents. Defaults to unset (false)
- # @option arguments [Boolean] :include_source_on_error True or false if to include the document source in the error message in case of parsing errors. Defaults to true.
+ # @option arguments [String] :index The name of the data stream, index, or index alias to perform bulk actions on.
+ # @option arguments [Boolean] :include_source_on_error Whether to include the document source in the error message in case of parsing errors. Server default: true.
+ # @option arguments [Boolean] :list_executed_pipelines If `true`, the response will include the ingest pipelines that were run for each index or create.
+ # @option arguments [String] :pipeline The pipeline identifier to use to preprocess incoming documents.
+ # If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.
+ # If a final pipeline is configured, it will always run regardless of the value of this parameter.
+ # @option arguments [String] :refresh If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+ # If `wait_for`, wait for a refresh to make this operation visible to search.
+ # If `false`, do nothing with refreshes.
+ # Valid values: `true`, `false`, `wait_for`. Server default: false.
+ # @option arguments [String] :routing A custom value that is used to route operations to a specific shard.
+ # @option arguments [Boolean, String, Array] :_source Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.
+ # @option arguments [String, Array] :_source_excludes A comma-separated list of source fields to exclude from the response.
+ # You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
+ # If the `_source` parameter is `false`, this parameter is ignored.
+ # @option arguments [String, Array] :_source_includes A comma-separated list of source fields to include in the response.
+ # If this parameter is specified, only these source fields are returned.
+ # You can exclude fields from this subset using the `_source_excludes` query parameter.
+ # If the `_source` parameter is `false`, this parameter is ignored.
+ # @option arguments [Time] :timeout The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards.
+ # The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing.
+ # The actual wait time could be longer, particularly when multiple waits occur. Server default: 1m.
+ # @option arguments [Integer, String] :wait_for_active_shards The number of shard copies that must be active before proceeding with the operation.
+ # Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+ # The default is `1`, which waits for each primary shard to be active. Server default: 1.
+ # @option arguments [Boolean] :require_alias If `true`, the request's actions must target an index alias.
+ # @option arguments [Boolean] :require_data_stream If `true`, the request's actions must target a data stream (existing or to be created).
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [String|Array] :body The operation definition and data (action-data pairs), separated by newlines. Array of Strings, Header/Data pairs,
- # or the conveniency "combined" format can be passed, refer to Elasticsearch::API::Utils.__bulkify documentation.
+ # @option arguments [String|Array] :body Operations. Array of Strings, Header/Data pairs, or the convenience "combined" format can be passed; refer to Elasticsearch::API::Utils.bulkify documentation.
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-bulk
#
def bulk(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'bulk' }
@@ -55,25 +169,25 @@ def bulk(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body)
+ body = arguments.delete(:body)
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_POST
path = if _index
- "#{Utils.__listify(_index)}/_bulk"
+ "#{Utils.listify(_index)}/_bulk"
else
'_bulk'
end
params = Utils.process_params(arguments)
payload = if body.is_a? Array
- Elasticsearch::API::Utils.__bulkify(body)
+ Elasticsearch::API::Utils.bulkify(body)
else
body
end
- headers.merge!('Content-Type' => 'application/x-ndjson')
+ Utils.update_ndjson_headers!(headers, transport.options.dig(:transport_options, :headers))
Elasticsearch::API::Response.new(
perform_request(method, path, params, payload, headers, request_opts)
)
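A sketch of a `bulk` call using the "combined" array format mentioned in the `:body` documentation, where each action hash carries its source under `data:`; the `books` index and documents are hypothetical:

```ruby
actions = [
  { index:  { _index: 'books', _id: '1', data: { title: 'Leviathan Wakes', year: 2011 } } },
  { create: { _index: 'books', _id: '2', data: { title: 'Hyperion', year: 1989 } } },
  { update: { _index: 'books', _id: '1', data: { doc: { year: 2012 } } } },
  { delete: { _index: 'books', _id: '2' } }
]

# Utils.bulkify serializes the array to NDJSON and the client sends it with
# the application/x-ndjson content type set up in the diff above.
response = client.bulk(body: actions, refresh: true)
puts response['errors'] # false when every item succeeded
```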
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb
index 8c13623618..b7ff709894 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/aliases.rb
@@ -15,26 +15,48 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Shows information about currently configured aliases to indices including filter and routing infos.
+ # Get aliases.
+ # Get the cluster's index aliases, including filter and routing information.
+ # This API does not return data stream aliases.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.
#
- # @option arguments [List] :name A comma-separated list of alias names to return
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
- # @option arguments [String] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. (options: open, closed, hidden, none, all)
- # @option arguments [Time] :master_timeout Timeout for waiting for new cluster state in case it is blocked
+ # @option arguments [String, Array] :name A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match.
+ # If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+ # It supports comma-separated values, such as `open,hidden`.
+ # @option arguments [Time] :master_timeout The period to wait for a connection to the master node.
+ # If the master node is not available before the timeout expires, the request fails and returns an error.
+ # To indicate that the request should never time out, you can set it to `-1`. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-aliases
#
def aliases(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.aliases' }
@@ -53,12 +75,12 @@ def aliases(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _name
- "_cat/aliases/#{Utils.__listify(_name)}"
+ "_cat/aliases/#{Utils.listify(_name)}"
else
'_cat/aliases'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
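A usage sketch for `cat.aliases`; the `logs-*` pattern is hypothetical, and `format: 'json'` is used when a program needs to inspect the rows:

```ruby
# Human-readable table with column headers.
puts client.cat.aliases(name: 'logs-*', v: true).body

# Structured output for programmatic inspection.
rows = client.cat.aliases(name: 'logs-*', format: 'json').body
rows.each { |row| puts "#{row['alias']} -> #{row['index']}" }
```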
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb
index f37b53e12d..83e81de14d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/allocation.rb
@@ -15,27 +15,47 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.
+ # Get shard allocation information.
+ # Get a snapshot of the number of shards allocated to each data node and their disk space.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
#
- # @option arguments [List] :node_id A comma-separated list of node IDs or names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :node_id A comma-separated list of node identifiers or names used to limit the returned information.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ # local cluster state. If `false` the list of selected nodes are computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-allocation
#
def allocation(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.allocation' }
@@ -54,12 +74,12 @@ def allocation(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _node_id
- "_cat/allocation/#{Utils.__listify(_node_id)}"
+ "_cat/allocation/#{Utils.listify(_node_id)}"
else
'_cat/allocation'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
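A usage sketch for `cat.allocation`, showing per-node shard counts and disk usage with byte values rendered in megabytes:

```ruby
# Snapshot of shard allocation and disk usage across data nodes.
puts client.cat.allocation(bytes: 'mb', v: true).body
```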
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb
index 0765a09a55..62fb05455f 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/component_templates.rb
@@ -15,26 +15,50 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about existing component_templates templates.
+ # Get component templates.
+ # Get information about component templates in a cluster.
+ # Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+ # They are not intended for use by applications. For application consumption, use the get component template API.
#
- # @option arguments [String] :name A pattern that returned component template names must match
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :name The name of the component template.
+ # It accepts wildcard expressions.
+ # If it is omitted, all component templates are returned.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ # local cluster state. If `false` the list of selected nodes are computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-component-templates.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-component-templates
#
def component_templates(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.component_templates' }
@@ -53,7 +77,7 @@ def component_templates(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _name
- "_cat/component_templates/#{Utils.__listify(_name)}"
+ "_cat/component_templates/#{Utils.listify(_name)}"
else
'_cat/component_templates'
end
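A usage sketch for `cat.component_templates`; the `logs-*` name pattern is illustrative:

```ruby
# List component templates matching a wildcard pattern, sorted by name.
puts client.cat.component_templates(name: 'logs-*', s: 'name', v: true).body
```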
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb
index 8afd4cade6..109e7c3f5f 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/count.rb
@@ -15,24 +15,45 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Provides quick access to the document count of the entire cluster, or individual indices.
+ # Get a document count.
+ # Get quick access to a document count for a data stream, an index, or an entire cluster.
+ # The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+ # They are not intended for use by applications. For application consumption, use the count API.
#
- # @option arguments [List] :index A comma-separated list of index names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request.
+ # It supports wildcards (`*`).
+ # To target all data streams and indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-count
#
def count(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.count' }
@@ -45,18 +66,18 @@ def count(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cat/count/#{Utils.__listify(_index)}"
+ "_cat/count/#{Utils.listify(_index)}"
else
'_cat/count'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
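A usage sketch for `cat.count`, against a hypothetical `books` index and against the whole cluster:

```ruby
# Live document count for one index, then for every index in the cluster.
puts client.cat.count(index: 'books', v: true).body
puts client.cat.count(v: true).body
```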
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb
index 592958c962..a68d445155 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/fielddata.rb
@@ -15,25 +15,44 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Shows how much heap memory is currently being used by fielddata on every data node in the cluster.
+ # Get field data cache information.
+ # Get the amount of heap memory currently used by the field data cache on every data node in the cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.
+ # They are not intended for use by applications. For application consumption, use the nodes stats API.
#
- # @option arguments [List] :fields A comma-separated list of fields to return the fielddata size
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :fields Comma-separated list of fields used to limit returned information.
+ # To retrieve all fields, omit this parameter.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-fielddata
#
def fielddata(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.fielddata' }
@@ -52,7 +71,7 @@ def fielddata(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _fields
- "_cat/fielddata/#{Utils.__listify(_fields)}"
+ "_cat/fielddata/#{Utils.listify(_fields)}"
else
'_cat/fielddata'
end
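A hedged sketch of calling the fielddata action, limiting output to two fields and reporting sizes in megabytes (field names and connection details are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Show field data cache usage for the listed fields, with byte values in megabytes and column headers.
puts client.cat.fielddata(fields: 'title,body', bytes: 'mb', v: true).body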
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb
index 59309d5084..d0b4b83b84 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/health.rb
@@ -15,25 +15,49 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns a concise representation of the cluster health.
+ # Get the cluster health status.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+ # They are not intended for use by applications. For application consumption, use the cluster health API.
+ # This API is often used to check malfunctioning clusters.
+ # To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:
+ # `HH:MM:SS`, which is human-readable but includes no date information;
+ # `Unix epoch time`, which is machine-sortable and includes date information.
+ # The latter format is useful for cluster recoveries that take multiple days.
+ # You can use the cat health API to verify cluster health across multiple nodes.
+ # You can also use the API to track the recovery of a large cluster over a longer period of time.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :ts Set to false to disable timestamping
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [Boolean] :ts If true, returns `HH:MM:SS` and Unix epoch timestamps. Server default: true.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-health
#
def health(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.health' }
@@ -41,12 +65,12 @@ def health(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/health'
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
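A minimal sketch of the health action, requesting column headers and time values in seconds (connection details are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Include column headers; ts defaults to true, so HH:MM:SS and epoch timestamps are both returned.
puts client.cat.health(v: true, time: 's').body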
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb
index 0d9f22f752..d5745922f7 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb
@@ -15,20 +15,19 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns help for the Cat APIs.
+ # Get CAT help.
+ # Get help for the CAT APIs.
#
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-cat
#
def help(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.help' }
@@ -36,11 +35,11 @@ def help(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat'
- params = Utils.process_params(arguments)
+ params = {}
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb
index accc79e8cb..3d64ac82be 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/indices.rb
@@ -15,31 +15,58 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about indices: number of primaries and replicas, document counts, disk size, ...
+ # Get index information.
+ # Get high-level information about indices in a cluster, including backing indices for data streams.
+ # Use this request to get the following information for each index in a cluster:
+ # - shard count
+ # - document count
+ # - deleted document count
+ # - primary store size
+ # - total store size of all shards, including shard replicas
+ # These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.
+ # To get an accurate count of Elasticsearch documents, use the cat count or count APIs.
+ # CAT APIs are only intended for human consumption using the command line or Kibana console.
+ # They are not intended for use by applications. For application consumption, use an index endpoint.
#
- # @option arguments [List] :index A comma-separated list of index names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [String] :health A health status ("green", "yellow", or "red" to filter only indices matching the specified health status (options: green, yellow, red)
- # @option arguments [Boolean] :help Return help information
- # @option arguments [Boolean] :pri Set to true to return stats only for primary shards
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
- # @option arguments [Boolean] :include_unloaded_segments If set to true segment stats will include stats for segments that are not currently loaded into memory
- # @option arguments [String] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. (options: open, closed, hidden, none, all)
+ # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and aliases used to limit the request.
+ # Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :expand_wildcards The type of index that wildcard patterns can match.
+ # @option arguments [String] :health The health status used to limit returned indices. By default, the response includes indices of any health status.
+ # @option arguments [Boolean] :include_unloaded_segments If true, the response includes information from segments that are not loaded into memory.
+ # @option arguments [Boolean] :pri If true, the response only includes information from primary shards.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-indices
#
def indices(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.indices' }
@@ -52,18 +79,18 @@ def indices(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cat/indices/#{Utils.__listify(_index)}"
+ "_cat/indices/#{Utils.listify(_index)}"
else
'_cat/indices'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
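A usage sketch of the indices action combining a wildcard index pattern, a health filter, sorting, and byte units; the pattern and URL are illustrative:

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# List indices matching a pattern, only those with yellow health,
# sorted by store size descending, with byte values in megabytes.
puts client.cat.indices(index: 'logs-*', health: 'yellow', s: 'store.size:desc', bytes: 'mb', v: true).body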
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb
index f8512afd3b..bab7a71a5b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/master.rb
@@ -15,25 +15,45 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about the master node.
+ # Get master node information.
+ # Get information about the master node, including the ID, bound IP address, and name.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ # local cluster state. If `false` the list of selected nodes are computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-master
#
def master(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.master' }
@@ -41,7 +61,7 @@ def master(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/master'
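A brief sketch of the master action with an explicit master timeout (the timeout value and URL are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Show the elected master node, waiting up to 10 seconds for the master connection.
puts client.cat.master(v: true, master_timeout: '10s').body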
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb
index 72e1f69773..885ddda57d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_data_frame_analytics.rb
@@ -15,27 +15,45 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Gets configuration and usage information about data frame analytics jobs.
+ # Get data frame analytics jobs.
+ # Get configuration and usage information about data frame analytics jobs.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+ # console or command line. They are not intended for use by applications. For
+ # application consumption, use the get data frame analytics jobs statistics API.
#
# @option arguments [String] :id The ID of the data frame analytics to fetch
# @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified)
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :bytes The unit in which to display byte values
+ # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: create_time,id,state,type.
+ # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the
+ # response.
+ # @option arguments [String] :time Unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-data-frame-analytics
#
def ml_data_frame_analytics(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.ml_data_frame_analytics' }
@@ -54,7 +72,7 @@ def ml_data_frame_analytics(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _id
- "_cat/ml/data_frame/analytics/#{Utils.__listify(_id)}"
+ "_cat/ml/data_frame/analytics/#{Utils.listify(_id)}"
else
'_cat/ml/data_frame/analytics'
end
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb
index c00d25ba2c..2892c1f817 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_datafeeds.rb
@@ -15,26 +15,52 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Gets configuration and usage information about datafeeds.
+ # Get datafeeds.
+ # Get configuration and usage information about datafeeds.
+ # This API returns a maximum of 10,000 datafeeds.
+ # If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`
+ # cluster privileges to use this API.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+ # console or command line. They are not intended for use by applications. For
+ # application consumption, use the get datafeed statistics API.
#
- # @option arguments [String] :datafeed_id The ID of the datafeeds stats to fetch
- # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :datafeed_id A numerical character string that uniquely identifies the datafeed.
+ # @option arguments [Boolean] :allow_no_match Specifies what to do when the request:
+ # - Contains wildcard expressions and there are no datafeeds that match.
+ # - Contains the `_all` string or no identifiers and there are no matches.
+ # - Contains wildcard expressions and there are only partial matches.
+ # If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when
+ # there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only
+ # partial matches. Server default: true.
+ # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: ['bc', 'id', 'sc', 's'].
+ # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-datafeeds
#
def ml_datafeeds(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.ml_datafeeds' }
@@ -53,7 +79,7 @@ def ml_datafeeds(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _datafeed_id
- "_cat/ml/datafeeds/#{Utils.__listify(_datafeed_id)}"
+ "_cat/ml/datafeeds/#{Utils.listify(_datafeed_id)}"
else
'_cat/ml/datafeeds'
end
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb
index 0a9f41ff97..591db3b10d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_jobs.rb
@@ -15,27 +15,53 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Gets configuration and usage information about anomaly detection jobs.
+ # Get anomaly detection jobs.
+ # Get configuration and usage information for anomaly detection jobs.
+ # This API returns a maximum of 10,000 jobs.
+ # If the Elasticsearch security features are enabled, you must have `monitor_ml`,
+ # `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+ # console or command line. They are not intended for use by applications. For
+ # application consumption, use the get anomaly detection job statistics API.
#
- # @option arguments [String] :job_id The ID of the jobs stats to fetch
- # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :job_id Identifier for the anomaly detection job.
+ # @option arguments [Boolean] :allow_no_match Specifies what to do when the request:
+ # - Contains wildcard expressions and there are no jobs that match.
+ # - Contains the `_all` string or no identifiers and there are no matches.
+ # - Contains wildcard expressions and there are only partial matches.
+ # If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there
+ # are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial
+ # matches. Server default: true.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: buckets.count,data.processed_records,forecasts.total,id,model.bytes,model.memory_status,state.
+ # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-jobs
#
def ml_jobs(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.ml_jobs' }
@@ -54,7 +80,7 @@ def ml_jobs(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _job_id
- "_cat/ml/anomaly_detectors/#{Utils.__listify(_job_id)}"
+ "_cat/ml/anomaly_detectors/#{Utils.listify(_job_id)}"
else
'_cat/ml/anomaly_detectors'
end
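A sketch of the ml_jobs action with a job identifier expression and a reduced column set (the job ID pattern and URL are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# List anomaly detection jobs matching the expression; allow_no_match: true (the default)
# returns an empty table rather than a 404 when nothing matches.
puts client.cat.ml_jobs(job_id: 'my-jobs-*', h: 'id,state,model.memory_status', allow_no_match: true).body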
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb
index 3af9c15a30..51c1bff7fc 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/ml_trained_models.rb
@@ -15,29 +15,48 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Gets configuration and usage information about inference trained models.
+ # Get trained models.
+ # Get configuration and usage information about inference trained models.
+ # IMPORTANT: CAT APIs are only intended for human consumption using the Kibana
+ # console or command line. They are not intended for use by applications. For
+ # application consumption, use the get trained models statistics API.
#
- # @option arguments [String] :model_id The ID of the trained models stats to fetch
- # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified)
- # @option arguments [Integer] :from skips a number of trained models
- # @option arguments [Integer] :size specifies a max number of trained models to get
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :model_id A unique identifier for the trained model.
+ # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+ # If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.
+ # If `false`, the API returns a 404 status code when there are no matches or only partial matches. Server default: true.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases used to sort the response.
+ # @option arguments [Integer] :from Skips the specified number of trained models.
+ # @option arguments [Integer] :size The maximum number of trained models to display.
+ # @option arguments [String] :time Unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-trained-models
#
def ml_trained_models(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.ml_trained_models' }
@@ -56,7 +75,7 @@ def ml_trained_models(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _model_id
- "_cat/ml/trained_models/#{Utils.__listify(_model_id)}"
+ "_cat/ml/trained_models/#{Utils.listify(_model_id)}"
else
'_cat/ml/trained_models'
end
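A sketch of paging through trained models with the ml_trained_models action (pagination values and URL are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Skip the first ten trained models and display the next five, with column headers.
puts client.cat.ml_trained_models(from: 10, size: 5, v: true).body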
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb
index 54102162ef..b8a66fe596 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodeattrs.rb
@@ -15,25 +15,45 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about custom node attributes.
+ # Get node attribute information.
+ # Get information about custom node attributes.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ # local cluster state. If `false` the list of selected nodes are computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodeattrs
#
def nodeattrs(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.nodeattrs' }
@@ -41,7 +61,7 @@ def nodeattrs(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/nodeattrs'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb
index 1411277c44..c5bb82c713 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/nodes.rb
@@ -15,28 +15,46 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns basic statistics about performance of cluster nodes.
+ # Get node information.
+ # Get information about the nodes in a cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
#
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :full_id Return the full node ID instead of the shortened version (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
- # @option arguments [Boolean] :include_unloaded_segments If set to true segment stats will include stats for segments that are not currently loaded into memory
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [Boolean, String] :full_id If `true`, return the full node ID. If `false`, return the shortened node ID. Server default: false.
+ # @option arguments [Boolean] :include_unloaded_segments If true, the response includes information from segments that are not loaded into memory.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display.
+ # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodes
#
def nodes(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.nodes' }
@@ -44,12 +62,12 @@ def nodes(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/nodes'
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h], escape: false) if params[:h]
+ params[:h] = Utils.listify(params[:h], escape: false) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
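A sketch of the nodes action using a simple wildcard in the column list, which the unescaped `h` handling above allows (column names and URL are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Select columns with a simple wildcard (heap.*) and return full node IDs.
puts client.cat.nodes(h: 'name,heap.*,cpu', full_id: true, v: true).body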
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb
index 67acfd7817..8a536c501b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/pending_tasks.rb
@@ -15,26 +15,46 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns a concise representation of the cluster pending tasks.
+ # Get pending task information.
+ # Get information about cluster-level changes that have not yet taken effect.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ # local cluster state. If `false` the list of selected nodes are computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :time Unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-pending-tasks
#
def pending_tasks(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.pending_tasks' }
@@ -42,12 +62,12 @@ def pending_tasks(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/pending_tasks'
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
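A sketch of the pending_tasks action using the local cluster state and millisecond time values (connection details are illustrative):

require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200') # illustrative

# Inspect queued cluster-state updates using the local node's view of the cluster state.
puts client.cat.pending_tasks(v: true, local: true, time: 'ms').body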
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb
index fbde5d60ed..c47fecbf32 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/plugins.rb
@@ -15,26 +15,46 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about installed plugins across nodes node.
+ # Get plugin information.
+ # Get a list of plugins running on each node of a cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
# @option arguments [Boolean] :include_bootstrap Include bootstrap plugins in the response
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ #     local cluster state. If `false`, the list of selected nodes is computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-plugins
#
def plugins(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.plugins' }
@@ -42,7 +62,7 @@ def plugins(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/plugins'
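
A minimal usage sketch of the options documented above, again assuming an initialized `client`:

    client.cat.plugins(v: true, include_bootstrap: false, format: 'json')
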
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb
index 5e6865e47d..b5f1672346 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/recovery.rb
@@ -15,28 +15,49 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about index shard recoveries, both on-going completed.
+ # Get shard recovery information.
+ # Get information about ongoing and completed shard recoveries.
+ # Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.
+ # For data streams, the API returns information about the stream’s backing indices.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
#
- # @option arguments [List] :index Comma-separated list or wildcard expression of index names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :active_only If `true`, the response only includes ongoing shard recoveries
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about shard recoveries
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request.
+ # Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [Boolean] :active_only If `true`, the response only includes ongoing shard recoveries.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about shard recoveries.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display.
+ # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-recovery
#
def recovery(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.recovery' }
@@ -49,18 +70,18 @@ def recovery(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cat/recovery/#{Utils.__listify(_index)}"
+ "_cat/recovery/#{Utils.listify(_index)}"
else
'_cat/recovery'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
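
A hedged example of the recovery action using the parameters documented above; the index name is a placeholder and `client` is assumed to be configured:

    client.cat.recovery(index: 'my-index', active_only: true, bytes: 'mb', detailed: true, v: true)
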
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb
index 482ceff4b3..7e055cbb11 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/repositories.rb
@@ -15,25 +15,45 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about snapshot repositories registered in the cluster.
+ # Get snapshot repository information.
+ # Get a list of snapshot repositories for a cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ #     local cluster state. If `false`, the list of selected nodes is computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-repositories
#
def repositories(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.repositories' }
@@ -41,7 +61,7 @@ def repositories(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/repositories'
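
An illustrative call, assuming an initialized `client`; the sort column is an example value:

    client.cat.repositories(v: true, s: 'id', format: 'json')
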
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb
index 8898f52e26..7c446237ae 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/segments.rb
@@ -15,25 +15,51 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Provides low-level information about the segments in the shards of an index.
+ # Get segment information.
+ # Get low-level information about the Lucene segments in index shards.
+ # For data streams, the API returns information about the backing indices.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
#
- # @option arguments [List] :index A comma-separated list of index names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request.
+ # Supports wildcards (`*`).
+ # To target all data streams and indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display.
+ # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ #     local cluster state. If `false`, the list of selected nodes is computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-segments
#
def segments(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.segments' }
@@ -46,13 +72,13 @@ def segments(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cat/segments/#{Utils.__listify(_index)}"
+ "_cat/segments/#{Utils.listify(_index)}"
else
'_cat/segments'
end
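
A short sketch of the segments action with a placeholder index name, assuming a configured `client`:

    client.cat.segments(index: 'my-index', bytes: 'kb', v: true)
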
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb
index 4e4cae17d5..4ee2e2196d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/shards.rb
@@ -15,27 +15,47 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Provides a detailed view of shard allocation on nodes.
+ # Get shard information.
+ # Get information about the shards in a cluster.
+ # For data streams, the API returns information about the backing indices.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
#
- # @option arguments [List] :index A comma-separated list of index names to limit the returned information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :bytes The unit in which to display byte values (options: b, k, kb, m, mb, g, gb, t, tb, p, pb)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :index A comma-separated list of data streams, indices, and aliases used to limit the request.
+ # Supports wildcards (`*`).
+ # To target all data streams and indices, omit this parameter or use `*` or `_all`.
+ # @option arguments [String] :bytes The unit used to display byte values.
+ # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-shards
#
def shards(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.shards' }
@@ -48,18 +68,18 @@ def shards(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cat/shards/#{Utils.__listify(_index)}"
+ "_cat/shards/#{Utils.listify(_index)}"
else
'_cat/shards'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
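
For example, the shards action can be limited to a wildcard index pattern and a handful of columns; the pattern and column list below are illustrative only:

    client.cat.shards(index: 'my-index-*', h: 'index,shard,prirep,state,node', v: true)
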
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb
index 3ae1c00888..d5b83ead0c 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/snapshots.rb
@@ -15,27 +15,49 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns all snapshots in a specific repository.
+ # Get snapshot information.
+ # Get information about the snapshots stored in one or more repositories.
+ # A snapshot is a backup of an index or running Elasticsearch cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
#
- # @option arguments [List] :repository Name of repository from which to fetch the snapshot information
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :ignore_unavailable Set to true to ignore unavailable snapshots
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :repository A comma-separated list of snapshot repositories used to limit the request.
+ # Accepts wildcard expressions.
+ # `_all` returns all repositories.
+ # If any repository fails during the request, Elasticsearch returns an error.
+ # @option arguments [Boolean] :ignore_unavailable If `true`, the response does not include information from unavailable snapshots.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display.
+ # It supports simple wildcards. Server default: ip,hp,rp,r,m,n,cpu,l.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :time Unit used to display time values.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-snapshots
#
def snapshots(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.snapshots' }
@@ -54,7 +76,7 @@ def snapshots(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _repository
- "_cat/snapshots/#{Utils.__listify(_repository)}"
+ "_cat/snapshots/#{Utils.listify(_repository)}"
else
'_cat/snapshots'
end
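
A possible call using the parameters documented above; `my_repository` is a placeholder repository name:

    client.cat.snapshots(repository: 'my_repository', ignore_unavailable: true, time: 's', v: true)
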
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb
index 4a60aea199..5e12fcecd8 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/tasks.rb
@@ -15,32 +15,52 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about the tasks currently executing on one or more nodes in the cluster.
+ # Get task information.
+ # Get information about tasks currently running in the cluster.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :nodes A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes
- # @option arguments [List] :actions A comma-separated list of actions that should be returned. Leave empty to return all.
- # @option arguments [Boolean] :detailed Return detailed task information (default: false)
- # @option arguments [String] :parent_task_id Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all.
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [Array] :actions The task action names, which are used to limit the response.
+ # @option arguments [Boolean] :detailed If `true`, the response includes detailed information about the running tasks.
+ # @option arguments [Array] :nodes Unique node identifiers, which are used to limit the response.
+ # @option arguments [String] :parent_task_id The parent task identifier, which is used to limit the response.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :time Unit used to display time values.
+ # @option arguments [Time] :timeout Period to wait for a response.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :wait_for_completion If `true`, the request blocks until the task has completed.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-tasks
#
def tasks(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.tasks' }
@@ -48,7 +68,7 @@ def tasks(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cat/tasks'
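
An illustrative call, assuming an initialized `client`; the action filter value is an example:

    client.cat.tasks(detailed: true, actions: ['*search*'], v: true, format: 'json')
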
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb
index 42423d5f2b..89a74aacf1 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/templates.rb
@@ -15,26 +15,48 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns information about existing templates.
+ # Get index template information.
+ # Get information about the index templates in a cluster.
+ # You can use index templates to apply index settings and field mappings to new indices at creation.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
#
- # @option arguments [String] :name A pattern that returned template names must match
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :name The name of the template to return.
+ # Accepts wildcard expressions. If omitted, all templates are returned.
+ # @option arguments [String, Array] :h A comma-separated list of column names to display. It supports simple wildcards.
+ # @option arguments [String, Array] :s List of columns that determine how the table should be sorted.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ #     local cluster state. If `false`, the list of selected nodes is computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-templates
#
def templates(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.templates' }
@@ -53,7 +75,7 @@ def templates(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _name
- "_cat/templates/#{Utils.__listify(_name)}"
+ "_cat/templates/#{Utils.listify(_name)}"
else
'_cat/templates'
end
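
A sketch using a wildcard template name (placeholder), assuming a configured `client`:

    client.cat.templates(name: 'my-template-*', v: true)
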
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb
index f3ffd705f2..bbe195c06b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/thread_pool.rb
@@ -15,28 +15,49 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Returns cluster-wide thread pool statistics per node.
- # By default the active, queue and rejected statistics are returned for all thread pools.
+ # Get thread pool statistics.
+ # Get thread pool statistics for each node in a cluster.
+ # Returned information includes all built-in thread pools and custom thread pools.
+ # IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
#
- # @option arguments [List] :thread_pool_patterns A comma-separated list of regular-expressions to filter the thread pools in the output
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String, Array] :thread_pool_patterns A comma-separated list of thread pool names used to limit the request.
+ # Accepts wildcard expressions.
+ # @option arguments [String, Array] :h List of columns to appear in the response. Supports simple wildcards.
+ # @option arguments [String, Array] :s A comma-separated list of column names or aliases that determines the sort order.
+ # Sorting defaults to ascending and can be changed by setting `:asc`
+ # or `:desc` as a suffix to the column name.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [Boolean] :local If `true`, the request computes the list of selected nodes from the
+ #     local cluster state. If `false`, the list of selected nodes is computed
+ # from the cluster state of the master node. In both cases the coordinating
+ # node will send requests for further information to each selected node.
+ # @option arguments [Time] :master_timeout The period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-thread-pool
#
def thread_pool(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.thread_pool' }
@@ -55,12 +76,12 @@ def thread_pool(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _thread_pool_patterns
- "_cat/thread_pool/#{Utils.__listify(_thread_pool_patterns)}"
+ "_cat/thread_pool/#{Utils.listify(_thread_pool_patterns)}"
else
'_cat/thread_pool'
end
params = Utils.process_params(arguments)
- params[:h] = Utils.__listify(params[:h]) if params[:h]
+ params[:h] = Utils.listify(params[:h]) if params[:h]
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
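
For illustration, the thread pool output can be restricted to two built-in pools and a few columns; the pattern and column names below are example values:

    client.cat.thread_pool(thread_pool_patterns: 'search,write', h: 'node_name,name,active,queue,rejected', v: true)
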
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb
index 172e7445ae..6b4963e7ba 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cat/transforms.rb
@@ -15,28 +15,48 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cat
module Actions
- # Gets configuration and usage information about transforms.
+ # Get transform information.
+ # Get configuration and usage information about transforms.
+ # CAT APIs are only intended for human consumption using the Kibana
+ # console or command line. They are not intended for use by applications. For
+ # application consumption, use the get transform statistics API.
#
- # @option arguments [String] :transform_id The id of the transform for which to get stats. '_all' or '*' implies all transforms
- # @option arguments [Integer] :from skips a number of transform configs, defaults to 0
- # @option arguments [Integer] :size specifies a max number of transforms to get, defaults to 100
- # @option arguments [Boolean] :allow_no_match Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified)
- # @option arguments [String] :format a short version of the Accept header, e.g. json, yaml
- # @option arguments [List] :h Comma-separated list of column names to display
- # @option arguments [Boolean] :help Return help information
- # @option arguments [List] :s Comma-separated list of column names or column aliases to sort by
- # @option arguments [String] :time The unit in which to display time values (options: d, h, m, s, ms, micros, nanos)
- # @option arguments [Boolean] :v Verbose mode. Display column headers
+ # @option arguments [String] :transform_id A transform identifier or a wildcard expression.
+ # If you do not specify one of these options, the API returns information for all transforms.
+ # @option arguments [Boolean] :allow_no_match Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+ # If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.
+ # If `false`, the request returns a 404 status code when there are no matches or only partial matches. Server default: true.
+ # @option arguments [Integer] :from Skips the specified number of transforms. Server default: 0.
+ # @option arguments [String, Array] :h Comma-separated list of column names to display. Server default: changes_last_detection_time,checkpoint,checkpoint_progress,documents_processed,id,last_search_time,state.
+ # @option arguments [String, Array] :s Comma-separated list of column names or column aliases used to sort the response.
+ # @option arguments [String] :time The unit used to display time values.
+ # @option arguments [Integer] :size The maximum number of transforms to obtain. Server default: 100.
+ # @option arguments [String] :format Specifies the format to return the columnar data in, can be set to
+ # `text`, `json`, `cbor`, `yaml`, or `smile`. Server default: text.
+ # @option arguments [Boolean] :help When set to `true` will output available columns. This option
+ # can't be combined with any other query string option.
+ # @option arguments [Boolean] :v When set to `true` will enable verbose output.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-transforms
#
def transforms(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cat.transforms' }
@@ -55,7 +75,7 @@ def transforms(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _transform_id
- "_cat/transforms/#{Utils.__listify(_transform_id)}"
+ "_cat/transforms/#{Utils.listify(_transform_id)}"
else
'_cat/transforms'
end
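
A minimal sketch, assuming an initialized `client`:

    client.cat.transforms(allow_no_match: true, size: 10, time: 'ms', v: true)
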
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb b/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb
index 793bff1eab..40c7ea8b60 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/clear_scroll.rb
@@ -15,33 +15,42 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Actions
- # Explicitly clears the search context for a scroll.
+ # Clear a scrolling search.
+ # Clear the search context and results for a scrolling search.
#
- # @option arguments [List] :scroll_id A comma-separated list of scroll IDs to clear *Deprecated*
+ # @option arguments [String, Array] :scroll_id A comma-separated list of scroll IDs to clear.
+ # To clear all scroll IDs, use `_all`.
+ # IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter
+ # @option arguments [Hash] :body request body
#
# *Deprecation notice*:
# A scroll id can be quite large and should be specified as part of the body
# Deprecated since version 7.0.0
#
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-clear-scroll
#
def clear_scroll(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'clear_scroll' }
- defined_params = [:scroll_id].each_with_object({}) do |variable, set_variables|
- set_variables[variable] = arguments[variable] if arguments.key?(variable)
- end
- request_opts[:defined_params] = defined_params unless defined_params.empty?
-
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
@@ -50,15 +59,11 @@ def clear_scroll(arguments = {})
_scroll_id = arguments.delete(:scroll_id)
method = Elasticsearch::API::HTTP_DELETE
- path = if _scroll_id
- "_search/scroll/#{Utils.__listify(_scroll_id)}"
- else
- '_search/scroll'
- end
+ path = '_search/scroll'
params = Utils.process_params(arguments)
if Array(arguments[:ignore]).include?(404)
- Utils.__rescue_from_not_found do
+ Utils.rescue_from_not_found do
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
)
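
Because the URL form of the request is removed in this change, scroll IDs are passed in the request body; a hedged sketch, where `scroll_id` is assumed to hold an ID returned by a previous scrolling search:

    client.clear_scroll(body: { scroll_id: [scroll_id] })
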
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb b/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb
index 05b7b5935a..70f109ce4c 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/close_point_in_time.rb
@@ -15,18 +15,33 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Actions
- # Close a point in time
+ # Close a point in time.
+ # A point in time must be opened explicitly before being used in search requests.
+ # The `keep_alive` parameter tells Elasticsearch how long it should persist.
+ # A point in time is automatically closed when the `keep_alive` period has elapsed.
+ # However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
#
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ #     `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ #     this option for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body a point-in-time id to close
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-open-point-in-time
#
def close_point_in_time(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'close_point_in_time' }
@@ -34,11 +49,11 @@ def close_point_in_time(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body)
+ body = arguments.delete(:body)
method = Elasticsearch::API::HTTP_DELETE
path = '_pit'
- params = {}
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
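
A hedged usage sketch, where `pit_id` is assumed to be the identifier returned by an earlier `open_point_in_time` call:

    client.close_point_in_time(body: { id: pit_id })
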
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/allocation_explain.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/allocation_explain.rb
index 08abe8949a..c6933e3647 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/allocation_explain.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/allocation_explain.rb
@@ -15,22 +15,38 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Provides explanations for shard allocations in the cluster.
+ # Explain the shard allocations.
+ # Get explanations for shard allocations in the cluster.
+ # For unassigned shards, it provides an explanation for why the shard is unassigned.
+ # For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node.
+ # This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
+ # Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API.
#
- # @option arguments [Time] :master_timeout Timeout for connection to master node
- # @option arguments [Boolean] :include_yes_decisions Return 'YES' decisions in explanation (default: false)
- # @option arguments [Boolean] :include_disk_info Return information about disk usage and shard sizes (default: false)
+ # @option arguments [Boolean] :include_disk_info If true, returns information about disk usage and shard sizes.
+ # @option arguments [Boolean] :include_yes_decisions If true, returns YES decisions in explanation.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The index, shard, and primary flag to explain. Empty means 'explain a randomly-chosen unassigned shard'
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-allocation-explain
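+ # @example Explain the allocation of a primary shard (illustrative sketch; assumes `client` is an `Elasticsearch::Client`, `my-index` is a placeholder index name)
+ #   client.cluster.allocation_explain(body: { index: 'my-index', shard: 0, primary: true })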
#
def allocation_explain(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.allocation_explain' }
@@ -38,7 +54,7 @@ def allocation_explain(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body)
+ body = arguments.delete(:body)
method = if body
Elasticsearch::API::HTTP_POST
@@ -46,7 +62,7 @@ def allocation_explain(arguments = {})
Elasticsearch::API::HTTP_GET
end
- path = '_cluster/allocation/explain'
+ path = '_cluster/allocation/explain'
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_component_template.rb
index a0a62dd669..6fcb984a3d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_component_template.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_component_template.rb
@@ -15,21 +15,35 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Deletes a component template
+ # Delete component templates.
+ # Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
#
- # @option arguments [String] :name The name of the template
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [Time] :master_timeout Specify timeout for connection to master
+ # @option arguments [String, Array] :name Comma-separated list or wildcard expression of component template names used to limit the request. (*Required*)
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Time] :timeout Period to wait for a response.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template
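+ # @example Delete a component template by name (illustrative sketch; `template_1` is a placeholder, `client` is assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.delete_component_template(name: 'template_1')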
#
def delete_component_template(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.delete_component_template' }
@@ -49,7 +63,7 @@ def delete_component_template(arguments = {})
_name = arguments.delete(:name)
method = Elasticsearch::API::HTTP_DELETE
- path = "_component_template/#{Utils.__listify(_name)}"
+ path = "_component_template/#{Utils.listify(_name)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_voting_config_exclusions.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_voting_config_exclusions.rb
index 4609b743de..9b920c1573 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_voting_config_exclusions.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/delete_voting_config_exclusions.rb
@@ -15,20 +15,37 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Clears cluster voting config exclusions.
+ # Clear cluster voting config exclusions.
+ # Remove master-eligible nodes from the voting configuration exclusion list.
#
- # @option arguments [Boolean] :wait_for_removal Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list.
- # @option arguments [Time] :master_timeout Timeout for submitting request to master
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [Boolean] :wait_for_removal Specifies whether to wait for all excluded nodes to be removed from the
+ # cluster before clearing the voting configuration exclusions list.
+ # Defaults to true, meaning that all excluded nodes must be removed from
+ # the cluster before this API takes any action. If set to false then the
+ # voting configuration exclusions list is cleared even if some excluded
+ # nodes are still in the cluster. Server default: true.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions
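+ # @example Clear the exclusions list without waiting for the excluded nodes to leave (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.delete_voting_config_exclusions(wait_for_removal: false)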
#
def delete_voting_config_exclusions(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.delete_voting_config_exclusions' }
@@ -36,7 +53,7 @@ def delete_voting_config_exclusions(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_DELETE
path = '_cluster/voting_config_exclusions'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/exists_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/exists_component_template.rb
index 505979897d..de33f04612 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/exists_component_template.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/exists_component_template.rb
@@ -15,21 +15,37 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns information about whether a particular component template exist
+ # Check component templates.
+ # Returns information about whether a particular component template exists.
#
- # @option arguments [String] :name The name of the template
- # @option arguments [Time] :master_timeout Timeout for waiting for new cluster state in case it is blocked
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false) *Deprecated*
+ # @option arguments [String, Array] :name Comma-separated list of component template names used to limit the request.
+ # Wildcard (*) expressions are supported. (*Required*)
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is
+ # received before the timeout expires, the request fails and returns an
+ # error. Server default: 30s.
+ # @option arguments [Boolean] :local If true, the request retrieves information from the local node only.
+ # Defaults to false, which means information is retrieved from the master node.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template
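+ # @example Check whether any component template matches a wildcard pattern (illustrative sketch; placeholder pattern, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.exists_component_template(name: 'template_*')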
#
def exists_component_template(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.exists_component_template' }
@@ -49,7 +65,7 @@ def exists_component_template(arguments = {})
_name = arguments.delete(:name)
method = Elasticsearch::API::HTTP_HEAD
- path = "_component_template/#{Utils.__listify(_name)}"
+ path = "_component_template/#{Utils.listify(_name)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb
index d4570f23aa..7c55d33324 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_component_template.rb
@@ -15,22 +15,39 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns one or more component templates
+ # Get component templates.
+ # Get information about component templates.
#
- # @option arguments [List] :name The comma separated names of the component templates
- # @option arguments [Time] :master_timeout Timeout for waiting for new cluster state in case it is blocked
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false) *Deprecated*
+ # @option arguments [String] :name Comma-separated list of component template names used to limit the request.
+ # Wildcard (`*`) expressions are supported.
+ # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format.
+ # @option arguments [String, Array] :settings_filter Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys
# @option arguments [Boolean] :include_defaults Return all default configurations for the component template (default: false)
+ # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only.
+ # If `false`, information is retrieved from the master node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template
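+ # @example Retrieve a component template with settings in flat format (illustrative sketch; placeholder name, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.get_component_template(name: 'template_1', flat_settings: true)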
#
def get_component_template(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.get_component_template' }
@@ -49,7 +66,7 @@ def get_component_template(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _name
- "_component_template/#{Utils.__listify(_name)}"
+ "_component_template/#{Utils.listify(_name)}"
else
'_component_template'
end
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb
index 12beaf9acc..aa262c7d05 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/get_settings.rb
@@ -15,22 +15,36 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns cluster settings.
+ # Get cluster-wide settings.
+ # By default, it returns only settings that have been explicitly defined.
#
- # @option arguments [Boolean] :flat_settings Return settings in flat format (default: false)
- # @option arguments [Time] :master_timeout Timeout for waiting for new cluster state in case it is blocked
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [Boolean] :include_defaults Whether to return all default clusters setting.
+ # @option arguments [Boolean] :flat_settings If `true`, returns settings in flat format.
+ # @option arguments [Boolean] :include_defaults If `true`, returns default cluster settings from the local node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Time] :timeout Period to wait for a response.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-get-settings
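+ # @example Retrieve explicitly defined and default cluster settings (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.get_settings(include_defaults: true, flat_settings: true)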
#
def get_settings(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.get_settings' }
@@ -38,7 +52,7 @@ def get_settings(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cluster/settings'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb
index 3a585036b6..279064c8bd 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/health.rb
@@ -15,30 +15,48 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns basic information about the health of the cluster.
+ # Get the cluster health status.
+ # You can also use the API to get the health status of only specified data streams and indices.
+ # For data streams, the API retrieves the health status of the stream’s backing indices.
+ # The cluster health status is: green, yellow or red.
+ # On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
+ # The index level status is controlled by the worst shard status.
+ # One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
+ # The cluster status is controlled by the worst index status.
#
- # @option arguments [List] :index Limit the information returned to a specific index
- # @option arguments [String] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. (options: open, closed, hidden, none, all)
- # @option arguments [String] :level Specify the level of detail for returned information (options: cluster, indices, shards)
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [String] :wait_for_active_shards Wait until the specified number of shards is active
- # @option arguments [String] :wait_for_nodes Wait until the specified number of nodes is available
- # @option arguments [String] :wait_for_events Wait until all currently queued events with the given priority are processed (options: immediate, urgent, high, normal, low, languid)
- # @option arguments [Boolean] :wait_for_no_relocating_shards Whether to wait until there are no relocating shards in the cluster
- # @option arguments [Boolean] :wait_for_no_initializing_shards Whether to wait until there are no initializing shards in the cluster
- # @option arguments [String] :wait_for_status Wait until cluster is in a specific state (options: green, yellow, red)
+ # @option arguments [String, Array] :index Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
+ # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both.
+ # @option arguments [String] :level Can be one of cluster, indices or shards. Controls the details level of the health information returned. Server default: cluster.
+ # @option arguments [Boolean] :local If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Integer, String] :wait_for_active_shards The number of active shards to wait for, `all` to wait for all shards in the cluster to be active, or `0` to not wait. Server default: 0.
+ # @option arguments [String] :wait_for_events Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+ # @option arguments [String, Integer] :wait_for_nodes The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
+ # @option arguments [String] :wait_for_status One of green, yellow or red. Waits (until the timeout provided) until the status of the cluster changes to the one provided or better, that is green > yellow > red. By default, will not wait for any status.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-health
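+ # @example Wait up to 30 seconds for the cluster to reach at least yellow status (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.health(wait_for_status: 'yellow', timeout: '30s')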
#
def health(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.health' }
@@ -51,13 +69,13 @@ def health(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
_index = arguments.delete(:index)
method = Elasticsearch::API::HTTP_GET
path = if _index
- "_cluster/health/#{Utils.__listify(_index)}"
+ "_cluster/health/#{Utils.listify(_index)}"
else
'_cluster/health'
end
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/info.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/info.rb
index 83b2e81d83..d90e7a5f20 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/info.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/info.rb
@@ -15,19 +15,31 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns different information about the cluster.
+ # Get cluster info.
+ # Returns basic information about the cluster.
#
- # @option arguments [List] :target Limit the information returned to the specified target. (options: _all, http, ingest, thread_pool, script)
+ # @option arguments [String, Array] :target Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-info.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-info
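+ # @example Get HTTP and ingest information for the cluster (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.info(target: 'http,ingest')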
#
def info(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.info' }
@@ -47,8 +59,8 @@ def info(arguments = {})
_target = arguments.delete(:target)
method = Elasticsearch::API::HTTP_GET
- path = "_info/#{Utils.__listify(_target)}"
- params = {}
+ path = "_info/#{Utils.listify(_target)}"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb
index 783626ef7d..58239f28b2 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/pending_tasks.rb
@@ -15,21 +15,37 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns a list of any cluster-level changes (e.g. create index, update mapping,
- # allocate or fail shard) which have not yet been executed.
+ # Get the pending cluster tasks.
+ # Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+ # NOTE: This API returns a list of any pending updates to the cluster state.
+ # These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.
+ # However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task management API and the pending cluster tasks API.
#
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Specify timeout for connection to master
+ # @option arguments [Boolean] :local If `true`, the request retrieves information from the local node only.
+ # If `false`, information is retrieved from the master node.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-pending-tasks
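+ # @example List pending cluster-level tasks (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.pending_tasks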
#
def pending_tasks(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.pending_tasks' }
@@ -37,7 +53,7 @@ def pending_tasks(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_cluster/pending_tasks'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb
index dd9edf0cbe..8acf112429 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/post_voting_config_exclusions.rb
@@ -15,22 +15,52 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Updates the cluster voting config exclusions by node ids or node names.
+ # Update voting configuration exclusions.
+ # Update the cluster voting config exclusions by node IDs or node names.
+ # By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks.
+ # If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually.
+ # The API adds an entry for each specified node to the cluster’s voting configuration exclusions list.
+ # It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.
+ # Clusters should have no voting configuration exclusions in normal operation.
+ # Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`.
+ # This API waits for the nodes to be fully removed from the cluster before it returns.
+ # If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.
+ # A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`.
+ # If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration.
+ # In that case, you may safely retry the call.
+ # NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period.
+ # They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
#
- # @option arguments [String] :node_ids A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_names.
- # @option arguments [String] :node_names A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_ids.
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [Time] :master_timeout Timeout for submitting request to master
+ # @option arguments [String, Array] :node_names A comma-separated list of the names of the nodes to exclude from the
+ # voting configuration. If specified, you may not also specify node_ids.
+ # @option arguments [String, Array] :node_ids A comma-separated list of the persistent ids of the nodes to exclude
+ # from the voting configuration. If specified, you may not also specify node_names.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. Server default: 30s.
+ # @option arguments [Time] :timeout When adding a voting configuration exclusion, the API waits for the
+ # specified nodes to be excluded from the voting configuration before
+ # returning. If the timeout expires before the appropriate condition
+ # is satisfied, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions
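+ # @example Exclude two nodes from the voting configuration by name (illustrative sketch; placeholder node names, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.post_voting_config_exclusions(node_names: 'node-1,node-2')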
#
def post_voting_config_exclusions(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.post_voting_config_exclusions' }
@@ -38,7 +68,7 @@ def post_voting_config_exclusions(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_POST
path = '_cluster/voting_config_exclusions'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb
index e6cf352034..7253562369 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_component_template.rb
@@ -15,23 +15,52 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Creates or updates a component template
+ # Create or update a component template.
+ # Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+ # An index template can be composed of multiple component templates.
+ # To use a component template, specify it in an index template’s `composed_of` list.
+ # Component templates are only applied to new data streams and indices as part of a matching index template.
+ # Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.
+ # Component templates are only used during index creation.
+ # For data streams, this includes data stream creation and the creation of a stream’s backing indices.
+ # Changes to component templates do not affect existing indices, including a stream’s backing indices.
+ # You can use C-style `/* *\/` block comments in component templates.
+ # You can include comments anywhere in the request body except before the opening curly bracket.
+ # **Applying component templates**
+ # You cannot directly apply a component template to a data stream or index.
+ # To be applied, a component template must be included in an index template's `composed_of` list.
#
- # @option arguments [String] :name The name of the template
- # @option arguments [Boolean] :create Whether the index template should only be added if new or can also replace an existing one
- # @option arguments [Time] :timeout Explicit operation timeout
- # @option arguments [Time] :master_timeout Specify timeout for connection to master
+ # @option arguments [String] :name Name of the component template to create.
+ # Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`.
+ # Elastic Agent uses these templates to configure backing indices for its data streams.
+ # If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
+ # If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. (*Required*)
+ # @option arguments [Boolean] :create If `true`, this request cannot replace or update existing component templates.
+ # @option arguments [String] :cause User-defined reason for creating the component template. Server default: api.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node.
+ # If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The template definition (*Required*)
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template
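+ # @example Create a component template carrying a single-shard setting (illustrative sketch; placeholder name and setting, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.put_component_template(
+ #     name: 'template_1',
+ #     body: { template: { settings: { 'index.number_of_shards' => 1 } } }
+ #   )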
#
def put_component_template(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.put_component_template' }
@@ -52,7 +81,7 @@ def put_component_template(arguments = {})
_name = arguments.delete(:name)
method = Elasticsearch::API::HTTP_PUT
- path = "_component_template/#{Utils.__listify(_name)}"
+ path = "_component_template/#{Utils.listify(_name)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb
index 22854aaf9b..d314331c50 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/put_settings.rb
@@ -15,22 +15,46 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Updates the cluster settings.
+ # Update the cluster settings.
+ # Configure and update dynamic settings on a running cluster.
+ # You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.
+ # Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
+ # You can also reset transient or persistent settings by assigning them a null value.
+ # If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.
+ # For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting.
+ # However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting.
+ # TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster.
+ # If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings.
+ # Only use `elasticsearch.yml` for static cluster settings and node settings.
+ # The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.
+ # WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead.
+ # If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
#
# @option arguments [Boolean] :flat_settings Return settings in flat format (default: false)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [Time] :timeout Explicit operation timeout
+ # @option arguments [Time] :master_timeout Explicit operation timeout for connection to the master node. Server default: 30s.
+ # @option arguments [Time] :timeout Explicit operation timeout. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). (*Required*)
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-settings
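+ # @example Persistently raise the recovery bandwidth limit (illustrative sketch; placeholder value, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.put_settings(body: { persistent: { 'indices.recovery.max_bytes_per_sec' => '50mb' } })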
#
def put_settings(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.put_settings' }
@@ -40,7 +64,7 @@ def put_settings(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body) || {}
+ body = arguments.delete(:body)
method = Elasticsearch::API::HTTP_PUT
path = '_cluster/settings'
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/remote_info.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/remote_info.rb
index 63d1f824f7..2479373e1f 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/remote_info.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/remote_info.rb
@@ -15,18 +15,31 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns the information about configured remote clusters.
+ # Get remote cluster information.
+ # Get information about configured remote clusters.
+ # The API returns connection and endpoint information keyed by the configured remote cluster alias.
#
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-remote-info
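+ # @example List the configured remote clusters (illustrative sketch; assumes `client` is an `Elasticsearch::Client`)
+ #   client.cluster.remote_info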
#
def remote_info(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.remote_info' }
@@ -34,11 +47,11 @@ def remote_info(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_remote/info'
- params = {}
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb
index 450384932f..028dbe02a9 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/reroute.rb
@@ -15,25 +15,46 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Allows to manually change the allocation of individual shards in the cluster.
+ # Reroute the cluster.
+ # Manually change the allocation of individual shards in the cluster.
+ # For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.
+ # It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state.
+ # For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.
+ # The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting.
+ # If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.
+ # The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated.
+ # This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes.
+ # Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards.
#
- # @option arguments [Boolean] :dry_run Simulate the operation only and return the resulting state
- # @option arguments [Boolean] :explain Return an explanation of why the commands can or cannot be executed
- # @option arguments [Boolean] :retry_failed Retries allocation of shards that are blocked due to too many subsequent allocation failures
- # @option arguments [List] :metric Limit the information returned to the specified metrics. Defaults to all but metadata (options: _all, blocks, metadata, nodes, none, routing_table, master_node, version)
- # @option arguments [Time] :master_timeout Explicit operation timeout for connection to master node
- # @option arguments [Time] :timeout Explicit operation timeout
+ # @option arguments [Boolean] :dry_run If true, then the request simulates the operation.
+ # It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes.
+ # @option arguments [Boolean] :explain If true, then the response contains an explanation of why the commands can or cannot run.
+ # @option arguments [String, Array] :metric Limits the information returned to the specified metrics. Server default: all.
+ # @option arguments [Boolean] :retry_failed If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures.
+ # @option arguments [Time] :master_timeout Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Time] :timeout Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Server default: 30s.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"eixsts_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Only use
+ # this option for debugging only.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The definition of `commands` to perform (`move`, `cancel`, `allocate`)
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-reroute
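+ # @example Move a shard from one node to another (illustrative sketch; placeholder index and node names, `client` assumed to be an `Elasticsearch::Client`)
+ #   client.cluster.reroute(
+ #     body: { commands: [{ move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }] }
+ #   )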
#
def reroute(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.reroute' }
@@ -41,7 +62,7 @@ def reroute(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body) || {}
+ body = arguments.delete(:body)
method = Elasticsearch::API::HTTP_POST
path = '_cluster/reroute'
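For context, a minimal usage sketch of the regenerated `cluster.reroute` action (not part of the diff), assuming a configured `Elasticsearch::Client` on a local cluster; the index and node names are placeholders:

```ruby
require 'elasticsearch'

client = Elasticsearch::Client.new(url: 'http://localhost:9200')

# Retry shards whose allocation is blocked after repeated failures; no body needed.
client.cluster.reroute(retry_failed: true)

# Simulate moving shard 0 of a hypothetical index between two hypothetical nodes.
client.cluster.reroute(
  dry_run: true,
  body: {
    commands: [
      { move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }
    ]
  }
)
```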
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb
index 24d7eb487f..7d8b521b93 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/state.rb
@@ -15,33 +15,57 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns a comprehensive information about the state of the cluster.
+ # Get the cluster state.
+ # Get comprehensive information about the state of the cluster.
+ # The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.
+ # The elected master node ensures that every node in the cluster has a copy of the same cluster state.
+ # This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.
+ # You may need to consult the Elasticsearch source code to determine the precise meaning of the response.
+ # By default the API will route requests to the elected master node since this node is the authoritative source of cluster states.
+ # You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.
+ # Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.
+ # If you use this API repeatedly, your cluster may become unstable.
+ # WARNING: The response is a representation of an internal data structure.
+ # Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.
+ # Do not query this API using external monitoring tools.
+ # Instead, obtain the information you require using other more stable cluster APIs.
#
- # @option arguments [List] :metric Limit the information returned to the specified metrics (options: _all, blocks, metadata, nodes, routing_table, routing_nodes, master_node, version)
- # @option arguments [List] :index A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices
- # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
- # @option arguments [Time] :master_timeout Specify timeout for connection to master
+ # @option arguments [String, Array] :metric Limit the information returned to the specified metrics
+ # @option arguments [String, Array] :index A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices
+ # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) Server default: true.
+ # @option arguments [String, Array] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both.
# @option arguments [Boolean] :flat_settings Return settings in flat format (default: false)
- # @option arguments [Number] :wait_for_metadata_version Wait for the metadata version to be equal or greater than the specified metadata version
- # @option arguments [Time] :wait_for_timeout The maximum time to wait for wait_for_metadata_version before timing out
# @option arguments [Boolean] :ignore_unavailable Whether specified concrete indices should be ignored when unavailable (missing or closed)
- # @option arguments [Boolean] :allow_no_indices Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
- # @option arguments [String] :expand_wildcards Whether to expand wildcard expression to concrete indices that are open, closed or both. (options: open, closed, hidden, none, all)
+ # @option arguments [Boolean] :local Return local information, do not retrieve the state from master node (default: false)
+ # @option arguments [Time] :master_timeout Specify timeout for connection to master Server default: 30s.
+ # @option arguments [Integer] :wait_for_metadata_version Wait for the metadata version to be equal or greater than the specified metadata version
+ # @option arguments [Time] :wait_for_timeout The maximum time to wait for wait_for_metadata_version before timing out
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-state
#
def state(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.state' }
- defined_params = %i[metric index].each_with_object({}) do |variable, set_variables|
+ defined_params = [:metric, :index].each_with_object({}) do |variable, set_variables|
set_variables[variable] = arguments[variable] if arguments.key?(variable)
end
request_opts[:defined_params] = defined_params unless defined_params.empty?
@@ -57,9 +81,9 @@ def state(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _metric && _index
- "_cluster/state/#{Utils.__listify(_metric)}/#{Utils.__listify(_index)}"
+ "_cluster/state/#{Utils.listify(_metric)}/#{Utils.listify(_index)}"
elsif _metric
- "_cluster/state/#{Utils.__listify(_metric)}"
+ "_cluster/state/#{Utils.listify(_metric)}"
else
'_cluster/state'
end
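A usage sketch for the regenerated `cluster.state` action, reusing the `client` instance from the sketch above; the metric and index values are illustrative:

```ruby
# Full cluster state from the elected master node.
client.cluster.state

# Only metadata and routing for one index, served by the node handling the request.
client.cluster.state(metric: 'metadata,routing_table', index: 'my-index', local: true)
```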
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb
index a960c912f6..596882b57b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/cluster/stats.rb
@@ -15,21 +15,35 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Cluster
module Actions
- # Returns high-level overview of cluster statistics.
+ # Get cluster statistics.
+ # Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
#
- # @option arguments [List] :node_id A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes
- # @option arguments [Boolean] :include_remotes Include remote cluster data into the response (default: false)
- # @option arguments [Time] :timeout Explicit operation timeout
+ # @option arguments [String, Array] :node_id Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster.
+ # @option arguments [Boolean] :include_remotes Include remote cluster data into the response
+ # @option arguments [Time] :timeout Period to wait for each node to respond.
+ # If a node does not respond before its timeout expires, the response does not include its stats.
+ # However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-stats
#
def stats(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'cluster.stats' }
@@ -48,7 +62,7 @@ def stats(arguments = {})
method = Elasticsearch::API::HTTP_GET
path = if _node_id
- "_cluster/stats/nodes/#{Utils.__listify(_node_id)}"
+ "_cluster/stats/nodes/#{Utils.listify(_node_id)}"
else
'_cluster/stats'
end
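A sketch of the regenerated `cluster.stats` action with the parameters documented above (same assumed `client`):

```ruby
# Cluster-wide statistics with human-readable units and a per-node timeout.
client.cluster.stats(human: true, timeout: '30s')

# Restrict the statistics to the node that receives the request.
client.cluster.stats(node_id: '_local')
```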
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb
index e2c46f6ffd..73696d48e7 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/check_in.rb
@@ -15,23 +15,35 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Updates the last_seen timestamp in the connector document.
+ # Check in a connector.
+ # Update the `last_seen` field in the connector and set it to the current timestamp.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
- # @option arguments [String] :connector_id The unique identifier of the connector to be updated.
+ # @option arguments [String] :connector_id The unique identifier of the connector to be checked in (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-check-in
#
def check_in(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.check_in' }
@@ -51,8 +63,8 @@ def check_in(arguments = {})
_connector_id = arguments.delete(:connector_id)
method = Elasticsearch::API::HTTP_PUT
- path = "_connector/#{Utils.__listify(_connector_id)}/_check_in"
- params = {}
+ path = "_connector/#{Utils.listify(_connector_id)}/_check_in"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
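A sketch of the regenerated `connector.check_in` call, assuming the same `client`; the connector ID is a placeholder:

```ruby
# Update the connector's last_seen timestamp to the current time.
client.connector.check_in(connector_id: 'my-connector')
```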
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/delete.rb
index a10c9e3058..c79595925d 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/delete.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/delete.rb
@@ -15,25 +15,40 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Deletes a connector.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Delete a connector.
+ # Removes a connector and associated sync jobs.
+ # This is a destructive action that is not recoverable.
+ # NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector.
+ # These need to be removed manually.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_id The unique identifier of the connector to be deleted.
- # @option arguments [Boolean] :hard If true, the connector doc is deleted. If false, connector doc is marked as deleted (soft-deleted).
- # @option arguments [Boolean] :delete_sync_jobs Determines whether associated sync jobs are also deleted.
+ # @option arguments [String] :connector_id The unique identifier of the connector to be deleted (*Required*)
+ # @option arguments [Boolean] :delete_sync_jobs A flag indicating if associated sync jobs should be also removed. Defaults to false.
+ # @option arguments [Boolean] :hard A flag indicating if the connector should be hard deleted.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-delete
#
def delete(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.delete' }
@@ -53,7 +68,7 @@ def delete(arguments = {})
_connector_id = arguments.delete(:connector_id)
method = Elasticsearch::API::HTTP_DELETE
- path = "_connector/#{Utils.__listify(_connector_id)}"
+ path = "_connector/#{Utils.listify(_connector_id)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
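A sketch of `connector.delete` using the new `delete_sync_jobs` and `hard` flags (placeholder ID, same assumed `client`):

```ruby
# Soft-delete a connector together with its sync jobs; pass hard: true for a hard delete.
client.connector.delete(connector_id: 'my-connector', delete_sync_jobs: true)
```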
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/get.rb
index 23b7109386..4c04f4a86f 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/get.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/get.rb
@@ -15,24 +15,36 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Returns the details about a connector.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Get a connector.
+ # Get the details about a connector.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_id The unique identifier of the connector to be returned.
- # @option arguments [Boolean] :include_deleted A flag indicating whether to return connectors that have been soft-deleted.
+ # @option arguments [String] :connector_id The unique identifier of the connector (*Required*)
+ # @option arguments [Boolean] :include_deleted A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-get
#
def get(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.get' }
@@ -52,7 +64,7 @@ def get(arguments = {})
_connector_id = arguments.delete(:connector_id)
method = Elasticsearch::API::HTTP_GET
- path = "_connector/#{Utils.__listify(_connector_id)}"
+ path = "_connector/#{Utils.listify(_connector_id)}"
params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
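A sketch of `connector.get`, including soft-deleted connectors (placeholder ID, same assumed `client`):

```ruby
response = client.connector.get(connector_id: 'my-connector', include_deleted: true)
puts response.status # HTTP status of the call
puts response.body   # the connector document
```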
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/last_sync.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/last_sync.rb
deleted file mode 100644
index 1a342f408f..0000000000
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/last_sync.rb
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to Elasticsearch B.V. under one or more contributor
-# license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright
-# ownership. Elasticsearch B.V. licenses this file to you under
-# the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
-module Elasticsearch
- module API
- module Connector
- module Actions
- # Updates the stats of last sync in the connector document.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
- #
- # @option arguments [String] :connector_id The unique identifier of the connector to be updated.
- # @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body Object with stats related to the last connector sync run. (*Required*)
- #
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/update-connector-last-sync-api.html
- #
- def last_sync(arguments = {})
- request_opts = { endpoint: arguments[:endpoint] || 'connector.last_sync' }
-
- defined_params = [:connector_id].each_with_object({}) do |variable, set_variables|
- set_variables[variable] = arguments[variable] if arguments.key?(variable)
- end
- request_opts[:defined_params] = defined_params unless defined_params.empty?
-
- raise ArgumentError, "Required argument 'body' missing" unless arguments[:body]
- raise ArgumentError, "Required argument 'connector_id' missing" unless arguments[:connector_id]
-
- arguments = arguments.clone
- headers = arguments.delete(:headers) || {}
-
- body = arguments.delete(:body)
-
- _connector_id = arguments.delete(:connector_id)
-
- method = Elasticsearch::API::HTTP_PUT
- path = "_connector/#{Utils.__listify(_connector_id)}/_last_sync"
- params = {}
-
- Elasticsearch::API::Response.new(
- perform_request(method, path, params, body, headers, request_opts)
- )
- end
- end
- end
- end
-end
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/list.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/list.rb
index e79a2ca64e..531dc56d0a 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/list.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/list.rb
@@ -15,29 +15,41 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Lists all connectors.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Get all connectors.
+ # Get information about all connectors.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
# @option arguments [Integer] :from Starting offset (default: 0)
- # @option arguments [Integer] :size Specifies a max number of results to get (default: 100)
- # @option arguments [List] :index_name A comma-separated list of connector index names to fetch connector documents for
- # @option arguments [List] :connector_name A comma-separated list of connector names to fetch connector documents for
- # @option arguments [List] :service_type A comma-separated list of connector service types to fetch connector documents for
- # @option arguments [String] :query A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names
- # @option arguments [Boolean] :include_deleted A flag indicating whether to return connectors that have been soft-deleted.
+ # @option arguments [Integer] :size Specifies a max number of results to get
+ # @option arguments [String, Array] :index_name A comma-separated list of connector index names to fetch connector documents for
+ # @option arguments [String, Array] :connector_name A comma-separated list of connector names to fetch connector documents for
+ # @option arguments [String, Array] :service_type A comma-separated list of connector service types to fetch connector documents for
+ # @option arguments [Boolean] :include_deleted A flag to indicate if the desired connector should be fetched, even if it was soft-deleted.
+ # @option arguments [String] :query A wildcard query string that filters connectors with matching name, description or index name
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/list-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-list
#
def list(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.list' }
@@ -45,7 +57,7 @@ def list(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = nil
+ body = nil
method = Elasticsearch::API::HTTP_GET
path = '_connector'
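A sketch of `connector.list` with paging and filtering; the query string and service type are placeholders (same assumed `client`):

```ruby
# Page through connectors, filtering by a wildcard query and a service type.
client.connector.list(from: 0, size: 20, query: 'content*', service_type: 'sharepoint_online')
```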
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/post.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/post.rb
index 6e2c055541..fc41b1ebf5 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/post.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/post.rb
@@ -15,23 +15,37 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Creates a connector.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Create a connector.
+ # Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure.
+ # Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud.
+ # Self-managed connectors (Connector clients) are self-managed on your infrastructure.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The connector configuration.
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put
#
def post(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.post' }
@@ -39,11 +53,11 @@ def post(arguments = {})
arguments = arguments.clone
headers = arguments.delete(:headers) || {}
- body = arguments.delete(:body)
+ body = arguments.delete(:body)
method = Elasticsearch::API::HTTP_POST
path = '_connector'
- params = {}
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
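A sketch of `connector.post`. The body fields shown (`index_name`, `name`, `service_type`) are assumed from the create-connector request shape and the values are placeholders (same assumed `client`):

```ruby
client.connector.post(
  body: {
    index_name: 'search-my-content', # index that will receive the synced documents
    name: 'My content connector',
    service_type: 'google_drive'
  }
)
```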
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/put.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/put.rb
index 038f37ab3c..78ac3452f3 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/put.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/put.rb
@@ -15,24 +15,35 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Creates or updates a connector.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Create or update a connector.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_id The unique identifier of the connector to be created or updated.
+ # @option arguments [String] :connector_id The unique identifier of the connector to be created or updated. ID is auto-generated if not provided.
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The connector configuration.
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/create-connector-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put
#
def put(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.put' }
@@ -51,11 +62,11 @@ def put(arguments = {})
method = Elasticsearch::API::HTTP_PUT
path = if _connector_id
- "_connector/#{Utils.__listify(_connector_id)}"
+ "_connector/#{Utils.listify(_connector_id)}"
else
'_connector'
end
- params = {}
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
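A sketch of `connector.put`, which now also accepts requests without a `connector_id` (the body fields are the same assumed shape as in the `post` sketch above):

```ruby
# Create or update a connector under an explicit ID; omit connector_id to have one generated.
client.connector.put(
  connector_id: 'my-connector',
  body: { index_name: 'search-my-content', name: 'My content connector', service_type: 's3' }
)
```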
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb
index 3a6bcebf48..4655fdb013 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_cancel.rb
@@ -15,23 +15,36 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Cancels a connector sync job.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Cancel a connector sync job.
+ # Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time.
+ # The connector service is then responsible for setting the status of connector sync jobs to cancelled.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be canceled
+ # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/cancel-connector-sync-job-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-cancel
#
def sync_job_cancel(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_cancel' }
@@ -54,8 +67,8 @@ def sync_job_cancel(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_PUT
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}/_cancel"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}/_cancel"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
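A sketch of `connector.sync_job_cancel` (placeholder sync job ID, same assumed `client`):

```ruby
# Move the job to "cancelling"; the connector service later marks it "cancelled".
client.connector.sync_job_cancel(connector_sync_job_id: 'my-sync-job-id')
```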
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb
index 90e935c527..0019715262 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_check_in.rb
@@ -15,23 +15,37 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Checks in a connector sync job (refreshes 'last_seen').
+ # Check in a connector sync job.
+ # Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index.
+ # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+ # This service runs automatically on Elastic Cloud for Elastic managed connectors.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be checked in
+ # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be checked in. (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/check-in-connector-sync-job-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-check-in
#
def sync_job_check_in(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_check_in' }
@@ -54,8 +68,8 @@ def sync_job_check_in(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_PUT
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}/_check_in"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}/_check_in"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
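A sketch of `connector.sync_job_check_in` (placeholder sync job ID, same assumed `client`):

```ruby
# Refresh last_seen on a running sync job (normally issued by the connector service itself).
client.connector.sync_job_check_in(connector_sync_job_id: 'my-sync-job-id')
```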
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb
index f524141fc3..a6a827494b 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_claim.rb
@@ -15,24 +15,41 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Claims a connector sync job.
+ # Claim a connector sync job.
+ # This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time.
+ # Additionally, it can set the `sync_cursor` property for the sync job.
+ # This API is not intended for direct connector management by users.
+ # It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.
+ # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+ # This service runs automatically on Elastic Cloud for Elastic managed connectors.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be claimed.
+ # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job. (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body Data to claim a sync job. (*Required*)
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/claim-connector-sync-job-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-claim
#
def sync_job_claim(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_claim' }
@@ -57,8 +74,8 @@ def sync_job_claim(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_PUT
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}/_claim"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}/_claim"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
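A sketch of `connector.sync_job_claim`. The body fields shown (`worker_hostname`, `sync_cursor`) are assumptions based on the claim behaviour described above, and the IDs are placeholders (same assumed `client`):

```ruby
client.connector.sync_job_claim(
  connector_sync_job_id: 'my-sync-job-id',
  body: {
    worker_hostname: 'connector-worker-01', # assumed field: hostname of the claiming worker
    sync_cursor: nil                        # assumed field: optional resume point for incremental syncs
  }
)
```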
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_delete.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_delete.rb
index 3a7ed141ca..1d9f342d57 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_delete.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_delete.rb
@@ -15,23 +15,36 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Deletes a connector sync job.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Delete a connector sync job.
+ # Remove a connector sync job and its associated data.
+ # This is a destructive action that is not recoverable.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be deleted.
+ # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be deleted (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-connector-sync-job-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-delete
#
def sync_job_delete(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_delete' }
@@ -54,8 +67,8 @@ def sync_job_delete(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_DELETE
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
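A sketch of `connector.sync_job_delete` (placeholder sync job ID, same assumed `client`):

```ruby
# Permanently remove a sync job and its associated data; this cannot be undone.
client.connector.sync_job_delete(connector_sync_job_id: 'my-sync-job-id')
```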
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb
index 73ed91cbd3..42ff4de588 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_error.rb
@@ -15,24 +15,38 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Sets an error for a connector sync job.
+ # Set a connector sync job error.
+ # Set the `error` field for a connector sync job and set its `status` to `error`.
+ # To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+ # This service runs automatically on Elastic Cloud for Elastic managed connectors.
# This functionality is Experimental and may be changed or removed
# completely in a future release. Elastic will take a best effort approach
# to fix any issues, but experimental features are not subject to the
# support SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to set an error for.
+ # @option arguments [String] :connector_sync_job_id The unique identifier for the connector sync job. (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only by debugging.
# @option arguments [Hash] :headers Custom HTTP headers
- # @option arguments [Hash] :body The error to set in the connector sync job. (*Required*)
+ # @option arguments [Hash] :body request body
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/set-connector-sync-job-error-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-error
#
def sync_job_error(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_error' }
@@ -57,8 +71,8 @@ def sync_job_error(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_PUT
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}/_error"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}/_error"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
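A sketch of `connector.sync_job_error`. The body's `error` field is assumed from the set-error behaviour described above, and the values are placeholders (same assumed `client`):

```ruby
client.connector.sync_job_error(
  connector_sync_job_id: 'my-sync-job-id',
  body: { error: 'Third-party API returned HTTP 429 (rate limited)' }
)
```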
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_get.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_get.rb
index 22e7554776..51ffcbd2ae 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_get.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_get.rb
@@ -15,23 +15,34 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Returns the details about a connector sync job.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Get a connector sync job.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
- # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job to be returned.
+ # @option arguments [String] :connector_sync_job_id The unique identifier of the connector sync job (*Required*)
+ # @option arguments [Boolean] :error_trace When set to `true` Elasticsearch will include the full stack trace of errors
+ # when they occur.
+ # @option arguments [String, Array] :filter_path Comma-separated list of filters in dot notation which reduce the response
+ # returned by Elasticsearch.
+ # @option arguments [Boolean] :human When set to `true` will return statistics in a format suitable for humans.
+ # For example `"exists_time": "1h"` for humans and
+ # `"exists_time_in_millis": 3600000` for computers. When disabled the human
+ # readable values will be omitted. This makes sense for responses being consumed
+ # only by machines.
+ # @option arguments [Boolean] :pretty If set to `true` the returned JSON will be "pretty-formatted". Use
+ # this option only for debugging.
# @option arguments [Hash] :headers Custom HTTP headers
#
- # @see https://www.elastic.co/guide/en/elasticsearch/reference/current/get-connector-sync-job-api.html
+ # @see https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-get
#
def sync_job_get(arguments = {})
request_opts = { endpoint: arguments[:endpoint] || 'connector.sync_job_get' }
@@ -54,8 +65,8 @@ def sync_job_get(arguments = {})
_connector_sync_job_id = arguments.delete(:connector_sync_job_id)
method = Elasticsearch::API::HTTP_GET
- path = "_connector/_sync_job/#{Utils.__listify(_connector_sync_job_id)}"
- params = {}
+ path = "_connector/_sync_job/#{Utils.listify(_connector_sync_job_id)}"
+ params = Utils.process_params(arguments)
Elasticsearch::API::Response.new(
perform_request(method, path, params, body, headers, request_opts)
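A sketch of `connector.sync_job_get` (placeholder sync job ID, same assumed `client`):

```ruby
response = client.connector.sync_job_get(connector_sync_job_id: 'my-sync-job-id')
puts response.body # the stored sync job document
```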
diff --git a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_list.rb b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_list.rb
index 222314a516..2d3d719112 100644
--- a/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_list.rb
+++ b/elasticsearch-api/lib/elasticsearch/api/actions/connector/sync_job_list.rb
@@ -15,27 +15,39 @@
# specific language governing permissions and limitations
# under the License.
#
-# Auto generated from build hash f284cc16f4d4b4289bc679aa1529bb504190fe80
-# @see https://github.com/elastic/elasticsearch/tree/main/rest-api-spec
-#
+# This code was automatically generated from the Elasticsearch Specification
+# See https://github.com/elastic/elasticsearch-specification
+# See Elasticsearch::ES_SPECIFICATION_COMMIT for commit hash.
module Elasticsearch
module API
module Connector
module Actions
- # Lists all connector sync jobs.
- # This functionality is Experimental and may be changed or removed
- # completely in a future release. Elastic will take a best effort approach
- # to fix any issues, but experimental features are not subject to the
- # support SLA of official GA features.
+ # Get all connector sync jobs.
+ # Get information about all stored connector sync jobs listed by their creation date in ascending order.
+ # This functionality is in Beta and is subject to change. The design and
+ # code is less mature than official GA features and is being provided
+ # as-is with no warranties. Beta features are not subject to the support
+ # SLA of official GA features.
#
# @option arguments [Integer] :from Starting offset (default: 0)
- # @option arguments [Integer] :size specifies a max number of results to get (default: 100)
- # @option arguments [String] :status Sync job status, which sync jobs are fetched for
- # @option arguments [String] :connector_id Id of the connector to fetch the sync jobs for
- # @option arguments [List] :job_type A comma-separated list of job types
+ # @option arguments [Integer] :size Specifies a max number of results to get
+ # @option arguments [String] :status A sync job status to fetch connector sync jobs for
+ # @option arguments [String] :connector_id A connector id to fetch connector sync jobs for
+ # @option arguments [String, Array