diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..c1d8714607
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,17 @@
+
+
+- [ ] You have read the [Spring Data contribution guidelines](https://github.com/spring-projects/spring-data-build/blob/master/CONTRIBUTING.adoc).
+- [ ] **There is a ticket for the change in our [issue tracker](https://github.com/spring-projects/spring-data-elasticsearch/issues)**. Add the issue number to the _Closes #issue-number_ line below.
+- [ ] You use the code formatters provided [here](https://github.com/spring-projects/spring-data-build/tree/master/etc/ide) and have applied them to your changes. Don’t submit any formatting-related changes.
+- [ ] You submit test cases (unit or integration tests) that back your changes.
+- [ ] You added yourself as an author in the headers of the classes you touched. Amend the date range in the Apache license header if needed. For new types, add the license header (copy from another file and set the current year only).
+
+Closes #issue-number
diff --git a/.gitignore b/.gitignore
index 20068080ec..449f58ea44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,21 +1,36 @@
-atlassian-ide-plugin.xml
-
-## Ignore svn files
-.svn
-
-## ignore any target dir
-target
-
-##ignore only top level data dir - local node data files for unit tests
-/data
-
-## Ignore project files created by Eclipse
-.settings
-.project
-.classpath
-
-## Ignore project files created by IntelliJ IDEA
-*.iml
-*.ipr
-*.iws
-.idea
+.DS_Store
+*.graphml
+.springBeans
+
+atlassian-ide-plugin.xml
+
+## Ignore svn files
+.svn
+
+## ignore any target dir
+target
+
+## Ignore project files created by Eclipse
+.settings
+.project
+.classpath
+
+## Ignore project files created by IntelliJ IDEA
+*.iml
+*.ipr
+*.iws
+.idea
+/.env
+
+
+/zap.env
+/localdocker.env
+.localdocker-env
+
+build/
+node_modules
+node
+package-lock.json
+
+.mvn/.develocity
+/src/test/resources/testcontainers-local.properties
diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml
new file mode 100644
index 0000000000..e0857eaa25
--- /dev/null
+++ b/.mvn/extensions.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<extensions>
+	<extension>
+		<groupId>io.spring.develocity.conventions</groupId>
+		<artifactId>develocity-conventions-maven-extension</artifactId>
+		<version>0.0.22</version>
+	</extension>
+</extensions>
diff --git a/.mvn/jvm.config b/.mvn/jvm.config
new file mode 100644
index 0000000000..e27f6e8f5e
--- /dev/null
+++ b/.mvn/jvm.config
@@ -0,0 +1,14 @@
+--add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED
+--add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED
+--add-opens jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED
+--add-opens jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED
+--add-opens=java.base/java.util=ALL-UNNAMED
+--add-opens=java.base/java.lang.reflect=ALL-UNNAMED
+--add-opens=java.base/java.text=ALL-UNNAMED
+--add-opens=java.desktop/java.awt.font=ALL-UNNAMED
diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000000..64a46202ac
--- /dev/null
+++ b/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,115 @@
+
+/*
+ * Copyright 2007-present the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ private static final String WRAPPER_VERSION = "0.5.6";
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to use instead of the default
+ * one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH = ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH = ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using transport directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if (mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if (mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if (!outputFile.getParentFile().exists()) {
+ if (!outputFile.getParentFile().mkdirs()) {
+ System.out.println("- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
+ String username = System.getenv("MVNW_USERNAME");
+ char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
+ Authenticator.setDefault(new Authenticator() {
+ @Override
+ protected PasswordAuthentication getPasswordAuthentication() {
+ return new PasswordAuthentication(username, password);
+ }
+ });
+ }
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/.mvn/wrapper/maven-wrapper.jar b/.mvn/wrapper/maven-wrapper.jar
new file mode 100644
index 0000000000..2cc7d4a55c
Binary files /dev/null and b/.mvn/wrapper/maven-wrapper.jar differ
diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000000..e075a74d86
--- /dev/null
+++ b/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,3 @@
+#Thu Nov 07 09:47:28 CET 2024
+wrapperUrl=https\://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
+distributionUrl=https\://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index f5c99a7f66..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: java
\ No newline at end of file
diff --git a/CI.adoc b/CI.adoc
new file mode 100644
index 0000000000..56af9d15ee
--- /dev/null
+++ b/CI.adoc
@@ -0,0 +1,43 @@
+= Continuous Integration
+
+image:https://jenkins.spring.io/buildStatus/icon?job=spring-data-elasticsearch%2Fmain&subject=2020.0.0%20(main)[link=https://jenkins.spring.io/view/SpringData/job/spring-data-elasticsearch/]
+image:https://jenkins.spring.io/buildStatus/icon?job=spring-data-elasticsearch%2F4.0.x&subject=Neumann%20(4.0.x)[link=https://jenkins.spring.io/view/SpringData/job/spring-data-elasticsearch/]
+image:https://jenkins.spring.io/buildStatus/icon?job=spring-data-elasticsearch%2F3.2.x&subject=Moore%20(3.2.x)[link=https://jenkins.spring.io/view/SpringData/job/spring-data-elasticsearch/]
+
+== Running CI tasks locally
+
+Since this pipeline is purely Docker-based, it's easy to:
+
+* Debug what went wrong on your local machine.
+* Test out a tweak to your `verify.sh` script before sending it out.
+* Experiment against a new image before submitting your pull request.
+
+All of these use cases are great reasons to essentially run what the CI server does on your local machine.
+
+IMPORTANT: To do this you must have Docker installed on your machine.
+
+1. `docker run -it --mount type=bind,source="$(pwd)",target=/spring-data-elasticsearch-github adoptopenjdk/openjdk8:latest /bin/bash`
++
+This will launch the Docker image and mount your source code at `spring-data-elasticsearch-github`.
++
+2. `cd spring-data-elasticsearch-github`
++
+Next, run your tests from inside the container:
++
+3. `./mvnw clean dependency:list test -Dsort` (or whatever profile you need to test out)
+
+Since the container is binding to your source, you can make edits from your IDE and continue to run build jobs.
+
+If you need to package things up, do this:
+
+1. `docker run -it -v /var/run/docker.sock:/var/run/docker.sock --mount type=bind,source="$(pwd)",target=/spring-data-elasticsearch-github adoptopenjdk/openjdk8:latest /bin/bash`
++
+This will launch the Docker image and mount your source code at `spring-data-elasticsearch-github`.
++
+2. `cd spring-data-elasticsearch-github`
++
+Next, try to package everything up from inside the container:
++
+3. `./mvnw -Pci,snapshot -Dmaven.test.skip=true clean package`
+
+NOTE: Docker containers can eat up disk space fast! From time to time, run `docker system prune` to clean out old images.
diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc
new file mode 100644
index 0000000000..1cff01d255
--- /dev/null
+++ b/CONTRIBUTING.adoc
@@ -0,0 +1,10 @@
+= Spring Data contribution guidelines
+
+You can find the contribution guidelines for Spring Data projects https://github.com/spring-projects/spring-data-build/blob/main/CONTRIBUTING.adoc[here].
+**Please read these carefully!**
+
+Do not submit a Pull Request before having created an issue and having discussed it. This prevents you from doing work that might be rejected.
+
+== Running the tests locally
+
+In order to run the tests locally with `./mvnw test` you need to have Docker running, because Spring Data Elasticsearch uses https://www.testcontainers.org/[Testcontainers] to start a locally running Elasticsearch instance.
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000000..1d2500ed1e
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,132 @@
+def p = [:]
+node {
+ checkout scm
+ p = readProperties interpolate: true, file: 'ci/pipeline.properties'
+}
+
+pipeline {
+ agent none
+
+ triggers {
+ pollSCM 'H/10 * * * *'
+ upstream(upstreamProjects: "spring-data-commons/main", threshold: hudson.model.Result.SUCCESS)
+ }
+
+ options {
+ disableConcurrentBuilds()
+ buildDiscarder(logRotator(numToKeepStr: '14'))
+ }
+
+ stages {
+ stage("test: baseline (main)") {
+ when {
+ beforeAgent(true)
+ anyOf {
+ branch(pattern: "main|(\\d\\.\\d\\.x)", comparator: "REGEXP")
+ not { triggeredBy 'UpstreamCause' }
+ }
+ }
+ agent {
+ label 'data'
+ }
+ options { timeout(time: 30, unit: 'MINUTES') }
+
+ environment {
+ ARTIFACTORY = credentials("${p['artifactory.credentials']}")
+ DEVELOCITY_ACCESS_KEY = credentials("${p['develocity.access-key']}")
+ }
+
+ steps {
+ script {
+ docker.withRegistry(p['docker.proxy.registry'], p['docker.proxy.credentials']) {
+ docker.image(p['docker.java.main.image']).inside(p['docker.java.inside.docker']) {
+ sh "PROFILE=none JENKINS_USER_NAME=${p['jenkins.user.name']} ci/verify.sh"
+ sh "JENKINS_USER_NAME=${p['jenkins.user.name']} ci/clean.sh"
+ }
+ }
+ }
+ }
+ }
+
+ stage("Test other configurations") {
+ when {
+ beforeAgent(true)
+ allOf {
+ branch(pattern: "main|(\\d\\.\\d\\.x)", comparator: "REGEXP")
+ not { triggeredBy 'UpstreamCause' }
+ }
+ }
+ parallel {
+ stage("test: baseline (next)") {
+ agent {
+ label 'data'
+ }
+ options { timeout(time: 30, unit: 'MINUTES') }
+ environment {
+ ARTIFACTORY = credentials("${p['artifactory.credentials']}")
+ DEVELOCITY_ACCESS_KEY = credentials("${p['develocity.access-key']}")
+ }
+ steps {
+ script {
+ docker.withRegistry(p['docker.proxy.registry'], p['docker.proxy.credentials']) {
+ docker.image(p['docker.java.next.image']).inside(p['docker.java.inside.docker']) {
+ sh "PROFILE=none JENKINS_USER_NAME=${p['jenkins.user.name']} ci/verify.sh"
+ sh "JENKINS_USER_NAME=${p['jenkins.user.name']} ci/clean.sh"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ stage('Release to artifactory') {
+ when {
+ beforeAgent(true)
+ anyOf {
+ branch(pattern: "main|(\\d\\.\\d\\.x)", comparator: "REGEXP")
+ not { triggeredBy 'UpstreamCause' }
+ }
+ }
+ agent {
+ label 'data'
+ }
+ options { timeout(time: 20, unit: 'MINUTES') }
+ environment {
+ ARTIFACTORY = credentials("${p['artifactory.credentials']}")
+ DEVELOCITY_ACCESS_KEY = credentials("${p['develocity.access-key']}")
+ }
+ steps {
+ script {
+ docker.withRegistry(p['docker.proxy.registry'], p['docker.proxy.credentials']) {
+ docker.image(p['docker.java.main.image']).inside(p['docker.java.inside.docker']) {
+ sh 'MAVEN_OPTS="-Duser.name=' + "${p['jenkins.user.name']}" + ' -Duser.home=/tmp/jenkins-home" ' +
+ "./mvnw -s settings.xml -Pci,artifactory " +
+ "-Ddevelocity.storage.directory=/tmp/jenkins-home/.develocity-root " +
+ "-Dartifactory.server=${p['artifactory.url']} " +
+ "-Dartifactory.username=${ARTIFACTORY_USR} " +
+ "-Dartifactory.password=${ARTIFACTORY_PSW} " +
+ "-Dartifactory.staging-repository=${p['artifactory.repository.snapshot']} " +
+ "-Dartifactory.build-name=spring-data-elasticsearch " +
+ "-Dartifactory.build-number=spring-data-elasticsearch-${BRANCH_NAME}-build-${BUILD_NUMBER} " +
+ "-Dmaven.repo.local=/tmp/jenkins-home/.m2/spring-data-elasticsearch " +
+ "-Dmaven.test.skip=true clean deploy -U -B"
+ }
+ }
+ }
+ }
+ }
+ }
+
+ post {
+ changed {
+ script {
+ emailext(
+ subject: "[${currentBuild.fullDisplayName}] ${currentBuild.currentResult}",
+ mimeType: 'text/html',
+ recipientProviders: [[$class: 'CulpritsRecipientProvider'], [$class: 'RequesterRecipientProvider']],
+ body: "${currentBuild.fullDisplayName} is reported as ${currentBuild.currentResult}")
+ }
+ }
+ }
+}
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000000..ff77379631
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.adoc b/README.adoc
new file mode 100644
index 0000000000..0242089d82
--- /dev/null
+++ b/README.adoc
@@ -0,0 +1,179 @@
+= Spring Data for Elasticsearch image:https://jenkins.spring.io/buildStatus/icon?job=spring-data-elasticsearch%2Fmain&subject=Build[link=https://jenkins.spring.io/view/SpringData/job/spring-data-elasticsearch/] https://gitter.im/spring-projects/spring-data[image:https://badges.gitter.im/spring-projects/spring-data.svg[Gitter]] image:https://img.shields.io/badge/Revved%20up%20by-Develocity-06A0CE?logo=Gradle&labelColor=02303A["Revved up by Develocity", link="https://ge.spring.io/scans?search.rootProjectNames=Spring%20Data%20Elasticsearch"]
+
+The primary goal of the https://projects.spring.io/spring-data[Spring Data] project is to make it easier to build Spring-powered applications that use new data access technologies such as non-relational databases, map-reduce frameworks, and cloud based data services.
+
+The Spring Data Elasticsearch project provides integration with the https://www.elastic.co/[Elasticsearch] search engine.
+Key functional areas of Spring Data Elasticsearch are a POJO centric model for interacting with Elasticsearch Documents and easily writing a Repository style data access layer.
+
+This project is led and maintained by the community.
+
+== Features
+
+* Spring configuration support using Java based `@Configuration` classes or an XML namespace for ES client instances.
+* `ElasticsearchOperations` class and implementations that increase productivity when performing common ES operations.
+Includes integrated object mapping between documents and POJOs.
+* Feature Rich Object Mapping integrated with Spring’s Conversion Service
+* Annotation based mapping metadata
+* Automatic implementation of `Repository` interfaces including support for custom search methods.
+* CDI support for repositories
+
+== Code of Conduct
+
+This project is governed by the https://github.com/spring-projects/.github/blob/e3cc2ff230d8f1dca06535aa6b5a4a23815861d4/CODE_OF_CONDUCT.md[Spring Code of Conduct].
+By participating, you are expected to uphold this code of conduct.
+Please report unacceptable behavior to spring-code-of-conduct@pivotal.io.
+
+== Getting Started
+
+Here is a quick teaser of an application using Spring Data Repositories in Java:
+
+[source,java]
+----
+public interface PersonRepository extends CrudRepository<Person, Long> {
+
+	List<Person> findByLastname(String lastname);
+
+	List<Person> findByFirstnameLike(String firstname);
+}
+
+@Service
+public class MyService {
+
+	private final PersonRepository repository;
+
+	public MyService(PersonRepository repository) {
+		this.repository = repository;
+	}
+
+	public void doWork() {
+
+		repository.deleteAll();
+
+		Person person = new Person();
+		person.setFirstname("Oliver");
+		person.setLastname("Gierke");
+		repository.save(person);
+
+		List<Person> lastNameResults = repository.findByLastname("Gierke");
+		List<Person> firstNameResults = repository.findByFirstnameLike("Oli");
+	}
+}
+----
+
+=== Using the RestClient
+
+Please check the https://docs.spring.io/spring-data/elasticsearch/docs/current/reference/html/#elasticsearch.clients.configuration[official documentation].
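+
+As a quick orientation, here is a minimal sketch of an imperative client configuration. Class and host names are illustrative, and it assumes a current 5.x version where `ElasticsearchConfiguration` is the imperative configuration base class; see the documentation linked above for the authoritative setup.
+
+[source,java]
+----
+import org.springframework.context.annotation.Configuration;
+import org.springframework.data.elasticsearch.client.ClientConfiguration;
+import org.springframework.data.elasticsearch.client.elc.ElasticsearchConfiguration;
+
+@Configuration
+public class MyClientConfig extends ElasticsearchConfiguration {
+
+	@Override
+	public ClientConfiguration clientConfiguration() {
+		// connect to a locally running cluster; adjust host and port as needed
+		return ClientConfiguration.builder()
+				.connectedTo("localhost:9200")
+				.build();
+	}
+}
+----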
+
+=== Maven configuration
+
+Add the Maven dependency:
+
+[source,xml]
+----
+<dependency>
+	<groupId>org.springframework.data</groupId>
+	<artifactId>spring-data-elasticsearch</artifactId>
+	<version>${version}</version>
+</dependency>
+----
+
+**Compatibility Matrix**
+
+The compatibility between Spring Data Elasticsearch, Elasticsearch client drivers and Spring Boot versions can be found in the https://docs.spring.io/spring-data/elasticsearch/docs/current/reference/html/#preface.versions[reference documentation].
+
+To use release candidate versions of the upcoming major version, use our Maven milestone repository and declare the appropriate dependency version:
+
+[source,xml]
+----
+<dependency>
+	<groupId>org.springframework.data</groupId>
+	<artifactId>spring-data-elasticsearch</artifactId>
+	<version>${version}.RCx</version>
+</dependency>
+
+<repository>
+	<id>spring-milestone</id>
+	<name>Spring Milestone Repository</name>
+	<url>https://repo.spring.io/milestone</url>
+</repository>
+----
+
+If you'd rather use the latest snapshots of the upcoming major version, use our Maven snapshot repository and declare the appropriate dependency version:
+
+[source,xml]
+----
+<dependency>
+	<groupId>org.springframework.data</groupId>
+	<artifactId>spring-data-elasticsearch</artifactId>
+	<version>${version}-SNAPSHOT</version>
+</dependency>
+
+<repository>
+	<id>spring-snapshot</id>
+	<name>Spring Snapshot Repository</name>
+	<url>https://repo.spring.io/snapshot</url>
+</repository>
+----
+
+== Getting Help
+
+Having trouble with Spring Data?
+We’d love to help!
+
+* Check the
+https://docs.spring.io/spring-data/elasticsearch/docs/current/reference/html/[reference documentation], and https://docs.spring.io/spring-data/elasticsearch/docs/current/api/[Javadocs].
+* Learn the Spring basics – Spring Data builds on Spring Framework, check the https://spring.io[spring.io] web-site for a wealth of reference documentation.
+If you are just starting out with Spring, try one of the https://spring.io/guides[guides].
+* Ask a question or chat with the community on https://app.gitter.im/#/room/#spring-projects_spring-data:gitter.im[Gitter].
+* Report bugs with Spring Data for Elasticsearch at https://github.com/spring-projects/spring-data-elasticsearch/issues[https://github.com/spring-projects/spring-data-elasticsearch/issues].
+
+== Reporting Issues
+
+Spring Data uses GitHub as its issue tracking system to record bugs and feature requests.
+If you want to raise an issue, please follow the recommendations below:
+
+* Before you log a bug, please search the
+https://github.com/spring-projects/spring-data-elasticsearch/issues[issue tracker] to see if someone has already reported the problem.
+* If the issue doesn't already exist, https://github.com/spring-projects/spring-data-elasticsearch/issues/new[create a new issue].
+* Please provide as much information as possible with the issue report; we'd like to know at least the version of Spring Data Elasticsearch and the JVM version you are using.
+* If you need to paste code or include a stack trace, use Markdown +++```+++ escapes before and after your text.
+* If possible try to create a test-case or project that replicates the issue.
+Attach a link to your code or a compressed file containing your code.
+
+== Building from Source
+
+You don’t need to build from source to use Spring Data (binaries in https://repo.spring.io[repo.spring.io]), but if you want to try out the latest and greatest, Spring Data can be easily built with the https://github.com/takari/maven-wrapper[maven wrapper].
+
+You need JDK 17 or above to build the _main_ branch.
+For the branches up to and including release 4.4, JDK 8 is required.
+
+[source,bash]
+----
+ $ ./mvnw clean install
+----
+
+If you want to build with the regular `mvn` command, you will need https://maven.apache.org/run-maven/index.html[Maven v3.5.0 or above].
+
+_Also see link:CONTRIBUTING.adoc[CONTRIBUTING.adoc] if you wish to submit pull requests, and in particular please sign the https://cla.pivotal.io/sign/spring[Contributor’s Agreement] before submitting your first pull request._
+
+IMPORTANT: When contributing, please make sure an issue exists in https://github.com/spring-projects/spring-data-elasticsearch/issues[issue tracker] and comment on this issue with how you want to address it.
+This way we not only know that someone is working on the issue, we can also align on architectural questions and possible solutions before work is invested. This prevents a lot of effort from going into pull requests that have little or no chance of being merged.
+
+=== Building reference documentation
+
+Building the documentation also builds the project without running the tests.
+
+[source,bash]
+----
+ $ ./mvnw clean install -Pantora
+----
+
+The generated documentation is available from `target/site/index.html`.
+
+== Examples
+
+For examples of using Spring Data for Elasticsearch, see the https://github.com/spring-projects/spring-data-examples/tree/main/elasticsearch/example[spring-data-examples] project.
+
+== License
+
+Spring Data for Elasticsearch is Open Source software released under the https://www.apache.org/licenses/LICENSE-2.0.html[Apache 2.0 license].
diff --git a/README.md b/README.md
deleted file mode 100644
index 1bbaf34432..0000000000
--- a/README.md
+++ /dev/null
@@ -1,225 +0,0 @@
-Spring Data Elasticsearch
-=========================
-
-Spring Data implementation for ElasticSearch
-
-[Build Status](http://travis-ci.org/BioMedCentralLtd/spring-data-elasticsearch)
-
-Spring Data makes it easier to build Spring-powered applications that use new data access technologies such as non-relational databases, map-reduce frameworks, and cloud based data services as well as provide improved support for relational database technologies.
-
-The Spring Data Elasticsearch project provides integration with the [elasticsearch](http://www.elasticsearch.org/) search engine.
-
-Guide
-------------
-
-* [Reference Documentation](https://github.com/BioMedCentralLtd/spring-data-elasticsearch/blob/master/site/reference.zip?raw=true)
-* [PDF Documentation](https://github.com/BioMedCentralLtd/spring-data-elasticsearch/blob/master/site/reference/pdf/spring-data-elasticsearch-reference.pdf?raw=true)
-* [API Documentation](https://github.com/BioMedCentralLtd/spring-data-elasticsearch/blob/master/site/apidocs.zip?raw=true)
-* [Spring Data Project](http://www.springsource.org/spring-data)
-* [Sample Test Application](https://github.com/BioMedCentralLtd/spring-data-elasticsearch-sample-application)
-
-Test Coverage
--------------
-* Class 92%
-* Method 80%
-* Line 74%
-* Block 74%
-
-[Emma Test Coverage Report](https://github.com/BioMedCentralLtd/spring-data-elasticsearch/blob/master/site/emma.zip?raw=true)
-
-Quick Start
------------
-### Dependency
-```xml
-<dependency>
-	<groupId>org.springframework.data</groupId>
-	<artifactId>spring-data-elasticsearch</artifactId>
-	<version>1.0.0.BUILD-SNAPSHOT</version>
-</dependency>
-```
-
-### ElasticsearchTemplate
-ElasticsearchTemplate is the central support class for elasticsearch operations.
-
-
-### ElasticsearchRepository
-A default implementation of ElasticsearchRepository, aligning to the generic Repository Interfaces, is provided. Spring can do the Repository implementation for you depending on method names in the interface definition.
-
-The ElasticsearchCrudRepository extends PagingAndSortingRepository
-
-```java
- public interface ElasticsearchCrudRepository<T, ID extends Serializable> extends ElasticsearchRepository<T, ID>, PagingAndSortingRepository<T, ID> {
- }
-```
-
-Extending ElasticsearchRepository for custom methods
-
-```java
- public interface BookRepository extends Repository<Book, String> {
-
-     //Equivalent Json Query will be "{ "bool" : { "must" :[{ "field" : {"name" : "?"} },{ "field" : {"price" : "?"} }]} }"
-     List<Book> findByNameAndPrice(String name, Integer price);
-
-     //Equivalent Json Query will be "{"bool" : {"should" : [ {"field" : {"name" : "?"}}, {"field" : {"price" : "?"}} ]}}"
-     List<Book> findByNameOrPrice(String name, Integer price);
-
-     //Equivalent Json Query will be "{"bool" : {"must" : {"field" : {"name" : "?"}}}}"
-     Page<Book> findByName(String name, Pageable page);
-
-     //Equivalent Json Query will be "{"bool" : {"must_not" : {"field" : {"name" : "?"}}}}"
-     Page<Book> findByNameNot(String name, Pageable page);
-
-     //Equivalent Json Query will be "{"bool" : {"must" : {"range" : {"price" : {"from" : ?,"to" : ?,"include_lower" : true,"include_upper" : true}}}}}"
-     Page<Book> findByPriceBetween(int price, Pageable page);
-
-     //Equivalent Json Query will be "{"bool" : {"must" : {"field" : {"name" : {"query" : "?*","analyze_wildcard" : true}}}}"
-     Page<Book> findByNameLike(String name, Pageable page);
-
-     @Query("{\"bool\" : {\"must\" : {\"field\" : {\"message\" : \"?0\"}}}}")
-     Page<Book> findByMessage(String message, Pageable pageable);
- }
-```
-
-Indexing a single document using Elasticsearch Template
-
-```java
- String documentId = "123456";
- SampleEntity sampleEntity = new SampleEntity();
- sampleEntity.setId(documentId);
- sampleEntity.setMessage("some message");
- IndexQuery indexQuery = new IndexQuery();
- indexQuery.setId(documentId);
- indexQuery.setObject(sampleEntity);
- elasticsearchTemplate.index(indexQuery);
-```
-
-Indexing multiple documents (bulk index) using Elasticsearch Template
-
-```java
- @Autowired
- private ElasticsearchTemplate elasticsearchTemplate;
-
- List<IndexQuery> indexQueries = new ArrayList<IndexQuery>();
- //first document
- String documentId = "123456";
- SampleEntity sampleEntity1 = new SampleEntity();
- sampleEntity1.setId(documentId);
- sampleEntity1.setMessage("some message");
-
- IndexQuery indexQuery1 = new IndexQuery();
- indexQuery1.setId(documentId);
- indexQuery1.setObject(sampleEntity1);
- indexQueries.add(indexQuery1);
-
- //second document
- String documentId2 = "123457";
- SampleEntity sampleEntity2 = new SampleEntity();
- sampleEntity2.setId(documentId2);
- sampleEntity2.setMessage("some message");
- IndexQuery indexQuery2 = new IndexQuery();
- indexQuery2.setId(documentId2);
- indexQuery2.setObject(sampleEntity2);
- indexQueries.add(indexQuery2);
- //bulk index
- elasticsearchTemplate.bulkIndex(indexQueries);
-```
-
-Searching entities using Elasticsearch Template
-
-```java
- @Autowired
- private ElasticsearchTemplate elasticsearchTemplate;
-
- SearchQuery searchQuery = new NativeSearchQueryBuilder()
- .withQuery(fieldQuery("id", documentId))
- .build();
- Page<SampleEntity> sampleEntities = elasticsearchTemplate.queryForPage(searchQuery, SampleEntity.class);
-```
-
-Indexing a single document with Repository
-
-```java
- @Resource
- private SampleElasticsearchRepository repository;
-
- String documentId = "123456";
- SampleEntity sampleEntity = new SampleEntity();
- sampleEntity.setId(documentId);
- sampleEntity.setMessage("some message");
-
- repository.save(sampleEntity);
-```
-
-Indexing multiple documents (bulk index) using Repository
-
-```java
- @Resource
- private SampleElasticsearchRepository repository;
-
- String documentId = "123456";
- SampleEntity sampleEntity1 = new SampleEntity();
- sampleEntity1.setId(documentId);
- sampleEntity1.setMessage("some message");
-
- String documentId2 = "123457"
- SampleEntity sampleEntity2 = new SampleEntity();
- sampleEntity2.setId(documentId2);
- sampleEntity2.setMessage("test message");
-
- List<SampleEntity> sampleEntities = Arrays.asList(sampleEntity1, sampleEntity2);
-
- //bulk index
- repository.save(sampleEntities);
-```
-
-### XML Namespace
-
-You can set up repository scanning via XML configuration, which will happily create your repositories.
-
-Using Node Client
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:elasticsearch="http://www.springframework.org/schema/data/elasticsearch"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
-           http://www.springframework.org/schema/data/elasticsearch http://www.springframework.org/schema/data/elasticsearch/spring-elasticsearch.xsd">
-
-    <elasticsearch:node-client id="client" local="true"/>
-
-    <elasticsearch:repositories base-package="com.xyz.sample.repositories"/>
-
-</beans>
-```
-
-Using Transport Client
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:elasticsearch="http://www.springframework.org/schema/data/elasticsearch"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
-           http://www.springframework.org/schema/data/elasticsearch http://www.springframework.org/schema/data/elasticsearch/spring-elasticsearch.xsd">
-
-    <elasticsearch:transport-client id="client" cluster-nodes="localhost:9300"/>
-
-    <elasticsearch:repositories base-package="com.xyz.sample.repositories"/>
-
-</beans>
-```
-
-### Contact Details
-
-* Rizwan Idrees (rizwan.idrees@biomedcentral.com)
-* Abdul Waheed (abdul.mohammed@biomedcentral.com)
-* Mohsin Husen (mohsin.husen@biomedcentral.com)
diff --git a/SECURITY.adoc b/SECURITY.adoc
new file mode 100644
index 0000000000..2694f228b5
--- /dev/null
+++ b/SECURITY.adoc
@@ -0,0 +1,9 @@
+# Security Policy
+
+## Supported Versions
+
+Please see the https://spring.io/projects/spring-data-elasticsearch[Spring Data Elasticsearch] project page for supported versions.
+
+## Reporting a Vulnerability
+
+Please don't raise security vulnerabilities here. Head over to https://pivotal.io/security to learn how to disclose them responsibly.
diff --git a/TESTING.adoc b/TESTING.adoc
new file mode 100644
index 0000000000..f30c7efe34
--- /dev/null
+++ b/TESTING.adoc
@@ -0,0 +1,20 @@
+= Testing
+
+== Unit tests
+
+Unit tests in the project are run with
+
+----
+./mvnw test
+----
+
+== Integration tests
+
+Integration tests are executed when
+----
+./mvnw verify
+----
+is run. Docker must be running, as the integration tests use Docker to start an Elasticsearch server.
+
+Integration tests are tests that carry the JUnit 5 tag `@Tag("integration-test")` on the test class. Normally this should not be set explicitly; use the annotation `@SpringIntegrationTest` instead. It not only marks the test as an integration test, but also sets up an Elasticsearch Testcontainer automatically and integrates it with Spring, so
+that the required beans can be injected. Check _src/test/java/org/springframework/data/elasticsearch/JUnit5SampleRestClientBasedTests.java_ as a reference setup.
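+
+A minimal sketch of such a test class follows. Class and method names are illustrative, and a real test additionally declares its Spring configuration, as the referenced sample does:
+
+[source,java]
+----
+@SpringIntegrationTest
+public class SampleIntegrationTests {
+
+	// injected against the Elasticsearch instance started by Testcontainers
+	@Autowired ElasticsearchOperations operations;
+
+	@Test
+	public void shouldInjectOperations() {
+		assertThat(operations).isNotNull();
+	}
+}
+----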
diff --git a/ci/clean.sh b/ci/clean.sh
new file mode 100755
index 0000000000..ca174330ee
--- /dev/null
+++ b/ci/clean.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -x
+
+set -euo pipefail
+
+export JENKINS_USER=${JENKINS_USER_NAME}
+
+MAVEN_OPTS="-Duser.name=${JENKINS_USER} -Duser.home=/tmp/jenkins-home" \
+ ./mvnw -s settings.xml clean -Dscan=false -Dmaven.repo.local=/tmp/jenkins-home/.m2/spring-data-elasticsearch -Ddevelocity.storage.directory=/tmp/jenkins-home/.develocity-root
diff --git a/ci/pipeline.properties b/ci/pipeline.properties
new file mode 100644
index 0000000000..cde4a8e881
--- /dev/null
+++ b/ci/pipeline.properties
@@ -0,0 +1,31 @@
+# Java versions
+java.main.tag=24.0.1_9-jdk-noble
+java.next.tag=24.0.1_9-jdk-noble
+
+# Docker container images - standard
+docker.java.main.image=library/eclipse-temurin:${java.main.tag}
+docker.java.next.image=library/eclipse-temurin:${java.next.tag}
+
+# Supported versions of MongoDB
+docker.mongodb.6.0.version=6.0.23
+docker.mongodb.7.0.version=7.0.20
+docker.mongodb.8.0.version=8.0.9
+
+# Supported versions of Redis
+docker.redis.6.version=6.2.13
+docker.redis.7.version=7.2.4
+
+# Docker environment settings
+docker.java.inside.basic=-v $HOME:/tmp/jenkins-home
+docker.java.inside.docker=-u root -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/usr/bin/docker -v $HOME:/tmp/jenkins-home
+
+# Credentials
+docker.registry=
+docker.credentials=hub.docker.com-springbuildmaster
+docker.proxy.registry=https://docker-hub.usw1.packages.broadcom.com
+docker.proxy.credentials=usw1_packages_broadcom_com-jenkins-token
+artifactory.credentials=02bd1690-b54f-4c9f-819d-a77cb7a9822c
+artifactory.url=https://repo.spring.io
+artifactory.repository.snapshot=libs-snapshot-local
+develocity.access-key=gradle_enterprise_secret_access_key
+jenkins.user.name=spring-builds+jenkins
diff --git a/ci/verify.sh b/ci/verify.sh
new file mode 100755
index 0000000000..46afc80280
--- /dev/null
+++ b/ci/verify.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -x
+
+set -euo pipefail
+
+mkdir -p /tmp/jenkins-home/.m2/spring-data-elasticsearch
+export JENKINS_USER=${JENKINS_USER_NAME}
+
+MAVEN_OPTS="-Duser.name=${JENKINS_USER} -Duser.home=/tmp/jenkins-home" \
+ ./mvnw -s settings.xml \
+ -P${PROFILE} clean dependency:list verify -Dsort -U -B -Dmaven.repo.local=/tmp/jenkins-home/.m2/spring-data-elasticsearch -Ddevelocity.storage.directory=/tmp/jenkins-home/.develocity-root
diff --git a/formatting.xml b/formatting.xml
deleted file mode 100644
index b60c70a4ef..0000000000
--- a/formatting.xml
+++ /dev/null
@@ -1,820 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/mvnw b/mvnw
new file mode 100755
index 0000000000..9091adf188
--- /dev/null
+++ b/mvnw
@@ -0,0 +1,310 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ if [ -n "$MVNW_REPOURL" ]; then
+ jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ else
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ fi
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+ if $cygwin; then
+ wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
+ fi
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ wget "$jarUrl" -O "$wrapperJarPath"
+ else
+ wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
+ fi
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ curl -o "$wrapperJarPath" "$jarUrl" -f
+ else
+ curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
+ fi
+
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ # For Cygwin, switch paths to Windows format before running javac
+ if $cygwin; then
+ javaClass=`cygpath --path --windows "$javaClass"`
+ fi
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+# Provide a "standardized" way to retrieve the CLI args that will
+# work with both Windows and non-Windows executions.
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
+export MAVEN_CMD_LINE_ARGS
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/mvnw.cmd b/mvnw.cmd
new file mode 100644
index 0000000000..86115719e5
--- /dev/null
+++ b/mvnw.cmd
@@ -0,0 +1,182 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+
+FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Found %WRAPPER_JAR%
+ )
+) else (
+ if not "%MVNW_REPOURL%" == "" (
+ SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ )
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ )
+
+ powershell -Command "&{"^
+ "$webclient = new-object System.Net.WebClient;"^
+ "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
+ "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
+ "}"^
+ "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
+ "}"
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Finished downloading %WRAPPER_JAR%
+ )
+)
+@REM End of extension
+
+@REM Provide a "standardized" way to retrieve the CLI args that will
+@REM work with both Windows and non-Windows executions.
+set MAVEN_CMD_LINE_ARGS=%*
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/package.json b/package.json
new file mode 100644
index 0000000000..4689506b3f
--- /dev/null
+++ b/package.json
@@ -0,0 +1,10 @@
+{
+ "dependencies": {
+ "antora": "3.2.0-alpha.6",
+ "@antora/atlas-extension": "1.0.0-alpha.2",
+ "@antora/collector-extension": "1.0.0-alpha.7",
+ "@asciidoctor/tabs": "1.0.0-beta.6",
+ "@springio/antora-extensions": "1.13.0",
+ "@springio/asciidoctor-extensions": "1.0.0-alpha.11"
+ }
+}
diff --git a/pom.xml b/pom.xml
index b74901c260..4fcfd20c49 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,172 +1,488 @@
-
-
- 4.0.0
-
- org.springframework.data
- spring-data-elasticsearch
- 1.0.0.BUILD-SNAPSHOT
-
-
- org.springframework.data.build
- spring-data-parent
- 1.1.0.BUILD-SNAPSHOT
- ../spring-data-build/parent/pom.xml
-
-
- Spring Data Elasticsearch
- Spring Data Implementation for Elasticsearch
- https://github.com/SpringSource/spring-data-elasticsearch
-
-
-
- DATAES
-
- 3.2.1
- 2.6
- 0.90.2
- 1.9.2
- 1.6.0.BUILD-SNAPSHOT
-
-
-
-
-
-
-
- org.springframework
- spring-context
- ${spring}
-
-
- commons-logging
- commons-logging
-
-
-
-
-
- org.springframework
- spring-tx
- ${spring}
-
-
-
-
- cglib
- cglib
- 2.2.2
- test
-
-
-
-
- org.springframework.data
- spring-data-commons
- ${springdata.commons}
-
-
-
-
- commons-lang
- commons-lang
- ${commonslang}
-
-
- commons-collections
- commons-collections
- ${commonscollections}
-
-
-
-
- joda-time
- joda-time
- ${jodatime}
-
-
-
-
- org.elasticsearch
- elasticsearch
- ${elasticsearch}
-
-
-
-
- org.codehaus.jackson
- jackson-mapper-asl
- ${jackson}
-
-
-
-
- javax.enterprise
- cdi-api
- ${cdi}
- provided
- true
-
-
-
-
- org.springframework
- spring-test
- ${spring}
- test
-
-
- org.apache.openwebbeans.test
- cditest-owb
- ${webbeans}
- test
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-assembly-plugin
-
-
- org.codehaus.mojo
- wagon-maven-plugin
-
-
-
-
-
-
- biomedcentral
- BioMed Central Development Team
- +0
-
-
-
-
-
- spring-libs-snapshot
- http://repo.springsource.org/lib-snapshot-local
-
-
-
-
- https://github.com/SpringSource/spring-data-elasticsearch
- scm:git:git://github.com/SpringSource/spring-data-elasticsearch.git
- scm:git:ssh://git@github.com:SpringSource/spring-data-elasticsearch.git
-
-
-
-
- Bamboo
- http://build.springsource.org/browse/SPRINGDATAES
-
-
-
- JIRA
- https://jira.springsource.org/browse/DATAES
-
-
-
+
+
+
+ 4.0.0
+
+ org.springframework.data
+ spring-data-elasticsearch
+ 6.0.0-SNAPSHOT
+
+
+ org.springframework.data.build
+ spring-data-parent
+ 4.0.0-SNAPSHOT
+
+
+ Spring Data Elasticsearch
+ Spring Data Implementation for Elasticsearch
+ https://github.com/spring-projects/spring-data-elasticsearch
+
+
+ 4.0.0-SNAPSHOT
+
+
+ 9.0.2
+
+ 0.19.0
+ 2.23.1
+ 1.5.3
+ 1.20.0
+ 3.9.1
+
+ spring.data.elasticsearch
+
+
+ test
+ integration-test
+
+
+
+
+ biomedcentral
+ BioMed Central Development Team
+ +0
+
+
+ cstrobl
+ Christoph Strobl
+ cstrobl at pivotal.io
+ Pivotal
+ https://www.pivotal.io
+
+ Developer
+
+ +1
+
+
+ mpaluch
+ Mark Paluch
+ mpaluch at pivotal.io
+ Pivotal
+ https://www.pivotal.io
+
+ Developer
+
+ +1
+
+
+
+
+ https://github.com/spring-projects/spring-data-elasticsearch
+ scm:git:git://github.com/spring-projects/spring-data-elasticsearch.git
+ scm:git:ssh://git@github.com/spring-projects/spring-data-elasticsearch.git
+
+
+
+
+ Bamboo
+ https://build.spring.io/browse/SPRINGDATAES
+
+
+
+ GitHub
+ https://github.com/spring-projects/spring-data-elasticsearch/issues
+
+
+
+
+
+
+ org.springframework
+ spring-context
+
+
+
+ org.springframework
+ spring-tx
+
+
+
+
+ org.springframework.data
+ spring-data-commons
+ ${springdata.commons}
+
+
+
+
+ org.springframework
+ spring-webflux
+ true
+
+
+
+ io.projectreactor
+ reactor-test
+ test
+
+
+
+ co.elastic.clients
+ elasticsearch-java
+ ${elasticsearch-java}
+
+
+ commons-logging
+ commons-logging
+
+
+
+
+
+ org.elasticsearch.client
+ elasticsearch-rest-client
+ ${elasticsearch-java}
+
+
+ commons-logging
+ commons-logging
+
+
+
+
+
+ com.querydsl
+ querydsl-core
+ ${querydsl}
+ true
+
+
+
+
+ com.fasterxml.jackson.core
+ jackson-core
+
+
+ com.fasterxml.jackson.core
+ jackson-databind
+
+
+
+
+
+ javax.interceptor
+ javax.interceptor-api
+ 1.2.2
+ test
+
+
+
+ jakarta.enterprise
+ jakarta.enterprise.cdi-api
+ provided
+ true
+
+
+
+ jakarta.annotation
+ jakarta.annotation-api
+ ${jakarta-annotation-api}
+ test
+
+
+
+ org.apache.openwebbeans
+ openwebbeans-se
+ ${webbeans}
+ test
+
+
+
+
+ org.jetbrains.kotlin
+ kotlin-stdlib
+ true
+
+
+
+ org.jetbrains.kotlin
+ kotlin-reflect
+ true
+
+
+
+ org.jetbrains.kotlinx
+ kotlinx-coroutines-core
+ true
+
+
+
+ org.jetbrains.kotlinx
+ kotlinx-coroutines-reactor
+ true
+
+
+
+
+ org.springframework
+ spring-test
+ test
+
+
+ ch.qos.logback
+ logback-classic
+
+
+
+
+
+ org.jetbrains.kotlinx
+ kotlinx-coroutines-test
+ test
+ true
+
+
+
+ org.slf4j
+ log4j-over-slf4j
+ ${slf4j}
+ test
+
+
+ org.apache.logging.log4j
+ log4j-core
+ ${log4j}
+ test
+
+
+ org.apache.logging.log4j
+ log4j-to-slf4j
+ ${log4j}
+ test
+
+
+
+ org.skyscreamer
+ jsonassert
+ ${jsonassert}
+ test
+
+
+
+ org.wiremock
+ wiremock
+ ${wiremock}
+ test
+
+
+
+ commons-logging
+ commons-logging
+
+
+ org.ow2.asm
+ asm
+
+
+
+
+
+ io.specto
+ hoverfly-java-junit5
+ ${hoverfly}
+ test
+
+
+
+
+ org.apache.xbean
+ xbean-asm5-shaded
+ 4.5
+ test
+
+
+
+ javax.servlet
+ javax.servlet-api
+ 3.1.0
+ test
+
+
+
+ org.mockito
+ mockito-junit-jupiter
+ ${mockito}
+ test
+
+
+
+ org.testcontainers
+ elasticsearch
+ ${testcontainers}
+ test
+
+
+
+
+ commons-codec
+ commons-codec
+ 1.15
+ test
+
+
+
+ com.tngtech.archunit
+ archunit-junit5
+ ${archunit}
+ test
+
+
+
+
+
+
+
+ src/main/resources
+ true
+
+ **/versions.properties
+
+
+
+ src/main/resources
+ false
+
+ **/versions.properties
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+ true
+ false
+
+ **/*Tests.java
+ **/*Test.java
+
+
+ false
+
+
+
+
+
+ default-test
+ ${mvn.unit-test.goal}
+
+ test
+
+
+ integration-test
+
+
+
+
+ integration-test-elasticsearch
+ ${mvn.integration-test-elasticsearch.goal}
+
+ test
+
+
+ integration-test
+
+ elasticsearch
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+
+
+ org.apache.logging.log4j
+ log4j-core
+ ${log4j}
+
+
+
+
+
+
+
+
+
+ ci
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+
+
+
+
+
+
+
+
+
+
+ **/*
+
+ .git/**/*,target/**/*,**/target/**/*,.idea/**/*,**/spring.schemas,**/*.svg,mvnw,mvnw.cmd,**/*.policy
+
+ ./
+
+
+
+
+
+
+
+ antora-process-resources
+
+
+
+ src/main/antora/resources/antora-resources
+ true
+
+
+
+
+
+
+ antora
+
+
+
+ org.antora
+ antora-maven-plugin
+
+
+
+
+
+
+
+
+ spring-snapshot
+ https://repo.spring.io/snapshot
+
+ true
+
+
+ false
+
+
+
+ spring-milestone
+ https://repo.spring.io/milestone
+
+
+
+
diff --git a/settings.xml b/settings.xml
new file mode 100644
index 0000000000..b3227cc110
--- /dev/null
+++ b/settings.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+		  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+		  xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0%20https://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+	<servers>
+		<server>
+			<id>spring-plugins-release</id>
+			<username>${env.ARTIFACTORY_USR}</username>
+			<password>${env.ARTIFACTORY_PSW}</password>
+		</server>
+		<server>
+			<id>spring-libs-snapshot</id>
+			<username>${env.ARTIFACTORY_USR}</username>
+			<password>${env.ARTIFACTORY_PSW}</password>
+		</server>
+		<server>
+			<id>spring-libs-milestone</id>
+			<username>${env.ARTIFACTORY_USR}</username>
+			<password>${env.ARTIFACTORY_PSW}</password>
+		</server>
+		<server>
+			<id>spring-libs-release</id>
+			<username>${env.ARTIFACTORY_USR}</username>
+			<password>${env.ARTIFACTORY_PSW}</password>
+		</server>
+	</servers>
+</settings>
\ No newline at end of file
diff --git a/site/apidocs.zip b/site/apidocs.zip
deleted file mode 100644
index da0aa2a90d..0000000000
Binary files a/site/apidocs.zip and /dev/null differ
diff --git a/site/emma.zip b/site/emma.zip
deleted file mode 100644
index 1641e738d1..0000000000
Binary files a/site/emma.zip and /dev/null differ
diff --git a/site/reference.zip b/site/reference.zip
deleted file mode 100644
index 434302f458..0000000000
Binary files a/site/reference.zip and /dev/null differ
diff --git a/site/reference/pdf/spring-data-elasticsearch-reference.pdf b/site/reference/pdf/spring-data-elasticsearch-reference.pdf
deleted file mode 100644
index 55ed7520e8..0000000000
Binary files a/site/reference/pdf/spring-data-elasticsearch-reference.pdf and /dev/null differ
diff --git a/src/docbkx/index.xml b/src/docbkx/index.xml
deleted file mode 100644
index 05f60dde7b..0000000000
--- a/src/docbkx/index.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-
-
- Spring Data Elasticsearch
-
-
- BioMed Central
- Development Team
-
-
-
-
- Copies of this document may be made for your own use and for
- distribution to others, provided that you do not
- charge any fee for
- such copies and further provided that each copy
- contains this
- Copyright Notice, whether
- distributed in print or electronically.
-
-
-
-
- 2013
- The original author(s)
-
-
-
-
-
-
-
-
- Reference Documentation
-
-
-
-
-
-
-
-
-
-
- Appendix
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/docbkx/preface.xml b/src/docbkx/preface.xml
deleted file mode 100644
index 2872ded1c6..0000000000
--- a/src/docbkx/preface.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
-
- Preface
- The Spring Data Elasticsearch project applies core Spring
- concepts to
- the
- development of solutions using the Elasticsearch Search
- Engine.
- We have provided a "template" as a high-level abstraction for
- storing, querying, sorting and faceting documents. You will notice
- similarities
- to the Spring Data Solr and
- MongoDB support in the Spring Framework.
-
-
- Project Metadata
-
-
-
- Version Control -
- git://github.com/BioMedCentralLtd/spring-data-elasticsearch.git
-
-
-
-
-
-
- Requirements
-
- Requires
- Elasticsearch
- 0.20.2 or above; the server is an optional dependency, and is not
- needed at all if you are using the embedded node client.
-
-
-
\ No newline at end of file
diff --git a/src/docbkx/reference/data-elasticsearch.xml b/src/docbkx/reference/data-elasticsearch.xml
deleted file mode 100644
index ee34605704..0000000000
--- a/src/docbkx/reference/data-elasticsearch.xml
+++ /dev/null
@@ -1,487 +0,0 @@
-
-
-
- Elasticsearch Repositories
-
- This chapter includes details of the Elasticsearch repository
- implementation.
-
-
-
- Introduction
-
-
- Spring Namespace
-
-
- The Spring Data Elasticsearch module contains a custom namespace
- allowing
- definition of repository beans as well as elements for
- instantiating
- an
- ElasticsearchServer
- .
-
-
-
- Using the
- repositories
- element looks up Spring Data repositories as described in
-
- .
-
-
-
- Setting up Elasticsearch repositories using Namespace
- <?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xmlns:elasticsearch="http://www.springframework.org/schema/data/elasticsearch"
-xsi:schemaLocation="http://www.springframework.org/schema/beans%20http://www.springframework.org/schema/beans/spring-beans-3.1.xsd%20http://www.springframework.org/schema/data/elasticsearch%20http://www.springframework.org/schema/data/elasticsearch/spring-elasticsearch-1.0.xsd"%3E
-
-<elasticsearch:repositories base-package="com.acme.repositories" />
-</beans>
-
-
-
- Using the
- Transport Client
- or
- Node Client
- element registers an instance of
- Elasticsearch Server
- in the context.
-
-
- Transport Client using Namespace
- <?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xmlns:elasticsearch="http://www.springframework.org/schema/data/elasticsearch"
-xsi:schemaLocation="http://www.springframework.org/schema/beans%20http://www.springframework.org/schema/beans/spring-beans-3.1.xsd%20http://www.springframework.org/schema/data/elasticsearch%20http://www.springframework.org/schema/data/elasticsearch/spring-elasticsearch-1.0.xsd"%3E
-
-<elasticsearch:transport-client id="client" cluster-nodes="localhost:9300,someip:9300" />
-</beans>
-
-
-
- Node Client using Namespace
- <?xml version="1.0" encoding="UTF-8"?>
-<beans xmlns="http://www.springframework.org/schema/beans"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xmlns:elasticsearch="http://www.springframework.org/schema/data/elasticsearch"
-xsi:schemaLocation="http://www.springframework.org/schema/beans%20http://www.springframework.org/schema/beans/spring-beans-3.1.xsd%20http://www.springframework.org/schema/data/elasticsearch%20http://www.springframework.org/schema/data/elasticsearch/spring-elasticsearch-1.0.xsd"%3E
-
-<elasticsearch:node-client id="client" local="true" />
-</beans>
-
-
-
-
- Annotation based configuration
- The Spring Data Elasticsearch repositories support can not only
- be
- activated through an XML namespace but also using annotations
- through JavaConfig.
-
-
- Spring Data Elasticsearch repositories using JavaConfig
-
-
-@Configuration
-@EnableElasticsearchRepositories(basePackages = "org.springframework.data.elasticsearch.repositories")
-static class Config {
-
-@Bean
-public ElasticsearchOperations elasticsearchTemplate() {
-return new ElasticsearchTemplate(nodeBuilder().local(true).node().client());
- }
-}
-
- The configuration above sets up an
- Embedded Elasticsearch Server
- which is used by the
- ElasticsearchTemplate
- . Spring Data Elasticsearch Repositories are activated using the
- @EnableElasticsearchRepositories
- annotation, which
- essentially carries the same attributes as the XML
- namespace does. If no
- base package is configured, it will use the
- one
- the configuration class
- resides in.
-
-
-
-
- Elasticsearch Repositories using CDI
- The Spring Data Elasticsearch repositories can also be set up
- using CDI
- functionality.
-
-
- Spring Data Elasticsearch repositories using CDI
-
- class ElasticsearchTemplateProducer {
-
-@Produces
-@ApplicationScoped
-public ElasticsearchOperations createElasticsearchTemplate() {
- return new ElasticsearchTemplate(nodeBuilder().local(true).node().client());
- }
-}
-
-class ProductService {
-
-private ProductRepository repository;
-
-public Page<Product> findAvailableBookByName(String name, Pageable pageable) {
- return repository.findByAvailableTrueAndNameStartingWith(name, pageable);
-}
-
-@Inject
-public void setRepository(ProductRepository repository) {
- this.repository = repository;
- }
-}
-
-
-
-
- Query methods
-
- Query lookup strategies
-
- The Elasticsearch module supports all basic query building
- features such as string queries, abstract queries and criteria
- queries, or
- having the query derived from the
- method name.
-
-
-
- Declared queries
-
- Deriving the query from the method name is not always sufficient
- and/or may result in unreadable method names. In this case one
- might make use of the
- @Query
- annotation (see
-
- ).
-
-
-
-
-
- Query creation
-
-
- Generally the query creation mechanism for Elasticsearch works as
- described
- in
-
- . Here's a short example
- of what an Elasticsearch query method
- translates into:
-
- Query creation from method names
- public interface BookRepository extends Repository<Book, String> {
- List<Book> findByNameAndPrice(String name, Integer price);
-}
-
- The method name above will be translated into the following
- Elasticsearch JSON query:
-
-
- { "bool" :
- { "must" :
- [
- { "field" : {"name" : "?"} },
- { "field" : {"price" : "?"} }
- ] } }
-
-
-
- A list of supported keywords for Elasticsearch is shown below.
-
-
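- A few representative keywords with sample method fragments (an
- illustrative excerpt, not the complete keyword table):
-
- And         - findByNameAndPrice(String name, Integer price)
- Or          - findByNameOrPrice(String name, Integer price)
- Between     - findByPriceBetween(Integer from, Integer to)
- LessThan    - findByPriceLessThan(Integer price)
- GreaterThan - findByPriceGreaterThan(Integer price)
- Like        - findByNameLike(String name)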
-
-
- Using @Query Annotation
-
-
- Declare query at the method using the
- @Query
- annotation.
-
-
- public interface BookRepository extends ElasticsearchRepository<Book, String> {
- @Query("{"bool" : {"must" : {"field" : {"name" : "?0"}}}}")
- Page<Book> findByName(String name,Pageable pageable);
-}
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/docbkx/reference/elasticsearch-misc.xml b/src/docbkx/reference/elasticsearch-misc.xml
deleted file mode 100644
index 78aece6864..0000000000
--- a/src/docbkx/reference/elasticsearch-misc.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
-
- Miscellaneous Elasticsearch Operation Support
-
-
- This chapter covers additional support for Elasticsearch operations
- that cannot be directly accessed via the repository
- interface.
- It is
- recommended to add those operations as custom
- implementation as
- described in
-
- .
-
-
-
- Filter Builder
-
- Using a filter builder improves query speed, as filters skip score calculation and can be cached by Elasticsearch.
-
-
-
-private ElasticsearchTemplate elasticsearchTemplate;
-SearchQuery searchQuery = new NativeSearchQueryBuilder()
-.withQuery(matchAllQuery())
-.withFilter(boolFilter().must(termFilter("id", documentId)))
-.build();
-Page<SampleEntity> sampleEntities = elasticsearchTemplate.queryForPage(searchQuery,SampleEntity.class);
-
-
-
-
- Using Scan And Scroll For Big Result Set
-
- Elasticsearch has a scan and scroll feature for retrieving big result
- sets in chunks.
- ElasticsearchTemplate
- has scan and scroll methods that can be used as below.
-
-
-
- Using Scan and Scroll
-
-
-SearchQuery searchQuery = new NativeSearchQueryBuilder()
-    .withQuery(matchAllQuery())
-    .withIndices("test-index")
-    .withTypes("test-type")
-    .withPageable(new PageRequest(0, 1))
-    .build();
-String scrollId = elasticsearchTemplate.scan(searchQuery, 1000, false);
-List<SampleEntity> sampleEntities = new ArrayList<SampleEntity>();
-boolean hasRecords = true;
-while (hasRecords) {
-    Page<SampleEntity> page = elasticsearchTemplate.scroll(scrollId, 5000L, new ResultsMapper<SampleEntity>() {
-        @Override
-        public Page<SampleEntity> mapResults(SearchResponse response) {
-            // stop when the scroll returns no more hits
-            if (response.getHits().getHits().length <= 0) {
-                return null;
-            }
-            List<SampleEntity> chunk = new ArrayList<SampleEntity>();
-            for (SearchHit searchHit : response.getHits()) {
-                SampleEntity entity = new SampleEntity();
-                entity.setId(searchHit.getId());
-                entity.setMessage((String) searchHit.getSource().get("message"));
-                chunk.add(entity);
-            }
-            return new PageImpl<SampleEntity>(chunk);
-        }
-    });
-    if (page != null) {
-        sampleEntities.addAll(page.getContent());
-        hasRecords = page.hasNextPage();
-    } else {
-        hasRecords = false;
-    }
-}
-
-
-
\ No newline at end of file
diff --git a/src/docbkx/reference/repositories.xml b/src/docbkx/reference/repositories.xml
deleted file mode 100644
index b473c297e8..0000000000
--- a/src/docbkx/reference/repositories.xml
+++ /dev/null
@@ -1,1463 +0,0 @@
-
-
-
- Repositories
-
-
- Introduction
-
- Implementing a data access layer of an application has been
- cumbersome for quite a while. Too much boilerplate code had to be
- written.
- Domain classes were anemic and not designed in a real object oriented or
- domain driven manner.
-
-
- Using both of these technologies makes a developer's life a lot
- easier
- regarding the persistence of a rich domain model. Nevertheless the amount of
- boilerplate code to implement repositories especially is still quite
- high.
- So the goal of the repository abstraction of Spring Data is to reduce
- the
- effort to implement data access layers for various persistence stores
- significantly.
-
-
- The following chapters will introduce the core concepts and
- interfaces of Spring Data repositories in general. For detailed
- information on the specific features of a particular store consult
- the
- later chapters of this document.
-
-
-
- As this part of the documentation is pulled in from Spring Data
- Commons we have to decide for a particular module to be used as
- example.
- The configuration and code samples in this chapter are using the JPA
- module. Make sure you adapt e.g. the XML namespace declaration,
- types to
- be extended to the equivalents of the module you're actually
- using.
-
-
-
-
-
- Core concepts
-
-
- The central interface in Spring Data repository abstraction is
- Repository
- (probably not that much of a
- surprise). It is typeable to the domain class to manage as well as the id
- type of the domain class. This interface mainly acts as a marker interface
- to capture the types to deal with and help us when discovering
- interfaces
- that extend this one. Beyond that there's
- CrudRepository
- which provides some
- sophisticated functionality around CRUD for the entity being
- managed.
-
-
-
-
- CrudRepository
- interface
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- public interface CrudRepository<T, ID extends Serializable>
- extends Repository<T, ID> {
-
- T save(T entity);
-
- T findOne(ID primaryKey);
-
- Iterable<T> findAll();
-
- Long count();
-
- void delete(T entity);
-
- boolean exists(ID primaryKey);
-
- // … more functionality omitted.
- }
-
-
-
- save(…) - Saves the given entity.
- findOne(…) - Returns the entity identified by the given id.
- findAll() - Returns all entities.
- count() - Returns the number of entities.
- delete(…) - Deletes the given entity.
- exists(…) - Returns whether an entity with the given id exists.
-
-
-
-
-
- Usually we will have persistence technology specific
- sub-interfaces
- to include additional technology specific methods. We will now ship
- implementations for a variety of Spring Data modules that implement
- this
- interface.
-
-
-
- On top of the
- CrudRepository
- there is
- a
- PagingAndSortingRepository
- abstraction
- that adds additional methods to ease paginated access to entities:
-
-
-
- PagingAndSortingRepository
-
- public interface PagingAndSortingRepository<T, ID extends Serializable> extends CrudRepository<T, ID> {
-
- Iterable<T> findAll(Sort sort);
-
- Page<T> findAll(Pageable pageable);
- }
-
-
-
- Accessing the second page of
- User
- by a page
- size of 20 you could simply do something like this:
-
-
- PagingAndSortingRepository<User, Long> repository = // … get access to a bean
- Page<User> users = repository.findAll(new PageRequest(1, 20));
-
-
-
- Query methods
-
- Next to standard CRUD functionality, repositories usually add
- queries
- on the underlying datastore. With Spring Data, declaring those queries
- becomes a four-step process:
-
-
-
-
-
- Declare an interface extending
- Repository
- or one of its sub-interfaces
- and type it to the domain class it shall handle.
-
-
- public interface PersonRepository extends Repository<Person, Long> { … }
-
-
-
- Declare query methods on the interface.
-
- List<Person> findByLastname(String lastname);
-
-
-
- Setup Spring to create proxy instances for those
- interfaces.
-
-
- <?xml version="1.0" encoding="UTF-8"?>
- <beans:beans xmlns:beans="http://www.springframework.org/schema/beans"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns="http://www.springframework.org/schema/data/jpa"
- xsi:schemaLocation="http://www.springframework.org/schema/beans%20http://www.springframework.org/schema/beans/spring-beans.xsd%20http://www.springframework.org/schema/data/jpa%20http://www.springframework.org/schema/data/jpa/spring-jpa.xsd"%3E
-
- <repositories base-package="com.acme.repositories" />
-
- </beans:beans>
-
-
-
- Note that we use the JPA namespace here just by example. If
- you're using the repository abstraction for any other store you need
- to change this to the appropriate namespace declaration of your
- store module, exchanging
- jpa
- for
- e.g.
- mongodb
- .
-
-
-
-
-
- Get the repository instance injected and use it.
-
- public class SomeClient {
-
- @Autowired
- private PersonRepository repository;
-
- public void doSomething() {
- List<Person> persons = repository.findByLastname("Matthews");
- }
- }
-
-
- At this stage we barely scratched the surface of what's possible
- with the repositories but the general approach should be clear. Let's
- go
- through each of these steps and figure out details and various options
- that you have at each stage.
-
-
-
- Defining repository interfaces
-
-
- As a very first step you define a domain class specific repository
- interface. It's got to extend
- Repository
- and be typed to the domain class and an ID type. If you want to
- expose
- CRUD methods for that domain type, extend
- CrudRepository
- instead of
- Repository
- .
-
-
-
- Fine tuning repository definition
-
-
- Usually you will have your repository interface extend
- Repository
- ,
- CrudRepository
- or
- PagingAndSortingRepository
- . If you
- don't like extending Spring Data interfaces at all you can also
- annotate your repository interface with
- @RepositoryDefinition
- . Extending
- CrudRepository
- will expose a complete
- set of methods to manipulate your entities. If you would rather be
- selective about the methods being exposed, simply copy the ones you
- want to expose from
- CrudRepository
- into
- your domain repository.
-
-
-
- Selectively exposing CRUD methods
-
- interface MyBaseRepository<T, ID extends Serializable> extends Repository<T, ID> {
- T findOne(ID id);
- T save(T entity);
- }
-
- interface UserRepository extends MyBaseRepository<User, Long> {
-
- User findByEmailAddress(EmailAddress emailAddress);
- }
-
-
-
- In the first step we define a common base interface for all our
- domain repositories and expose findOne(…) as well as save(…). These
- methods will be routed into the base repository implementation of the
- store of your choice because they match the method signatures in
- CrudRepository. So our UserRepository will now be able to save users,
- find single ones by id, and trigger a query to find Users by their
- email address.
-
-
-
-
-
- Defining query methods
-
-
- Query lookup strategies
-
- The next thing we have to discuss is the definition of query
- methods. There are two main ways that the repository proxy is able
- to
- come up with the store specific query from the method name. The first
- option is to derive the query from the method name directly, the
- second is using some kind of additionally created query. What
- detailed
- options are available pretty much depends on the actual store,
- however, there's got to be some algorithm that decides what actual
- query is created.
-
-
-
- There are three strategies available for the repository
- infrastructure to resolve the query. The strategy to be used can be
- configured at the namespace through the
- query-lookup-strategy
- attribute. However, it might be the
- case that some of the strategies are not supported for specific
- datastores. Here are your options:
-
-
-
- CREATE
-
-
- This strategy will try to construct a store specific query
- from the query method's name. The general approach is to remove a
- given set of well-known prefixes from the method name and parse
- the
- rest of the method. Read more about query construction in
-
- .
-
-
-
-
- USE_DECLARED_QUERY
-
- This strategy tries to find a declared query which will be
- used for execution first. The query could be defined by an
- annotation somewhere or declared by other means. Please consult
- the
- documentation of the specific store to find out what options are
- available for that store. If the repository infrastructure does not
- find a declared query for the method at bootstrap time it will
- fail.
-
-
-
-
- CREATE_IF_NOT_FOUND (default)
-
-
- This strategy is actually a combination of
- CREATE
- and
- USE_DECLARED_QUERY
- . It will try to look up a
- declared query first but create a custom method name based query if
- no declared query was found. This is the default lookup strategy and
- thus will be used if you don't configure anything explicitly. It
- allows quick query definition by method names but also custom
- tuning
- of these queries by introducing declared queries as needed.
-
-
-
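- The strategy is configured via the
- query-lookup-strategy
- attribute on the namespace element; a minimal sketch (the attribute
- values follow the Spring Data namespace, the package name is
- illustrative):
-
- <repositories base-package="com.acme.repositories" query-lookup-strategy="create-if-not-found" />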
-
-
- Query creation
-
-
- The query builder mechanism built into Spring Data repository
- infrastructure is useful to build constraining queries over
- entities
- of the repository. We will strip the prefixes
- findBy
- ,
- find
- ,
- readBy
- ,
- read
- ,
- getBy
- as well as
- get
- from the method and
- start parsing the rest of it. At a very basic level you can define
- conditions on entity properties and concatenate them with
- AND
- and
- OR
- .
-
-
-
- Query creation from method names
-
-
- public interface PersonRepository extends Repository<Person, Long> {
-
- List<Person> findByEmailAddressAndLastname(EmailAddress emailAddress, String lastname);
- }
-
-
-
-
- The actual result of parsing that method will of course depend
- on the persistence store we create the query for, however, there are
- some general things to notice. The expressions are usually property
- traversals combined with operators that can be concatenated. As you
- can see in the example you can combine property expressions with
- And
- and Or. Beyond that you also get support for various operators like
- Between
- ,
- LessThan
- ,
- GreaterThan
- ,
- Like
- for the
- property expressions. As the operators supported can vary from
- datastore to datastore please consult the according part of the
- reference documentation.
-
-
-
- Property expressions
-
-
- Property expressions can just refer to a direct property of
- the managed entity (as you just saw in the example above). On query
- creation time we already make sure that the parsed property is
- a
- property of the managed domain class. However, you can also define
- constraints by traversing nested properties. Assume
- Person
- s have
- Address
- es
- with
- ZipCode
- s. In that case a method name
- of
-
-
- List<Person> findByAddressZipCode(ZipCode zipCode);
-
-
- will create the property traversal
- x.address.zipCode
- . The resolution algorithm starts with
- interpreting the entire part (
- AddressZipCode
- ) as
- property and checks the domain class for a property with that name
- (uncapitalized). If it succeeds it just uses that. If not it
- starts
- splitting up the source at the camel case parts from the right side
- into a head and a tail and tries to find the according property,
- e.g.
- AddressZip
- and
- Code
- . If
- we find a property with that head we take the tail and continue
- building the tree down from there. As in our case the first split
- does not match we move the split point to the left
- (
- Address
- ,
- ZipCode
- ).
-
-
-
- Although this should work for most cases, there might be cases
- where the algorithm could select the wrong property. Suppose our
- Person
- class has an
- addressZip
- property as well. Then our algorithm would match in the first
- split
- round already and essentially choose the wrong property and finally
- fail (as the type of
- addressZip
- probably has
- no code property). To resolve this ambiguity you can use
- _
- inside your method name to manually define
- traversal points. So our method name would end up like so:
-
-
- List<Person> findByAddress_ZipCode(ZipCode zipCode);
-
-
-
-
-
- Special parameter handling
-
- To hand parameters to your query you simply define method
- parameters as already seen in the examples above. Besides that we
- will
- recognize certain specific types to apply pagination and sorting to
- your queries dynamically.
-
-
-
- Using Pageable and Sort in query methods
-
- Page<User> findByLastname(String lastname, Pageable pageable);
-
- List<User> findByLastname(String lastname, Sort sort);
-
- List<User> findByLastname(String lastname, Pageable pageable);
-
-
-
- The first method allows you to pass a
- Pageable
- instance to the query method to dynamically add paging to your
- statically defined query.
- Sorting
- options are handed via
- the
- Pageable
- instance too. If you only
- need sorting, simply add a
- Sort
- parameter to your method.
- As you also can see, simply returning a
- List
- is possible as well. We will then
- not retrieve the additional metadata required to build the actual
- Page
- instance but rather simply
- restrict the query to look up only the given range of entities.
-
-
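- For example, fetching the first page of 10 users sorted by lastname
- could look like this (a sketch using the PageRequest and Sort types
- shown above):
-
- Page<User> users = repository.findByLastname("Matthews", new PageRequest(0, 10, new Sort("lastname")));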
-
- To find out how many pages you get for a query, we
- have to trigger an additional count query. This will be derived
- from
- the query you actually trigger by default.
-
-
-
-
-
-
- Creating repository instances
-
- So now the question is how to create instances and bean
- definitions for the repository interfaces defined.
-
-
-
- XML Configuration
-
- The easiest way to do so is by using the Spring namespace that
- is shipped with each Spring Data module that supports the
- repository
- mechanism. Each of those includes a repositories element that allows
- you to simply define a base package that Spring will scan for
- you.
-
-
- <?xml version="1.0" encoding="UTF-8"?>
- <beans:beans xmlns:beans="http://www.springframework.org/schema/beans"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns="http://www.springframework.org/schema/data/jpa"
- xsi:schemaLocation="http://www.springframework.org/schema/beans%20http://www.springframework.org/schema/beans/spring-beans.xsd%20http://www.springframework.org/schema/data/jpa%20http://www.springframework.org/schema/data/jpa/spring-jpa.xsd"%3E
-
- <repositories base-package="com.acme.repositories" />
-
- </beans:beans>
-
-
- In this case we instruct Spring to scan
- com.acme.repositories
- and all its sub packages for
- interfaces extending
- Repository
- or one
- of its sub-interfaces. For each interface found it will register the
- persistence technology specific
- FactoryBean
- to create the according
- proxies that handle invocations of the query methods. Each of these
- beans will be registered under a bean name that is derived from the
- interface name, so an interface of
- UserRepository
- would be registered
- under
- userRepository
- . The
- base-package
- attribute allows the use of wildcards, so that you can have a
- pattern
- of scanned packages.
-
-
-
- Using filters
-
-
- By default we will pick up every interface extending the
- persistence technology specific
- Repository
- sub-interface located
- underneath the configured base package and create a bean instance
- for it. However, you might want finer grained control over which
- interfaces bean instances get created for. To do this we support
- the
- use of
- <include-filter />
- and
- <exclude-filter />
- elements inside
- <repositories />
- . The semantics are exactly
- equivalent to the elements in Spring's context namespace. For
- details see
- Spring reference documentation
- on these
- elements.
-
-
- E.g. to exclude certain interfaces from instantiation as
- repository, you could use the following configuration:
-
-
-
- Using exclude-filter element
-
- <repositories base-package="com.acme.repositories">
- <context:exclude-filter type="regex" expression=".*SomeRepository" />
- </repositories>
-
-
- This would exclude all interfaces ending in
- SomeRepository
- from being
- instantiated.
-
-
-
-
-
-
- JavaConfig
-
-
- The repository infrastructure can also be triggered using a
- store-specific
- @Enable${store}Repositories
- annotation
- on a JavaConfig class. For an introduction into Java based
- configuration of the Spring container please have a look at the
- reference documentation.
-
-
- JavaConfig in the Spring reference documentation -
-
-
-
-
-
- A sample configuration to enable Spring Data repositories
- would
- look something like this.
-
-
-
- Sample annotation based repository configuration
-
- @Configuration
- @EnableJpaRepositories("com.acme.repositories")
- class ApplicationConfiguration {
-
- @Bean
- public EntityManagerFactory entityManagerFactory() {
- // …
- }
- }
-
-
-
- Note that the sample uses the JPA specific annotation which
- would have to be exchanged depending on which store module you actually
- use. The same applies to the definition of the
- EntityManagerFactory
- bean. Please
- consult the sections covering the store-specific configuration.
-
-
-
-
- Standalone usage
-
-
- You can also use the repository infrastructure outside of a
- Spring container. You will still need to have some of the Spring
- libraries on your classpath but you can generally set up
- repositories
- programmatically as well. The Spring Data modules providing repository
- support ship a persistence technology specific
- RepositoryFactory
- that can be used as
- follows:
-
-
-
- Standalone usage of repository factory
-
- RepositoryFactorySupport factory = … // Instantiate factory here
- UserRepository repository = factory.getRepository(UserRepository.class);
-
-
-
-
-
-
- Custom implementations
-
-
- Adding behaviour to single repositories
-
- Often it is necessary to provide a custom implementation for a
- few
- repository methods. Spring Data repositories easily allow you to provide
- custom repository code and integrate it with generic CRUD
- abstraction
- and query method functionality. To enrich a repository with custom
- functionality you have to define an interface and an implementation
- for
- that functionality first and let the repository interface you provided
- so far extend that custom interface.
-
-
-
- Interface for custom repository functionality
-
- interface UserRepositoryCustom {
-
- public void someCustomMethod(User user);
- }
-
-
-
- Implementation of custom repository functionality
-
-
- class UserRepositoryImpl implements UserRepositoryCustom {
-
- public void someCustomMethod(User user) {
- // Your custom implementation
- }
- }
- Note that the implementation itself does not depend on
- Spring Data and can be a regular Spring bean. So you can use standard
- dependency injection behaviour to inject references to other beans,
- take part in aspects and so on.
-
-
-
-
- Changes to the your basic repository interface
-
-
- public interface UserRepository extends CrudRepository<User, Long>, UserRepositoryCustom {
-
- // Declare query methods here
- }
- Let your standard repository interface extend the custom
- one. This makes CRUD and custom functionality available to
- clients.
-
-
-
-
- Configuration
-
-
- If you use namespace configuration the repository infrastructure
- tries to autodetect custom implementations by looking up classes in
- the package in which it found a repository, using the naming convention of
- appending the namespace element's attribute
- repository-impl-postfix
- to the class name. This suffix
- defaults to
- Impl
- .
-
-
-
- Configuration example
-
-
- <repositories base-package="com.acme.repository" />
-
- <repositories base-package="com.acme.repository" repository-impl-postfix="FooBar" />
-
-
-
-
- The first configuration example will try to look up a class
- com.acme.repository.UserRepositoryImpl
- to act
- as custom repository implementation, whereas the second example will
- try
- to look up
- com.acme.repository.UserRepositoryFooBar
- .
-
-
-
-
- Manual wiring
-
- The approach above works perfectly well if your custom
- implementation uses annotation based configuration and autowiring
- entirely as it will be treated as any other Spring bean. If your
- custom implementation bean needs some special wiring you simply
- declare the bean and name it after the conventions just described.
- We
- will then pick up the custom bean by name rather than creating an
- instance.
-
-
-
- Manual wiring of custom implementations (I)
-
- <repositories base-package="com.acme.repository" />
-
- <beans:bean id="userRepositoryImpl" class="…">
- <!-- further configuration -->
- </beans:bean>
-
-
-
-
-
- Adding custom behaviour to all repositories
-
- In other cases you might want to add a single method to all of
- your repository interfaces. So the approach just shown is not
- feasible.
- The first step to achieve this is adding an intermediate interface to
- declare the shared behaviour
-
-
-
- An interface declaring custom shared behaviour
-
-
-
- public interface MyRepository<T, ID extends Serializable>
- extends JpaRepository<T, ID> {
-
- void sharedCustomMethod(ID id);
- }
-
-
-
-
- Now your individual repository interfaces will extend this
- intermediate interface instead of the
- Repository
- interface to include the
- functionality declared. The second step is to create an implementation
- of this interface that extends the persistence technology specific
- repository base class which will then act as a custom base class for
- the
- repository proxies.
-
-
-
-
- The default behaviour of the Spring
- <repositories
- />
- namespace is to provide an implementation for all
- interfaces that fall under the
- base-package
- . This means
- that if left in its current state, an implementation instance of
- MyRepository
- will be created by Spring.
- This is of course not desired as it is just supposed to act as an
- intermediary between
- Repository
- and the
- actual repository interfaces you want to define for each entity. To
- exclude an interface extending
- Repository
- from being instantiated as a
- repository instance it can either be annotated with
- @NoRepositoryBean
- or moved outside of
- the configured
- base-package
- .
-
-
-
-
- Custom repository base class
-
-
- public class MyRepositoryImpl<T, ID extends Serializable>
- extends SimpleJpaRepository<T, ID> implements MyRepository<T, ID> {
-
- private EntityManager entityManager;
-
- // There are two constructors to choose from, either can be used.
- public MyRepositoryImpl(Class<T> domainClass, EntityManager entityManager) {
- super(domainClass, entityManager);
-
- // This is the recommended method for accessing inherited class dependencies.
- this.entityManager = entityManager;
- }
-
- public void sharedCustomMethod(ID id) {
- // implementation goes here
- }
- }
-
-
-
- The last step is to create a custom repository factory to replace
- the default
- RepositoryFactoryBean
- that will in
- turn produce a custom
- RepositoryFactory
- . The new
- repository factory will then provide your
- MyRepositoryImpl
- as the implementation of any
- interfaces that extend the
- Repository
- interface, replacing the
- SimpleJpaRepository
- implementation you just extended.
-
-
-
- Custom repository factory bean
-
-
- public class MyRepositoryFactoryBean<R extends JpaRepository<T, I>, T, I extends Serializable>
- extends JpaRepositoryFactoryBean<R, T, I> {
-
- protected RepositoryFactorySupport createRepositoryFactory(EntityManager entityManager) {
-
- return new MyRepositoryFactory(entityManager);
- }
-
- private static class MyRepositoryFactory<T, I extends Serializable> extends JpaRepositoryFactory {
-
- private EntityManager entityManager;
-
- public MyRepositoryFactory(EntityManager entityManager) {
- super(entityManager);
-
- this.entityManager = entityManager;
- }
-
- protected Object getTargetRepository(RepositoryMetadata metadata) {
-
- return new MyRepositoryImpl<T, I>((Class<T>) metadata.getDomainClass(), entityManager);
- }
-
- protected Class<?> getRepositoryBaseClass(RepositoryMetadata metadata) {
-
- // The RepositoryMetadata can be safely ignored, it is used by the JpaRepositoryFactory
- //to check for QueryDslJpaRepository's which is out of scope.
- return MyRepository.class;
- }
- }
- }
-
-
-
- Finally you can either declare beans of the custom factory
- directly or use the
- factory-class
- attribute of the Spring
- namespace to tell the repository infrastructure to use your custom
- factory implementation.
-
-
-
- Using the custom factory with the namespace
-
- <repositories base-package="com.acme.repository"
- factory-class="com.acme.MyRepositoryFactoryBean" />
-
-
-
-
-
- Extensions
-
- This chapter documents a set of Spring Data extensions that
- enable
- Spring Data usage in a variety of contexts. Currently most of the
- integration is targeted towards Spring MVC.
-
-
-
- Domain class web binding for Spring MVC
-
- Given you are developing a Spring MVC web application you
- typically have to resolve domain class ids from URLs. By default
- it's
- your task to transform that request parameter or URL part into the
- domain class to hand it layers below then or execute business logic
- on
- the entities directly. This should look something like this:
-
-
- @Controller
- @RequestMapping("/users")
- public class UserController {
-
- private final UserRepository userRepository;
-
- public UserController(UserRepository userRepository) {
- this.userRepository = userRepository;
- }
-
- @RequestMapping("/{id}")
- public String showUserForm(@PathVariable("id") Long id, Model model) {
-
- // Do null check for id
- User user = userRepository.findOne(id);
- // Do null check for user
- // Populate model
- return "user";
- }
- }
-
-
- First you pretty much have to declare a repository dependency for
- each controller to lookup the entity managed by the controller or
- repository respectively. Beyond that looking up the entity is
- boilerplate as well as it's always a
- findOne(…)
- call. Fortunately Spring provides means to register custom
- converting
- components that allow conversion between a
- String
- value to an arbitrary type.
-
-
-
- PropertyEditors
-
-
- For versions up to Spring 3.0 simple Java
- PropertyEditor
- s had to be used. Thus,
- we offer a
- DomainClassPropertyEditorRegistrar
- ,
- that will look up all Spring Data repositories registered in the
- ApplicationContext
- and register a
- custom
- PropertyEditor
- for the managed
- domain class
-
-
- <bean class="….web.servlet.mvc.annotation.AnnotationMethodHandlerAdapter">
- <property name="webBindingInitializer">
- <bean class="….web.bind.support.ConfigurableWebBindingInitializer">
- <property name="propertyEditorRegistrars">
- <bean class="org.springframework.data.repository.support.DomainClassPropertyEditorRegistrar" />
- </property>
- </bean>
- </property>
- </bean>
-
- If you have configured Spring MVC like this you can turn your
- controller into the following that reduces a lot of the clutter and
- boilerplate.
-
-
- @Controller
- @RequestMapping("/users")
- public class UserController {
-
- @RequestMapping("/{id}")
- public String showUserForm(@PathVariable("id") User user, Model model) {
-
- // Do null check for user
- // Populate model
- return "userForm";
- }
- }
-
-
-
- ConversionService
-
-
- As of Spring 3.0 the
- PropertyEditor
- support is superseded
- by a new conversion infrastructure that leaves all the drawbacks of
- PropertyEditor
- s behind and uses a
- stateless X to Y conversion approach. We now ship with a
- DomainClassConverter
- that pretty much mimics
- the behaviour of
- DomainClassPropertyEditorRegistrar
- . To register
- the converter you have to declare
- ConversionServiceFactoryBean
- , register the
- converter and tell the Spring MVC namespace to use the configured
- conversion service:
-
-
- <mvc:annotation-driven conversion-service="conversionService" />
-
- <bean id="conversionService" class="….context.support.ConversionServiceFactoryBean">
- <property name="converters">
- <list>
- <bean class="org.springframework.data.repository.support.DomainClassConverter">
- <constructor-arg ref="conversionService" />
- </bean>
- </list>
- </property>
- </bean>
-
-
-
-
- Web pagination
-
- @Controller
- @RequestMapping("/users")
- public class UserController {
-
- // DI code omitted
-
- @RequestMapping
- public String showUsers(Model model, HttpServletRequest request) {
-
- int page = Integer.parseInt(request.getParameter("page"));
- int pageSize = Integer.parseInt(request.getParameter("pageSize"));
- model.addAttribute("users", userService.getUsers(new PageRequest(page, pageSize)));
- return "users";
- }
- }
-
-
- As you can see the naive approach requires the method to contain
- an
- HttpServletRequest
- parameter that has
- to be parsed manually. We even omitted an appropriate failure handling
- which would make the code even more verbose. The bottom line is that
- the
- controller actually shouldn't have to handle the functionality of
- extracting pagination information from the request. So we include a
- PageableArgumentResolver
- that will do the work
- for you.
-
-
- <bean class="….web.servlet.mvc.annotation.AnnotationMethodHandlerAdapter">
- <property name="customArgumentResolvers">
- <list>
- <bean class="org.springframework.data.web.PageableArgumentResolver" />
- </list>
- </property>
- </bean>
-
- This configuration allows you to simplify controllers down to
- something like this:
-
-
- @Controller
- @RequestMapping("/users")
- public class UserController {
-
- @RequestMapping
- public String showUsers(Model model, Pageable pageable) {
-
- model.addAttribute("users", userDao.readAll(pageable));
- return "users";
- }
- }
-
-
- The
- PageableArgumentResolver
- will
- automatically resolve request parameters to build a
- PageRequest
- instance. By default it will expect
- the following structure for the request parameters:
-
-
-
-
-
- Request parameters evaluated by PageableArgumentResolver:
-
- page          - the page you want to retrieve
- page.size     - the size of the page you want to retrieve
- page.sort     - the property that should be sorted by
- page.sort.dir - the direction that should be used for sorting
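-
- A request using these parameters might look like this (an illustrative URL):
-
- GET /users?page=2&page.size=25&page.sort=lastname&page.sort.dir=desc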
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/src/main/antora/antora-playbook.yml b/src/main/antora/antora-playbook.yml
new file mode 100644
index 0000000000..1a4f73c1e6
--- /dev/null
+++ b/src/main/antora/antora-playbook.yml
@@ -0,0 +1,40 @@
+# PACKAGES antora@3.2.0-alpha.2 @antora/atlas-extension:1.0.0-alpha.1 @antora/collector-extension@1.0.0-alpha.3 @springio/antora-extensions@1.1.0-alpha.2 @asciidoctor/tabs@1.0.0-alpha.12 @opendevise/antora-release-line-extension@1.0.0-alpha.2
+#
+# The purpose of this Antora playbook is to build the docs in the current branch.
+antora:
+ extensions:
+ - require: '@springio/antora-extensions'
+ root_component_name: 'data-elasticsearch'
+site:
+ title: Spring Data Elasticsearch
+ url: https://docs.spring.io/spring-data-elasticsearch/reference/
+content:
+ sources:
+ - url: ./../../..
+ branches: HEAD
+ start_path: src/main/antora
+ worktrees: true
+ - url: https://github.com/spring-projects/spring-data-commons
+ # Refname matching:
+ # https://docs.antora.org/antora/latest/playbook/content-refname-matching/
+ branches: [ main, 3.4.x, 3.3.x ]
+ start_path: src/main/antora
+asciidoc:
+ attributes:
+ hide-uri-scheme: '@'
+ tabs-sync-option: '@'
+ extensions:
+ - '@asciidoctor/tabs'
+ - '@springio/asciidoctor-extensions'
+ - '@springio/asciidoctor-extensions/javadoc-extension'
+ sourcemap: true
+urls:
+ latest_version_segment: ''
+runtime:
+ log:
+ failure_level: warn
+ format: pretty
+ui:
+ bundle:
+ url: https://github.com/spring-io/antora-ui-spring/releases/download/v0.4.16/ui-bundle.zip
+ snapshot: true
diff --git a/src/main/antora/antora.yml b/src/main/antora/antora.yml
new file mode 100644
index 0000000000..2348fca613
--- /dev/null
+++ b/src/main/antora/antora.yml
@@ -0,0 +1,17 @@
+name: data-elasticsearch
+version: true
+title: Spring Data Elasticsearch
+nav:
+ - modules/ROOT/nav.adoc
+ext:
+ collector:
+ - run:
+ command: ./mvnw validate process-resources -am -Pantora-process-resources
+ local: true
+ scan:
+ dir: target/classes/
+ - run:
+ command: ./mvnw package -Pdistribute
+ local: true
+ scan:
+ dir: target/antora
diff --git a/src/main/antora/modules/ROOT/nav.adoc b/src/main/antora/modules/ROOT/nav.adoc
new file mode 100644
index 0000000000..fa1ee8110d
--- /dev/null
+++ b/src/main/antora/modules/ROOT/nav.adoc
@@ -0,0 +1,47 @@
+* xref:index.adoc[Overview]
+** xref:commons/upgrade.adoc[]
+** xref:migration-guides.adoc[]
+*** xref:migration-guides/migration-guide-3.2-4.0.adoc[]
+*** xref:migration-guides/migration-guide-4.0-4.1.adoc[]
+*** xref:migration-guides/migration-guide-4.1-4.2.adoc[]
+*** xref:migration-guides/migration-guide-4.2-4.3.adoc[]
+*** xref:migration-guides/migration-guide-4.3-4.4.adoc[]
+*** xref:migration-guides/migration-guide-4.4-5.0.adoc[]
+*** xref:migration-guides/migration-guide-5.0-5.1.adoc[]
+*** xref:migration-guides/migration-guide-5.1-5.2.adoc[]
+*** xref:migration-guides/migration-guide-5.2-5.3.adoc[]
+*** xref:migration-guides/migration-guide-5.3-5.4.adoc[]
+*** xref:migration-guides/migration-guide-5.4-5.5.adoc[]
+*** xref:migration-guides/migration-guide-5.5-6.0.adoc[]
+
+
+* xref:elasticsearch.adoc[]
+** xref:elasticsearch/clients.adoc[]
+** xref:elasticsearch/object-mapping.adoc[]
+** xref:elasticsearch/template.adoc[]
+** xref:elasticsearch/reactive-template.adoc[]
+** xref:elasticsearch/entity-callbacks.adoc[]
+** xref:elasticsearch/auditing.adoc[]
+** xref:elasticsearch/join-types.adoc[]
+** xref:elasticsearch/routing.adoc[]
+** xref:elasticsearch/misc.adoc[]
+** xref:elasticsearch/scripted-and-runtime-fields.adoc[]
+
+* xref:repositories.adoc[]
+** xref:repositories/core-concepts.adoc[]
+** xref:repositories/definition.adoc[]
+** xref:elasticsearch/repositories/elasticsearch-repositories.adoc[]
+** xref:elasticsearch/repositories/reactive-elasticsearch-repositories.adoc[]
+** xref:repositories/create-instances.adoc[]
+** xref:repositories/query-methods-details.adoc[]
+** xref:elasticsearch/repositories/elasticsearch-repository-queries.adoc[]
+** xref:repositories/projections.adoc[]
+** xref:repositories/custom-implementations.adoc[]
+** xref:repositories/core-domain-events.adoc[]
+** xref:repositories/null-handling.adoc[]
+** xref:elasticsearch/repositories/cdi-integration.adoc[]
+** xref:repositories/query-keywords-reference.adoc[]
+** xref:repositories/query-return-types-reference.adoc[]
+
+* xref:attachment$api/java/index.html[Javadoc,role=link-external,window=_blank]
+* https://github.com/spring-projects/spring-data-commons/wiki[Wiki,role=link-external,window=_blank]
diff --git a/src/main/antora/modules/ROOT/pages/commons/upgrade.adoc b/src/main/antora/modules/ROOT/pages/commons/upgrade.adoc
new file mode 100644
index 0000000000..51a9189aa0
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/commons/upgrade.adoc
@@ -0,0 +1 @@
+include::{commons}@data-commons::page$upgrade.adoc[]
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch.adoc
new file mode 100644
index 0000000000..fe0bddbf20
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch.adoc
@@ -0,0 +1,16 @@
+[[elasticsearch.core]]
+= Elasticsearch Support
+:page-section-summary-toc: 1
+
+Spring Data support for Elasticsearch contains a wide range of features:
+
+* Spring configuration support for various xref:elasticsearch/clients.adoc[Elasticsearch clients].
+* The xref:elasticsearch/template.adoc[`ElasticsearchTemplate` and `ReactiveElasticsearchTemplate`] helper classes that provide object mapping between ES index operations and POJOs.
+* xref:elasticsearch/template.adoc#exception-translation[Exception translation] into Spring's portable {springDocsUrl}data-access.html#dao-exceptions[Data Access Exception Hierarchy].
+* Feature rich xref:elasticsearch/object-mapping.adoc[object mapping] integrated with _Spring's_ {springDocsUrl}core.html#core-convert[Conversion Service].
+* xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.annotations[Annotation-based mapping] metadata that is extensible to support other metadata formats.
+* Java-based xref:elasticsearch/template.adoc[query, criteria, and update DSLs].
+* Automatic implementation of xref:repositories.adoc[imperative and reactive `Repository` interfaces] including support for xref:repositories/custom-implementations.adoc[custom query methods].
+
+For most data-oriented tasks, you can use the `[Reactive]ElasticsearchTemplate` or the `Repository` support, both of which use the rich object-mapping functionality.
+Spring Data Elasticsearch uses naming conventions in its various APIs that are consistent with those of the Elasticsearch client libraries, so that they feel familiar and you can map your existing knowledge onto the Spring APIs.
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/auditing.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/auditing.adoc
new file mode 100644
index 0000000000..f9633dec4f
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/auditing.adoc
@@ -0,0 +1,85 @@
+[[elasticsearch.auditing]]
+= Elasticsearch Auditing
+
+[[elasticsearch.auditing.preparing]]
+== Preparing entities
+
+In order for the auditing code to be able to decide whether an entity instance is new, the entity must implement the `Persistable` interface which is defined as follows:
+
+[source,java]
+----
+package org.springframework.data.domain;
+
+import org.jspecify.annotations.Nullable;
+
+public interface Persistable<ID> {
+ @Nullable
+ ID getId();
+
+ boolean isNew();
+}
+----
+
+As the existence of an id is not a sufficient criterion to determine whether an entity is new in Elasticsearch, additional information is necessary. One way is to use the creation-relevant auditing fields for this decision:
+
+A `Person` entity might look as follows - omitting getter and setter methods for brevity:
+
+[source,java]
+----
+@Document(indexName = "person")
+public class Person implements Persistable<Long> {
+ @Id private Long id;
+ private String lastName;
+ private String firstName;
+ @CreatedDate
+ @Field(type = FieldType.Date, format = DateFormat.basic_date_time)
+ private Instant createdDate;
+ @CreatedBy
+ private String createdBy;
+ @Field(type = FieldType.Date, format = DateFormat.basic_date_time)
+ @LastModifiedDate
+ private Instant lastModifiedDate;
+ @LastModifiedBy
+ private String lastModifiedBy;
+
+ public Long getId() { // <.>
+ return id;
+ }
+
+ @Override
+ public boolean isNew() {
+ return id == null || (createdDate == null && createdBy == null); // <.>
+ }
+}
+----
+<.> the getter is the required implementation from the interface
+<.> an object is new if it either has no `id` or if none of the fields containing creation attributes are set.
+
+[[elasticsearch.auditing.activating]]
+== Activating auditing
+
+After the entities have been set up and an `AuditorAware` - or `ReactiveAuditorAware` - bean has been provided, auditing must be activated by putting the `@EnableElasticsearchAuditing` annotation on a configuration class:
+
+[source,java]
+----
+@Configuration
+@EnableElasticsearchRepositories
+@EnableElasticsearchAuditing
+class MyConfiguration {
+ // configuration code
+}
+----
+
+When using the reactive stack this must be:
+[source,java]
+----
+@Configuration
+@EnableReactiveElasticsearchRepositories
+@EnableReactiveElasticsearchAuditing
+class MyConfiguration {
+ // configuration code
+}
+----
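+
+An `AuditorAware` bean provides the current auditor. A minimal sketch might look like this (the bean name and the static principal are assumptions for this example; a real implementation would look up the current user, for example from Spring Security):
+
+[source,java]
+----
+import java.util.Optional;
+
+import org.springframework.data.domain.AuditorAware;
+import org.springframework.stereotype.Component;
+
+@Component
+class FixedAuditorAware implements AuditorAware<String> {
+
+    @Override
+    public Optional<String> getCurrentAuditor() {
+        // a real implementation would determine the current principal here
+        return Optional.of("some-user");
+    }
+}
+----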
+
+If your code contains more than one `AuditorAware` bean for different types, you must provide the name of the bean to use as an argument to the `auditorAwareRef` parameter of the `@EnableElasticsearchAuditing` annotation.
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/clients.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/clients.adoc
new file mode 100644
index 0000000000..0cf7d5ea3c
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/clients.adoc
@@ -0,0 +1,234 @@
+[[elasticsearch.clients]]
+= Elasticsearch Clients
+
+This chapter illustrates configuration and usage of supported Elasticsearch client implementations.
+
+Spring Data Elasticsearch operates upon an Elasticsearch client (provided by Elasticsearch client libraries) that is connected to a single Elasticsearch node or a cluster.
+Although the Elasticsearch Client can be used directly to work with the cluster, applications using Spring Data Elasticsearch normally use the higher level abstractions of xref:elasticsearch/template.adoc[Elasticsearch Operations] and xref:elasticsearch/repositories/elasticsearch-repositories.adoc[Elasticsearch Repositories].
+
+[[elasticsearch.clients.restclient]]
+== Imperative Rest Client
+
+To use the imperative (non-reactive) client, a configuration bean must be configured like this:
+
+====
+[source,java]
+----
+import org.springframework.data.elasticsearch.client.elc.ElasticsearchConfiguration;
+
+@Configuration
+public class MyClientConfig extends ElasticsearchConfiguration {
+
+ @Override
+ public ClientConfiguration clientConfiguration() {
+ return ClientConfiguration.builder() <.>
+ .connectedTo("localhost:9200")
+ .build();
+ }
+}
+----
+
+<.> for a detailed description of the builder methods see xref:elasticsearch/clients.adoc#elasticsearch.clients.configuration[Client Configuration]
+====
+
+The javadoc:org.springframework.data.elasticsearch.client.elc.ElasticsearchConfiguration[] class allows further configuration by overriding, for example, the `jsonpMapper()` or `transportOptions()` methods.
+
+
+The following beans can then be injected in other Spring components:
+
+====
+[source,java]
+----
+import org.springframework.beans.factory.annotation.Autowired;
+
+@Autowired
+ElasticsearchOperations operations; <.>
+
+@Autowired
+ElasticsearchClient elasticsearchClient; <.>
+
+@Autowired
+RestClient restClient; <.>
+
+@Autowired
+JsonpMapper jsonpMapper; <.>
+----
+
+<.> an implementation of javadoc:org.springframework.data.elasticsearch.core.ElasticsearchOperations[]
+<.> the `co.elastic.clients.elasticsearch.ElasticsearchClient` that is used.
+<.> the low level `RestClient` from the Elasticsearch libraries
+<.> the `JsonpMapper` used by the Elasticsearch `Transport`
+====
+
+Basically one should just use the javadoc:org.springframework.data.elasticsearch.core.ElasticsearchOperations[] to interact with the Elasticsearch cluster.
+When using repositories, this instance is used under the hood as well.
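+
+As an illustration, a minimal usage sketch might look like this (the `Person` entity and its accessors are assumptions for this example):
+
+====
+[source,java]
+----
+Person person = new Person();
+person.setId("42");
+person.setLastName("Smith");
+
+operations.save(person);                            // index the entity
+Person loaded = operations.get("42", Person.class); // read it back by id
+----
+====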
+
+[[elasticsearch.clients.reactiverestclient]]
+== Reactive Rest Client
+
+When working with the reactive stack, the configuration must be derived from a different class:
+
+====
+[source,java]
+----
+import org.springframework.data.elasticsearch.client.elc.ReactiveElasticsearchConfiguration;
+
+@Configuration
+public class MyClientConfig extends ReactiveElasticsearchConfiguration {
+
+ @Override
+ public ClientConfiguration clientConfiguration() {
+ return ClientConfiguration.builder() <.>
+ .connectedTo("localhost:9200")
+ .build();
+ }
+}
+----
+
+<.> for a detailed description of the builder methods see xref:elasticsearch/clients.adoc#elasticsearch.clients.configuration[Client Configuration]
+====
+
+The javadoc:org.springframework.data.elasticsearch.client.elc.ReactiveElasticsearchConfiguration[] class allows further configuration by overriding for example the `jsonpMapper()` or `transportOptions()` methods.
+
+The following beans can then be injected in other Spring components:
+
+====
+[source,java]
+----
+@Autowired
+ReactiveElasticsearchOperations operations; <.>
+
+@Autowired
+ReactiveElasticsearchClient elasticsearchClient; <.>
+
+@Autowired
+RestClient restClient; <.>
+
+@Autowired
+JsonpMapper jsonpMapper; <.>
+----
+
+<.> an implementation of javadoc:org.springframework.data.elasticsearch.core.ReactiveElasticsearchOperations[]
+<.> the `org.springframework.data.elasticsearch.client.elc.ReactiveElasticsearchClient` that is used.
+This is a reactive implementation based on the Elasticsearch client implementation.
+<.> the low level `RestClient` from the Elasticsearch libraries
+<.> the `JsonpMapper` used by the Elasticsearch `Transport`
+====
+
+Basically one should just use the javadoc:org.springframework.data.elasticsearch.core.ReactiveElasticsearchOperations[] to interact with the Elasticsearch cluster.
+When using repositories, this instance is used under the hood as well.
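+
+Again as an illustration, a minimal reactive usage sketch might look like this (the `Person` entity is an assumption for this example; note that the calls return reactive types):
+
+====
+[source,java]
+----
+operations.save(person)                       // Mono<Person>
+    .then(operations.get("42", Person.class)) // read it back by id
+    .subscribe(System.out::println);
+----
+====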
+
+[[elasticsearch.clients.configuration]]
+== Client Configuration
+
+Client behaviour can be changed via the javadoc:org.springframework.data.elasticsearch.client.ClientConfiguration[] that allows to set options for SSL, connect and socket timeouts, headers and other parameters.
+
+.Client Configuration
+====
+[source,java]
+----
+import org.springframework.data.elasticsearch.client.ClientConfiguration;
+import org.springframework.data.elasticsearch.support.HttpHeaders;
+
+import static org.springframework.data.elasticsearch.client.elc.ElasticsearchClients.*;
+
+HttpHeaders httpHeaders = new HttpHeaders();
+httpHeaders.add("some-header", "on every request"); <.>
+
+ClientConfiguration clientConfiguration = ClientConfiguration.builder()
+ .connectedTo("localhost:9200", "localhost:9291") <.>
+ .usingSsl() <.>
+ .withProxy("localhost:8888") <.>
+ .withPathPrefix("ela") <.>
+ .withConnectTimeout(Duration.ofSeconds(5)) <.>
+ .withSocketTimeout(Duration.ofSeconds(3)) <.>
+ .withDefaultHeaders(httpHeaders) <.>
+ .withBasicAuth(username, password) <.>
+ .withHeaders(() -> { <.>
+ HttpHeaders headers = new HttpHeaders();
+ headers.add("currentTime", LocalDateTime.now().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME));
+ return headers;
+ })
+ .withClientConfigurer( <.>
+ ElasticsearchHttpClientConfigurationCallback.from(clientBuilder -> {
+ // ...
+ return clientBuilder;
+ }))
+ . // ... other options
+ .build();
+
+----
+
+<.> Define default headers, if they need to be customized
+<.> Use the builder to provide cluster addresses, set default `HttpHeaders` or enable SSL.
+<.> Optionally enable SSL. There are overloads of this function that take an `SSLContext` or, alternatively, the fingerprint of the certificate as it is output by Elasticsearch 8 on startup.
+<.> Optionally set a proxy.
+<.> Optionally set a path prefix, mostly used when different clusters are behind some reverse proxy.
+<.> Set the connection timeout.
+<.> Set the socket timeout.
+<.> Optionally set headers.
+<.> Add basic authentication.
+<.> A `Supplier` function can be specified which is called every time before a request is sent to Elasticsearch - here, as an example, the current time is written in a header.
+<.> a function to configure the created client (see xref:elasticsearch/clients.adoc#elasticsearch.clients.configuration.callbacks[Client configuration callbacks]), can be added multiple times.
+====
+
+IMPORTANT: Adding a header supplier as shown in the example above allows injecting headers that may change over time, like authentication JWT tokens.
+If this is used in the reactive setup, the supplier function *must not* block!
+
+[[elasticsearch.clients.configuration.callbacks]]
+=== Client configuration callbacks
+
+The javadoc:org.springframework.data.elasticsearch.client.ClientConfiguration[] class offers the most common parameters to configure the client.
+In case this is not enough, the user can add callback functions by using the `withClientConfigurer(ClientConfigurationCallback<?>)` method.
+
+The following callbacks are provided:
+
+[[elasticsearch.clients.configuration.callbacks.rest]]
+==== Configuration of the low level Elasticsearch `RestClient`:
+
+This callback provides a `org.elasticsearch.client.RestClientBuilder` that can be used to configure the Elasticsearch
+`RestClient`:
+====
+[source,java]
+----
+ClientConfiguration.builder()
+ .connectedTo("localhost:9200", "localhost:9291")
+ .withClientConfigurer(ElasticsearchClients.ElasticsearchRestClientConfigurationCallback.from(restClientBuilder -> {
+ // configure the Elasticsearch RestClient
+ return restClientBuilder;
+ }))
+ .build();
+----
+====
+
+[[elasticsearch.clients.configurationcallbacks.httpasync]]
+==== Configuration of the HttpAsyncClient used by the low level Elasticsearch `RestClient`:
+
+This callback provides a `org.apache.http.impl.nio.client.HttpAsyncClientBuilder` to configure the HttpClient that is
+used by the `RestClient`.
+
+====
+[source,java]
+----
+ClientConfiguration.builder()
+ .connectedTo("localhost:9200", "localhost:9291")
+ .withClientConfigurer(ElasticsearchClients.ElasticsearchHttpClientConfigurationCallback.from(httpAsyncClientBuilder -> {
+ // configure the HttpAsyncClient
+ return httpAsyncClientBuilder;
+ }))
+ .build();
+----
+====
+
+[[elasticsearch.clients.logging]]
+== Client Logging
+
+To see what is actually sent to and received from the server, `Request` / `Response` logging on the transport level needs to be turned on as outlined in the snippet below.
+This can be enabled in the Elasticsearch client by setting the level of the `tracer` package to "trace" (see
+https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current/java-rest-low-usage-logging.html).
+
+.Enable transport layer logging
+[source,xml]
+----
+<logger name="tracer" level="trace"/>
+----
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/elasticsearch-new.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/elasticsearch-new.adoc
new file mode 100644
index 0000000000..d4a9c565d0
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/elasticsearch-new.adoc
@@ -0,0 +1,121 @@
+[[new-features]]
+= What's new
+
+[[new-features.6-0-0]]
+== New in Spring Data Elasticsearch 6.0
+
+* Upgrade to Spring 7
+* Switch to jspecify nullability annotations
+* Upgrade to Elasticsearch 9.0.2
+
+
+[[new-features.5-5-0]]
+== New in Spring Data Elasticsearch 5.5
+
+* Upgrade to Elasticsearch 8.18.1.
+* Add support for the `@SearchTemplateQuery` annotation on repository methods.
+* Scripted field properties of type collection can be populated from scripts returning arrays.
+
+[[new-features.5-4-0]]
+== New in Spring Data Elasticsearch 5.4
+
+* Upgrade to Elasticsearch 8.15.3.
+* Allow customizing the mapped type name for `@InnerField` and `@Field` annotations.
+* Support for Elasticsearch SQL.
+* Add support for retrieving request executionDuration.
+
+[[new-features.5-3-0]]
+== New in Spring Data Elasticsearch 5.3
+
+* Upgrade to Elasticsearch 8.13.2.
+* Add support for highlight queries in highlighting.
+* Add shard statistics to the `SearchHit` class.
+* Add support for multi search template API.
+* Add support for SpEL in @Query.
+* Add support for field aliases in the index mapping.
+* Add support for has_child and has_parent queries.
+
+[[new-features.5-2-0]]
+== New in Spring Data Elasticsearch 5.2
+
+* Upgrade to Elasticsearch 8.11.1
+* The `JsonpMapper` for Elasticsearch is now configurable and provided as a bean.
+* Improved AOT runtime hints for Elasticsearch client library classes.
+* Add Kotlin extensions and repository coroutine support.
+* Introducing the `VersionConflictException` class, thrown in case Elasticsearch reports a 409 error with a version conflict.
+* Enable MultiField annotation on property getter
+* Support nested sort option
+* Improved scripted and runtime field support
+* Improved refresh policy support
+
+[[new-features.5-1-0]]
+== New in Spring Data Elasticsearch 5.1
+
+* Upgrade to Elasticsearch 8.7.1
+* Allow specification of the TLS certificate when connecting to an Elasticsearch 8 cluster
+
+[[new-features.5-0-0]]
+== New in Spring Data Elasticsearch 5.0
+
+* Upgrade to Java 17 baseline
+* Upgrade to Spring Framework 6
+* Upgrade to Elasticsearch 8.5.0
+* Use the new Elasticsearch client library
+
+[[new-features.4-4-0]]
+== New in Spring Data Elasticsearch 4.4
+
+* Introduction of new imperative and reactive clients using the classes from the new Elasticsearch Java client
+* Upgrade to Elasticsearch 7.17.3.
+
+[[new-features.4-3-0]]
+== New in Spring Data Elasticsearch 4.3
+
+* Upgrade to Elasticsearch 7.15.2.
+* Allow runtime_fields to be defined in the index mapping.
+* Add native support for range field types by using a range object.
+* Add repository search for nullable or empty properties.
+* Enable custom converters for single fields.
+* Supply a custom `Sort.Order` providing Elasticsearch specific parameters.
+
+[[new-features.4-2-0]]
+== New in Spring Data Elasticsearch 4.2
+
+* Upgrade to Elasticsearch 7.10.0.
+* Support for custom routing values
+
+[[new-features.4-1-0]]
+== New in Spring Data Elasticsearch 4.1
+
+* Uses Spring 5.3.
+* Upgrade to Elasticsearch 7.9.3.
+* Improved API for alias management.
+* Introduction of `ReactiveIndexOperations` for index management.
+* Index templates support.
+* Support for Geo-shape data with GeoJson.
+
+[[new-features.4-0-0]]
+== New in Spring Data Elasticsearch 4.0
+
+* Uses Spring 5.2.
+* Upgrade to Elasticsearch 7.6.2.
+* Deprecation of `TransportClient` usage.
+* Implements most of the mapping-types available for the index mappings.
+* Removal of the Jackson `ObjectMapper`, now using the xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model[MappingElasticsearchConverter]
+* Cleanup of the API in the `*Operations` interfaces, grouping and renaming methods so that they match the Elasticsearch API, deprecating the old methods, aligning with other Spring Data modules.
+* Introduction of `SearchHit` class to represent a found document together with the relevant result metadata for this document (i.e. _sortValues_).
+* Introduction of the `SearchHits` class to represent a whole search result together with the metadata for the complete search result (i.e. _max_score_).
+* Introduction of `SearchPage` class to represent a paged result containing a `SearchHits` instance.
+* Introduction of the `GeoDistanceOrder` class to be able to create sorting by geographical distance
+* Implementation of Auditing Support
+* Implementation of lifecycle entity callbacks
+
+[[new-features.3-2-0]]
+== New in Spring Data Elasticsearch 3.2
+
+* Secured Elasticsearch cluster support with Basic Authentication and SSL transport.
+* Upgrade to Elasticsearch 6.8.1.
+* Reactive programming support with xref:elasticsearch/repositories/reactive-elasticsearch-repositories.adoc[Reactive Elasticsearch Repositories] and xref:elasticsearch/reactive-template.adoc[].
+* Introduction of the xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model[ElasticsearchEntityMapper] as an alternative to the Jackson `ObjectMapper`.
+* Field name customization in `@Field`.
+* Support for Delete by Query.
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/entity-callbacks.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/entity-callbacks.adoc
new file mode 100644
index 0000000000..cbc08eee39
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/entity-callbacks.adoc
@@ -0,0 +1,42 @@
+include::{commons}@data-commons::page$entity-callbacks.adoc[]
+
+[[elasticsearch.entity-callbacks]]
+== Store specific EntityCallbacks
+
+Spring Data Elasticsearch uses the `EntityCallback` API internally for its auditing support and reacts on the following callbacks:
+
+.Supported Entity Callbacks
+[%header,cols="4"]
+|===
+| Callback
+| Method
+| Description
+| Order
+
+| Reactive/BeforeConvertCallback
+| `onBeforeConvert(T entity, IndexCoordinates index)`
+| Invoked before a domain object is converted to `org.springframework.data.elasticsearch.core.document.Document`.
+Can return the `entity` or a modified entity which then will be converted.
+| `Ordered.LOWEST_PRECEDENCE`
+
+| Reactive/AfterLoadCallback
+| `onAfterLoad(Document document, Class<T> type, IndexCoordinates indexCoordinates)`
+| Invoked after the result from Elasticsearch has been read into a `org.springframework.data.elasticsearch.core.document.Document`.
+| `Ordered.LOWEST_PRECEDENCE`
+
+| Reactive/AfterConvertCallback
+| `onAfterConvert(T entity, Document document, IndexCoordinates indexCoordinates)`
+| Invoked after a domain object is converted from `org.springframework.data.elasticsearch.core.document.Document` on reading result data from Elasticsearch.
+| `Ordered.LOWEST_PRECEDENCE`
+
+| Reactive/AuditingEntityCallback
+| `onBeforeConvert(Object entity, IndexCoordinates index)`
+| Marks an auditable entity as _created_ or _modified_
+| 100
+
+| Reactive/AfterSaveCallback
+| `T onAfterSave(T entity, IndexCoordinates index)`
+| Invoked after a domain object is saved.
+| `Ordered.LOWEST_PRECEDENCE`
+
+|===
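+
+As an illustration, a bean reacting to the before-convert callback might look like the following sketch (the entity type `Person` and the adjustment made are assumptions for this example):
+
+[source,java]
+----
+import org.springframework.data.elasticsearch.core.event.BeforeConvertCallback;
+import org.springframework.data.elasticsearch.core.mapping.IndexCoordinates;
+import org.springframework.stereotype.Component;
+
+@Component
+class PersonBeforeConvertCallback implements BeforeConvertCallback<Person> {
+
+    @Override
+    public Person onBeforeConvert(Person entity, IndexCoordinates index) {
+        // adjust or replace the entity before it is converted and written
+        return entity;
+    }
+}
+----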
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/join-types.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/join-types.adoc
new file mode 100644
index 0000000000..a1bc3df192
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/join-types.adoc
@@ -0,0 +1,239 @@
+[[elasticsearch.jointype]]
+= Join-Type implementation
+
+Spring Data Elasticsearch supports the https://www.elastic.co/guide/en/elasticsearch/reference/current/parent-join.html[Join data type] for creating the corresponding index mappings and for storing the relevant information.
+
+[[elasticsearch.jointype.setting-up]]
+== Setting up the data
+
+For an entity to be used in a parent child join relationship, it must have a property of type `JoinField` which must be annotated with `@JoinTypeRelations`.
+Let's assume a `Statement` entity where a statement may be a _question_, an _answer_, a _comment_ or a _vote_ (a _Builder_ is also shown in this example; it's not necessary, but is used later in the sample code):
+
+====
+[source,java]
+----
+@Document(indexName = "statements")
+@Routing("routing") <.>
+public class Statement {
+ @Id
+ private String id;
+
+ @Field(type = FieldType.Text)
+ private String text;
+
+ @Field(type = FieldType.Keyword)
+ private String routing;
+
+ @JoinTypeRelations(
+ relations =
+ {
+ @JoinTypeRelation(parent = "question", children = {"answer", "comment"}), <.>
+ @JoinTypeRelation(parent = "answer", children = "vote") <.>
+ }
+ )
+ private JoinField<String> relation; <.>
+
+ private Statement() {
+ }
+
+ public static StatementBuilder builder() {
+ return new StatementBuilder();
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public void setId(String id) {
+ this.id = id;
+ }
+
+ public String getRouting() {
+ return routing;
+ }
+
+ public void setRouting(String routing) {
+ this.routing = routing;
+ }
+
+ public String getText() {
+ return text;
+ }
+
+ public void setText(String text) {
+ this.text = text;
+ }
+
+ public JoinField<String> getRelation() {
+ return relation;
+ }
+
+ public void setRelation(JoinField<String> relation) {
+ this.relation = relation;
+ }
+
+ public static final class StatementBuilder {
+ private String id;
+ private String text;
+ private String routing;
+ private JoinField<String> relation;
+
+ private StatementBuilder() {
+ }
+
+ public StatementBuilder withId(String id) {
+ this.id = id;
+ return this;
+ }
+
+ public StatementBuilder withRouting(String routing) {
+ this.routing = routing;
+ return this;
+ }
+
+ public StatementBuilder withText(String text) {
+ this.text = text;
+ return this;
+ }
+
+ public StatementBuilder withRelation(JoinField<String> relation) {
+ this.relation = relation;
+ return this;
+ }
+
+ public Statement build() {
+ Statement statement = new Statement();
+ statement.setId(id);
+ statement.setRouting(routing);
+ statement.setText(text);
+ statement.setRelation(relation);
+ return statement;
+ }
+ }
+}
+----
+<.> for routing related info see xref:elasticsearch/routing.adoc[Routing values]
+<.> a question can have answers and comments
+<.> an answer can have votes
+<.> the `JoinField` property is used to combine the name (_question_, _answer_, _comment_ or _vote_) of the relation with the parent id.
+The generic type must be the same as the `@Id` annotated property.
+====
+
+Spring Data Elasticsearch will build the following mapping for this class:
+
+====
+[source,json]
+----
+{
+ "statements": {
+ "mappings": {
+ "properties": {
+ "_class": {
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "routing": {
+ "type": "keyword"
+ },
+ "relation": {
+ "type": "join",
+ "eager_global_ordinals": true,
+ "relations": {
+ "question": [
+ "answer",
+ "comment"
+ ],
+ "answer": "vote"
+ }
+ },
+ "text": {
+ "type": "text"
+ }
+ }
+ }
+ }
+}
+----
+====
+
+[[elasticsearch.jointype.storing]]
+== Storing data
+
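+A minimal repository definition for the `Statement` class could look like the following sketch (the interface name is an assumption for this example):
+
+====
+[source,java]
+----
+interface StatementRepository extends ElasticsearchRepository<Statement, String> {
+}
+----
+====
+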
+Given such a repository, the following code inserts a question, two answers, a comment and a vote:
+
+====
+[source,java]
+----
+void init() {
+ repository.deleteAll();
+
+ Statement savedWeather = repository.save(
+ Statement.builder()
+ .withText("How is the weather?")
+ .withRelation(new JoinField<>("question")) <1>
+ .build());
+
+ Statement sunnyAnswer = repository.save(
+ Statement.builder()
+ .withText("sunny")
+ .withRelation(new JoinField<>("answer", savedWeather.getId())) <2>
+ .build());
+
+ repository.save(
+ Statement.builder()
+ .withText("rainy")
+ .withRelation(new JoinField<>("answer", savedWeather.getId())) <3>
+ .build());
+
+ repository.save(
+ Statement.builder()
+ .withText("I don't like the rain")
+ .withRelation(new JoinField<>("comment", savedWeather.getId())) <4>
+ .build());
+
+ repository.save(
+ Statement.builder()
+ .withText("+1 for the sun")
+ .withRouting(savedWeather.getId())
+ .withRelation(new JoinField<>("vote", sunnyAnswer.getId())) <5>
+ .build());
+}
+----
+<1> create a question statement
+<2> the first answer to the question
+<3> the second answer
+<4> a comment to the question
+<5> a vote for the first answer, this needs to have the routing set to the weather document, see xref:elasticsearch/routing.adoc[Routing values].
+====
+
+[[elasticsearch.jointype.retrieving]]
+== Retrieving data
+
+Currently native queries must be used to query the data, so there is no support from standard repository methods. xref:repositories/custom-implementations.adoc[] can be used instead.
+
+The following code shows as an example how to retrieve all entries that have a _vote_ (which must be _answers_, because only answers can have a vote) using an `ElasticsearchOperations` instance:
+
+====
+[source,java]
+----
+SearchHits<Statement> hasVotes() {
+
+ Query query = NativeQuery.builder()
+ .withQuery(co.elastic.clients.elasticsearch._types.query_dsl.Query.of(qb -> qb
+ .hasChild(hc -> hc
+ .type("vote")
+ .queryName("vote")
+ .query(matchAllQueryAsQuery())
+ .scoreMode(ChildScoreMode.None)
+ )))
+ .build();
+
+ return operations.search(query, Statement.class);
+}
+----
+====
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/misc.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/misc.adoc
new file mode 100644
index 0000000000..7f3ac8f0ff
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/misc.adoc
@@ -0,0 +1,453 @@
+[[elasticsearch.misc]]
+= Miscellaneous Elasticsearch Operation Support
+
+This chapter covers additional support for Elasticsearch operations that cannot be directly accessed via the repository interface.
+It is recommended to add those operations as a custom implementation as described in xref:repositories/custom-implementations.adoc[].
+
+[[elasticsearc.misc.index.settings]]
+== Index settings
+
+When creating Elasticsearch indices with Spring Data Elasticsearch different index settings can be defined by using the `@Setting` annotation.
+The following arguments are available:
+
+* `useServerConfiguration` does not send any settings parameters, so the Elasticsearch server configuration determines them.
+* `settingPath` refers to a JSON file defining the settings that must be resolvable in the classpath
+* `shards` the number of shards to use, defaults to _1_
+* `replicas` the number of replicas, defaults to _1_
+* `refreshInterval`, defaults to _"1s"_
+* `indexStoreType`, defaults to _"fs"_
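+
+As an illustration, a simple settings definition might look like the following sketch (the entity and the chosen values are illustrative):
+
+====
+[source,java]
+----
+@Document(indexName = "articles")
+@Setting(shards = 2, replicas = 1, refreshInterval = "5s")
+class Article {
+    // properties, getter and setter...
+}
+----
+====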
+
+It is also possible to define https://www.elastic.co/guide/en/elasticsearch/reference/7.11/index-modules-index-sorting.html[index sorting] (check the linked Elasticsearch documentation for the possible field types and values):
+
+====
+[source,java]
+----
+@Document(indexName = "entities")
+@Setting(
+ sortFields = { "secondField", "firstField" }, <.>
+ sortModes = { Setting.SortMode.max, Setting.SortMode.min }, <.>
+ sortOrders = { Setting.SortOrder.desc, Setting.SortOrder.asc },
+ sortMissingValues = { Setting.SortMissing._last, Setting.SortMissing._first })
+class Entity {
+ @Nullable
+ @Id private String id;
+
+ @Nullable
+ @Field(name = "first_field", type = FieldType.Keyword)
+ private String firstField;
+
+ @Nullable @Field(name = "second_field", type = FieldType.Keyword)
+ private String secondField;
+
+ // getter and setter...
+}
+----
+
+<.> when defining sort fields, use the name of the Java property (_firstField_), not the name that might be defined for Elasticsearch (_first_field_)
+<.> `sortModes`, `sortOrders` and `sortMissingValues` are optional, but if they are set, the number of entries must match the number of `sortFields` elements
+====
+
+[[elasticsearch.misc.mappings]]
+== Index Mapping
+
+When Spring Data Elasticsearch creates the index mapping with the `IndexOperations.createMapping()` methods, it uses the annotations described in xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.annotations[Mapping Annotation Overview], especially the `@Field` annotation.
+In addition to that it is possible to add the `@Mapping` annotation to a class.
+This annotation has the following properties:
+
+* `mappingPath` a classpath resource in JSON format; if this is not empty it is used as the mapping and no other mapping processing is done.
+* `enabled` when set to false, this flag is written to the mapping and no further processing is done.
+* `dateDetection` and `numericDetection` set the corresponding properties in the mapping when not set to `DEFAULT`.
+* `dynamicDateFormats` when this String array is not empty, it defines the date formats used for automatic date detection.
+* `runtimeFieldsPath` a classpath resource in JSON format containing the definition of runtime fields which is written to the index mappings, for example:
+
+====
+[source,json]
+----
+{
+ "day_of_week": {
+ "type": "keyword",
+ "script": {
+ "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))"
+ }
+ }
+}
+----
+====
+
+[[elasticsearch.misc.filter]]
+== Filter Builder
+
+A filter does not influence the score calculation and can be cached by Elasticsearch, which often improves query speed.
+
+====
+[source,java]
+----
+private ElasticsearchOperations operations;
+
+IndexCoordinates index = IndexCoordinates.of("sample-index");
+
+Query query = NativeQuery.builder()
+ .withQuery(q -> q
+ .matchAll(ma -> ma))
+ .withFilter( q -> q
+ .bool(b -> b
+ .must(m -> m
+ .term(t -> t
+ .field("id")
+ .value(documentId))
+ )))
+ .build();
+
+SearchHits<SampleEntity> sampleEntities = operations.search(query, SampleEntity.class, index);
+----
+====
+
+[[elasticsearch.scroll]]
+== Using Scroll For Big Result Set
+
+Elasticsearch has a scroll API for retrieving large result sets in chunks.
+This is internally used by Spring Data Elasticsearch to provide the implementation of the `<T> SearchHitsIterator<T> SearchOperations.searchForStream(Query query, Class<T> clazz, IndexCoordinates index)` method.
+
+====
+[source,java]
+----
+IndexCoordinates index = IndexCoordinates.of("sample-index");
+
+Query searchQuery = NativeQuery.builder()
+ .withQuery(q -> q
+ .matchAll(ma -> ma))
+ .withFields("message")
+ .withPageable(PageRequest.of(0, 10))
+ .build();
+
+SearchHitsIterator<SampleEntity> stream = elasticsearchOperations.searchForStream(searchQuery, SampleEntity.class,
+index);
+
+List<SearchHit<SampleEntity>> sampleEntities = new ArrayList<>();
+while (stream.hasNext()) {
+ sampleEntities.add(stream.next());
+}
+
+stream.close();
+----
+====
+
+There are no methods in the `SearchOperations` API to access the scroll id. If it should be necessary to access this, the following methods of the `AbstractElasticsearchTemplate` can be used (this is the base implementation for the different `ElasticsearchOperations` implementations):
+
+====
+[source,java]
+----
+
+@Autowired ElasticsearchOperations operations;
+
+AbstractElasticsearchTemplate template = (AbstractElasticsearchTemplate)operations;
+
+IndexCoordinates index = IndexCoordinates.of("sample-index");
+
+Query query = NativeQuery.builder()
+ .withQuery(q -> q
+ .matchAll(ma -> ma))
+ .withFields("message")
+ .withPageable(PageRequest.of(0, 10))
+ .build();
+
+SearchScrollHits<SampleEntity> scroll = template.searchScrollStart(1000, query, SampleEntity.class, index);
+
+String scrollId = scroll.getScrollId();
+List<SearchHit<SampleEntity>> sampleEntities = new ArrayList<>();
+while (scroll.hasSearchHits()) {
+ sampleEntities.addAll(scroll.getSearchHits());
+ scrollId = scroll.getScrollId();
+ scroll = template.searchScrollContinue(scrollId, 1000, SampleEntity.class);
+}
+template.searchScrollClear(scrollId);
+----
+====
+
+To use the Scroll API with repository methods, the return type must be defined as `Stream` in the Elasticsearch Repository.
+The implementation of the method will then use the scroll methods from the `ElasticsearchTemplate`.
+
+====
+[source,java]
+----
+interface SampleEntityRepository extends Repository<SampleEntity, String> {
+
+    Stream<SampleEntity> findBy();
+
+}
+----
+====
+
+[[elasticsearch.misc.sorts]]
+== Sort options
+
+In addition to the default sort options described in xref:repositories/query-methods-details.adoc#repositories.paging-and-sorting[Paging and Sorting], Spring Data Elasticsearch provides the class `org.springframework.data.elasticsearch.core.query.Order` which derives from `org.springframework.data.domain.Sort.Order`.
+It offers additional parameters that can be sent to Elasticsearch when specifying the sorting of the result (see https://www.elastic.co/guide/en/elasticsearch/reference/7.15/sort-search-results.html).
+
+There also is the `org.springframework.data.elasticsearch.core.query.GeoDistanceOrder` class which can be used to have the result of a search operation ordered by geographical distance.
+
+If the class to be retrieved has a `GeoPoint` property named _location_, the following `Sort` would sort the results by distance to the given point:
+
+====
+[source,java]
+----
+Sort.by(new GeoDistanceOrder("location", new GeoPoint(48.137154, 11.5761247)))
+----
+====
+
+[[elasticsearch.misc.runtime-fields]]
+== Runtime Fields
+
+Starting with version 7.12, Elasticsearch offers runtime fields (https://www.elastic.co/guide/en/elasticsearch/reference/7.12/runtime.html).
+Spring Data Elasticsearch supports this in two ways:
+
+[[elasticsearch.misc.runtime-fields.index-mappings]]
+=== Runtime field definitions in the index mappings
+
+The first way to define runtime fields is by adding the definitions to the index mappings (see https://www.elastic.co/guide/en/elasticsearch/reference/7.12/runtime-mapping-fields.html).
+To use this approach in Spring Data Elasticsearch the user must provide a JSON file that contains the corresponding definition, for example:
+
+.runtime-fields.json
+====
+[source,json]
+----
+{
+ "day_of_week": {
+ "type": "keyword",
+ "script": {
+ "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ROOT))"
+ }
+ }
+}
+----
+====
+
+The path to this JSON file, which must be present on the classpath, must then be set in the `@Mapping` annotation of the entity:
+
+====
+[source,java]
+----
+@Document(indexName = "runtime-fields")
+@Mapping(runtimeFieldsPath = "/runtime-fields.json")
+public class RuntimeFieldEntity {
+ // properties, getter, setter,...
+}
+
+----
+====
+
+[[elasticsearch.misc.runtime-fields.query]]
+=== Runtime fields definitions set on a Query
+
+The second way to define runtime fields is by adding the definitions to a search query (see https://www.elastic.co/guide/en/elasticsearch/reference/7.12/runtime-search-request.html).
+The following code example shows how to do this with Spring Data Elasticsearch:
+
+The entity used is a simple object that has a `price` property:
+
+====
+[source,java]
+----
+@Document(indexName = "some_index_name")
+public class SomethingToBuy {
+
+ private @Id @Nullable String id;
+ @Nullable @Field(type = FieldType.Text) private String description;
+ @Nullable @Field(type = FieldType.Double) private Double price;
+
+ // getter and setter
+}
+
+----
+====
+
+The following query uses a runtime field that calculates a `priceWithTax` value by adding 19% to the price, and uses this value in the search query to find all entities where `priceWithTax` is greater than or equal to a given value:
+
+====
+[source,java]
+----
+RuntimeField runtimeField = new RuntimeField("priceWithTax", "double", "emit(doc['price'].value * 1.19)");
+Query query = new CriteriaQuery(new Criteria("priceWithTax").greaterThanEqual(16.5));
+query.addRuntimeField(runtimeField);
+
+SearchHits<SomethingToBuy> searchHits = operations.search(query, SomethingToBuy.class);
+----
+====
+
+This works with every implementation of the `Query` interface.
+
+[[elasticsearch.misc.point-in-time]]
+== Point In Time (PIT) API
+
+`ElasticsearchOperations` supports the point in time API of Elasticsearch (see https://www.elastic.co/guide/en/elasticsearch/reference/8.3/point-in-time-api.html).
+The following code snippet shows how to use this feature with a fictional `Person` class:
+
+====
+[source,java]
+----
+ElasticsearchOperations operations; // autowired
+Duration tenSeconds = Duration.ofSeconds(10);
+
+String pit = operations.openPointInTime(IndexCoordinates.of("person"), tenSeconds); <.>
+
+// create query for the pit
+Query query1 = new CriteriaQueryBuilder(Criteria.where("lastName").is("Smith"))
+ .withPointInTime(new Query.PointInTime(pit, tenSeconds)) <.>
+ .build();
+SearchHits<Person> searchHits1 = operations.search(query1, Person.class);
+// do something with the data
+
+// create 2nd query for the pit, use the id returned in the previous result
+Query query2 = new CriteriaQueryBuilder(Criteria.where("lastName").is("Miller"))
+ .withPointInTime(
+ new Query.PointInTime(searchHits1.getPointInTimeId(), tenSeconds)) <.>
+ .build();
+SearchHits<Person> searchHits2 = operations.search(query2, Person.class);
+// do something with the data
+
+operations.closePointInTime(searchHits2.getPointInTimeId()); <.>
+
+----
+
+<.> create a point in time for an index (can be multiple names) and a keep-alive duration and retrieve its id
+<.> pass that id into the query to search together with the next keep-alive value
+<.> for the next query, use the id returned from the previous search
+<.> when done, close the point in time using the last returned id
+====
+
+[[elasticsearch.misc.searchtemplates]]
+== Search Template support
+
+Use of the search template API is supported.
+To use this, it is first necessary to create a stored script.
+The `ElasticsearchOperations` interface extends `ScriptOperations` which provides the necessary functions.
+The example used here assumes that we have a `Person` entity with a property named `firstName`.
+A search template script can be saved like this:
+
+====
+[source,java]
+----
+import org.springframework.data.elasticsearch.core.ElasticsearchOperations;
+import org.springframework.data.elasticsearch.core.script.Script;
+
+operations.putScript( <.>
+ Script.builder()
+ .withId("person-firstname") <.>
+ .withLanguage("mustache") <.>
+ .withSource(""" <.>
+ {
+ "query": {
+ "bool": {
+ "must": [
+ {
+ "match": {
+ "firstName": "{{firstName}}" <.>
+ }
+ }
+ ]
+ }
+ },
+ "from": "{{from}}", <.>
+ "size": "{{size}}" <.>
+ }
+ """)
+ .build()
+);
+----
+
+<.> Use the `putScript()` method to store a search template script
+<.> The name / id of the script
+<.> Scripts that are used in search templates must be in the _mustache_ language.
+<.> The script source
+<.> The search parameter in the script
+<.> Paging request offset
+<.> Paging request size
+====
+
+To use a search template in a search query, Spring Data Elasticsearch provides the `SearchTemplateQuery`, an implementation of the `org.springframework.data.elasticsearch.core.query.Query` interface.
+
+NOTE: Although `SearchTemplateQuery` is an implementation of the `Query` interface, not all of the functionality provided by the base class is available for a `SearchTemplateQuery`, such as setting a `Pageable` or a `Sort`. Values for this functionality must be added to the stored script, as shown in the following example for paging parameters. If these values are set on the `Query` object, they will be ignored.
+
+In the following code, we will add a call using a search template query to a custom repository implementation (see
+xref:repositories/custom-implementations.adoc[]) as an example of how this can be integrated into a repository call.
+
+We first define the custom repository fragment interface:
+
+====
+[source,java]
+----
+interface PersonCustomRepository {
+ SearchPage<Person> findByFirstNameWithSearchTemplate(String firstName, Pageable pageable);
+}
+----
+====
+
+The implementation of this repository fragment looks like this:
+
+====
+[source,java]
+----
+public class PersonCustomRepositoryImpl implements PersonCustomRepository {
+
+ private final ElasticsearchOperations operations;
+
+ public PersonCustomRepositoryImpl(ElasticsearchOperations operations) {
+ this.operations = operations;
+ }
+
+ @Override
+ public SearchPage<Person> findByFirstNameWithSearchTemplate(String firstName, Pageable pageable) {
+
+ var query = SearchTemplateQuery.builder() <.>
+ .withId("person-firstname") <.>
+ .withParams(
+ Map.of( <.>
+ "firstName", firstName,
+ "from", pageable.getOffset(),
+ "size", pageable.getPageSize()
+ )
+ )
+ .build();
+
+ SearchHits<Person> searchHits = operations.search(query, Person.class); <.>
+
+ return SearchHitSupport.searchPageFor(searchHits, pageable);
+ }
+}
+----
+
+<.> Create a `SearchTemplateQuery`
+<.> Provide the id of the search template
+<.> The parameters are passed in a `Map`
+<.> Do the search in the same way as with the other query types.
+====
+
+[[elasticsearch.misc.nested-sort]]
+== Nested sort
+Spring Data Elasticsearch supports sorting within nested objects (https://www.elastic.co/guide/en/elasticsearch/reference/8.9/sort-search-results.html#nested-sorting).
+
+The following example, taken from the `org.springframework.data.elasticsearch.core.query.sort.NestedSortIntegrationTests` class, shows how to define the nested sort.
+
+====
+[source,java]
+----
+var filter = StringQuery.builder("""
+ { "term": {"movies.actors.sex": "m"} }
+ """).build();
+var order = new org.springframework.data.elasticsearch.core.query.Order(Sort.Direction.DESC,
+ "movies.actors.yearOfBirth")
+ .withNested(
+ Nested.builder("movies")
+ .withNested(
+ Nested.builder("movies.actors")
+ .withFilter(filter)
+ .build())
+ .build());
+
+var query = Query.findAll().addSort(Sort.by(order));
+
+----
+====
+
+About the filter query: it is not possible to use a `CriteriaQuery` here, as this query would be converted into an Elasticsearch nested query, which does not work in the filter context. So only a `StringQuery` or a `NativeQuery` can be used here. When using one of these, like the term query above, the Elasticsearch field names must be used, so take care when these are redefined with the `@Field(name="...")` definition.
+
+For the definition of the order path and the nested paths, the Java entity property names should be used.
diff --git a/src/main/antora/modules/ROOT/pages/elasticsearch/object-mapping.adoc b/src/main/antora/modules/ROOT/pages/elasticsearch/object-mapping.adoc
new file mode 100644
index 0000000000..6ca12728c0
--- /dev/null
+++ b/src/main/antora/modules/ROOT/pages/elasticsearch/object-mapping.adoc
@@ -0,0 +1,478 @@
+[[elasticsearch.mapping]]
+= Elasticsearch Object Mapping
+
+Spring Data Elasticsearch Object Mapping is the process that maps a Java object - the domain entity - into the JSON representation that is stored in Elasticsearch and back.
+The class that is internally used for this mapping is the
+`MappingElasticsearchConverter`.
+
+[[elasticsearch.mapping.meta-model]]
+== Meta Model Object Mapping
+
+The Metamodel based approach uses domain type information for reading/writing from/to Elasticsearch.
+This allows registering `Converter` instances for specific domain type mapping.
+
+[[elasticsearch.mapping.meta-model.annotations]]
+=== Mapping Annotation Overview
+
+The `MappingElasticsearchConverter` uses metadata to drive the mapping of objects to documents.
+The metadata is taken from the entity's properties which can be annotated.
+
+The following annotations are available:
+
+* `@Document`: Applied at the class level to indicate this class is a candidate for mapping to the database.
+The most important attributes are (check the API documentation for the complete list of attributes):
+** `indexName`: the name of the index to store this entity in.
+This can contain a SpEL template expression like `"log-#{T(java.time.LocalDate).now().toString()}"`
+** `createIndex`: flag whether to create an index on repository bootstrapping.
+Default value is _true_.
+See xref:elasticsearch/repositories/elasticsearch-repositories.adoc#elasticsearch.repositories.autocreation[Automatic creation of indices with the corresponding mapping]
+
+
+* `@Id`: Applied at the field level to mark the field used for identity purpose.
+* `@Transient`, `@ReadOnlyProperty`, `@WriteOnlyProperty`: see the following section xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.annotations.read-write[Controlling which properties are written to and read from Elasticsearch] for detailed information.
+* `@PersistenceConstructor`: Marks a given constructor - even a package protected one - to use when instantiating the object from the database.
+Constructor arguments are mapped by name to the key values in the retrieved Document.
+* `@Field`: Applied at the field level and defines properties of the field, most of the attributes map to the respective https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html[Elasticsearch Mapping] definitions (the following list is not complete, check the annotation Javadoc for a complete reference):
+** `name`: The name of the field as it will be represented in the Elasticsearch document, if not set, the Java field name is used.
+** `type`: The field type, can be one of _Text, Keyword, Long, Integer, Short, Byte, Double, Float, Half_Float, Scaled_Float, Date, Date_Nanos, Boolean, Binary, Integer_Range, Float_Range, Long_Range, Double_Range, Date_Range, Ip_Range, Object, Nested, Ip, TokenCount, Percolator, Flattened, Search_As_You_Type_.
+See https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html[Elasticsearch Mapping Types].
+If the field type is not specified, it defaults to `FieldType.Auto`.
+This means, that no mapping entry is written for the property and that Elasticsearch will add a mapping entry dynamically when the first data for this property is stored (check the Elasticsearch documentation for dynamic mapping rules).
+** `format`: One or more built-in date formats, see the next section xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.annotations.date-formats[Date format mapping].
+** `pattern`: One or more custom date formats, see the next section xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.annotations.date-formats[Date format mapping].
+** `store`: Flag whether the original field value should be stored in Elasticsearch, default value is _false_.
+** `analyzer`, `searchAnalyzer`, `normalizer`: for specifying custom analyzers and a normalizer.
+* `@GeoPoint`: Marks a field as _geo_point_ datatype.
+Can be omitted if the field is an instance of the `GeoPoint` class.
+* `@ValueConverter` defines a class to be used to convert the given property.
+Unlike a registered Spring `Converter`, this converts only the annotated property, not every property of the given type.
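+
+Putting some of these annotations together, a simple annotated entity might look like the following sketch (the class, its properties and the chosen field types are illustrative):
+
+====
+[source,java]
+----
+@Document(indexName = "books")
+class Book {
+
+    @Id
+    private String id;
+
+    @Field(type = FieldType.Text, analyzer = "english")
+    private String title;
+
+    @Field(type = FieldType.Keyword)
+    private String isbn;
+
+    @Field(type = FieldType.Date, format = DateFormat.year_month_day)
+    private LocalDate published;
+
+    // getter and setter...
+}
+----
+====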
+
+The mapping metadata infrastructure is defined in a separate spring-data-commons project that is technology agnostic.
+
+[[elasticsearch.mapping.meta-model.annotations.read-write]]
+==== Controlling which properties are written to and read from Elasticsearch
+
+This section details the annotations that define if the value of a property is written to or read from Elasticsearch.
+
+`@Transient`: A property annotated with this annotation will not be written to the mapping, its value will not be sent to Elasticsearch, and when documents are returned from Elasticsearch, this property will not be set in the resulting entity.
+
+`@ReadOnlyProperty`: A property with this annotation will not have its value written to Elasticsearch, but when returning data, the property will be filled with the value returned in the document from Elasticsearch.
+One use case for this are runtime fields defined in the index mapping.
+
+`@WriteOnlyProperty`: A property with this annotation will have its value stored in Elasticsearch but will not be set with any value when a document is read.
+This can be used for example for synthesized fields which should go into the Elasticsearch index but are not used elsewhere.
+
+[[elasticsearch.mapping.meta-model.annotations.date-formats]]
+==== Date format mapping
+
+Properties that derive from `TemporalAccessor` or are of type `java.util.Date` must either have a `@Field` annotation of type `FieldType.Date`, or a custom converter must be registered for the type.
+This paragraph describes the use of `FieldType.Date`.
+
+There are two attributes of the `@Field` annotation that define which date format information is written to the mapping (also see https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats[Elasticsearch Built In Formats] and https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#custom-date-formats[Elasticsearch Custom Date Formats])
+
+The `format` attribute is used to define at least one of the predefined formats.
+If it is not defined, then a default value of `date_optional_time` and `epoch_millis` is used.
+
+The `pattern` attribute can be used to add additional custom format strings.
+If you want to use only custom date formats, you must set the `format` property to empty `{}`.
+
+The following table shows the different attributes and the mapping created from their values:
+
+[cols=2*,options=header]
+|===
+| annotation
+| format string in Elasticsearch mapping
+
+| @Field(type=FieldType.Date)
+| "date_optional_time\|\|epoch_millis",
+
+| @Field(type=FieldType.Date, format=DateFormat.basic_date)
+| "basic_date"
+
+| @Field(type=FieldType.Date, format={DateFormat.basic_date, DateFormat.basic_time})
+| "basic_date\|\|basic_time"
+
+| @Field(type=FieldType.Date, pattern="dd.MM.uuuu")
+| "date_optional_time\|\|epoch_millis\|\|dd.MM.uuuu",
+
+| @Field(type=FieldType.Date, format={}, pattern="dd.MM.uuuu")
+| "dd.MM.uuuu"
+
+|===
+
+NOTE: If you are using a custom date format, you need to use _uuuu_ for the year instead of _yyyy_.
+This is due to a https://www.elastic.co/guide/en/elasticsearch/reference/current/migrate-to-java-time.html#java-time-migration-incompatible-date-formats[change in Elasticsearch 7].
+
+Check the code of the `org.springframework.data.elasticsearch.annotations.DateFormat` enum for a complete list of predefined values and their patterns.
+
+[[elasticsearch.mapping.meta-model.annotations.range]]
+==== Range types
+
+When a field is annotated with a type of one of _Integer_Range, Float_Range, Long_Range, Double_Range, Date_Range,_ or _Ip_Range_ the field must be an instance of a class that will be mapped to an Elasticsearch range, for example:
+
+====
+[source,java]
+----
+class SomePersonData {
+
+ @Field(type = FieldType.Integer_Range)
+ private ValidAge validAge;
+
+ // getter and setter
+}
+
+class ValidAge {
+ @Field(name="gte")
+ private Integer from;
+
+ @Field(name="lte")
+ private Integer to;
+
+ // getter and setter
+}
+----
+====
+
+As an alternative Spring Data Elasticsearch provides a `Range` class so that the previous example can be written as:
+
+====
+[source,java]
+----
+class SomePersonData {
+
+ @Field(type = FieldType.Integer_Range)
+ private Range<Integer> validAge;
+
+ // getter and setter
+}
+----
+====
+
+Supported classes for the type parameter `<T>` are `Integer`, `Long`, `Float`, `Double`, `Date` and classes that implement the
+`TemporalAccessor` interface.
+
+[[elasticsearch.mapping.meta-model.annotations.mapped-names]]
+==== Mapped field names
+
+Without further configuration, Spring Data Elasticsearch will use the property name of an object as field name in Elasticsearch.
+This can be changed for an individual field by using the `@Field` annotation on that property.
+
+It is also possible to define a `FieldNamingStrategy` in the configuration of the client (xref:elasticsearch/clients.adoc[Elasticsearch Clients]).
+If for example a `SnakeCaseFieldNamingStrategy` is configured, the property _sampleProperty_ of the object would be mapped to _sample_property_ in Elasticsearch.
+A `FieldNamingStrategy` applies to all entities; it can be overwritten by setting a specific name with `@Field` on a property.
+
+[[elasticsearch.mapping.meta-model.annotations.non-field-backed-properties]]
+==== Non-field-backed properties
+
+Normally the properties used in an entity are fields of the entity class.
+There might be cases, when a property value is calculated in the entity and should be stored in Elasticsearch.
+In this case, the getter method (`getProperty()`) can be annotated with the `@Field` annotation; in addition to that, the method must be annotated with `@AccessType(AccessType.Type.PROPERTY)`.
+The third annotation that is needed in such a case is `@WriteOnlyProperty`, as such a value is only written to Elasticsearch.
+A full example:
+
+====
+[source,java]
+----
+@Field(type = Keyword)
+@WriteOnlyProperty
+@AccessType(AccessType.Type.PROPERTY)
+public String getProperty() {
+ return "some value that is calculated here";
+}
+----
+====
+
+[[elasticsearch.mapping.meta-model.annotations.misc]]
+==== Other property annotations
+
+[[indexedindexname]]
+===== @IndexedIndexName
+
+This annotation can be set on a String property of an entity.
+This property will not be written to the mapping, it will not be stored in Elasticsearch, and its value will not be read from an Elasticsearch document.
+After an entity is persisted, for example with a call to `ElasticsearchOperations.save(T entity)`, the entity returned from that call contains in this property the name of the index the entity was saved to.
+This is useful when the index name is dynamically set by a bean, or when writing to a write alias.
+
+Putting some value into such a property does not set the index into which an entity is stored!
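+
+A minimal sketch (the entity and property names are made up for illustration):
+
+====
+[source,java]
+----
+@Document(indexName = "articles")
+class Article {
+
+    @Id
+    private String id;
+
+    @IndexedIndexName
+    private String indexedIndexName;
+
+    // getter and setter
+}
+----
+====
+
+After a call like `operations.save(article)`, the returned entity carries in this property the name of the index the document was actually written to, for example the concrete index behind a write alias.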
+
+[[elasticsearch.mapping.meta-model.rules]]
+=== Mapping Rules
+
+[[elasticsearch.mapping.meta-model.rules.typehints]]
+==== Type Hints
+
+Mapping uses _type hints_ embedded in the document sent to the server to allow generic type mapping.
+Those type hints are represented as `_class` attributes within the document and are written for each aggregate root.
+
+.Type Hints
+====
+[source,java]
+----
+public class Person { <1>
+ @Id String id;
+ String firstname;
+ String lastname;
+}
+----
+
+[source,json]
+----
+{
+ "_class" : "com.example.Person", <1>
+ "id" : "cb7bef",
+ "firstname" : "Sarah",
+ "lastname" : "Connor"
+}
+----
+
+<1> By default, the domain type's class name is used for the type hint.
+====
+
+Type hints can be configured to hold custom information.
+Use the `@TypeAlias` annotation to do so.
+
+NOTE: Make sure to add types with `@TypeAlias` to the initial entity set (`AbstractElasticsearchConfiguration#getInitialEntitySet`) to already have entity information available when first reading data from the store.
+
+.Type Hints with Alias
+====
+[source,java]
+----
+@TypeAlias("human") <1>
+public class Person {
+
+ @Id String id;
+ // ...
+}
+----
+
+[source,json]
+----
+{
+ "_class" : "human", <1>
+ "id" : ...
+}
+----
+
+<1> The configured alias is used when writing the entity.
+====
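+
+To make the alias known before the first read (see the NOTE above), the type can be registered in the initial entity set; a sketch, assuming the `getInitialEntitySet()` override available in the configuration support classes:
+
+====
+[source,java]
+----
+@Override
+protected Set<Class<?>> getInitialEntitySet() {
+    // make the "human" alias known before the first read operation
+    return Collections.singleton(Person.class);
+}
+----
+====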
+
+NOTE: Type hints will not be written for nested objects unless the property's type is `Object`, an interface, or the actual value type does not match the property's declaration.
+
+[[disabling-type-hints]]
+===== Disabling Type Hints
+
+It may be necessary to disable writing of type hints when the index that should be used already exists, has no type hints defined in its mapping, and has the mapping mode set to strict.
+In this case, writing the type hint will produce an error, as the field cannot be added automatically.
+
+Type hints can be disabled for the whole application by overriding the method `writeTypeHints()` in a configuration class derived from `AbstractElasticsearchConfiguration` (see xref:elasticsearch/clients.adoc[Elasticsearch Clients]).
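+
+A minimal sketch of such an override (assuming the `writeTypeHints()` callback returns a boolean; check the signature in the version you use):
+
+====
+[source,java]
+----
+@Override
+protected boolean writeTypeHints() {
+    return false; // no _class attributes will be written to documents
+}
+----
+====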
+
+As an alternative, they can be disabled for a single index with the `@Document` annotation:
+
+====
+[source,java]
+----
+@Document(indexName = "index", writeTypeHint = WriteTypeHint.FALSE)
+----
+====
+
+WARNING: We strongly advise against disabling type hints.
+Only do this if you are forced to.
+Disabling type hints can lead to documents not being retrieved correctly from Elasticsearch in the case of polymorphic data, or document retrieval may fail completely.
+
+[[elasticsearch.mapping.meta-model.rules.geospatial]]
+==== Geospatial Types
+
+Geospatial types like `Point` and `GeoPoint` are converted into _lat/lon_ pairs.
+
+.Geospatial types
+====
+[source,java]
+----
+public class Address {
+ String city, street;
+ Point location;
+}
+----
+
+[source,json]
+----
+{
+ "city" : "Los Angeles",
+ "street" : "2800 East Observatory Road",
+ "location" : { "lat" : 34.118347, "lon" : -118.3026284 }
+}
+----
+====
+
+[[elasticsearch.mapping.meta-model.rules.geojson]]
+==== GeoJson Types
+
+Spring Data Elasticsearch supports the GeoJson types by providing an interface `GeoJson` and implementations for the different geometries.
+They are mapped to Elasticsearch documents according to the GeoJson specification.
+The corresponding properties of the entity are specified in the index mappings as `geo_shape` when the index mappings are written (see the https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html[Elasticsearch documentation] as well).
+
+.GeoJson types
+====
+[source,java]
+----
+public class Address {
+
+ String city, street;
+ GeoJsonPoint location;
+}
+----
+
+[source,json]
+----
+{
+ "city": "Los Angeles",
+ "street": "2800 East Observatory Road",
+ "location": {
+ "type": "Point",
+ "coordinates": [-118.3026284, 34.118347]
+ }
+}
+----
+====
+
+The following GeoJson types are implemented:
+
+* `GeoJsonPoint`
+* `GeoJsonMultiPoint`
+* `GeoJsonLineString`
+* `GeoJsonMultiLineString`
+* `GeoJsonPolygon`
+* `GeoJsonMultiPolygon`
+* `GeoJsonGeometryCollection`
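+
+A property value can be built with the static factory methods of these implementations; a short sketch, assuming a `GeoJsonPoint.of(double x, double y)` factory that takes the coordinates in GeoJson order, longitude first:
+
+====
+[source,java]
+----
+Address address = new Address();
+// x = longitude, y = latitude, matching the GeoJson coordinate order
+address.location = GeoJsonPoint.of(-118.3026284, 34.118347);
+----
+====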
+
+[[elasticsearch.mapping.meta-model.rules.collections]]
+==== Collections
+
+The same mapping rules as for aggregate roots apply to values inside collections when it comes to _type hints_ and xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.conversions[Custom Conversions].
+
+.Collections
+====
+[source,java]
+----
+public class Person {
+
+ // ...
+
+ List<Person> friends;
+
+}
+----
+
+[source,json]
+----
+{
+ // ...
+
+ "friends" : [ { "firstname" : "Kyle", "lastname" : "Reese" } ]
+}
+----
+====
+
+[[elasticsearch.mapping.meta-model.rules.maps]]
+==== Maps
+
+The same mapping rules as for aggregate roots apply to values inside maps when it comes to _type hints_ and xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model.conversions[Custom Conversions].
+However, the map key needs to be a String to be processed by Elasticsearch.
+
+.Maps
+====
+[source,java]
+----
+public class Person {
+
+ // ...
+
+ Map<String, Address> knownLocations;
+
+}
+----
+
+[source,json]
+----
+{
+ // ...
+
+ "knownLocations" : {
+ "arrivedAt" : {
+ "city" : "Los Angeles",
+ "street" : "2800 East Observatory Road",
+ "location" : { "lat" : 34.118347, "lon" : -118.3026284 }
+ }
+ }
+}
+----
+====
+
+[[elasticsearch.mapping.meta-model.conversions]]
+=== Custom Conversions
+
+Looking at the `Configuration` from the xref:elasticsearch/object-mapping.adoc#elasticsearch.mapping.meta-model[previous section], `ElasticsearchCustomConversions` allows registering specific rules for mapping domain and simple types.
+
+.Meta Model Object Mapping Configuration
+====
+[source,java]
+----
+@Configuration
+public class Config extends ElasticsearchConfiguration {
+
+ @Override
+ public ClientConfiguration clientConfiguration() {
+ return ClientConfiguration.builder() //
+ .connectedTo("localhost:9200") //
+ .build();
+ }
+
+ @Bean
+ @Override
+ public ElasticsearchCustomConversions elasticsearchCustomConversions() {
+ return new ElasticsearchCustomConversions(
+ Arrays.asList(new AddressToMap(), new MapToAddress())); <1>
+ }
+
+ @WritingConverter <2>
+ static class AddressToMap implements Converter<Address, Map<String, Object>> {
+
+ @Override
+ public Map<String, Object> convert(Address source) {
+
+ LinkedHashMap<String, Object> target = new LinkedHashMap<>();
+ target.put("ciudad", source.getCity());
+ // ...
+
+ return target;
+ }
+ }
+
+ @ReadingConverter <3>
+ static class MapToAddress implements Converter<Map<String, Object>, Address> {