diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 0000000000..0425515608
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,55 @@
+---
+name: Bug Report
+about: If things aren't working as expected.
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+## Bug Report
+
+
+
+#### What did you do?
+
+
+
+#### What did you expect to see?
+
+
+
+#### What did you see instead? Under which circumstances?
+
+
+
+#### Environment
+
+**Kubernetes cluster type:**
+
+
+
+`$ Mention the java-operator-sdk version from the pom.xml file`
+
+
+
+`$ java -version`
+
+
+
+`$ kubectl version`
+
+
+
+#### Possible Solution
+
+
+
+#### Additional context
+
+
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/enhancement-request.md b/.github/ISSUE_TEMPLATE/enhancement-request.md
new file mode 100644
index 0000000000..11538f9d59
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/enhancement-request.md
@@ -0,0 +1,20 @@
+---
+name: Enhancement request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..60fc50a926
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,16 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "maven"
+ directory: "/"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/.github/main.workflow b/.github/main.workflow
deleted file mode 100644
index 2748505a6e..0000000000
--- a/.github/main.workflow
+++ /dev/null
@@ -1,10 +0,0 @@
-workflow "Maven build" {
- resolves = ["GitHub Action for Maven"]
- on = "push"
-}
-
-action "GitHub Action for Maven" {
- uses = "LucaFeger/action-maven-cli@765e218a50f02a12a7596dc9e7321fc385888a27"
- runs = "mvn"
- args = "package"
-}
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000000..25b234846a
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,48 @@
+name: Build with Kubernetes
+
+env:
+ MAVEN_ARGS: -V -ntp -e
+
+on:
+ workflow_call:
+
+jobs:
+ integration_tests:
+ strategy:
+ matrix:
+ java: [ 17, 21, 25 ]
+      # Use the latest versions supported by minikube; otherwise minikube will
+      # end up throttling requests to GitHub and the workflow will fail.
+      # Minikube only makes such requests when a version is not officially supported.
+      kubernetes: [ '1.30.12', '1.31.8', '1.32.4', '1.33.1' ]
+ uses: ./.github/workflows/integration-tests.yml
+ with:
+ java-version: ${{ matrix.java }}
+ kube-version: ${{ matrix.kubernetes }}
+
+ httpclient-tests:
+ strategy:
+ matrix:
+ httpclient: [ 'vertx', 'jdk', 'jetty' ]
+ uses: ./.github/workflows/integration-tests.yml
+ with:
+ java-version: 25
+ kube-version: '1.32.0'
+ http-client: ${{ matrix.httpclient }}
+ experimental: true
+
+ special_integration_tests:
+ name: "Special integration tests (${{ matrix.java }})"
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ java: [ 17, 21, 25 ]
+ steps:
+ - uses: actions/checkout@v5
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ distribution: temurin
+ java-version: ${{ matrix.java }}
+ - name: Run Special Integration Tests
+ run: ./mvnw ${MAVEN_ARGS} -B package -P minimal-watch-timeout-dependent-it --file pom.xml
diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml
new file mode 100644
index 0000000000..7aa92a409c
--- /dev/null
+++ b/.github/workflows/e2e-test.yml
@@ -0,0 +1,60 @@
+# Integration and end to end tests which runs locally and deploys the Operator to a Kubernetes
+# (Minikube) cluster and creates custom resources to verify the operator's functionality
+name: End to End tests
+on:
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - 'adr/**'
+ branches: [ main, next ]
+ push:
+ paths-ignore:
+ - 'docs/**'
+ - 'adr/**'
+ branches:
+ - main
+ - next
+
+jobs:
+ sample_operators_tests:
+ strategy:
+ matrix:
+ sample:
+ - "sample-operators/mysql-schema"
+ - "sample-operators/tomcat-operator"
+ - "sample-operators/webpage"
+ - "sample-operators/leader-election"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v5
+
+ - name: Setup Minikube-Kubernetes
+ uses: manusa/actions-setup-minikube@v2.14.0
+ with:
+ minikube version: v1.36.0
+        # Use the latest versions supported by minikube; otherwise minikube will
+        # end up throttling requests to GitHub and the workflow will fail.
+        # Minikube only makes such requests when a version is not officially supported.
+ kubernetes version: v1.33.1
+ github token: ${{ secrets.GITHUB_TOKEN }}
+ driver: docker
+
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ java-version: 25
+ distribution: temurin
+ cache: 'maven'
+
+ - name: Build SDK
+ run: mvn install -DskipTests
+
+ - name: Run integration tests in local mode
+ run: |
+ mvn test -P end-to-end-tests -pl ${{ matrix.sample }}
+
+ - name: Run E2E tests as a deployment
+ run: |
+ eval $(minikube -p minikube docker-env)
+ mvn jib:dockerBuild test -P end-to-end-tests -Dtest.deployment=remote -pl ${{ matrix.sample }}
diff --git a/.github/workflows/hugo.yaml b/.github/workflows/hugo.yaml
new file mode 100644
index 0000000000..2c0a63d50d
--- /dev/null
+++ b/.github/workflows/hugo.yaml
@@ -0,0 +1,85 @@
+# Sample workflow for building and deploying a Hugo site to GitHub Pages
+name: Deploy Hugo site to Pages
+
+on:
+ # Runs on pushes targeting the default branch
+ push:
+ branches:
+ - main
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+ group: "pages"
+ cancel-in-progress: false
+
+# Default to bash
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ # Build job
+ build:
+ runs-on: ubuntu-latest
+ env:
+ HUGO_VERSION: 0.145.0
+ steps:
+ - name: Install Hugo CLI
+ run: |
+ wget -O ${{ runner.temp }}/hugo.deb https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-amd64.deb \
+ && sudo dpkg -i ${{ runner.temp }}/hugo.deb
+ - name: Install Dart Sass
+ run: sudo snap install dart-sass
+ - name: Checkout
+ uses: actions/checkout@v5
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ - name: Setup Pages
+ id: pages
+ uses: actions/configure-pages@v5
+ - name: Install Node.js dependencies
+ working-directory: ./docs
+ run: |
+ [[ -f package-lock.json || -f npm-shrinkwrap.json ]] && npm ci || true
+ npm install -D autoprefixer
+ npm install -D postcss-cli
+ npm install -D postcss
+ - name: Build with Hugo
+ env:
+ # For maximum backward compatibility with Hugo modules
+ HUGO_ENVIRONMENT: production
+ HUGO_ENV: production
+ TZ: America/Los_Angeles
+ working-directory: ./docs
+ run: |
+ hugo \
+ --gc \
+ --minify \
+ --baseURL "${{ steps.pages.outputs.base_url }}/"
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v4
+ with:
+ path: ./docs/public
+
+ # Deployment job
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
new file mode 100644
index 0000000000..fdb8897c07
--- /dev/null
+++ b/.github/workflows/integration-tests.yml
@@ -0,0 +1,57 @@
+name: Parameterized Integration Tests
+
+on:
+ workflow_call:
+ inputs:
+ java-version:
+ type: string
+ required: true
+ kube-version:
+ type: string
+ required: true
+ http-client:
+ type: string
+ required: false
+ default: 'vertx'
+ experimental:
+ type: boolean
+ required: false
+ default: false
+ checkout-ref:
+ type: string
+ required: false
+ default: ''
+
+jobs:
+ integration_tests:
+ name: Integration tests (${{ inputs.java-version }}, ${{ inputs.kube-version }}, ${{ inputs.http-client }})
+ runs-on: ubuntu-latest
+ continue-on-error: ${{ inputs.experimental }}
+ timeout-minutes: 40
+ steps:
+ - uses: actions/checkout@v5
+ with:
+ ref: ${{ inputs.checkout-ref }}
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ distribution: temurin
+ java-version: ${{ inputs.java-version }}
+ cache: 'maven'
+ - name: Set up Minikube
+ uses: manusa/actions-setup-minikube@v2.14.0
+ with:
+ minikube version: 'v1.36.0'
+ kubernetes version: '${{ inputs.kube-version }}'
+ github token: ${{ github.token }}
+
+ - name: "${{inputs.it-category}} integration tests (kube: ${{ inputs.kube-version }} / java: ${{ inputs.java-version }} / client: ${{ inputs.http-client }})"
+ run: |
+ if [ -z "${{inputs.it-category}}" ]; then
+ it_profile="integration-tests"
+ else
+ it_profile="integration-tests-${{inputs.it-category}}"
+ fi
+ echo "Using profile: ${it_profile}"
+ ./mvnw ${MAVEN_ARGS} -T1C -B install -DskipTests -Pno-apt --file pom.xml
+ ./mvnw ${MAVEN_ARGS} -T1C -B package -P${it_profile} -Dfabric8-httpclient-impl.name=${{inputs.http-client}} --file pom.xml
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
new file mode 100644
index 0000000000..79660cfb1b
--- /dev/null
+++ b/.github/workflows/pr.yml
@@ -0,0 +1,34 @@
+name: Verify Pull Request
+
+env:
+ MAVEN_ARGS: -V -ntp -e
+
+concurrency:
+ group: ${{ github.ref }}-${{ github.workflow }}
+ cancel-in-progress: true
+on:
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - 'adr/**'
+ branches: [ main, v1, v2, v3, next ]
+ workflow_dispatch:
+jobs:
+ check_format_and_unit_tests:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ distribution: temurin
+ java-version: 25
+ cache: 'maven'
+ - name: Check code format
+ run: |
+ ./mvnw ${MAVEN_ARGS} spotless:check --file pom.xml
+ - name: Run unit tests
+ run: ./mvnw ${MAVEN_ARGS} clean install -Pno-apt --file pom.xml
+
+ build:
+ uses: ./.github/workflows/build.yml
diff --git a/.github/workflows/release-project-in-dir.yml b/.github/workflows/release-project-in-dir.yml
new file mode 100644
index 0000000000..0313aebe4d
--- /dev/null
+++ b/.github/workflows/release-project-in-dir.yml
@@ -0,0 +1,83 @@
+name: Release project in specified directory
+
+on:
+ workflow_call:
+ inputs:
+ project_dir:
+ type: string
+ required: true
+ version_branch:
+ type: string
+ required: true
+
+env:
+# set the target pom to use the input directory as root
+ MAVEN_ARGS: -V -ntp -e -f ${{ inputs.project_dir }}/pom.xml
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout "${{inputs.version_branch}}" branch
+ uses: actions/checkout@v5
+ with:
+ ref: "${{inputs.version_branch}}"
+
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ java-version: 17
+ distribution: temurin
+ cache: 'maven'
+ server-id: central
+ server-username: MAVEN_USERNAME
+ server-password: MAVEN_CENTRAL_TOKEN
+ gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
+ gpg-passphrase: MAVEN_GPG_PASSPHRASE
+
+ - name: Change version to release version
+ # Assume that RELEASE_VERSION will have form like: "v1.0.1". So we cut the "v"
+ run: |
+ mvn ${MAVEN_ARGS} versions:set -DnewVersion="${RELEASE_VERSION:1}" versions:commit -DprocessAllModules
+ env:
+ RELEASE_VERSION: ${{ github.event.release.tag_name }}
+
+ - name: Publish to Apache Maven Central
+ run: mvn package deploy -Prelease
+ env:
+ MAVEN_USERNAME: ${{ secrets.NEXUS_USERNAME }}
+ MAVEN_CENTRAL_TOKEN: ${{ secrets.NEXUS_PASSWORD }}
+ MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+
+ # This is separate job because there were issues with git after release step, was not able to commit changes.
+ update-working-version:
+ runs-on: ubuntu-latest
+ needs: publish
+    if: "!contains(github.event.release.tag_name, 'RC')" # not sure we should keep this RC exclusion
+ steps:
+ - name: Checkout "${{inputs.version_branch}}" branch
+ uses: actions/checkout@v5
+ with:
+ ref: "${{inputs.version_branch}}"
+
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ java-version: 17
+ distribution: temurin
+ cache: 'maven'
+
+ - name: Update version to new SNAPSHOT version
+ run: |
+ mvn ${MAVEN_ARGS} build-helper:parse-version versions:set -DnewVersion=\${parsedVersion.majorVersion}.\${parsedVersion.minorVersion}.\${parsedVersion.nextIncrementalVersion}-SNAPSHOT versions:commit -DprocessAllModules
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+ git commit -m "Set new SNAPSHOT version into pom files." -a
+ env:
+ RELEASE_VERSION: ${{ github.event.release.tag_name }}
+
+ - name: Push changes to branch
+ uses: ad-m/github-push-action@master
+ with:
+ branch: "${{inputs.version_branch}}"
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000000..e7826ce613
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,51 @@
+name: Release to Maven Central
+env:
+ MAVEN_ARGS: -V -ntp -e
+on:
+ release:
+ types: [ released ]
+jobs:
+
+ prepare-release:
+ runs-on: ubuntu-latest
+ env:
+ tmp_version_branch: ''
+ outputs:
+ version_branch: ${{ steps.set-version-branch.outputs.version_branch }}
+ steps:
+ - if: ${{ startsWith(github.event.release.tag_name, 'v1.' ) }}
+ run: |
+ echo "Setting version_branch to v1"
+ echo "tmp_version_branch=v1" >> "$GITHUB_ENV"
+ - if: ${{ startsWith(github.event.release.tag_name, 'v2.' ) }}
+ run: |
+ echo "Setting version_branch to v2"
+ echo "tmp_version_branch=v2" >> "$GITHUB_ENV"
+ - if: ${{ startsWith(github.event.release.tag_name, 'v3.' ) }}
+ run: |
+ echo "Setting version_branch to v3"
+ echo "tmp_version_branch=v3" >> "$GITHUB_ENV"
+ - if: ${{ startsWith(github.event.release.tag_name, 'v4.' ) }}
+ run: |
+ echo "Setting version_branch to v4"
+ echo "tmp_version_branch=v4" >> "$GITHUB_ENV"
+ - if: ${{ startsWith(github.event.release.tag_name, 'v5.' ) }}
+ run: |
+ echo "Setting version_branch to main"
+ echo "tmp_version_branch=main" >> "$GITHUB_ENV"
+ - if: ${{ env.tmp_version_branch == '' }}
+ name: Fail if version_branch is not set
+ run: |
+ echo "Failed to find appropriate branch to release ${{github.event.release.tag_name}} from"
+ exit 1
+ - id: set-version-branch
+ name: Set version_branch if matched
+ run: echo "version_branch=${{env.tmp_version_branch}}" >> $GITHUB_OUTPUT
+
+ release-sdk:
+ needs: prepare-release
+ uses: ./.github/workflows/release-project-in-dir.yml
+ secrets: inherit
+ with:
+ version_branch: ${{needs.prepare-release.outputs.version_branch}}
+ project_dir: '.'
diff --git a/.github/workflows/snapshot-releases.yml b/.github/workflows/snapshot-releases.yml
new file mode 100644
index 0000000000..0f560dd2cb
--- /dev/null
+++ b/.github/workflows/snapshot-releases.yml
@@ -0,0 +1,50 @@
+name: Test & Release Snapshot to Maven Central
+
+env:
+ MAVEN_ARGS: -V -ntp -e
+
+concurrency:
+ group: ${{ github.ref }}-${{ github.workflow }}
+ cancel-in-progress: true
+on:
+ push:
+ paths-ignore:
+ - 'docs/**'
+ branches: [ main, v1, v2, v3, next ]
+ workflow_dispatch:
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ distribution: temurin
+ java-version: 21
+ cache: 'maven'
+ - name: Build and test project
+ run: ./mvnw ${MAVEN_ARGS} clean install --file pom.xml
+ release-snapshot:
+ runs-on: ubuntu-latest
+ needs: test
+ steps:
+ - uses: actions/checkout@v5
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ java-version: 21
+ distribution: temurin
+ cache: 'maven'
+ server-id: central
+ server-username: MAVEN_USERNAME
+ server-password: MAVEN_CENTRAL_TOKEN
+ gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }}
+ gpg-passphrase: MAVEN_GPG_PASSPHRASE
+
+ - name: Publish to Apache Maven Central
+ run: mvn package deploy -Prelease
+ env:
+ MAVEN_USERNAME: ${{ secrets.NEXUS_USERNAME }}
+ MAVEN_CENTRAL_TOKEN: ${{ secrets.NEXUS_PASSWORD }}
+ MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml
new file mode 100644
index 0000000000..132575edaa
--- /dev/null
+++ b/.github/workflows/sonar.yml
@@ -0,0 +1,44 @@
+name: Sonar
+
+env:
+ MAVEN_ARGS: -V -ntp -e
+
+concurrency:
+ group: ${{ github.ref }}-${{ github.workflow }}
+ cancel-in-progress: true
+on:
+ push:
+ paths-ignore:
+ - 'docs/**'
+ - 'adr/**'
+ branches: [ main ]
+ pull_request:
+ paths-ignore:
+ - 'docs/**'
+ - 'adr/**'
+ types: [ opened, synchronize, reopened ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ if: ${{ ( github.event_name == 'push' ) || ( github.event_name == 'pull_request' && github.event.pull_request.head.repo.owner.login == 'java-operator-sdk' ) }}
+ steps:
+ - uses: actions/checkout@v5
+ - name: Set up Java and Maven
+ uses: actions/setup-java@v5
+ with:
+ distribution: temurin
+ java-version: 25
+ cache: 'maven'
+ - name: Cache SonarCloud packages
+ uses: actions/cache@v4
+ with:
+ path: ~/.sonar/cache
+ key: ${{ runner.os }}-sonar
+ restore-keys: ${{ runner.os }}-sonar
+ - name: Build and analyze
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any
+ SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+ run: mvn -B org.jacoco:jacoco-maven-plugin:prepare-agent clean install verify org.jacoco:jacoco-maven-plugin:report org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=java-operator-sdk_java-operator-sdk
+
diff --git a/.gitignore b/.gitignore
index db846856f4..638e4a93f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,17 @@
target/
*.iml
-.idea/
\ No newline at end of file
+.idea/
+
+# Eclipse
+.settings/
+.classpath
+.project
+.cache/
+
+# VSCode
+.factorypath
+
+.mvn/wrapper/maven-wrapper.jar
+
+.java-version
+.aider*
diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000000..b901097f2d
--- /dev/null
+++ b/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2007-present the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ private static final String WRAPPER_VERSION = "0.5.6";
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+ "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
+ String username = System.getenv("MVNW_USERNAME");
+ char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
+ Authenticator.setDefault(new Authenticator() {
+ @Override
+ protected PasswordAuthentication getPasswordAuthentication() {
+ return new PasswordAuthentication(username, password);
+ }
+ });
+ }
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000000..8c79a83ae4
--- /dev/null
+++ b/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.4/apache-maven-3.8.4-bin.zip
+wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index c500b6ec91..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: java
-dist: bionic
-cache:
- directories:
- - $HOME/.m2
-before_install:
-- echo $GPGKEY | base64 --decode | gpg --import
-script:
-- mvn deploy --settings=maven-settings.xml -Prelease
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000000..797938cf3c
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,12 @@
+{
+ "java.format.settings.url": "https://raw.githubusercontent.com/google/styleguide/gh-pages/eclipse-java-google-style.xml",
+ "java.completion.importOrder": [
+ "java",
+ "javax",
+ "org",
+ "io",
+ "com",
+ "",
+ "#",
+ ]
+}
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..f3cd19aa9d
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,134 @@
+# Code of Conduct
+
+All participants to the Java Operator SDK project are required to comply with
+the following code of conduct, which is based on v2.0 of the [Contributor
+Covenant](https://www.contributor-covenant.org/).
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to adam.sandor@container-solutions.com or any of the project admins.
+
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..c3a9e63545
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,75 @@
+# Contributing To Java Operator SDK
+
+Firstly, big thanks for considering contributing to the project. We really hope to make this into a
+community project and to do that we need your help!
+
+## Code of Conduct
+
+We are serious about making this a welcoming, happy project. We will not tolerate discrimination,
+aggressive or insulting behaviour.
+
+To this end, the project and everyone participating in it is bound by the [Code of
+Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report
+unacceptable behaviour to any of the project admins or adam.sandor@container-solutions.com.
+
+## Bugs
+
+If you find a bug, please [open an issue](https://github.com/operator-framework/java-operator-sdk/issues)! Do try
+to include all the details needed to recreate your problem. This is likely to include:
+
+ - The version of the Operator SDK being used
+ - The exact platform and version of the platform that you're running on
+ - The steps taken to cause the bug
+
+## Building Features and Documentation
+
+If you're looking for something to work on, take a look at the issue tracker, in particular any items
+labelled [good first issue](https://github.com/operator-framework/java-operator-sdk/labels/good%20first%20issue).
+Please leave a comment on the issue to mention that you have started work, in order to avoid
+multiple people working on the same issue.
+
+If you have an idea for a feature - whether or not you have time to work on it - please also open an
+issue describing your feature and label it "enhancement". We can then discuss it as a community and
+see what can be done. Please be aware that some features may not align with the project goals and
+might therefore be closed. In particular, please don't start work on a new feature without
+discussing it first to avoid wasting effort. We do commit to listening to all proposals and will do
+our best to work something out!
+
+Once you've got the go ahead to work on a feature, you can start work. Feel free to communicate with
+the team via updates on the issue tracker or the [Discord channel](https://discord.gg/DacEhAy) and ask for feedback, pointers etc.
+Once you're happy with your code, go ahead and open a Pull Request.
+
+## Pull Request Process
+
+First, please format your commit messages so that they follow the [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) format.
+
+On opening a PR, a GitHub action will execute the test suite against the new code. All code is
+required to pass the tests, and new code must be accompanied by new tests.
+
+All PRs have to be reviewed and signed off by another developer before being merged to the master
+branch. This review will likely ask for some changes to the code - please don't be alarmed or upset
+at this; it is expected that all PRs will need tweaks, and this is a normal part of the process.
+
+The PRs are checked to be compliant with the Java Google code style.
+
+Be aware that all Operator SDK code is released under the [Apache 2.0 licence](LICENSE).
+
+## Development environment setup
+
+### Code style
+
+The SDK modules and samples are formatted to follow the Java Google code style.
+On every `compile` the code gets formatted automatically,
+however, to make things simpler (i.e. avoid getting a PR rejected simply because of code style issues), you can import one of the following code style schemes based on the IDE you use:
+
+- for *Intellij IDEA*
+ install [google-java-format](https://plugins.jetbrains.com/plugin/8527-google-java-format) plugin
+- for *Eclipse*
+  follow [these instructions](https://github.com/google/google-java-format?tab=readme-ov-file#eclipse)
+
+## Thanks
+
+These guidelines were based on several sources, including
+[Atom](https://github.com/atom/atom/blob/master/CONTRIBUTING.md), [PurpleBooth's
+advice](https://gist.github.com/PurpleBooth/b24679402957c63ec426) and the [Contributor
+Covenant](https://www.contributor-covenant.org/).
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000000..3a63d7d7d0
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,13 @@
+approvers:
+- csviri
+- metacosm
+- andreaTP
+- xstefank
+reviewers:
+- gyfora
+- mbalassi
+- scrocquesel
+- csviri
+- metacosm
+- xstefank
+
diff --git a/README.md b/README.md
index 0442d211fb..5bb2758ae5 100644
--- a/README.md
+++ b/README.md
@@ -1,171 +1,85 @@
-# java-operator-sdk
-[](https://travis-ci.org/ContainerSolutions/java-operator-sdk)
+# 
-SDK for building Kubernetes Operators in Java. Inspired by [operator-sdk](https://github.com/operator-framework/operator-sdk).
-In this first iteration we aim to provide a framework which handles the reconciliation loop by dispatching events to
-a Controller written by the user of the framework.
+
+[](https://kubernetes.slack.com/archives/CAW0GV7A5 "get invite here: https://communityinviter.com/apps/kubernetes/community" )
+[](https://discord.com/channels/723455000604573736)
-The Controller only contains the logic to create, update and delete the actual resources related to the CRD.
+# Build Kubernetes Operators in Java Without Hassle
-## Roadmap
+Java Operator SDK is a production-ready framework that makes implementing Kubernetes Operators in Java easy.
-Feature we would like to implement and invite the community to help us implement in the future:
+It provides a controller runtime, support for testing operators, and related tooling. In addition to that, implementing
+conversion hooks and dynamic admission controllers is supported as a separate project
+(and much more, see related projects section).
-* ~~Spring Boot support~~
-* Testing support
-* Class generation from CRD to POJO
+Under the hood it uses the excellent [Fabric8 Kubernetes Client](https://github.com/fabric8io/kubernetes-client),
+which provides additional facilities, like generating CRD from source code (and vice versa).
-## Usage
+
-We have several sample Operators under the samples directory:
-* *basic*: Minimal Operator implementation which only parses the Custom Resource and prints to stdout.
-Implemented with and without Spring Boot support. The two samples share the common module.
-* *webserver*: More realistic example creating an nginx webserver from a Custom Resource containing html code.
+Java Operator SDK is a CNCF project as part of [Operator Framework](https://github.com/operator-framework).
-Add dependency to your project:
+## Documentation
-```xml
-
- com.github.containersolutions
- operator-framework
- {see https://search.maven.org/search?q=a:operator-framework for latest version}
-
-```
+Documentation can be found on the **[JOSDK WebSite](https://javaoperatorsdk.io/)**.
-Main method initializing the Operator and registering a controller..
+## Contact us
-```java
-public class Runner {
+Join us on [Discord](https://discord.gg/DacEhAy) or feel free to ask any question on
+[Kubernetes Slack Operator Channel](https://kubernetes.slack.com/archives/CAW0GV7A5)
- public static void main(String[] args) {
- Operator operator = new Operator(new DefaultKubernetesClient());
- operator.registerController(new CustomServiceController());
- }
-}
-```
+**Meet us** every other Tuesday 15:00 CEST (from 29.10.2024) at our **community meeting** on [Zoom](https://zoom.us/j/8415370125)
+(Password in the Discord channel, or just ask for it there!)
-The Controller implements the business logic and describes all the classes needed to handle the CRD.
+## How to Contribute
-```java
-@Controller(customResourceClass = WebServer.class,
- crdName = "webservers.sample.javaoperatorsdk",
- customResourceListClass = WebServerList.class,
- customResourceDonebaleClass = WebServerDoneable.class)
-public class WebServerController implements ResourceController {
+See the [contribution](https://javaoperatorsdk.io/docs/contributing) guide on the website.
- @Override
- public boolean deleteResource(CustomService resource) {
- // ... your logic ...
- return true;
- }
-
- // Return the changed resource, so it gets updated. See javadoc for details.
- @Override
- public Optional createOrUpdateResource(CustomService resource) {
- // ... your logic ...
- return resource;
- }
-}
-```
+## What is Java Operator SDK
-Our custom resource java representation
+Java Operator SDK is a higher level framework and related tooling to support writing Kubernetes Operators in Java.
+It makes it easy to implement best practices and patterns for an Operator. Features include:
-```java
-public class WebServer extends CustomResource {
+* Optimal handling of Kubernetes API events
+* Handling dependent resources, related events, and caching.
+* Automatic Retries
+* Smart event scheduling
+* Easy to use Error Handling
+* ... and everything that a batteries included framework needs
- private WebServerSpec spec;
+For all features and their usage see the [related sections on the website](https://javaoperatorsdk.io/docs/documentation/).
- private WebServerStatus status;
+## Related Projects
- public WebServerSpec getSpec() {
- return spec;
- }
+* Quarkus Extension: https://github.com/quarkiverse/quarkus-operator-sdk
+* Spring Boot Starter: https://github.com/java-operator-sdk/operator-framework-spring-boot-starter
+* Kubernetes Glue Operator: https://github.com/java-operator-sdk/kubernetes-glue-operator
+  Meta-operator that builds upon JOSDK Workflows and Dependent Resources features and
+  allows creating operators by simply applying a custom resource, thus in a language-independent way.
+* Kubernetes Webhooks Framework: https://github.com/java-operator-sdk/kubernetes-webooks-framework
+ Framework to implement Admission Controllers and Conversion Hooks.
+* Operator SDK plugin: https://github.com/operator-framework/java-operator-plugins
- public void setSpec(WebServerSpec spec) {
- this.spec = spec;
- }
+## Projects using JOSDK
- public WebServerStatus getStatus() {
- return status;
- }
+While we know of multiple projects using JOSDK in production, we don't want to presume these
+projects want to advertise that fact here. For this reason, we ask that if you'd like your project
+to be featured in this section, please open a PR, adding a link to and short description of your
+project, as shown below:
- public void setStatus(WebServerStatus status) {
- this.status = status;
- }
-}
-
-public class WebServerSpec {
-
- private String html;
-
- public String getHtml() {
- return html;
- }
-
- public void setHtml(String html) {
- this.html = html;
- }
-}
-```
-
-## Spring Boot Support
-
-We provide a spring boot starter to automatically handle bean registration, and registering various components as beans.
-To use it just include the following dependency to you project:
-
-```
-
- com.github.containersolutions
- spring-boot-operator-framework-starter
- [version]
-
-```
-
-Note that controllers needs to be registered as beans in the Spring context. For example adding the `@Component` annotation
-on the classes will work.
-See Spring docs for for details, also our spring-boot with component scanning.
-All controllers that are registered as a bean, gets automatically registered to operator.
-
-Kubernetes client creation using properties is also supported, for complete list see: [Link for config class]
-
-
-## Implementation / Design details
-
-This library relies on the amazing [kubernetes-client](https://github.com/fabric8io/kubernetes-client) from fabric8.
-Most of the heavy lifting is actually done by kubernetes-client.
-
-What the framework adds on top of the bare client:
-* Management of events from the Kubernetes API. All events are inserted into a queue by the EventScheduler. The
-framework makes sure only the latest event for a certain resource is processed. This is especially important since
-on startup the operator can receive a whole series of obsolete events.
-* Retrying of failed actions. When an event handler throws an exception the event is put back in the queue.
-* A clean interface to the user of the framework to receive events related to the Controller's resource.
-
-### Dealing with Consistency
-
-#### Run Single Instance
-
-There should be always just one instance of an operator running at a time (think process). If there there would be
-two ore more, in general it could lead to concurrency issues. Note that we are also doing optimistic locking when we update a resource.
-In this way the operator is not highly available. However for operators this not necessary an issue,
-if the operator just gets restarted after it went down.
-
-#### Operator Restarts
-
-When an operator is started we got events for every resource (of a type we listen to) already on the cluster. Even if the resource is not changed
-(We use `kubectl get ... --watch` in the background). This can be a huge amount of resources depending on your use case.
-So it could be a good case just have a status field on the resource which is checked, if there anything needs to be done.
-
-#### At Least Once
-
-To implement controller logic, we have to override two methods: `createOrUpdateResource` and `deleteResource`.
-These methods are called if a resource is create/changed or marked for deletion. In most cases these methods will be
-called just once, but in some rare cases can happen that are called more then once. In practice this means that the
-implementation needs to be **idempotent**.
-
-#### Deleting a Resource
-
-During deletion process we use [Kubernetes finalizers](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers
-"Kubernetes docs") finalizers. This is required, since it can happen that the operator is not running while the delete
-of resource is executed (think `oc delete`). In this case we would not catch the delete event. So we automatically add a
-finalizer first time we update the resource if its not there.
+- [kroxylicious](https://github.com/kroxylicious/kroxylicious/tree/main/kroxylicious-operator) Kafka proxy operator
+- [ExposedApp operator](https://github.com/halkyonio/exposedapp-rhdblog): a sample operator
+ written to illustrate JOSDK concepts and its Quarkus extension in the ["Write Kubernetes
+ Operators in Java with the Java Operator SDK" blog series](https://developers.redhat.com/articles/2022/02/15/write-kubernetes-java-java-operator-sdk#).
+- [Keycloak operator](https://github.com/keycloak/keycloak/tree/main/operator): the official
+ Keycloak operator, built with Quarkus and JOSDK.
+- [Apache Flink Kubernetes operator](https://github.com/apache/flink-kubernetes-operator) is the market leader among Flink operators.
+- [Apache Spark Kubernetes Operator](https://github.com/apache/spark-kubernetes-operator) emerging operator for Spark.
+- [Strimzi Access operator](https://github.com/strimzi/kafka-access-operator). While the core Strimzi operator development predates
+  JOSDK, new components like the Access operator are using the framework.
+- [EureKubeOperator](https://medium.com/@heesuk.dev/implementing-kubernetes-operator-for-eureka-service-discovery-integration-by-java-operator-sdk-d21d8087c38e): integrates service discovery of Eureka and Kubernetes using the framework - developed by 11street. It is not released as an open source yet but is very interesting to read about this problem and how it is solved by an operator written with JOSDK.
+- [Locust k8s operator](https://github.com/AbdelrhmanHamouda/locust-k8s-operator): Cloud native solution to run performance tests on any Kubernetes cluster.
+- [Strimzi Schema Registry Operator](https://github.com/shangyuantech/strimzi-registry-ksql-operator): A Schema Registry Operator based on JOSDK for running the Confluent Schema Registry with a Strimzi-based Kafka cluster.
+- [Airflow Dag Operator](https://github.com/cdmikechen/airflow-dag-operator): Use JOSDK(Quarkus Extension) to replace Airflow Git Sync strategy. The main idea of the project is to start a synchronization container on each airflow pod to synchronize the DAG/files into the DAG folder.
+- [Glasskube Operator](https://github.com/glasskube/operator): simplifies the deployment, maintenance and upgrade of popular open source business tools. It is written in Kotlin and uses the JOSDK and fabric8 Kubernetes client with Kotlin-based DSL.
+- [Debezium Operator](https://github.com/debezium/debezium-operator): Debezium Operator adds Change-Data-Capture capabilities to your Kubernetes or Openshift cluster by providing an easy way to run and manage [Debezium Server](https://debezium.io/documentation/reference/stable/operations/debezium-server.html) instances.
diff --git a/adr/001-Introducing-ADRs.md b/adr/001-Introducing-ADRs.md
new file mode 100644
index 0000000000..3a432e8094
--- /dev/null
+++ b/adr/001-Introducing-ADRs.md
@@ -0,0 +1,29 @@
+# Using Architectural Decision Records
+
+In order to document and facilitate discussion of architecture and other design questions of the project,
+we introduce usage of [ADR](https://adr.github.io/).
+
+In each ADR file, write these sections:
+
+# Title
+
+## Status
+
+What is the status, such as proposed, accepted, rejected, deprecated, superseded, etc.?
+
+## Context
+
+What is the issue that we're seeing that is motivating this decision or change?
+
+## Decision
+
+What is the change that we're proposing and/or doing?
+
+## Consequences
+
+What becomes easier or more difficult to do because of this change?
+
+## Notes
+
+Other notes optionally added to the ADR.
+See other good materials for the ADRs:
\ No newline at end of file
diff --git a/adr/002-Custom-Resource-Deserialization-Problem.md b/adr/002-Custom-Resource-Deserialization-Problem.md
new file mode 100644
index 0000000000..0f648105fc
--- /dev/null
+++ b/adr/002-Custom-Resource-Deserialization-Problem.md
@@ -0,0 +1,44 @@
+# Multi Version Custom Resources Deserialization Problem
+
+## Status
+
+accepted
+
+## Context
+
+In case there are multiple versions of a custom resource it can happen that a controller/informer tracking
+such a resource might run into deserialization problem as shown
+in [this integration test](https://github.com/operator-framework/java-operator-sdk/blob/07aab1a9914d865364d7236e496ef9ba5b50699e/operator-framework/src/test/java/io/javaoperatorsdk/operator/MultiVersionCRDIT.java#L55-L55)
+.
+Such a case is possible (as seen in the test) if there are no conversion hooks in place, so the two custom resources
+which have different versions are stored in the original form (not converted) and are not compatible.
+In this case, if there is no further filtering (by labels), the informer receives both, but is naturally not able to deserialize
+one of them.
+
+How should the framework or the underlying informer behave?
+
+Alternatives:
+
+1. The informer should skip the resource and should continue to process the resources with the correct version.
+2. Informer stops and makes a notification callback.
+
+## Decision
+
+From the JOSDK perspective, it is fine if the informer stops, and the user decides if the whole operator should stop
+(usually the preferred way). The reason is that this is an obvious issue on the platform level (not on the operator/controller
+level). Thus, the controller should not receive such custom resources in the first place, so the problem should be
+addressed at the platform level, possibly by introducing conversion hooks, or labeling for the target resource.
+
+## Consequences
+
+If an Informer stops on such a deserialization error, even explicitly restarting it won't solve the problem, since
+it would fail again on the same error.
+
+## Notes
+
+- The informer implementation in fabric8 client changed in this regard, before it was not stopping on deserialization
+ error, but as described this change in behavior is completely acceptable.
+
+- the deserializer can be set to be more lenient by configuring the Serialization Unmatched Field Type module:
+  `Serialization.UNMATCHED_FIELD_TYPE_MODULE.setRestrictToTemplates(true);`. In general it is not desired to
+ process custom resources that are not deserialized correctly.
\ No newline at end of file
diff --git a/bootstrapper-maven-plugin/pom.xml b/bootstrapper-maven-plugin/pom.xml
new file mode 100644
index 0000000000..c306dcea35
--- /dev/null
+++ b/bootstrapper-maven-plugin/pom.xml
@@ -0,0 +1,100 @@
+
+
+ 4.0.0
+
+
+ io.javaoperatorsdk
+ java-operator-sdk
+ 5.1.5-SNAPSHOT
+
+
+ bootstrapper
+ maven-plugin
+ Operator SDK - Bootstrapper Maven Plugin
+ Operator SDK - Bootstrapper Maven Plugin
+
+
+ 3.15.1
+ 3.9.11
+ 3.0.0
+ 3.15.1
+
+
+
+
+ org.apache.maven
+ maven-plugin-api
+ ${maven-plugin-api.version}
+ provided
+
+
+ org.apache.maven.plugin-tools
+ maven-plugin-annotations
+ ${maven-plugin-annotations.version}
+ provided
+
+
+ org.slf4j
+ slf4j-api
+
+
+ org.apache.logging.log4j
+ log4j-slf4j2-impl
+ test
+
+
+ org.apache.logging.log4j
+ log4j-core
+ test
+
+
+ org.junit.jupiter
+ junit-jupiter-api
+
+
+ org.junit.jupiter
+ junit-jupiter-engine
+
+
+ commons-io
+ commons-io
+ 2.20.0
+
+
+ com.github.spullara.mustache.java
+ compiler
+
+
+ org.assertj
+ assertj-core
+ test
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-plugin-plugin
+ ${maven-plugin-plugin.version}
+
+ josdk-bootstrapper
+
+
+
+ org.codehaus.mojo
+ templating-maven-plugin
+ ${templating-maven-plugin.version}
+
+
+ filtering-java-templates
+
+ filter-sources
+
+
+
+
+
+
+
+
diff --git a/bootstrapper-maven-plugin/src/main/java-templates/io/javaoperatorsdk/operator/bootstrapper/Versions.java b/bootstrapper-maven-plugin/src/main/java-templates/io/javaoperatorsdk/operator/bootstrapper/Versions.java
new file mode 100644
index 0000000000..1656ee28f7
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/java-templates/io/javaoperatorsdk/operator/bootstrapper/Versions.java
@@ -0,0 +1,10 @@
+package io.javaoperatorsdk.bootstrapper;
+
+public final class Versions {
+
+ private Versions() {}
+
+ public static final String JOSDK = "${project.version}";
+ public static final String KUBERNETES_CLIENT = "${fabric8-client.version}";
+
+}
diff --git a/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/Bootstrapper.java b/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/Bootstrapper.java
new file mode 100644
index 0000000000..7339d7e9aa
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/Bootstrapper.java
@@ -0,0 +1,157 @@
+package io.javaoperatorsdk.boostrapper;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.io.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.javaoperatorsdk.bootstrapper.Versions;
+
+import com.github.mustachejava.DefaultMustacheFactory;
+import com.github.mustachejava.MustacheFactory;
+
+public class Bootstrapper {
+
+ private static final Logger log = LoggerFactory.getLogger(Bootstrapper.class);
+
+ private final MustacheFactory mustacheFactory = new DefaultMustacheFactory();
+
+  // .gitignore gets excluded from resources, so a prefixed version is used here
+ private static final Map TOP_LEVEL_STATIC_FILES =
+ Map.of("_.gitignore", ".gitignore", "README.md", "README.md");
+ private static final List JAVA_FILES =
+ List.of("CustomResource.java", "Reconciler.java", "Spec.java", "Status.java");
+
+ public void create(File targetDir, String groupId, String artifactId) {
+ try {
+ log.info("Generating project to: {}", targetDir.getPath());
+ var projectDir = new File(targetDir, artifactId);
+ FileUtils.forceMkdir(projectDir);
+ addStaticFiles(projectDir);
+ addTemplatedFiles(projectDir, groupId, artifactId);
+ addJavaFiles(projectDir, groupId, artifactId);
+ addResourceFiles(projectDir, groupId, artifactId);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void addResourceFiles(File projectDir, String groupId, String artifactId) {
+ try {
+ var target = new File(projectDir, "src/main/resources");
+ FileUtils.forceMkdir(target);
+ addTemplatedFile(target, "log4j2.xml", groupId, artifactId, target, null);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void addJavaFiles(File projectDir, String groupId, String artifactId) {
+ try {
+ var packages = groupId.replace(".", File.separator);
+ var targetDir = new File(projectDir, "src/main/java/" + packages);
+ var targetTestDir = new File(projectDir, "src/test/java/" + packages);
+ FileUtils.forceMkdir(targetDir);
+ var classFileNamePrefix = artifactClassId(artifactId);
+ JAVA_FILES.forEach(
+ f ->
+ addTemplatedFile(
+ projectDir, f, groupId, artifactId, targetDir, classFileNamePrefix + f));
+
+ addTemplatedFile(projectDir, "Runner.java", groupId, artifactId, targetDir, null);
+ addTemplatedFile(
+ projectDir, "ConfigMapDependentResource.java", groupId, artifactId, targetDir, null);
+ addTemplatedFile(
+ projectDir,
+ "ReconcilerIntegrationTest.java",
+ groupId,
+ artifactId,
+ targetTestDir,
+ artifactClassId(artifactId) + "ReconcilerIntegrationTest.java");
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void addTemplatedFiles(File projectDir, String groupId, String artifactId) {
+ addTemplatedFile(projectDir, "pom.xml", groupId, artifactId);
+ addTemplatedFile(projectDir, "k8s/test-resource.yaml", groupId, artifactId);
+ }
+
+ private void addTemplatedFile(
+ File projectDir, String fileName, String groupId, String artifactId) {
+ addTemplatedFile(projectDir, fileName, groupId, artifactId, null, null);
+ }
+
+ private void addTemplatedFile(
+ File projectDir,
+ String fileName,
+ String groupId,
+ String artifactId,
+ File targetDir,
+ String targetFileName) {
+ try {
+ var values =
+ Map.of(
+ "groupId",
+ groupId,
+ "artifactId",
+ artifactId,
+ "artifactClassId",
+ artifactClassId(artifactId),
+ "josdkVersion",
+ Versions.JOSDK,
+ "fabric8Version",
+ Versions.KUBERNETES_CLIENT);
+
+ var mustache = mustacheFactory.compile("templates/" + fileName);
+ var targetFile =
+ new File(
+ targetDir == null ? projectDir : targetDir,
+ targetFileName == null ? fileName : targetFileName);
+ FileUtils.forceMkdir(targetFile.getParentFile());
+ var writer = new FileWriter(targetFile);
+ mustache.execute(writer, values);
+ writer.flush();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void addStaticFiles(File projectDir) {
+ TOP_LEVEL_STATIC_FILES.forEach((key, value) -> addStaticFile(projectDir, key, value));
+ }
+
+ private void addStaticFile(File targetDir, String fileName, String targetFileName) {
+ addStaticFile(targetDir, fileName, targetFileName, null);
+ }
+
+ private void addStaticFile(
+ File targetDir, String fileName, String targetFilename, String subDir) {
+ String sourcePath = subDir == null ? "/static/" : "/static/" + subDir;
+ String path = sourcePath + fileName;
+ try (var is = Bootstrapper.class.getResourceAsStream(path)) {
+ targetDir = subDir == null ? targetDir : new File(targetDir, subDir);
+ if (subDir != null) {
+ FileUtils.forceMkdir(targetDir);
+ }
+ FileUtils.copyInputStreamToFile(is, new File(targetDir, targetFilename));
+ } catch (IOException e) {
+ throw new RuntimeException("File path: " + path, e);
+ }
+ }
+
+ public static String artifactClassId(String artifactId) {
+ var parts = artifactId.split("-");
+ return Arrays.stream(parts)
+ .map(p -> p.substring(0, 1).toUpperCase() + p.substring(1))
+ .collect(Collectors.joining(""));
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/BootstrapperMojo.java b/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/BootstrapperMojo.java
new file mode 100644
index 0000000000..cb470f7e87
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/java/io/javaoperatorsdk/boostrapper/BootstrapperMojo.java
@@ -0,0 +1,23 @@
+package io.javaoperatorsdk.boostrapper;
+
+import java.io.File;
+
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugins.annotations.Mojo;
+import org.apache.maven.plugins.annotations.Parameter;
+
+@Mojo(name = "create", requiresProject = false)
+public class BootstrapperMojo extends AbstractMojo {
+
+ @Parameter(defaultValue = "${projectGroupId}")
+ protected String projectGroupId;
+
+ @Parameter(defaultValue = "${projectArtifactId}")
+ protected String projectArtifactId;
+
+ public void execute() throws MojoExecutionException {
+ String userDir = System.getProperty("user.dir");
+ new Bootstrapper().create(new File(userDir), projectGroupId, projectArtifactId);
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/log4j2.xml b/bootstrapper-maven-plugin/src/main/resources/log4j2.xml
new file mode 100644
index 0000000000..124aef7838
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/log4j2.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/bootstrapper-maven-plugin/src/main/resources/static/README.md b/bootstrapper-maven-plugin/src/main/resources/static/README.md
new file mode 100644
index 0000000000..7746a9a5d2
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/static/README.md
@@ -0,0 +1,3 @@
+# Generated Project Skeleton
+
+A simple operator that copies the value in a spec to a ConfigMap.
\ No newline at end of file
diff --git a/bootstrapper-maven-plugin/src/main/resources/static/_.gitignore b/bootstrapper-maven-plugin/src/main/resources/static/_.gitignore
new file mode 100644
index 0000000000..9a6a0350f2
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/static/_.gitignore
@@ -0,0 +1,41 @@
+#Maven
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+release.properties
+.flattened-pom.xml
+
+# Eclipse
+.project
+.classpath
+.settings/
+bin/
+
+# IntelliJ
+.idea
+*.ipr
+*.iml
+*.iws
+
+# NetBeans
+nb-configuration.xml
+
+# Visual Studio Code
+.vscode
+.factorypath
+
+# OSX
+.DS_Store
+
+# Vim
+*.swp
+*.swo
+
+# patch
+*.orig
+*.rej
+
+# Local environment
+.env
+
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/ConfigMapDependentResource.java b/bootstrapper-maven-plugin/src/main/resources/templates/ConfigMapDependentResource.java
new file mode 100644
index 0000000000..59eae8b01c
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/ConfigMapDependentResource.java
@@ -0,0 +1,32 @@
+package {{groupId}};
+
+import java.util.HashMap;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.CRUDKubernetesDependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import {{groupId}}.{{artifactClassId}}CustomResource;
+
+@KubernetesDependent
+public class ConfigMapDependentResource
+ extends CRUDKubernetesDependentResource {
+
+ public static final String KEY = "key";
+
+ @Override
+ protected ConfigMap desired({{artifactClassId}}CustomResource primary,
+ Context<{{artifactClassId}}CustomResource> context) {
+ return new ConfigMapBuilder()
+ .withMetadata(
+ new ObjectMetaBuilder()
+ .withName(primary.getMetadata().getName())
+ .withNamespace(primary.getMetadata().getNamespace())
+ .build())
+ .withData(Map.of(KEY, primary.getSpec().getValue()))
+ .build();
+ }
+}
\ No newline at end of file
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/CustomResource.java b/bootstrapper-maven-plugin/src/main/resources/templates/CustomResource.java
new file mode 100644
index 0000000000..e17dcc0450
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/CustomResource.java
@@ -0,0 +1,11 @@
+package {{groupId}};
+
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.Version;
+
+@Group("{{groupId}}")
+@Version("v1")
+public class {{artifactClassId}}CustomResource extends CustomResource<{{artifactClassId}}Spec,{{artifactClassId}}Status> implements Namespaced {
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/Reconciler.java b/bootstrapper-maven-plugin/src/main/resources/templates/Reconciler.java
new file mode 100644
index 0000000000..f7583be4ee
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/Reconciler.java
@@ -0,0 +1,20 @@
+package {{groupId}};
+
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import io.javaoperatorsdk.operator.api.reconciler.Workflow;
+
+import java.util.Map;
+import java.util.Optional;
+
+@Workflow(dependents = {@Dependent(type = ConfigMapDependentResource.class)})
+public class {{artifactClassId}}Reconciler implements Reconciler<{{artifactClassId}}CustomResource> {
+
+ public UpdateControl<{{artifactClassId}}CustomResource> reconcile({{artifactClassId}}CustomResource primary,
+ Context<{{artifactClassId}}CustomResource> context) {
+
+ return UpdateControl.noUpdate();
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/ReconcilerIntegrationTest.java b/bootstrapper-maven-plugin/src/main/resources/templates/ReconcilerIntegrationTest.java
new file mode 100644
index 0000000000..865fe9c594
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/ReconcilerIntegrationTest.java
@@ -0,0 +1,60 @@
+package {{groupId}};
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
+import io.javaoperatorsdk.operator.junit.LocallyRunOperatorExtension;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import static {{groupId}}.ConfigMapDependentResource.KEY;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.await;
+
+class {{artifactClassId}}ReconcilerIntegrationTest {
+
+ public static final String RESOURCE_NAME = "test1";
+ public static final String INITIAL_VALUE = "initial value";
+ public static final String CHANGED_VALUE = "changed value";
+
+ @RegisterExtension
+ LocallyRunOperatorExtension extension =
+ LocallyRunOperatorExtension.builder()
+ .withReconciler({{artifactClassId}}Reconciler.class)
+ .build();
+
+ @Test
+ void testCRUDOperations() {
+ var cr = extension.create(testResource());
+
+ await().untilAsserted(() -> {
+ var cm = extension.get(ConfigMap.class, RESOURCE_NAME);
+ assertThat(cm).isNotNull();
+ assertThat(cm.getData()).containsEntry(KEY, INITIAL_VALUE);
+ });
+
+ cr.getSpec().setValue(CHANGED_VALUE);
+ cr = extension.replace(cr);
+
+ await().untilAsserted(() -> {
+ var cm = extension.get(ConfigMap.class, RESOURCE_NAME);
+ assertThat(cm.getData()).containsEntry(KEY, CHANGED_VALUE);
+ });
+
+ extension.delete(cr);
+
+ await().untilAsserted(() -> {
+ var cm = extension.get(ConfigMap.class, RESOURCE_NAME);
+ assertThat(cm).isNull();
+ });
+ }
+
+ {{artifactClassId}}CustomResource testResource() {
+ var resource = new {{artifactClassId}}CustomResource();
+ resource.setMetadata(new ObjectMetaBuilder()
+ .withName(RESOURCE_NAME)
+ .build());
+ resource.setSpec(new {{artifactClassId}}Spec());
+ resource.getSpec().setValue(INITIAL_VALUE);
+ return resource;
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/Runner.java b/bootstrapper-maven-plugin/src/main/resources/templates/Runner.java
new file mode 100644
index 0000000000..41be6d6976
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/Runner.java
@@ -0,0 +1,18 @@
+package {{groupId}};
+
+import io.javaoperatorsdk.operator.Operator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class Runner {
+
+ private static final Logger log = LoggerFactory.getLogger(Runner.class);
+
+ public static void main(String[] args) {
+ Operator operator = new Operator();
+ operator.register(new {{artifactClassId}}Reconciler());
+ operator.start();
+ log.info("Operator started.");
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/Spec.java b/bootstrapper-maven-plugin/src/main/resources/templates/Spec.java
new file mode 100644
index 0000000000..13d82dad51
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/Spec.java
@@ -0,0 +1,14 @@
+package {{groupId}};
+
+public class {{artifactClassId}}Spec {
+
+ private String value;
+
+ public String getValue() {
+ return value;
+ }
+
+ public void setValue(String value) {
+ this.value = value;
+ }
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/Status.java b/bootstrapper-maven-plugin/src/main/resources/templates/Status.java
new file mode 100644
index 0000000000..52bd0fd4d2
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/Status.java
@@ -0,0 +1,5 @@
+package {{groupId}};
+
+public class {{artifactClassId}}Status {
+
+}
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/k8s/test-resource.yaml b/bootstrapper-maven-plugin/src/main/resources/templates/k8s/test-resource.yaml
new file mode 100644
index 0000000000..ec7987512e
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/k8s/test-resource.yaml
@@ -0,0 +1,6 @@
+apiVersion: {{groupId}}/v1
+kind: {{artifactClassId}}CustomResource
+metadata:
+ name: test1
+spec:
+ value: test
\ No newline at end of file
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/log4j2.xml b/bootstrapper-maven-plugin/src/main/resources/templates/log4j2.xml
new file mode 100644
index 0000000000..9fde311940
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/log4j2.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/bootstrapper-maven-plugin/src/main/resources/templates/pom.xml b/bootstrapper-maven-plugin/src/main/resources/templates/pom.xml
new file mode 100644
index 0000000000..09e8ed0ef8
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/main/resources/templates/pom.xml
@@ -0,0 +1,98 @@
+
+
+ 4.0.0
+
+ {{groupId}}
+ {{artifactId}}
+ 0.1.0-SNAPSHOT
+ sample-webpage-operator
+ jar
+
+
+ 17
+ ${java.version}
+ ${java.version}
+ {{josdkVersion}}
+ 2.0.17
+ 5.9.2
+ 2.20.0
+ {{fabric8Version}}
+
+
+
+
+
+ io.javaoperatorsdk
+ operator-framework-bom
+ ${josdk.version}
+ pom
+ import
+
+
+
+
+
+
+ io.javaoperatorsdk
+ operator-framework
+ ${josdk.version}
+
+
+ io.javaoperatorsdk
+ operator-framework-junit-5
+ ${josdk.version}
+ test
+
+
+ org.slf4j
+ slf4j-api
+ ${slf4j.version}
+
+
+ org.apache.logging.log4j
+ log4j-slf4j2-impl
+ ${log4j.version}
+
+
+ org.apache.logging.log4j
+ log4j-core
+ ${log4j.version}
+
+
+ org.junit.jupiter
+ junit-jupiter-api
+ test
+ ${junit.version}
+
+
+ org.junit.jupiter
+ junit-jupiter-engine
+ test
+ ${junit.version}
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.11.0
+
+
+ io.fabric8
+ crd-generator-maven-plugin
+ ${fabric8-client.version}
+
+
+
+ generate
+
+
+
+
+
+
+
+
diff --git a/bootstrapper-maven-plugin/src/test/java/io/javaoperatorsdk/bootstrapper/BootstrapperTest.java b/bootstrapper-maven-plugin/src/test/java/io/javaoperatorsdk/bootstrapper/BootstrapperTest.java
new file mode 100644
index 0000000000..f7840c1585
--- /dev/null
+++ b/bootstrapper-maven-plugin/src/test/java/io/javaoperatorsdk/bootstrapper/BootstrapperTest.java
@@ -0,0 +1,53 @@
+package io.javaoperatorsdk.bootstrapper;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.javaoperatorsdk.boostrapper.Bootstrapper;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+class BootstrapperTest {
+
+ private static final Logger log = LoggerFactory.getLogger(BootstrapperTest.class);
+
+ Bootstrapper bootstrapper = new Bootstrapper();
+
+ @Test
+ void copiesFilesToTarget() {
+ bootstrapper.create(new File("target"), "io.sample", "test-project");
+
+ var targetDir = new File("target", "test-project");
+ assertThat(targetDir.list()).contains("pom.xml");
+ assertProjectCompiles();
+ }
+
+ private void assertProjectCompiles() {
+ try {
+ var process =
+ Runtime.getRuntime()
+ .exec(
+ "mvn clean install -f target/test-project/pom.xml -DskipTests"
+ + " -Dspotless.apply.skip");
+
+ BufferedReader stdOut = new BufferedReader(new InputStreamReader(process.getInputStream()));
+
+ log.info("Maven output:");
+ String logLine;
+ while ((logLine = stdOut.readLine()) != null) {
+ log.info(logLine);
+ }
+ var res = process.waitFor();
+ log.info("exit code: {}", res);
+ assertThat(res).isZero();
+ } catch (IOException | InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
diff --git a/caffeine-bounded-cache-support/pom.xml b/caffeine-bounded-cache-support/pom.xml
new file mode 100644
index 0000000000..76f3db9abc
--- /dev/null
+++ b/caffeine-bounded-cache-support/pom.xml
@@ -0,0 +1,93 @@
+
+
+ 4.0.0
+
+ io.javaoperatorsdk
+ java-operator-sdk
+ 5.1.5-SNAPSHOT
+
+
+ caffeine-bounded-cache-support
+ Operator SDK - Caffeine Bounded Cache Support
+
+
+
+ io.javaoperatorsdk
+ operator-framework-core
+
+
+ com.github.ben-manes.caffeine
+ caffeine
+
+
+ io.javaoperatorsdk
+ operator-framework
+ test
+
+
+ io.javaoperatorsdk
+ operator-framework-junit-5
+ ${project.version}
+ test
+
+
+ org.apache.logging.log4j
+ log4j-slf4j2-impl
+ test
+
+
+ org.apache.logging.log4j
+ log4j-core
+ ${log4j.version}
+ test
+
+
+ io.fabric8
+ kubernetes-httpclient-okhttp
+ test
+
+
+
+
+
+
+ maven-compiler-plugin
+ ${maven-compiler-plugin.version}
+
+
+
+ default-compile
+
+ compile
+
+ compile
+
+
+ -proc:none
+
+
+
+
+
+
+ io.fabric8
+ crd-generator-maven-plugin
+ ${fabric8-client.version}
+
+
+
+ generate
+
+ process-test-classes
+
+ ${project.build.testOutputDirectory}
+ WITH_ALL_DEPENDENCIES_AND_TESTS
+
+
+
+
+
+
+
+
diff --git a/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCache.java b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCache.java
new file mode 100644
index 0000000000..c7ac96cb20
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCache.java
@@ -0,0 +1,30 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache;
+
+import com.github.benmanes.caffeine.cache.Cache;
+
+/** Caffeine cache wrapper to be used in a {@link BoundedItemStore} */
+public class CaffeineBoundedCache<K, R> implements BoundedCache<K, R> {
+
+  private final Cache<K, R> cache;
+
+  public CaffeineBoundedCache(Cache<K, R> cache) {
+ this.cache = cache;
+ }
+
+ @Override
+ public R get(K key) {
+ return cache.getIfPresent(key);
+ }
+
+ @Override
+ public R remove(K key) {
+ var value = cache.getIfPresent(key);
+ cache.invalidate(key);
+ return value;
+ }
+
+ @Override
+ public void put(K key, R object) {
+ cache.put(key, object);
+ }
+}
diff --git a/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java
new file mode 100644
index 0000000000..89fbcef70f
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java
@@ -0,0 +1,51 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache;
+
+import java.time.Duration;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.client.KubernetesClient;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+
+/**
+ * A factory for Caffeine-backed {@link
+ * BoundedItemStore}. The implementation uses a {@link CaffeineBoundedCache} to store resources and
+ * progressively evict them if they haven't been used in a while. The idea about
+ * CaffeineBoundedItemStore-s is that Caffeine will cache the resources which were recently used,
+ * and will evict resources which are not used for a while. This is ideal for startup performance
+ * and efficiency when all resources should be cached to avoid undue load on the API server. This is
+ * why setting a maximal cache size is not practical and the approach of evicting least recently
+ * used resources was chosen. However, depending on controller implementations and domains, it could
+ * happen that some / many of these resources are then seldom or even never reconciled anymore. In that
+ * situation, large amounts of memory might be consumed to cache resources that are never used
+ * again.
+ *
+ *
+ * <p>Note that if a resource is reconciled and is not present anymore in cache, it will
+ * transparently be fetched again from the API server. Similarly, since associated secondary
+ * resources are usually reconciled too, they might need to be fetched and populated to the cache,
+ * and will remain there for some time, for subsequent reconciliations.
+ */
+public class CaffeineBoundedItemStores {
+
+ private CaffeineBoundedItemStores() {}
+
+ /**
+ * @param client Kubernetes Client
+ * @param rClass resource class
+ * @param accessExpireDuration the duration after which a resource is evicted from the cache if not accessed.
+ * @return the ItemStore implementation
+ * @param <R> resource type
+ */
+ @SuppressWarnings("unused")
+  public static <R extends HasMetadata> BoundedItemStore<R> boundedItemStore(
+      KubernetesClient client, Class<R> rClass, Duration accessExpireDuration) {
+    Cache<String, R> cache = Caffeine.newBuilder().expireAfterAccess(accessExpireDuration).build();
+    return boundedItemStore(client, rClass, cache);
+ }
+
+  public static <R extends HasMetadata> BoundedItemStore<R> boundedItemStore(
+      KubernetesClient client, Class<R> rClass, Cache<String, R> cache) {
+ return new BoundedItemStore<>(new CaffeineBoundedCache<>(cache), rClass, client);
+ }
+}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java
new file mode 100644
index 0000000000..532e5237f8
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java
@@ -0,0 +1,107 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache;
+
+import java.time.Duration;
+import java.util.stream.IntStream;
+
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.javaoperatorsdk.operator.junit.LocallyRunOperatorExtension;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope.BoundedCacheTestSpec;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope.BoundedCacheTestStatus;
+
+import static io.javaoperatorsdk.operator.processing.event.source.cache.sample.AbstractTestReconciler.DATA_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.awaitility.Awaitility.await;
+
+public abstract class BoundedCacheTestBase<
+    P extends CustomResource<BoundedCacheTestSpec, BoundedCacheTestStatus>> {
+
+ private static final Logger log = LoggerFactory.getLogger(BoundedCacheTestBase.class);
+
+ public static final int NUMBER_OF_RESOURCE_TO_TEST = 3;
+ public static final String RESOURCE_NAME_PREFIX = "test-";
+ public static final String INITIAL_DATA_PREFIX = "data-";
+ public static final String UPDATED_PREFIX = "updatedPrefix";
+
+ @Test
+ void reconciliationWorksWithLimitedCache() {
+ createTestResources();
+
+ assertConfigMapData(INITIAL_DATA_PREFIX);
+
+ updateTestResources();
+
+ assertConfigMapData(UPDATED_PREFIX);
+
+ deleteTestResources();
+
+ assertConfigMapsDeleted();
+ }
+
+ private void assertConfigMapsDeleted() {
+ await()
+ .atMost(Duration.ofSeconds(120))
+ .untilAsserted(
+ () ->
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
+ assertThat(cm).isNull();
+ }));
+ }
+
+ private void deleteTestResources() {
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(customResourceClass(), RESOURCE_NAME_PREFIX + i);
+ var deleted = extension().delete(cm);
+ if (!deleted) {
+ log.warn("Custom resource might not be deleted: {}", cm);
+ }
+ });
+ }
+
+ private void updateTestResources() {
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
+ cm.getData().put(DATA_KEY, UPDATED_PREFIX + i);
+ extension().replace(cm);
+ });
+ }
+
+ void assertConfigMapData(String dataPrefix) {
+ await()
+ .untilAsserted(
+ () ->
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(i -> assertConfigMap(i, dataPrefix)));
+ }
+
+ private void assertConfigMap(int i, String prefix) {
+ var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
+ assertThat(cm).isNotNull();
+ assertThat(cm.getData().get(DATA_KEY)).isEqualTo(prefix + i);
+ }
+
+ private void createTestResources() {
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ extension().create(createTestResource(i));
+ });
+ }
+
+ abstract P createTestResource(int index);
+
+ abstract Class
context) {
+ var cm =
+ new ConfigMapBuilder()
+ .withMetadata(
+ new ObjectMetaBuilder()
+ .withName(resource.getMetadata().getName())
+ .withNamespace(resource.getSpec().getTargetNamespace())
+ .build())
+ .withData(Map.of(DATA_KEY, resource.getSpec().getData()))
+ .build();
+ cm.addOwnerReference(resource);
+ context.getClient().configMaps().resource(cm).create();
+ }
+
+ @Override
+  public List<EventSource<?, P>> prepareEventSources(
+      EventSourceContext<P> context) {
+
+ var boundedItemStore =
+ boundedItemStore(
+ new KubernetesClientBuilder().build(),
+ ConfigMap.class,
+ Duration.ofMinutes(1),
+ 1); // setting max size for testing purposes
+
+ var es =
+ new InformerEventSource<>(
+ InformerEventSourceConfiguration.from(ConfigMap.class, primaryClass())
+ .withItemStore(boundedItemStore)
+ .withSecondaryToPrimaryMapper(
+ Mappers.fromOwnerReferences(
+ context.getPrimaryResourceClass(),
+ this instanceof BoundedCacheClusterScopeTestReconciler))
+ .build(),
+ context);
+
+ return List.of(es);
+ }
+
+ private void ensureStatus(P resource) {
+ if (resource.getStatus() == null) {
+ resource.setStatus(new BoundedCacheTestStatus());
+ }
+ }
+
+  public static <R extends HasMetadata> BoundedItemStore<R> boundedItemStore(
+      KubernetesClient client,
+      Class<R> rClass,
+      Duration accessExpireDuration,
+      // max size is only for testing purposes
+      long cacheMaxSize) {
+    Cache<String, R> cache =
+ Caffeine.newBuilder()
+ .expireAfterAccess(accessExpireDuration)
+ .maximumSize(cacheMaxSize)
+ .build();
+ return CaffeineBoundedItemStores.boundedItemStore(client, rClass, cache);
+ }
+
+  protected abstract Class<P> primaryClass();
+}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java
new file mode 100644
index 0000000000..6fc9a5babc
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java
@@ -0,0 +1,14 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.clusterscope;
+
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope.BoundedCacheTestSpec;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope.BoundedCacheTestStatus;
+
+@Group("sample.javaoperatorsdk")
+@Version("v1")
+@ShortNames("bccs")
+public class BoundedCacheClusterScopeTestCustomResource
+    extends CustomResource<BoundedCacheTestSpec, BoundedCacheTestStatus> {}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java
new file mode 100644
index 0000000000..93f103cbf2
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java
@@ -0,0 +1,14 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.clusterscope;
+
+import io.javaoperatorsdk.operator.api.reconciler.*;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.AbstractTestReconciler;
+
+@ControllerConfiguration
+public class BoundedCacheClusterScopeTestReconciler
+    extends AbstractTestReconciler<BoundedCacheClusterScopeTestCustomResource> {
+
+ @Override
+  protected Class<BoundedCacheClusterScopeTestCustomResource> primaryClass() {
+ return BoundedCacheClusterScopeTestCustomResource.class;
+ }
+}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java
new file mode 100644
index 0000000000..9b77aa7bf8
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java
@@ -0,0 +1,13 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope;
+
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+
+@Group("sample.javaoperatorsdk")
+@Version("v1")
+@ShortNames("bct")
+public class BoundedCacheTestCustomResource
+    extends CustomResource<BoundedCacheTestSpec, BoundedCacheTestStatus> implements Namespaced {}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java
new file mode 100644
index 0000000000..6b95665585
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java
@@ -0,0 +1,14 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope;
+
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.processing.event.source.cache.sample.AbstractTestReconciler;
+
+@ControllerConfiguration
+public class BoundedCacheTestReconciler
+    extends AbstractTestReconciler<BoundedCacheTestCustomResource> {
+
+ @Override
+  protected Class<BoundedCacheTestCustomResource> primaryClass() {
+ return BoundedCacheTestCustomResource.class;
+ }
+}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestSpec.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestSpec.java
new file mode 100644
index 0000000000..63e5876267
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestSpec.java
@@ -0,0 +1,25 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope;
+
+public class BoundedCacheTestSpec {
+
+ private String data;
+ private String targetNamespace;
+
+ public String getData() {
+ return data;
+ }
+
+ public BoundedCacheTestSpec setData(String data) {
+ this.data = data;
+ return this;
+ }
+
+ public String getTargetNamespace() {
+ return targetNamespace;
+ }
+
+ public BoundedCacheTestSpec setTargetNamespace(String targetNamespace) {
+ this.targetNamespace = targetNamespace;
+ return this;
+ }
+}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java
new file mode 100644
index 0000000000..5aa5ca2258
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java
@@ -0,0 +1,3 @@
+package io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope;
+
+public class BoundedCacheTestStatus {}
diff --git a/caffeine-bounded-cache-support/src/test/resources/log4j2.xml b/caffeine-bounded-cache-support/src/test/resources/log4j2.xml
new file mode 100644
index 0000000000..f23cf772dd
--- /dev/null
+++ b/caffeine-bounded-cache-support/src/test/resources/log4j2.xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000..40b67f41a7
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,5 @@
+/public
+resources/
+node_modules/
+package-lock.json
+.hugo_build.lock
\ No newline at end of file
diff --git a/docs/.nvmrc b/docs/.nvmrc
new file mode 100644
index 0000000000..b009dfb9d9
--- /dev/null
+++ b/docs/.nvmrc
@@ -0,0 +1 @@
+lts/*
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
new file mode 100644
index 0000000000..5ea571c69d
--- /dev/null
+++ b/docs/CONTRIBUTING.md
@@ -0,0 +1,57 @@
+# Contributing to Java Operator SDK Documentation
+
+Thank you for your interest in improving the Java Operator SDK documentation! We welcome contributions from the community and appreciate your help in making our documentation better.
+
+## How to Contribute
+
+### Getting Started
+
+1. **Fork the repository** and clone your fork locally
+2. **Create a new branch** for your changes
+3. **Make your improvements** to the documentation
+4. **Test your changes** locally using `hugo server`
+5. **Submit a pull request** with a clear description of your changes
+
+### Types of Contributions
+
+We welcome various types of contributions:
+
+- **Content improvements**: Fix typos, clarify explanations, add examples
+- **New documentation**: Add missing sections or entirely new guides
+- **Structural improvements**: Better organization, navigation, or formatting
+- **Translation**: Help translate documentation to other languages
+
+## Guidelines
+
+### Writing Style
+
+- Use clear, concise language
+- Write in active voice when possible
+- Define technical terms when first used
+- Include practical examples where helpful
+- Keep sentences and paragraphs reasonably short
+
+### Technical Requirements
+
+- Test all code examples to ensure they work
+- Use proper markdown formatting
+- Follow existing documentation structure and conventions
+- Ensure links work and point to current resources
+
+## Legal Requirements
+
+### Contributor License Agreement
+
+All contributions must be accompanied by a Contributor License Agreement (CLA). You (or your employer) retain the copyright to your contribution; the CLA simply gives us permission to use and redistribute your contributions as part of the project.
+
+Visit <https://cla.developers.google.com/> to see your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one (even for a different project), you probably don't need to do it again.
+
+### Code Review Process
+
+All submissions, including those by project members, require review. We use GitHub pull requests for this purpose. Please consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
diff --git a/docs/Dockerfile b/docs/Dockerfile
new file mode 100644
index 0000000000..232d8f70c4
--- /dev/null
+++ b/docs/Dockerfile
@@ -0,0 +1,4 @@
+FROM floryn90/hugo:ext-alpine
+
+RUN apk add git && \
+ git config --global --add safe.directory /src
diff --git a/docs/LICENSE b/docs/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/docs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000..14f675b53b
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,82 @@
+# Java Operator SDK Documentation
+
+This repository contains the documentation website for the Java Operator SDK (JOSDK), built using Hugo and the Docsy theme.
+
+## About Java Operator SDK
+
+Java Operator SDK is a framework that makes it easy to build Kubernetes operators in Java. It provides APIs designed to feel natural to Java developers and handles common operator challenges automatically, allowing you to focus on your business logic.
+
+## Development Setup
+
+This documentation site uses Hugo v0.125.7 with the Docsy theme.
+
+## Prerequisites
+
+- Hugo v0.125.7 or later (extended version required)
+- Node.js and npm (for PostCSS processing)
+- Git
+
+## Local Development
+
+### Quick Start
+
+1. Clone this repository
+2. Install dependencies:
+ ```bash
+ npm install
+ ```
+3. Start the development server:
+ ```bash
+ hugo server
+ ```
+4. Open your browser to `http://localhost:1313`
+
+### Using Docker
+
+You can also run the documentation site using Docker:
+
+1. Build the container:
+ ```bash
+ docker-compose build
+ ```
+2. Run the container:
+ ```bash
+ docker-compose up
+ ```
+ > **Note**: You can combine both commands with `docker-compose up --build`
+
+3. Access the site at `http://localhost:1313`
+
+To stop the container, press **Ctrl + C** in your terminal.
+
+To clean up Docker resources:
+```bash
+docker-compose rm
+```
+
+## Contributing
+
+We welcome contributions to improve the documentation! Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to get started.
+
+## Troubleshooting
+
+### Module Compatibility Error
+If you see an error about module compatibility, ensure you're using Hugo v0.110.0 or higher:
+```console
+Error: Error building site: failed to extract shortcode: template for shortcode "blocks/cover" not found
+```
+
+### SCSS Processing Error
+If you encounter SCSS-related errors, make sure you have the extended version of Hugo installed:
+```console
+Error: TOCSS: failed to transform "scss/main.scss"
+```
+
+### Go Binary Not Found
+If you see "binary with name 'go' not found", install the Go programming language from [golang.org](https://golang.org).
+
+## Links
+
+- [Hugo Documentation](https://gohugo.io/documentation/)
+- [Docsy Theme Documentation](https://www.docsy.dev/docs/)
+- [Java Operator SDK GitHub Repository](https://github.com/operator-framework/java-operator-sdk)
diff --git a/docs/assets/icons/logo.svg b/docs/assets/icons/logo.svg
new file mode 100644
index 0000000000..0048fdf4d6
--- /dev/null
+++ b/docs/assets/icons/logo.svg
@@ -0,0 +1,18 @@
+
\ No newline at end of file
diff --git a/docs/assets/scss/_variables_project.scss b/docs/assets/scss/_variables_project.scss
new file mode 100644
index 0000000000..35523dc3f4
--- /dev/null
+++ b/docs/assets/scss/_variables_project.scss
@@ -0,0 +1,10 @@
+/*
+
+Add styles or override variables from the theme here.
+
+*/
+
+//$primary: #fc9c62;;
+$primary: #da5504;
+$secondary: #fc9c62;
+//$secondary: white;
\ No newline at end of file
diff --git a/docs/config.yaml b/docs/config.yaml
new file mode 100644
index 0000000000..9070e384f0
--- /dev/null
+++ b/docs/config.yaml
@@ -0,0 +1,15 @@
+# THIS IS A TEST CONFIG ONLY!
+# FOR THE CONFIGURATION OF YOUR SITE USE hugo.yaml.
+#
+# As of Docsy 0.7.0, Hugo 0.110.0 or later must be used.
+#
+# The sole purpose of this config file is to detect Hugo-module builds that use
+# an older version of Hugo.
+#
+# DO NOT add any config parameters to this file. You can safely delete this file
+# if your project is using the required Hugo version.
+
+module:
+ hugoVersion:
+ extended: true
+ min: 0.110.0
diff --git a/docs/content/en/_index.md b/docs/content/en/_index.md
new file mode 100644
index 0000000000..f375ebfb97
--- /dev/null
+++ b/docs/content/en/_index.md
@@ -0,0 +1,69 @@
+---
+title: Java Operator SDK Documentation
+---
+
+{{< blocks/cover title="Java Operator SDK" image_anchor="top" height="full" >}}
+
+ Learn More
+
+
+ Download
+
+
+Kubernetes operators in Java made easy!
+{{< blocks/link-down color="info" >}}
+{{< /blocks/cover >}}
+
+
+{{% blocks/lead color="gray" %}}
+Whether you want to build applications that operate themselves or provision infrastructure from Java code, Kubernetes Operators are the way to go.
+Java Operator SDK is based on the fabric8 Kubernetes client and will make it easy for Java developers to embrace this new way of automation.
+{{% /blocks/lead %}}
+
+
+{{% blocks/section color="secondary" type="row" %}}
+{{% blocks/feature icon="fab fa-slack" title="Contact us on Slack" url="https://kubernetes.slack.com/archives/CAW0GV7A5" %}}
+Feel free to reach out on [Kubernetes Slack](https://kubernetes.slack.com/archives/CAW0GV7A5)
+
+Ask any question, we are happy to answer!
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fab fa-github" title="Contributions welcome!" url="https://github.com/operator-framework/java-operator-sdk" %}}
+We use a [Pull Request](https://github.com/operator-framework/java-operator-sdk/pulls) contribution workflow on **GitHub**. New users are always welcome!
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-brands fa-bluesky" title="Follow us on BlueSky!" url="https://bsky.app/profile/javaoperatorsdk.bsky.social" %}}
+For announcements of the latest features, etc.
+{{% /blocks/feature %}}
+
+
+{{% /blocks/section %}}
+
+
+{{% blocks/section %}}
+
+Sponsored by:
+{.h1 .text-center}
+
+ &
+{.h1 .text-center}
+
+{{% /blocks/section %}}
+
+
+{{% blocks/section type="row" %}}
+
+{{% blocks/feature icon="no_icon" %}}
+{{% /blocks/feature %}}
+
+{{% blocks/feature icon="no_icon" %}}
+Java Operator SDK is a [Cloud Native Computing Foundation](https://www.cncf.io) incubating project as part of [Operator Framework](https://www.cncf.io/projects/operator-framework/)
+{.h3 .text-center}
+
+
+
+{{% /blocks/feature %}}
+
+{{% /blocks/section %}}
+
diff --git a/docs/content/en/blog/_index.md b/docs/content/en/blog/_index.md
new file mode 100644
index 0000000000..e792e415fe
--- /dev/null
+++ b/docs/content/en/blog/_index.md
@@ -0,0 +1,8 @@
+---
+title: Blog
+menu: {main: {weight: 2}}
+---
+
+This is the **blog** section. It has two categories: News and Releases.
+
+Content is coming soon.
diff --git a/docs/content/en/blog/news/_index.md b/docs/content/en/blog/news/_index.md
new file mode 100644
index 0000000000..aaf1c2adcd
--- /dev/null
+++ b/docs/content/en/blog/news/_index.md
@@ -0,0 +1,4 @@
+---
+title: Posts
+weight: 220
+---
diff --git a/docs/content/en/blog/news/etcd-as-app-db.md b/docs/content/en/blog/news/etcd-as-app-db.md
new file mode 100644
index 0000000000..c6306ddffc
--- /dev/null
+++ b/docs/content/en/blog/news/etcd-as-app-db.md
@@ -0,0 +1,115 @@
+---
+title: Using k8s' ETCD as your application DB
+date: 2025-01-16
+---
+
+# FAQ: Is Kubernetes’ ETCD the Right Database for My Application?
+
+## Answer
+
+While the idea of moving your application data to Custom Resources (CRs) aligns with the "Cloud Native" philosophy, it often introduces more challenges than benefits. Let’s break it down:
+
+---
+
+### Top Reasons Why Storing Data in ETCD Through CRs Looks Appealing
+
+1. **Storing application data as CRs enables treating your application’s data like infrastructure:**
+ - **GitOps compatibility:** Declarative content can be stored in Git repositories, ensuring reproducibility.
+ - **Infrastructure alignment:** Application data can follow the same workflow as other infrastructure components.
+
+---
+
+### Challenges of Using Kubernetes’ ETCD as Your Application’s Database
+
+#### Technical Limitations:
+
+- **Data Size Limitations 🔴:**
+ - Each CR is capped at 1.5 MB by default. Raising this limit is possible but impacts cluster performance.
+ - Kubernetes ETCD has a storage cap of 2 GB by default. Adjusting this limit affects the cluster globally, with potential performance degradation.
+
+- **API Server Load Considerations 🟡:**
+ - The Kubernetes API server is designed to handle infrastructure-level requests.
+ - Storing application data in CRs might add significant load to the API server, requiring it to be scaled appropriately to handle both infrastructure and application demands.
+ - This added load can impact cluster performance and increase operational complexity.
+
+- **Guarantees 🟡:**
+ - Efficient queries are hard to implement and there is no support for them.
+  - ACID properties are challenging to leverage; transactional guarantees mostly hold only in read-only mode.
+
+#### Operational Impact:
+
+- **Lost Flexibility 🟡:**
+ - Modifying application data requires complex YAML editing and full redeployment.
+ - This contrasts with traditional databases that often feature user-friendly web UIs or APIs for real-time updates.
+
+- **Infrastructure Complexity 🟠:**
+ - Backup, restore, and lifecycle management for application data are typically separate from deployment workflows.
+ - Storing both in ETCD mixes these concerns, complicating operations and standardization.
+
+#### Security:
+
+- **Governance and Security 🔴:**
+ - Sensitive data stored in plain YAML may lack adequate encryption or access controls.
+ - Applying governance policies over text-based files can become a significant challenge.
+
+---
+
+### When Might Using CRs Make Sense?
+
+For small, safe subsets of data—such as application configurations—using CRs might be appropriate. However, this approach requires a detailed evaluation of the trade-offs.
+
+---
+
+### Conclusion
+
+While it’s tempting to unify application data with infrastructure control via CRs, this introduces risks that can outweigh the benefits. For most applications, separating concerns by using a dedicated database is the more robust, scalable, and manageable solution.
+
+---
+
+### A Practical Example
+
+A typical “user” described in JSON:
+
+```json
+{
+ "username": "myname",
+ "enabled": true,
+ "email": "myname@test.com",
+ "firstName": "MyFirstName",
+ "lastName": "MyLastName",
+ "credentials": [
+ {
+ "type": "password",
+ "value": "test"
+ },
+ {
+ "type": "token",
+ "value": "oidc"
+ }
+ ],
+ "realmRoles": [
+ "user",
+ "viewer",
+ "admin"
+ ],
+ "clientRoles": {
+ "account": [
+ "view-profile",
+ "change-group",
+ "manage-account"
+ ]
+ }
+}
+```
+
+This example represents about **0.5 KB of data**, meaning (with standard settings) a maximum of ~2000 users can be defined in the same CR.
+Additionally:
+
+- It contains **sensitive information**, which should be securely stored.
+- Regulatory rules (like GDPR) apply.
+
+---
+
+### References
+
+- [Using etcd as primary store database](https://stackoverflow.com/questions/41063238/using-etcd-as-primary-store-database)
diff --git a/docs/content/en/blog/news/nonssa-vs-ssa.md b/docs/content/en/blog/news/nonssa-vs-ssa.md
new file mode 100644
index 0000000000..8ea7497771
--- /dev/null
+++ b/docs/content/en/blog/news/nonssa-vs-ssa.md
@@ -0,0 +1,117 @@
+---
+title: From legacy approach to server-side apply
+date: 2025-02-25
+author: >-
+ [Attila Mészáros](https://github.com/csviri)
+---
+
+From version 5 of Java Operator SDK [server side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/)
+is a first-class feature and is used by default to update resources.
+As we will see, unfortunately (or fortunately), using it requires changes for your reconciler implementation.
+
+For this reason, we prepared a feature flag, which you can flip if you are not prepared to migrate yet:
+[`ConfigurationService.useSSAToPatchPrimaryResource`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L493)
+
+Setting this flag to false will make the operations done by `UpdateControl` using the former approach (not SSA).
+Similarly, the finalizer handling won't utilize SSA handling.
+The plan is to keep this flag and allow the use of the former approach (non-SSA) also in future releases.
+
+For dependent resources, a separate flag exists (this was true also before v5) to use SSA or not:
+[`ConfigurationService.ssaBasedCreateUpdateMatchForDependentResources`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L373)
+
+
+## Resource handling without and with SSA
+
+Until version 5, changing primary resources through `UpdateControl` did not use server-side apply.
+So usually, the implementation of the reconciler looked something like this:
+
+```java
+
+ @Override
+  public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+ webPage.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(webPage);
+ }
+
+```
+
+In other words, after the reconciliation of managed resources, the reconciler updates the status of the
+primary resource passed as an argument to the reconciler.
+Such changes on the primary are fine since we don't work directly with the cached object, the argument is
+already cloned.
+
+So, how does this change with SSA?
+For SSA, the updates should contain (only) the "fully specified intent".
+In other words, we should only fill in the values we care about.
+In practice, it means creating a **fresh copy** of the resource and setting only what is necessary:
+
+```java
+
+@Override
+public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+
+ WebPage statusPatch = new WebPage();
+ statusPatch.setMetadata(new ObjectMetaBuilder()
+ .withName(webPage.getMetadata().getName())
+ .withNamespace(webPage.getMetadata().getNamespace())
+ .build());
+ statusPatch.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(statusPatch);
+}
+```
+
+Note that we just filled out the status here since we patched the status (not the resource spec).
+Since the status is a sub-resource in Kubernetes, it will only update the status part.
+
+Every controller you register will have its default [field manager](https://kubernetes.io/docs/reference/using-api/server-side-apply/#managers).
+You can override the field manager name using [`ControllerConfiguration.fieldManager`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ControllerConfiguration.java#L89).
+That will set the field manager for the primary resource and dependent resources as well.
+
+## Migrating to SSA
+
+Using the legacy or the new SSA way of resource management works well.
+However, migrating existing resources to SSA might be a challenge.
+We strongly recommend testing the migration, thus implementing an integration test where
+a custom resource is created using the legacy approach and is managed by the new approach.
+
+We prepared an integration test to demonstrate how such migration, even in a simple case, can go wrong,
+and how to fix it.
+
+To fix some cases, you might need to [strip managed fields](https://kubernetes.io/docs/reference/using-api/server-side-apply/#clearing-managedfields)
+from the custom resource.
+
+See [`StatusPatchSSAMigrationIT`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/statuspatchnonlocking/StatusPatchSSAMigrationIT.java) for details.
+
+Feel free to report common issues, so we can prepare some utilities to handle them.
+
+## Optimistic concurrency control
+
+When you create a resource for SSA as mentioned above, the framework will apply changes even if the underlying resource
+or status subresource is changed while the reconciliation was running.
+First, it always forces the conflicts in the background, as advised in the [Kubernetes docs](https://kubernetes.io/docs/reference/using-api/server-side-apply/#using-server-side-apply-in-a-controller).
+In addition, since the resource version is not set, it won't do optimistic locking. If you still
+want to have optimistic locking for the patch, use the resource version of the original resource:
+
+```java
+@Override
+public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+
+ WebPage statusPatch = new WebPage();
+ statusPatch.setMetadata(new ObjectMetaBuilder()
+ .withName(webPage.getMetadata().getName())
+ .withNamespace(webPage.getMetadata().getNamespace())
+ .withResourceVersion(webPage.getMetadata().getResourceVersion())
+ .build());
+ statusPatch.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(statusPatch);
+}
+```
diff --git a/docs/content/en/blog/news/primary-cache-for-next-recon.md b/docs/content/en/blog/news/primary-cache-for-next-recon.md
new file mode 100644
index 0000000000..67326a6f17
--- /dev/null
+++ b/docs/content/en/blog/news/primary-cache-for-next-recon.md
@@ -0,0 +1,92 @@
+---
+title: How to guarantee allocated values for next reconciliation
+date: 2025-05-22
+author: >-
+ [Attila Mészáros](https://github.com/csviri) and [Chris Laprun](https://github.com/metacosm)
+---
+
+We recently released v5.1 of Java Operator SDK (JOSDK). One of the highlights of this release is related to a topic of
+so-called
+[allocated values](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#representing-allocated-values).
+
+To describe the problem, let's say that our controller needs to create a resource that has a generated identifier, i.e.
+a resource whose identifier cannot be directly derived from the custom resource's desired state as specified in its
+`spec` field. To record the fact that the resource was successfully created, and to avoid attempting to
+recreate the resource again in subsequent reconciliations, it is typical for this type of controller to store the
+generated identifier in the custom resource's `status` field.
+
+The Java Operator SDK relies on the informers' cache to retrieve resources. These caches, however, are only guaranteed
+to be eventually consistent. It could happen that, if some other event occurs, that would result in a new
+reconciliation, **before** the update that's been made to our resource status has the chance to be propagated first to
+the cluster and then back to the informer cache, that the resource in the informer cache does **not** contain the latest
+version as modified by the reconciler. This would result in a new reconciliation where the generated identifier would be
+missing from the resource status and, therefore, another attempt to create the resource by the reconciler, which is not
+what we'd like.
+
+Java Operator SDK now provides a utility class [
+`PrimaryUpdateAndCacheUtils`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/PrimaryUpdateAndCacheUtils.java)
+to handle this particular use case. Using that overlay cache, your reconciler is guaranteed to see the most up-to-date
+version of the resource on the next reconciliation:
+
+```java
+
+@Override
+public UpdateControl<StatusPatchCacheCustomResource> reconcile(
+    StatusPatchCacheCustomResource resource,
+    Context<StatusPatchCacheCustomResource> context) {
+
+ // omitted code
+
+ var freshCopy = createFreshCopy(resource); // need fresh copy just because we use the SSA version of update
+ freshCopy
+ .getStatus()
+ .setValue(statusWithAllocatedValue());
+
+ // using the utility instead of update control to patch the resource status
+ var updated =
+ PrimaryUpdateAndCacheUtils.ssaPatchStatusAndCacheResource(resource, freshCopy, context);
+ return UpdateControl.noUpdate();
+}
+```
+
+How does `PrimaryUpdateAndCacheUtils` work?
+There are multiple ways to solve this problem, but ultimately, we only provide the solution described below. If you
+want to dig deep in alternatives, see
+this [PR](https://github.com/operator-framework/java-operator-sdk/pull/2800/files).
+
+The trick is to intercept the resource that the reconciler updated and cache that version in an additional cache on top
+of the informer's cache. Subsequently, if the reconciler needs to read the resource, the SDK will first check if it is
+in the overlay cache and read it from there if present, otherwise read it from the informer's cache. If the informer
+receives an event with a fresh resource, we always remove the resource from the overlay cache, since that is a more
+recent resource. But this **works only** if the reconciler updates the resource using **optimistic locking**.
+If the update fails on conflict, because the resource has already been updated on the cluster before we got
+the chance to get our update in, we simply wait and poll the informer cache until the new resource version from the
+server appears in the informer's cache,
+and then try to apply our updates to the resource again using the updated version from the server, again with optimistic
+locking.
+
+So why is optimistic locking required? We hinted at it above, but the gist of it is that if another party updates the
+resource before we get a chance to, we wouldn't be able to handle the resulting situation correctly in all
+cases. The informer would receive that new event before our own update would get a chance to propagate. Without
+optimistic locking, there wouldn't be a fail-proof way to determine which update should prevail (i.e. which occurred
+first), in particular in the event of the informer losing the connection to the cluster or other edge cases (the joys of
+distributed computing!).
+
+Optimistic locking simplifies the situation and provides us with stronger guarantees: if the update succeeds, then we
+can be sure we have the proper resource version in our caches. The next event will contain our update in all cases.
+Because we know that, we can also be sure that we can evict the cached resource in the overlay cache whenever we receive
+a new event. The overlay cache is only used if the SDK detects that the original resource (i.e. the one before we
+applied our status update in the example above) is still in the informer's cache.
+
+The following diagram sums up the process:
+
+```mermaid
+flowchart TD
+ A["Update Resource with Lock"] --> B{"Is Successful"}
+ B -- Fails on conflict --> D["Poll the Informer cache until resource updated"]
+ D --> A
+ B -- Yes --> n2{"Original resource still in informer cache?"}
+ n2 -- Yes --> C["Cache the resource in overlay cache"]
+ n2 -- No --> n3["Informer cache already contains up-to-date version, do not use overlay cache"]
+```
diff --git a/docs/content/en/blog/releases/_index.md b/docs/content/en/blog/releases/_index.md
new file mode 100644
index 0000000000..dbf2ee1729
--- /dev/null
+++ b/docs/content/en/blog/releases/_index.md
@@ -0,0 +1,4 @@
+---
+title: Releases
+weight: 230
+---
diff --git a/docs/content/en/blog/releases/v5-release-beta1.md b/docs/content/en/blog/releases/v5-release-beta1.md
new file mode 100644
index 0000000000..7dd133cc1d
--- /dev/null
+++ b/docs/content/en/blog/releases/v5-release-beta1.md
@@ -0,0 +1,6 @@
+---
+title: Version 5 Released! (beta1)
+date: 2024-12-06
+---
+
+See release notes [here](v5-release.md).
\ No newline at end of file
diff --git a/docs/content/en/blog/releases/v5-release.md b/docs/content/en/blog/releases/v5-release.md
new file mode 100644
index 0000000000..6d14dfb73a
--- /dev/null
+++ b/docs/content/en/blog/releases/v5-release.md
@@ -0,0 +1,397 @@
+---
+title: Version 5 Released!
+date: 2025-01-06
+---
+
+We are excited to announce that Java Operator SDK v5 has been released. This significant effort contains
+various features and enhancements accumulated since the last major release and required changes in our APIs.
+Within this post, we will go through all the main changes and help you upgrade to this new version, and provide
+a rationale behind the changes if necessary.
+
+We will omit descriptions of changes that should only require simple code updates; please do contact
+us if you encounter issues anyway.
+
+You can see an introduction and some important changes and rationale behind them from [KubeCon](https://youtu.be/V0NYHt2yjcM?t=1238).
+
+## Various Changes
+
+- From this release, the minimal Java version is 17.
+- Various deprecated APIs are removed. Migration should be easy.
+
+## All Changes
+
+You can see all changes [here](https://github.com/operator-framework/java-operator-sdk/compare/v4.9.7...v5.0.0).
+
+## Changes in low-level APIs
+
+### Server Side Apply (SSA)
+
+[Server Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) is now a first-class citizen in
+the framework and
+the default approach for patching the status resource. This means that patching a resource or its status through
+`UpdateControl` and adding
+the finalizer in the background will both use SSA.
+
+Migration from non-SSA based patching to SSA based patching can be problematic. Make sure you test the transition when
+you migrate from an older version of the framework.
+To continue using a non-SSA based approach,
+set [ConfigurationService.useSSAToPatchPrimaryResource](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L462)
+to `false`.
+
+See some identified problematic migration cases and how to handle them
+in [StatusPatchSSAMigrationIT](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/statuspatchnonlocking/StatusPatchSSAMigrationIT.java).
+
+For more detailed description, see our [blog post](../news/nonssa-vs-ssa.md) on SSA.
+
+### Event Sources related changes
+
+#### Multi-cluster support in InformerEventSource
+
+`InformerEventSource` now supports watching remote clusters. You can simply pass a `KubernetesClient` instance
+initialized to connect to a different cluster from the one where the controller runs when configuring your event source.
+See [InformerEventSourceConfiguration.withKubernetesClient](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+
+Such an informer behaves exactly as a regular one. Owner references won't work in this situation, though, so you have to
+specify a `SecondaryToPrimaryMapper` (probably based on labels or annotations).
+
+See related integration
+test [here](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/informerremotecluster)
+
+#### SecondaryToPrimaryMapper now checks resource types
+
+The owner reference based mappers are now checking the type (`kind` and `apiVersion`) of the resource when resolving the
+mapping. This is important
+since a resource may have owner references to a different resource type with the same name.
+
+See implementation
+details [here](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/Mappers.java#L74-L75)
+
+#### InformerEventSource-related changes
+
+There are multiple smaller changes to `InformerEventSource` and related classes:
+
+1. `InformerConfiguration` is renamed
+ to [
+ `InformerEventSourceConfiguration`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+2. `InformerEventSourceConfiguration` doesn't require `EventSourceContext` to be initialized anymore.
+
+#### All EventSource are now ResourceEventSources
+
+The [
+`EventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java)
+abstraction is now always aware of the resources and
+handles accessing (the cached) resources, filtering, and additional capabilities. Before v5, such capabilities were
+present only in a sub-class called `ResourceEventSource`,
+but we decided to merge and remove `ResourceEventSource` since this has a nice impact on other parts of the system in
+terms of architecture.
+
+If you still need to create an `EventSource` that only supports triggering of your reconciler,
+see [
+`TimerEventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/timer/TimerEventSource.java)
+for an example of how this can be accomplished.
+
+#### Naming event sources
+
+[
+`EventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java#L45)
+are now named. This reduces the ambiguity that might have existed when trying to refer to an `EventSource`.
+
+### ControllerConfiguration annotation related changes
+
+You no longer have to annotate the reconciler with `@ControllerConfiguration` annotation.
+This annotation is (one) way to override the default properties of a controller.
+If the annotation is not present, the default values from the annotation are used.
+
+PR: https://github.com/operator-framework/java-operator-sdk/pull/2203
+
+In addition to that, the informer-related configurations are now extracted into
+a separate [
+`@Informer`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/Informer.java)
+annotation within [
+`@ControllerConfiguration`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/ControllerConfiguration.java#L24).
+Hopefully, this makes it more explicit which part of the configuration affects the informer associated with the primary resource.
+Similarly, the same `@Informer` annotation is used when configuring the informer associated with a managed
+`KubernetesDependentResource` via the
+[
+`KubernetesDependent`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependent.java#L33)
+annotation.
+
+### EventSourceInitializer and ErrorStatusHandler are removed
+
+Both the `EventSourceInitializer` and `ErrorStatusHandler` interfaces are removed, and their methods moved directly
+under [
+`Reconciler`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java#L30-L56).
+
+If possible, we try to avoid such marker interfaces since it is hard to deduce related usage just by looking at the
+source code.
+You can now simply override those methods when implementing the `Reconciler` interface.
+
+### Cloning accessing secondary resources
+
+When accessing the secondary resources using [
+`Context.getSecondaryResource(s)(...)`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Context.java#L19-L29),
+the resources are no longer cloned by default, since
+cloning could have an impact on performance. This means that any changes you make are now applied directly to the
+underlying cached resource. This should be avoided since the same resource instance may be
+present for other reconciliation cycles and would
+no longer represent the state on the server.
+
+If you want to still clone resources by default,
+set [
+`ConfigurationService.cloneSecondaryResourcesWhenGettingFromCache`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L484)
+to `true`.
+
+### Removed automated observed generation handling
+
+The automatic observed generation handling feature was removed since it is easy to implement inside the reconciler, but
+it made
+the implementation much more complex, especially if the framework would have to support it both for server side apply
+and client side apply.
+
+You can check a sample implementation of how to do it manually in
+this [integration test](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/manualobservedgeneration/).
+
+## Dependent Resource related changes
+
+### ResourceDiscriminator is removed and related changes
+
+The primary reason `ResourceDiscriminator` was introduced was to cover the case when there are
+more than one dependent resources of a given type associated with a given primary resource. In this situation, JOSDK
+needed a generic mechanism to
+identify which resources on the cluster should be associated with which dependent resource implementation.
+We improved this association mechanism, thus rendering `ResourceDiscriminator` obsolete.
+
+As a replacement, the dependent resource will select the target resource based on the desired state.
+See the generic implementation in [
+`AbstractDependentResource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java#L135-L144).
+Calculating the desired state can be costly and might depend on other resources. For `KubernetesDependentResource`
+it is usually enough to provide the name and namespace (if namespace-scoped) of the target resource, which is what the
+`KubernetesDependentResource` implementation does by default. If you can determine which secondary to target without
+computing the desired state via its associated `ResourceID`, then we encourage you to override the
+[
+`ResourceID targetSecondaryResourceID()`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java#L234-L244)
+method as shown
+in [this example](https://github.com/operator-framework/java-operator-sdk/blob/c7901303c5304e6017d050f05cbb3d4930bdfe44/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledrsametypenodiscriminator/MultipleManagedDependentNoDiscriminatorConfigMap1.java#L24-L35)
+
+### Read-only bulk dependent resources
+
+Read-only bulk dependent resources are now supported; this was a request from multiple users, but it required changes to
+the underlying APIs.
+Please check the documentation for further details.
+
+See also the
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent/readonly).
+
+### Multiple Dependents with Activation Condition
+
+Until now, activation conditions had a limitation that only one condition was allowed for a specific resource type.
+For example, two `ConfigMap` dependent resources, both with activation conditions, were not allowed. The underlying issue
+was with the informer registration process. When an activation condition is evaluated as "met" in the background,
+the informer is registered dynamically for the target resource type. However, we need to avoid registering multiple
+informers of the same kind. To prevent this, the dependent resource must specify
+the [name of the informer](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/multipledependentwithactivation/ConfigMapDependentResource2.java#L12).
+
+See the complete
+example [here](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/multipledependentwithactivation).
+
+### `getSecondaryResource` is Activation condition aware
+
+When an activation condition for a resource type is not met, no associated informer might be registered for that
+resource type. However, in this situation, calling `Context.getSecondaryResource`
+and its alternatives would previously throw an exception. This was, however, rather confusing and a better user
+experience would be to return an empty value instead of throwing an error. We changed this behavior in v5 to make it
+more user-friendly and attempting to retrieve a secondary resource that is gated by an activation condition will now
+return an empty value as if the associated informer existed.
+
+See related [issue](https://github.com/operator-framework/java-operator-sdk/issues/2198) for details.
+
+## Workflow related changes
+
+### `@Workflow` annotation
+
+The managed workflow definition is now a separate `@Workflow` annotation; it is no longer part of
+`@ControllerConfiguration`.
+
+See sample
+usage [here](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageManagedDependentsReconciler.java#L14-L20)
+
+### Explicit workflow invocation
+
+Before v5, the managed dependents part of a workflow would always be reconciled before the primary `Reconciler`
+`reconcile` or `cleanup` methods were called. It is now possible to explicitly ask for a workflow reconciliation in your
+primary `Reconciler`, thus allowing you to control when the workflow is reconciled. This means you can perform all kinds
+of operations - typically validations - before executing the workflow, as shown in the sample below:
+
+```java
+
+@Workflow(explicitInvocation = true,
+ dependents = @Dependent(type = ConfigMapDependent.class))
+@ControllerConfiguration
+public class WorkflowExplicitCleanupReconciler
+ implements Reconciler,
+ Cleaner {
+
+ @Override
+ public UpdateControl reconcile(
+ WorkflowExplicitCleanupCustomResource resource,
+ Context context) {
+
+ context.managedWorkflowAndDependentResourceContext().reconcileManagedWorkflow();
+
+ return UpdateControl.noUpdate();
+ }
+
+ @Override
+ public DeleteControl cleanup(WorkflowExplicitCleanupCustomResource resource,
+ Context context) {
+
+ context.managedWorkflowAndDependentResourceContext().cleanupManageWorkflow();
+ // this can be checked
+ // context.managedWorkflowAndDependentResourceContext().getWorkflowCleanupResult()
+ return DeleteControl.defaultDelete();
+ }
+}
+```
+
+To turn on this mode of execution, set [
+`explicitInvocation`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Workflow.java#L26)
+flag to `true` in the managed workflow definition.
+
+See the following integration tests
+for [
+`invocation`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitinvocation)
+and [
+`cleanup`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitcleanup).
+
+### Explicit exception handling
+
+If an exception happens during a workflow reconciliation, the framework automatically rethrows it by default.
+You can now set [
+`handleExceptionsInReconciler`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Workflow.java#L40)
+to true for a workflow and check the thrown exceptions explicitly
+in the execution results.
+
+```java
+
+@Workflow(handleExceptionsInReconciler = true,
+ dependents = @Dependent(type = ConfigMapDependent.class))
+@ControllerConfiguration
+public class HandleWorkflowExceptionsInReconcilerReconciler
+ implements Reconciler,
+ Cleaner {
+
+ private volatile boolean errorsFoundInReconcilerResult = false;
+ private volatile boolean errorsFoundInCleanupResult = false;
+
+ @Override
+ public UpdateControl reconcile(
+ HandleWorkflowExceptionsInReconcilerCustomResource resource,
+ Context context) {
+
+ errorsFoundInReconcilerResult = context.managedWorkflowAndDependentResourceContext()
+ .getWorkflowReconcileResult().erroredDependentsExist();
+
+ // check errors here:
+ Map errors = context.getErroredDependents();
+
+ return UpdateControl.noUpdate();
+ }
+}
+```
+
+See integration
+test [here](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowsilentexceptionhandling).
+
+### CRDPresentActivationCondition
+
+Activation conditions are typically used to check if the cluster has specific capabilities (e.g., is cert-manager
+available).
+Such a check can be done by verifying if a particular custom resource definition (CRD) is present on the cluster. You
+can now use the generic [
+`CRDPresentActivationCondition`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/CRDPresentActivationCondition.java)
+for this
+purpose, it will check if the CRD of a target resource type of a dependent resource exists on the cluster.
+
+See usage in integration
+test [here](https://github.com/operator-framework/java-operator-sdk/blob/refs/heads/next/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/crdpresentactivation).
+
+## Fabric8 client updated to 7.0
+
+The Fabric8 client has been updated to version 7.0.0. This is a new major version, which implies that some APIs might have
+changed. Please take a look at the [Fabric8 client 7.0.0 migration guide](https://github.com/fabric8io/kubernetes-client/blob/main/doc/MIGRATION-v7.md).
+
+### CRD generator changes
+
+Starting with v5.0 (in accordance with changes made to the Fabric8 client in version 7.0.0), the CRD generator will use the maven plugin instead of the annotation processor as was previously the case.
+In many instances, you can simply configure the plugin by adding the following stanza to your project's POM build configuration:
+
+```xml
+
+ io.fabric8
+ crd-generator-maven-plugin
+ ${fabric8-client.version}
+
+
+
+ generate
+
+
+
+
+
+```
+*NOTE*: If you use the SDK's JUnit extension for your tests, you might also need to configure the CRD generator plugin to access your test `CustomResource` implementations as follows:
+```xml
+
+
+ io.fabric8
+ crd-generator-maven-plugin
+ ${fabric8-client.version}
+
+
+
+ generate
+
+ process-test-classes
+
+ ${project.build.testOutputDirectory}
+ WITH_ALL_DEPENDENCIES_AND_TESTS
+
+
+
+
+
+```
+
+Please refer to the [CRD generator documentation](https://github.com/fabric8io/kubernetes-client/blob/main/doc/CRD-generator.md) for more details.
+
+
+## Experimental
+
+### Check if the following reconciliation is imminent
+
+You can now check if the subsequent reconciliation will happen right after the current one because the SDK has already
+received an event that will trigger a new reconciliation.
+This information is available from
+the [
+`Context`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Context.java#L69).
+
+Note that this could be useful, for example, in situations when a heavy task would be repeated in the follow-up
+reconciliation. In the current
+reconciliation, you can check this flag and return to avoid unneeded processing. Note that this is a semi-experimental
+feature, so please let us know
+if you found this helpful.
+
+```java
+
+@Override
+public UpdateControl reconcile(MyCustomResource resource, Context context) {
+
+ if (context.isNextReconciliationImminent()) {
+ // your logic, maybe return?
+ }
+}
+```
+
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/nextreconciliationimminent).
\ No newline at end of file
diff --git a/docs/content/en/community/_index.md b/docs/content/en/community/_index.md
new file mode 100644
index 0000000000..fa42c2d974
--- /dev/null
+++ b/docs/content/en/community/_index.md
@@ -0,0 +1,6 @@
+---
+title: Community
+menu: {main: {weight: 3}}
+---
+
+
diff --git a/docs/content/en/docs/_index.md b/docs/content/en/docs/_index.md
new file mode 100755
index 0000000000..5c7b74ab4b
--- /dev/null
+++ b/docs/content/en/docs/_index.md
@@ -0,0 +1,6 @@
+---
+title: Documentation
+linkTitle: Docs
+menu: {main: {weight: 1}}
+weight: 1
+---
diff --git a/docs/content/en/docs/contributing/_index.md b/docs/content/en/docs/contributing/_index.md
new file mode 100644
index 0000000000..0ab40d55b1
--- /dev/null
+++ b/docs/content/en/docs/contributing/_index.md
@@ -0,0 +1,68 @@
+---
+title: Contributing
+weight: 110
+---
+
+Thank you for considering contributing to the Java Operator SDK project! We're building a vibrant community and need help from people like you to make it happen.
+
+## Code of Conduct
+
+We're committed to making this a welcoming, inclusive project. We do not tolerate discrimination, aggressive or insulting behavior.
+
+This project and all participants are bound by our [Code of Conduct]({{baseurl}}/coc). By participating, you're expected to uphold this code. Please report unacceptable behavior to any project admin.
+
+## Reporting Bugs
+
+Found a bug? Please [open an issue](https://github.com/java-operator-sdk/java-operator-sdk/issues)! Include all details needed to recreate the problem:
+
+- Operator SDK version being used
+- Exact platform and version you're running on
+- Steps to reproduce the bug
+- Reproducer code (very helpful for quick diagnosis and fixes)
+
+## Contributing Features and Documentation
+
+Looking for something to work on? Check the issue tracker, especially items labeled [good first issue](https://github.com/java-operator-sdk/java-operator-sdk/labels/good%20first%20issue). Please comment on the issue when you start work to avoid duplicated effort.
+
+### Feature Ideas
+
+Have a feature idea? Open an issue labeled "enhancement" even if you can't work on it immediately. We'll discuss it as a community and see what's possible.
+
+**Important**: Some features may not align with project goals. Please discuss new features before starting work to avoid wasted effort. We commit to listening to all proposals and working something out when possible.
+
+### Development Process
+
+Once you have approval to work on a feature:
+1. Communicate progress via issue updates or our [Discord channel](https://discord.gg/DacEhAy)
+2. Ask for feedback and pointers as needed
+3. Open a Pull Request when ready
+
+## Pull Request Process
+
+### Commit Messages
+Format commit messages following [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) format.
+
+### Testing and Review
+- GitHub Actions will run the test suite on your PR
+- All code must pass tests
+- New code must include new tests
+- All PRs require review and sign-off from another developer
+- Expect requests for changes - this is normal and part of the process
+- PRs must comply with Java Google code style
+
+### Licensing
+All Operator SDK code is released under the [Apache 2.0 licence](LICENSE).
+
+## Development Environment Setup
+
+### Code Style
+
+SDK modules and samples follow Java Google code style. Code gets formatted automatically on every `compile`, but to avoid PR rejections due to style issues, set up your IDE:
+
+**IntelliJ IDEA**: Install the [google-java-format](https://plugins.jetbrains.com/plugin/8527-google-java-format) plugin
+
+**Eclipse**: Follow [these instructions](https://github.com/google/google-java-format?tab=readme-ov-file#eclipse)
+
+## Acknowledgments
+
+These guidelines were inspired by [Atom](https://github.com/atom/atom/blob/master/CONTRIBUTING.md), [PurpleBooth's advice](https://gist.github.com/PurpleBooth/b24679402957c63ec426), and the [Contributor Covenant](https://www.contributor-covenant.org/).
diff --git a/docs/content/en/docs/documentation/_index.md b/docs/content/en/docs/documentation/_index.md
new file mode 100644
index 0000000000..59373c6974
--- /dev/null
+++ b/docs/content/en/docs/documentation/_index.md
@@ -0,0 +1,25 @@
+---
+title: Documentation
+weight: 40
+---
+
+# JOSDK Documentation
+
+This section contains detailed documentation for all Java Operator SDK features and concepts. Whether you're building your first operator or need advanced configuration options, you'll find comprehensive guides here.
+
+## Core Concepts
+
+- **[Implementing a Reconciler](reconciler/)** - The heart of any operator
+- **[Architecture](architecture/)** - How JOSDK works under the hood
+- **[Dependent Resources & Workflows](dependent-resource-and-workflows/)** - Managing resource relationships
+- **[Configuration](configuration/)** - Customizing operator behavior
+- **[Error Handling & Retries](error-handling-retries/)** - Managing failures gracefully
+
+## Advanced Features
+
+- **[Eventing](eventing/)** - Understanding the event-driven model
+- **[Accessing Resources in Caches](working-with-es-caches/)** - How to access resources in caches
+- **[Observability](observability/)** - Monitoring and debugging your operators
+- **[Other Features](features/)** - Additional capabilities and integrations
+
+Each guide includes practical examples and best practices to help you build robust, production-ready operators.
diff --git a/docs/content/en/docs/documentation/architecture.md b/docs/content/en/docs/documentation/architecture.md
new file mode 100644
index 0000000000..4108849c04
--- /dev/null
+++ b/docs/content/en/docs/documentation/architecture.md
@@ -0,0 +1,36 @@
+---
+title: Architecture and Internals
+weight: 85
+---
+
+This document provides an overview of the Java Operator SDK's internal structure and components to help developers understand and contribute to the project. While not a comprehensive reference, it introduces core concepts that should make other components easier to understand.
+
+## The Big Picture and Core Components
+
+
+
+An [Operator](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/Operator.java) is a set of independent [controllers](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/Controller.java).
+
+The `Controller` class is an internal class managed by the framework and typically shouldn't be interacted with directly. It manages all processing units involved with reconciling a single type of Kubernetes resource.
+
+### Core Components
+
+- **[Reconciler](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java)** - The primary entry point for developers to implement reconciliation logic
+- **[EventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java)** - Represents a source of events that might trigger reconciliation
+- **[EventSourceManager](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventSourceManager.java)** - Aggregates all event sources for a controller and manages their lifecycle
+- **[ControllerResourceEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/controller/ControllerResourceEventSource.java)** - Central event source that watches primary resources associated with a given controller for changes, propagates events and caches state
+- **[EventProcessor](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventProcessor.java)** - Processes incoming events sequentially per resource while allowing concurrent overall processing. Handles rescheduling and retrying
+- **[ReconcilerDispatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/ReconciliationDispatcher.java)** - Dispatches requests to appropriate `Reconciler` methods and handles reconciliation results, making necessary Kubernetes API calls
+
+## Typical Workflow
+
+A typical workflow follows these steps:
+
+1. **Event Generation**: An `EventSource` produces an event and propagates it to the `EventProcessor`
+2. **Resource Reading**: The resource associated with the event is read from the internal cache
+3. **Reconciliation Submission**: If the resource isn't already being processed, a reconciliation request is submitted to the executor service in a different thread (encapsulated in a `ControllerExecution` instance)
+4. **Dispatching**: The `ReconcilerDispatcher` is called, which dispatches the call to the appropriate `Reconciler` method with all required information
+5. **Reconciler Execution**: Once the `Reconciler` completes, the `ReconcilerDispatcher` makes appropriate Kubernetes API server calls based on the returned result
+6. **Finalization**: The `EventProcessor` is called back to finalize execution and update the controller's state
+7. **Rescheduling Check**: The `EventProcessor` checks if the request needs rescheduling or retrying, and whether subsequent events were received for the same resource
+8. **Completion**: When no further action is needed, event processing is finished
diff --git a/docs/content/en/docs/documentation/configuration.md b/docs/content/en/docs/documentation/configuration.md
new file mode 100644
index 0000000000..888804628f
--- /dev/null
+++ b/docs/content/en/docs/documentation/configuration.md
@@ -0,0 +1,154 @@
+---
+title: Configurations
+weight: 55
+---
+
+The Java Operator SDK (JOSDK) provides abstractions that work great out of the box. However, we recognize that default behavior isn't always suitable for every use case. Numerous configuration options help you tailor the framework to your specific needs.
+
+Configuration options operate at several levels:
+- **Operator-level** using `ConfigurationService`
+- **Reconciler-level** using `ControllerConfiguration`
+- **DependentResource-level** using the `DependentResourceConfigurator` interface
+- **EventSource-level** where some event sources (like `InformerEventSource`) need fine-tuning to identify which events trigger the associated reconciler
+
+## Operator-Level Configuration
+
+Configuration that impacts the entire operator is performed via the `ConfigurationService` class. `ConfigurationService` is an abstract class with different implementations based on which framework flavor you use (e.g., Quarkus Operator SDK replaces the default implementation). Configurations initialize with sensible defaults but can be changed during initialization.
+
+For example, to disable CRD validation on startup and configure leader election:
+
+```java
+Operator operator = new Operator( override -> override
+ .checkingCRDAndValidateLocalModel(false)
+ .withLeaderElectionConfiguration(new LeaderElectionConfiguration("bar", "barNS")));
+```
+
+## Reconciler-Level Configuration
+
+While reconcilers are typically configured using the `@ControllerConfiguration` annotation, you can also override configuration at runtime when registering the reconciler with the operator. You can either:
+- Pass a completely new `ControllerConfiguration` instance
+- Override specific aspects using a `ControllerConfigurationOverrider` `Consumer` (preferred)
+
+```java
+Operator operator;
+Reconciler reconciler;
+...
+operator.register(reconciler, configOverrider ->
+ configOverrider.withFinalizer("my-nifty-operator/finalizer").withLabelSelector("foo=bar"));
+```
+
+## Dynamically Changing Target Namespaces
+
+A controller can be configured to watch a specific set of namespaces in addition to the
+namespace in which it is currently deployed or the whole cluster. The framework supports
+dynamically changing the list of these namespaces while the operator is running.
+When a reconciler is registered, an instance of
+[`RegisteredController`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ec37025a15046d8f409c77616110024bf32c3416/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/RegisteredController.java#L5)
+is returned, providing access to the methods allowing users to change watched namespaces as the
+operator is running.
+
+A typical scenario would probably involve extracting the list of target namespaces from a
+`ConfigMap` or some other input but this part is out of the scope of the framework since this is
+use-case specific. For example, reacting to changes to a `ConfigMap` would probably involve
+registering an associated `Informer` and then calling the `changeNamespaces` method on
+`RegisteredController`.
+
+```java
+
+public static void main(String[] args) {
+ KubernetesClient client = new DefaultKubernetesClient();
+ Operator operator = new Operator(client);
+ RegisteredController registeredController = operator.register(new WebPageReconciler(client));
+ operator.installShutdownHook();
+ operator.start();
+
+ // call registeredController further while operator is running
+}
+
+```
+
+If watched namespaces change for a controller, it might be desirable to propagate these changes to
+`InformerEventSources` associated with the controller. In order to express this,
+`InformerEventSource` implementations interested in following such changes need to be
+configured appropriately so that the `followControllerNamespaceChanges` method returns `true`:
+
+```java
+
+@ControllerConfiguration
+public class MyReconciler implements Reconciler {
+
+ @Override
+ public Map prepareEventSources(
+ EventSourceContext context) {
+
+ InformerEventSource configMapES =
+ new InformerEventSource<>(InformerEventSourceConfiguration.from(ConfigMap.class, TestCustomResource.class)
+ .withNamespacesInheritedFromController(context)
+ .build(), context);
+
+ return EventSourceUtils.nameEventSources(configMapES);
+ }
+
+}
+```
+
+As seen in the above code snippet, the informer will have the initial namespaces inherited from the
+controller, but will also adjust the target namespaces if they change for the controller.
+
+See also
+the [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/changenamespace)
+for this feature.
+
+## DependentResource-level configuration
+
+It is possible to define custom annotations to configure custom `DependentResource` implementations. In order to provide
+such a configuration mechanism for your own `DependentResource` implementations, they must be annotated with the
+`@Configured` annotation. This annotation defines 3 fields that tie everything together:
+
+- `by`, which specifies which annotation class will be used to configure your dependents,
+- `with`, which specifies the class holding the configuration object for your dependents and
+- `converter`, which specifies the `ConfigurationConverter` implementation in charge of converting the annotation
+ specified by the `by` field into objects of the class specified by the `with` field.
+
+`ConfigurationConverter` instances implement a single `configFrom` method, which will receive, as expected, the
+annotation instance annotating the dependent resource instance to be configured, but it can also extract information
+from the `DependentResourceSpec` instance associated with the `DependentResource` class so that metadata from it can be
+used in the configuration, as well as the parent `ControllerConfiguration`, if needed. The role of
+`ConfigurationConverter` implementations is to extract the annotation information, augment it with metadata from the
+`DependentResourceSpec` and the configuration from the parent controller on which the dependent is defined, to finally
+create the configuration object that the `DependentResource` instances will use.
+
+However, one last element is required to finish the configuration process: the target `DependentResource` class must
+implement the `ConfiguredDependentResource` interface, parameterized with the annotation class defined by the
+`@Configured` annotation `by` field. This interface is called by the framework to inject the configuration at the
+appropriate time and retrieve the configuration, if it's available.
+
+For example, `KubernetesDependentResource`, a core implementation that the framework provides, can be configured via the
+`@KubernetesDependent` annotation. This setup is configured as follows:
+
+```java
+
+@Configured(
+ by = KubernetesDependent.class,
+ with = KubernetesDependentResourceConfig.class,
+ converter = KubernetesDependentConverter.class)
+public abstract class KubernetesDependentResource
+ extends AbstractEventSourceHolderDependentResource>
+ implements ConfiguredDependentResource> {
+ // code omitted
+}
+```
+
+The `@Configured` annotation specifies that `KubernetesDependentResource` instances can be configured by using the
+`@KubernetesDependent` annotation, which gets converted into a `KubernetesDependentResourceConfig` object by a
+`KubernetesDependentConverter`. That configuration object is then injected by the framework in the
+`KubernetesDependentResource` instance, after it's been created, because the class implements the
+`ConfiguredDependentResource` interface, properly parameterized.
+
+For more information on how to use this feature, we recommend looking at how this mechanism is implemented for
+`KubernetesDependentResource` in the core framework, `SchemaDependentResource` in the samples or `CustomAnnotationDep`
+in the `BaseConfigurationServiceTest` test class.
+
+## EventSource-level configuration
+
+TODO
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md
new file mode 100644
index 0000000000..9446f7ceca
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md
@@ -0,0 +1,9 @@
+---
+title: Dependent resources and workflows
+weight: 70
+---
+
+Dependent resources and workflows are features sometimes referred to as higher-level
+abstractions. These two related concepts provide an abstraction
+over the reconciliation of a single resource (dependent resource) and the
+orchestration of such resources (workflows).
\ No newline at end of file
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md
new file mode 100644
index 0000000000..7416949869
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md
@@ -0,0 +1,465 @@
+---
+title: Dependent resources
+weight: 75
+---
+
+## Motivations and Goals
+
+Most operators need to deal with secondary resources when trying to realize the desired state
+described by the primary resource they are in charge of. For example, the Kubernetes-native
+`Deployment` controller needs to manage `ReplicaSet` instances as part of a `Deployment`'s
+reconciliation process. In this instance, `ReplicaSet` is considered a secondary resource for
+the `Deployment` controller.
+
+Controllers that deal with secondary resources typically need to perform the following steps, for
+each secondary resource:
+
+```mermaid
+flowchart TD
+
+compute[Compute desired secondary resource based on primary state] --> A
+A{Secondary resource exists?}
+A -- Yes --> match
+A -- No --> Create --> Done
+
+match{Matches desired state?}
+match -- Yes --> Done
+match -- No --> Update --> Done
+```
+
+While these steps are not difficult in and of themselves, there are some subtleties that can lead to
+bugs or sub-optimal code if not done right. As this process is pretty much similar for each
+dependent resource, it makes sense for the SDK to offer some level of support to remove the
+boilerplate code associated with encoding these repetitive actions. It should
+be possible to handle common cases (such as dealing with Kubernetes-native secondary resources) in a
+semi-declarative way with only a minimal amount of code, JOSDK taking care of wiring everything
+accordingly.
+
+Moreover, in order for your reconciler to get informed of events on these secondary resources, you
+need to configure and create event sources and maintain them. JOSDK already makes it rather easy
+to deal with these, but dependent resources make it even simpler.
+
+Finally, there are also opportunities for the SDK to transparently add features that are even
+trickier to get right, such as immediate caching of updated or created resources (so that your
+reconciler doesn't need to wait for a cluster roundtrip to continue its work) and associated
+event filtering (so that something your reconciler just changed doesn't re-trigger a
+reconciliation, for example).
+
+## Design
+
+### `DependentResource` vs. `AbstractDependentResource`
+
+The new
+[`DependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/dependent/DependentResource.java)
+interface lies at the core of the design and strives to encapsulate the logic that is required
+to reconcile the state of the associated secondary resource based on the state of the primary
+one. For most cases, this logic will follow the flow expressed above and JOSDK provides a very
+convenient implementation of this logic in the form of the
+[`AbstractDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java)
+class. If your logic doesn't fit this pattern, though, you can still provide your
+own `reconcile` method implementation. While the benefits of using dependent resources are less
+obvious in that case, this allows you to separate the logic necessary to deal with each
+secondary resource in its own class that can then be tested in isolation via unit tests. You can
+also use the declarative support with your own implementations as we shall see later on.
+
+`AbstractDependentResource` is designed so that classes extending it specify which functionality
+they support by implementing trait interfaces. This design has been selected to express the fact
+that not all secondary resources are completely under the control of the primary reconciler:
+some dependent resources are only ever created or updated for example and we needed a way to let
+JOSDK know when that is the case. We therefore provide trait interfaces: `Creator`,
+`Updater` and `Deleter` to express that the `DependentResource` implementation will provide custom
+functionality to create, update and delete its associated secondary resources, respectively. If
+these traits are not implemented then parts of the logic described above are never triggered: if
+your implementation doesn't implement `Creator`, for example, `AbstractDependentResource` will
+never try to create the associated secondary resource, even if it doesn't exist. It is even
+possible to not implement any of these traits and therefore create read-only dependent resources
+that will trigger your reconciler whenever a user interacts with them but that are never
+modified by your reconciler itself - however note that read-only dependent resources rarely make
+sense, as it is usually simpler to register an event source for the target resource.
+
+All subclasses
+of [`AbstractDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java)
+can also implement
+the [`Matcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/Matcher.java)
+interface to customize how the SDK decides whether or not the actual state of the dependent
+matches the desired state. This makes it convenient to use these abstract base classes for your
+implementation, only customizing the matching logic. Note that in many cases, there is no need
+to customize that logic as the SDK already provides convenient default implementations in the
+form
+of [`DesiredEqualsMatcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/DesiredEqualsMatcher.java)
+and
+[`GenericKubernetesResourceMatcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesResourceMatcher.java)
+implementations, respectively. If you want to provide custom logic, you only need your
+`DependentResource` implementation to implement the `Matcher` interface as below, which shows
+how to customize the default matching logic for Kubernetes resources to also consider annotations
+and labels, which are ignored by default:
+
+```java
+public class MyDependentResource extends KubernetesDependentResource
+ implements Matcher {
+ // your implementation
+
+ public Result match(MyDependent actualResource, MyPrimary primary,
+ Context context) {
+ return GenericKubernetesResourceMatcher.match(this, actualResource, primary, context, true);
+ }
+}
+```
+
+### Batteries included: convenient DependentResource implementations!
+
+JOSDK also offers several other convenient implementations building on top of
+`AbstractDependentResource` that you can use as starting points for your own implementations.
+
+One such implementation is the `KubernetesDependentResource` class that makes it really easy to work
+with Kubernetes-native resources. In this case, you usually only need to provide an implementation
+for the `desired` method to tell JOSDK what the desired state of your secondary resource should
+be based on the specified primary resource state.
+
+JOSDK takes care of everything else using default implementations that you can override in case you
+need more precise control of what's going on.
+
+We also provide implementations that make it easy to cache
+(`AbstractExternalDependentResource`) or poll for changes in external resources
+(`PollingDependentResource`, `PerResourcePollingDependentResource`). All the provided
+implementations can be found in the `io/javaoperatorsdk/operator/processing/dependent` package of
+the `operator-framework-core` module.
+
+### Sample Kubernetes Dependent Resource
+
+A typical use case, when a Kubernetes resource is fully managed - Created, Read, Updated and
+Deleted (or set to be garbage collected). The following example shows how to create a
+`Deployment` dependent resource:
+
+```java
+
+@KubernetesDependent(informer = @Informer(labelSelector = SELECTOR))
+class DeploymentDependentResource extends CRUDKubernetesDependentResource {
+
+ @Override
+ protected Deployment desired(WebPage webPage, Context context) {
+ var deploymentName = deploymentName(webPage);
+ Deployment deployment = loadYaml(Deployment.class, getClass(), "deployment.yaml");
+ deployment.getMetadata().setName(deploymentName);
+ deployment.getMetadata().setNamespace(webPage.getMetadata().getNamespace());
+ deployment.getSpec().getSelector().getMatchLabels().put("app", deploymentName);
+
+ deployment.getSpec().getTemplate().getMetadata().getLabels()
+ .put("app", deploymentName);
+ deployment.getSpec().getTemplate().getSpec().getVolumes().get(0)
+ .setConfigMap(new ConfigMapVolumeSourceBuilder().withName(configMapName(webPage)).build());
+ return deployment;
+ }
+}
+```
+
+The only thing that you need to do is to extend the `CRUDKubernetesDependentResource` and
+specify the desired state for your secondary resources based on the state of the primary one. In
+the example above, we're handling the state of a `Deployment` secondary resource associated with
+a `WebPage` custom (primary) resource.
+
+The `@KubernetesDependent` annotation can be used to further configure **managed** dependent
+resources that extend `KubernetesDependentResource`.
+
+See the full source
+code [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/dependentresource/DeploymentDependentResource.java)
+.
+
+## Managed Dependent Resources
+
+As mentioned previously, one goal of this implementation is to make it possible to declaratively
+create and wire dependent resources. You can annotate your reconciler with `@Dependent`
+annotations that specify which `DependentResource` implementation it depends upon.
+JOSDK will take the appropriate steps to wire everything together and call your
+`DependentResource` implementations' `reconcile` method before your primary resource is reconciled.
+This makes sense in most use cases where the logic associated with the primary resource is
+usually limited to status handling based on the state of the secondary resources and the
+resources are not dependent on each other. As an alternative, you can also invoke reconciliation explicitly,
+even for managed workflows.
+
+See [Workflows](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/workflows/) for more details on how the dependent
+resources are reconciled.
+
+This behavior and automated handling is referred to as "managed" because the `DependentResource`
+instances are managed by JOSDK, an example of which can be seen below:
+
+```java
+
+@Workflow(
+ dependents = {
+ @Dependent(type = ConfigMapDependentResource.class),
+ @Dependent(type = DeploymentDependentResource.class),
+ @Dependent(type = ServiceDependentResource.class),
+ @Dependent(
+ type = IngressDependentResource.class,
+ reconcilePrecondition = ExposedIngressCondition.class)
+ })
+public class WebPageManagedDependentsReconciler
+ implements Reconciler, ErrorStatusHandler {
+
+ // omitted code
+
+ @Override
+ public UpdateControl reconcile(WebPage webPage, Context context) {
+
+ final var name = context.getSecondaryResource(ConfigMap.class).orElseThrow()
+ .getMetadata().getName();
+ webPage.setStatus(createStatus(name));
+ return UpdateControl.patchStatus(webPage);
+ }
+}
+```
+
+See the full source code of
+sample [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageManagedDependentsReconciler.java)
+.
+
+## Standalone Dependent Resources
+
+It is also possible to wire dependent resources programmatically. In practice this means that the
+developer is responsible for initializing and managing the dependent resources as well as calling
+their `reconcile` method. However, this makes it possible for developers to fully customize the
+reconciliation process. Standalone dependent resources should be used in cases when the managed use
+case does not fit. You can, of course, also use [Workflows](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/workflows/) when managing
+resources programmatically.
+
+You can see a commented example of how to do
+so [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageStandaloneDependentsReconciler.java).
+
+## Creating/Updating Kubernetes Resources
+
+From version 4.4 of the framework the resources are created and updated
+using [Server Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/)
+, thus the desired state is simply sent using this approach to update the actual resource.
+
+## Comparing desired and actual state (matching)
+
+During the reconciliation of a dependent resource, the desired state is matched with the actual
+state from the caches. The dependent resource only gets updated on the server if the actual,
+observed state differs from the desired one. Comparing these two states is a complex problem
+when dealing with Kubernetes resources because a strict equality check is usually not what is
+wanted due to the fact that multiple fields might be automatically updated or added by
+the platform (
+by [dynamic admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
+or validation webhooks, for example). Solving this problem in a generic way is therefore a tricky
+proposition.
+
+JOSDK provides such a generic matching implementation which is used by default:
+[SSABasedGenericKubernetesResourceMatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/SSABasedGenericKubernetesResourceMatcher.java).
+This implementation relies on the managed fields used by the Server Side Apply feature to
+compare only the values of the fields that the controller manages. This ensures that only
+semantically relevant fields are compared. See javadoc for further details.
+
+JOSDK versions prior to 4.4 were using a different matching algorithm as implemented in
+[GenericKubernetesResourceMatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/e16559fd41bbb8bef6ce9d1f47bffa212a941b09/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesResourceMatcher.java).
+
+Since SSA is a complex feature, JOSDK implements a feature flag allowing users to switch between
+these implementations. See
+in [ConfigurationService](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L332-L358).
+
+It is, however, important to note that these are default, generic
+implementations provided so that the framework can offer expected behavior out of the box. In many
+situations, these will work just fine but it is also possible to provide matching algorithms
+optimized for specific use cases. This is easily done by simply overriding
+the `match(...)` [method](https://github.com/java-operator-sdk/java-operator-sdk/blob/e16559fd41bbb8bef6ce9d1f47bffa212a941b09/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java#L156-L156).
+
+It is also possible to bypass the matching logic altogether to simply rely on the server-side
+apply mechanism if always sending potentially unchanged resources to the cluster is not an issue.
+JOSDK's matching mechanism allows you to spare some potentially useless calls to the Kubernetes API
+server. To bypass the matching feature completely, simply override the `match` method to always
+return `false`, thus telling JOSDK that the actual state never matches the desired one, making
+it always update the resources using SSA.
+
+WARNING: Older versions of Kubernetes before 1.25 would create an additional resource version for every SSA update
+performed with certain resources - even though there were no actual changes in the stored resource - leading to infinite
+reconciliations. This behavior was seen with Secrets using `stringData`, Ingresses using empty string fields, and
+StatefulSets using volume claim templates. The operator framework has added built-in handling for the StatefulSet issue.
+If you encounter this issue on an older Kubernetes version, consider changing your desired state, turning off SSA for
+that resource, or even upgrading your Kubernetes version. If you encounter it on a newer Kubernetes version, please log
+an issue with the JOSDK and with upstream Kubernetes.
+
+## Telling JOSDK how to find which secondary resources are associated with a given primary resource
+
+[`KubernetesDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java)
+automatically maps a secondary resource to its primary by owner reference. This behavior can be
+customized by implementing
+[`SecondaryToPrimaryMapper`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/SecondaryToPrimaryMapper.java)
+by the dependent resource.
+
+See sample in one of the integration
+tests [here](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/primaryindexer)
+.
+
+## Multiple Dependent Resources of Same Type
+
+When dealing with multiple dependent resources of the same type, the dependent resource implementation
+needs to know which specific resource should be targeted when reconciling a given dependent
+resource, since there could be multiple instances of that type which could possibly be used, each
+associated with the same primary resource. In this situation, JOSDK automatically selects the appropriate secondary
+resource matching the desired state associated with the primary resource. This makes sense because the desired
+state computation already needs to be able to discriminate among multiple related secondary resources to tell JOSDK how
+they should be reconciled.
+
+There might be cases, though, where it might be problematic to call the `desired` method several times (for example, because it is costly to do so).
+In such cases, it is always possible to override this automated discrimination using several means (considered in this priority order):
+
+- Override the `targetSecondaryResourceID` method, if your `DependentResource` extends `KubernetesDependentResource`,
+ where it's very often possible to easily determine the `ResourceID` of the secondary resource. This would probably be
+ the easiest solution if you're working with Kubernetes resources.
+- Override the `selectTargetSecondaryResource` method, if your `DependentResource` extends `AbstractDependentResource`.
+  It should be relatively simple to override this method to optimize the matching to your needs. You can see an
+ example of such an implementation in
+ the [`ExternalWithStateDependentResource`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalWithStateDependentResource.java)
+ class.
+- As last resort, you can implement your own `getSecondaryResource` method on your `DependentResource` implementation from scratch.
+
+### Sharing an Event Source Between Dependent Resources
+
+Dependent resources usually also provide event sources. When dealing with multiple dependents of
+the same type, one needs to decide whether these dependent resources should track the same
+resources and therefore share a common event source, or, to the contrary, track completely
+separate resources, in which case using separate event sources is advised.
+
+Dependents can therefore reuse existing, named event sources by referring to their name. In the
+declarative case, assuming a `configMapSource` `EventSource` has already been declared, this
+would look as follows:
+
+```
+ @Dependent(type = MultipleManagedDependentResourceConfigMap1.class,
+ useEventSourceWithName = "configMapSource")
+```
+
+A sample is provided as an integration test both
+for [managed](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledrsametypenodiscriminator)
+and
+for [standalone](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledependentresource)
+cases.
+
+## Bulk Dependent Resources
+
+So far, all the cases we've considered were dealing with situations where the number of
+dependent resources needed to reconcile the state expressed by the primary resource is known
+when writing the code for the operator. There are, however, cases where the number of dependent
+resources to be created depends on information found in the primary resource.
+
+These cases are covered by the "bulk" dependent resources feature. To create such dependent
+resources, your implementation should extend `AbstractDependentResource` (at least indirectly) and
+implement the
+[`BulkDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/BulkDependentResource.java)
+interface.
+
+Various examples are provided
+as [integration tests](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent)
+.
+
+To see how bulk dependent resources interact with workflow conditions, please refer to this
+[integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent/condition).
+
+## External State Tracking Dependent Resources
+
+It is sometimes necessary for a controller to track external (i.e. non-Kubernetes) state to
+properly manage some dependent resources. For example, your controller might need to track the
+state of a REST API resource, which, after being created, would be referred to by its identifier.
+Such an identifier would need to be tracked by your controller to properly retrieve the state of
+the associated resource and/or assess if such a resource exists. While there are several ways to
+support this use case, we recommend storing such information in a dedicated Kubernetes resource
+(usually a `ConfigMap` or a `Secret`), so that it can be manipulated with common Kubernetes
+mechanisms.
+
+This particular use case is supported by the
+[`AbstractExternalDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractExternalDependentResource.java)
+class that you can extend to suit your needs, as well as implement the
+[`DependentResourceWithExplicitState`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/DependentResourceWithExplicitState.java)
+interface. Note that most of the JOSDK-provided dependent resource implementations such as
+`PollingDependentResource` or `PerResourcePollingDependentResource` already extend
+`AbstractExternalDependentResource`, thus supporting external state tracking out of the box.
+
+See [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalStateDependentIT.java)
+as a sample.
+
+For a better understanding, it might be worth studying
+a [sample implementation](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalStateReconciler.java)
+without dependent resources.
+
+Please also refer to the [docs](/docs/patterns-and-best-practices#managing-state) for managing state in
+general.
+
+## Combining Bulk and External State Tracking Dependent Resources
+
+Both bulk and external state tracking features can be combined. In that
+case, a separate, state-tracking resource will be created for each bulk dependent resource
+created. For example, if three bulk dependent resources associated with external state are created,
+three associated `ConfigMaps` (assuming `ConfigMaps` are used as a state-tracking resource) will
+also be created, one per dependent resource.
+
+See [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/externalstatebulkdependent)
+as a sample.
+
+## GenericKubernetesResource based Dependent Resources
+
+In rare circumstances, resource handling where there is no class representation, or just typeless
+handling, might be needed.
+Fabric8 Client
+provides [GenericKubernetesResource](https://github.com/fabric8io/kubernetes-client/blob/main/doc/CHEATSHEET.md#resource-typeless-api)
+to support that.
+
+For dependent resources this is supported
+by [GenericKubernetesDependentResource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesDependentResource.java#L8-L8)
+. See
+samples [here](https://github.com/java-operator-sdk/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/sample/generickubernetesresource).
+
+## Other Dependent Resource Features
+
+### Caching and Event Handling in [KubernetesDependentResource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java)
+
+1. When a Kubernetes resource is created or updated the related informer (more precisely
+ the `InformerEventSource`), eventually will receive an event and will cache the up-to-date
+ resource. Typically, though, there might be a small time window when calling the
+ `getResource()` of the dependent resource or getting the resource from the `EventSource`
+ itself won't return the just updated resource, in the case where the associated event hasn't
+ been received from the Kubernetes API. The `KubernetesDependentResource` implementation,
+ however, addresses this issue, so you don't have to worry about it by making sure that it or
+ the related `InformerEventSource` always return the up-to-date resource.
+
+2. Another feature of `KubernetesDependentResource` is to make sure that if a resource is created or
+ updated during the reconciliation, this particular change, which normally would trigger the
+ reconciliation again (since the resource has changed on the server), will, in fact, not
+ trigger the reconciliation again since we already know the state is as expected. This is a small
+ optimization. For example if during a reconciliation a `ConfigMap` is updated using dependent
+ resources, this won't trigger a new reconciliation. Such a reconciliation is indeed not
+ needed since the change originated from our reconciler. For this system to work properly,
+ though, it is required that changes are received only by one event source (this is a best
+ practice in general) - so for example if there are two config map dependents, either
+ there should be a shared event source between them, or a label selector on the event sources
+ to select only the relevant events, see
+ in [related integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/orderedmanageddependent/ConfigMapDependentResource2.java)
+ .
+
+## "Read-only" Dependent Resources vs. Event Source
+
+See Integration test for a read-only
+dependent [here](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/primarytosecondaydependent/ConfigMapDependent.java).
+
+Some secondary resources only exist as input for the reconciliation process and are never
+updated *by a controller* (they might, and actually usually do, get updated by users interacting
+with the resources directly, however). This might be the case, for example, of a `ConfigMap` that is
+used to configure common characteristics of multiple resources in one convenient place.
+
+In such situations, one might wonder whether it makes sense to create a dependent resource in
+this case or simply use an `EventSource` so that the primary resource gets reconciled whenever a
+user changes the resource. Typical dependent resources provide a desired state that the
+reconciliation process attempts to match. In the case of so-called read-only dependents, though,
+there is no such desired state because the operator / controller will never update the resource
+itself, just react to external changes to it. An `EventSource` would achieve the same result.
+
+Using a dependent resource for that purpose instead of a simple `EventSource`, however, provides
+several benefits:
+
+- dependents can be created declaratively, while an event source would need to be manually created
+- if dependents are already used in a controller, it makes sense to unify the handling of all
+ secondary resources as dependents from a code organization perspective
+- dependent resources can also interact with the workflow feature, thus allowing the read-only
+ resource to participate in conditions, in particular to decide whether the primary
+ resource needs/can be reconciled using reconcile pre-conditions, block the progression of the workflow altogether with
+ ready post-conditions or have other dependents depend on them, in essence, read-only dependents can participate in
+ workflows just as any other dependents.
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md
new file mode 100644
index 0000000000..c5ee83a446
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md
@@ -0,0 +1,403 @@
+---
+title: Workflows
+weight: 80
+---
+
+## Overview
+
+Kubernetes (k8s) does not have the notion of a resource "depending on" another k8s resource,
+at least not in terms of the order in which these resources should be reconciled. Kubernetes
+operators typically need to reconcile resources in order because these resources' state often
+depends on the state of other resources or cannot be processed until these other resources reach
+a given state or some condition holds true for them. Dealing with such scenarios is therefore
+rather common for operators and the purpose of the workflow feature of the Java Operator SDK
+(JOSDK) is to simplify supporting such cases in a declarative way. Workflows build on top of the
+[dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/) feature.
+While dependent resources focus on how a given secondary resource should be reconciled,
+workflows focus on orchestrating how these dependent resources should be reconciled.
+
+Workflows describe how a set of
+[dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/) (DR) depend on one
+another, along with the conditions that need to hold true at certain stages of the
+reconciliation process.
+
+## Elements of Workflow
+
+- **Dependent resource** (DR) - are the resources being managed in a given reconciliation logic.
+- **Depends-on relation** - a `B` DR depends on another `A` DR if `B` needs to be reconciled
+ after `A`.
+- **Reconcile precondition** - is a condition on a given DR that needs to become true before the
+  DR is reconciled. This also allows defining optional resources that would, for example, only be
+ created if a flag in a custom resource `.spec` has some specific value.
+- **Ready postcondition** - is a condition on a given DR to prevent the workflow from
+ proceeding until the condition checking whether the DR is ready holds true
+- **Delete postcondition** - is a condition on a given DR to check if the reconciliation of
+ dependents can proceed after the DR is supposed to have been deleted
+- **Activation condition** - is a special condition meant to specify under which condition the DR is used in the
+ workflow. A typical use-case for this feature is to only activate some dependents depending on the presence of
+ optional resources / features on the target cluster. Without this activation condition, JOSDK would attempt to
+ register an informer for these optional resources, which would cause an error in the case where the resource is
+ missing. With this activation condition, you can now conditionally register informers depending on whether the
+ condition holds or not. This is a very useful feature when your operator needs to handle different flavors of the
+ platform (e.g. OpenShift vs plain Kubernetes) and/or change its behavior based on the availability of optional
+ resources / features (e.g. CertManager, a specific Ingress controller, etc.).
+
+ A generic activation condition is provided out of the box, called
+ [CRDPresentActivationCondition](https://github.com/operator-framework/java-operator-sdk/blob/ba5e33527bf9e3ea0bd33025ccb35e677f9d44b4/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/CRDPresentActivationCondition.java)
+ that will prevent the associated dependent resource from being activated if the Custom Resource Definition associated
+ with the dependent's resource type is not present on the cluster.
+ See related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/crdpresentactivation).
+
+  Having multiple resources of the same type with an activation condition is a bit tricky: since you
+  don't want to have multiple `InformerEventSource` instances for the same type, you have to explicitly
+  name the informer for the Dependent Resource (`@KubernetesDependent(informerConfig = @InformerConfig(name = "configMapInformer"))`)
+  for all resources of the same type with an activation condition. This will make sure that only one is registered.
+ See details at [low level api](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventSourceRetriever.java#L20-L52).
+
+### Result conditions
+
+While simple conditions are usually enough, it might happen you want to convey extra information as a result of the
+evaluation of the conditions (e.g., to report error messages or because the result of the condition evaluation might be
+interesting for other purposes). In this situation, you should implement `DetailedCondition` instead of `Condition` and
+provide an implementation of the `detailedIsMet` method, which allows you to return a more detailed `Result` object via
+which you can provide extra information. The `DetailedCondition.Result` interface provides factory methods for your
+convenience but you can also provide your own implementation if required.
+
+You can access the results for conditions from the `WorkflowResult` instance that is returned whenever a workflow is
+evaluated. You can access that result from the `ManagedWorkflowAndDependentResourceContext` accessible from the
+reconciliation `Context`. You can then access individual condition results using the `
+getDependentConditionResult` methods. You can see an example of this
+in [this integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowallfeature/WorkflowAllFeatureReconciler.java).
+
+## Defining Workflows
+
+Similarly to dependent resources, there are two ways to define workflows, in managed and standalone
+manner.
+
+### Managed
+
+Annotations can be used to declaratively define a workflow for a `Reconciler`. Similarly to how
+things are done for dependent resources, managed workflows execute before the `reconcile` method
+is called. The result of the reconciliation can be accessed via the `Context` object that is
+passed to the `reconcile` method.
+
+The following sample shows a hypothetical use case to showcase all the elements: the primary
+`TestCustomResource` resource handled by our `Reconciler` defines two dependent resources, a
+`Deployment` and a `ConfigMap`. The `ConfigMap` depends on the `Deployment` so will be
+reconciled after it. Moreover, the `Deployment` dependent resource defines a ready
+post-condition, meaning that the `ConfigMap` will not be reconciled until the condition defined
+by the `Deployment` becomes `true`. Additionally, the `ConfigMap` dependent also defines a
+reconcile pre-condition, so it also won't be reconciled until that condition becomes `true`. The
+`ConfigMap` also defines a delete post-condition, which means that the workflow implementation
+will only consider the `ConfigMap` deleted once that post-condition becomes `true`.
+
+```java
+
+@Workflow(dependents = {
+ @Dependent(name = DEPLOYMENT_NAME, type = DeploymentDependentResource.class,
+ readyPostcondition = DeploymentReadyCondition.class),
+ @Dependent(type = ConfigMapDependentResource.class,
+ reconcilePrecondition = ConfigMapReconcileCondition.class,
+ deletePostcondition = ConfigMapDeletePostCondition.class,
+ activationCondition = ConfigMapActivationCondition.class,
+ dependsOn = DEPLOYMENT_NAME)
+})
+@ControllerConfiguration
+public class SampleWorkflowReconciler implements Reconciler,
+ Cleaner {
+
+ public static final String DEPLOYMENT_NAME = "deployment";
+
+ @Override
+ public UpdateControl reconcile(
+ WorkflowAllFeatureCustomResource resource,
+ Context context) {
+
+ resource.getStatus()
+ .setReady(
+ context.managedWorkflowAndDependentResourceContext() // accessing workflow reconciliation results
+ .getWorkflowReconcileResult()
+ .allDependentResourcesReady());
+ return UpdateControl.patchStatus(resource);
+ }
+
+ @Override
+ public DeleteControl cleanup(WorkflowAllFeatureCustomResource resource,
+ Context context) {
+    // omitted code
+
+ return DeleteControl.defaultDelete();
+ }
+}
+
+```
+
+### Standalone
+
+In this mode, the workflow is built manually
+using [standalone dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/#standalone-dependent-resources)
+. The workflow is created using a builder that is explicitly called in the reconciler (from the web
+page sample):
+
+```java
+
+@ControllerConfiguration(
+ labelSelector = WebPageDependentsWorkflowReconciler.DEPENDENT_RESOURCE_LABEL_SELECTOR)
+public class WebPageDependentsWorkflowReconciler
+ implements Reconciler, ErrorStatusHandler {
+
+ public static final String DEPENDENT_RESOURCE_LABEL_SELECTOR = "!low-level";
+ private static final Logger log =
+ LoggerFactory.getLogger(WebPageDependentsWorkflowReconciler.class);
+
+ private KubernetesDependentResource configMapDR;
+ private KubernetesDependentResource deploymentDR;
+ private KubernetesDependentResource serviceDR;
+ private KubernetesDependentResource ingressDR;
+
+ private final Workflow workflow;
+
+ public WebPageDependentsWorkflowReconciler(KubernetesClient kubernetesClient) {
+ initDependentResources(kubernetesClient);
+ workflow = new WorkflowBuilder()
+ .addDependentResource(configMapDR)
+ .addDependentResource(deploymentDR)
+ .addDependentResource(serviceDR)
+ .addDependentResource(ingressDR).withReconcilePrecondition(new ExposedIngressCondition())
+ .build();
+ }
+
+ @Override
+ public Map prepareEventSources(EventSourceContext context) {
+ return EventSourceUtils.nameEventSources(
+ configMapDR.initEventSource(context),
+ deploymentDR.initEventSource(context),
+ serviceDR.initEventSource(context),
+ ingressDR.initEventSource(context));
+ }
+
+ @Override
+ public UpdateControl reconcile(WebPage webPage, Context context) {
+
+ var result = workflow.reconcile(webPage, context);
+
+ webPage.setStatus(createStatus(result));
+ return UpdateControl.patchStatus(webPage);
+ }
+ // omitted code
+}
+
+```
+
+## Workflow Execution
+
+This section describes how a workflow is executed in detail: how the ordering is determined and
+how conditions and errors affect the behavior. The workflow execution is divided into two parts,
+similarly to how `Reconciler` and `Cleaner` behavior are separated.
+[Cleanup](https://javaoperatorsdk.io/docs/documentation/reconciler/#implementing-a-reconciler-and-cleaner-interfaces) is
+executed if a resource is marked for deletion.
+
+## Common Principles
+
+- **As complete as possible execution** - when a workflow is reconciled, it tries to reconcile as
+ many resources as possible. Thus, if an error happens or a ready condition is not met for a
+  resource, all the other independent resources will still be reconciled. This is the opposite
+ to a fail-fast approach. The assumption is that eventually in this way the overall state will
+ converge faster towards the desired state than would be the case if the reconciliation was
+ aborted as soon as an error occurred.
+- **Concurrent reconciliation of independent resources** - the resources which don't depend on
+  others are processed concurrently. The level of concurrency is customizable, and could be set to
+  one if required. By default, workflows use the executor service
+ from [ConfigurationService](https://github.com/java-operator-sdk/java-operator-sdk/blob/6f2a252952d3a91f6b0c3c38e5e6cc28f7c0f7b3/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L120-L120)
+
+## Reconciliation
+
+This section describes how a workflow is executed, considering first which rules apply, then
+demonstrated using examples:
+
+### Rules
+
+1. A workflow is a Directed Acyclic Graph (DAG) built from the DRs and their associated
+ `depends-on` relations.
+2. Root nodes, i.e. nodes in the graph that do not depend on other nodes are reconciled first,
+ in a parallel manner.
+3. A DR is reconciled if it does not depend on any other DRs, or *ALL* the DRs it depends on are
+ reconciled and ready. If a DR defines a reconcile pre-condition and/or an activation condition,
+   then these conditions must become `true` before the DR is reconciled.
+4. A DR is considered *ready* if it got successfully reconciled and any ready post-condition it
+ might define is `true`.
+5. If a DR's reconcile pre-condition is not met, this DR is deleted. All the DRs that depend
+   on the dependent resource are also recursively deleted. This implies that
+   DRs are deleted in reverse order compared to the one in which they are reconciled (a more
+   detailed blog post about this design decision may follow).
+   The reasoning behind this behavior is as follows: a DR with a reconcile pre-condition is only
+ reconciled if the condition holds `true`. This means that if the condition is `false` and the
+ resource didn't exist already, then the associated resource would not be created. To ensure
+ idempotency (i.e. with the same input state, we should have the same output state), from this
+ follows that if the condition doesn't hold `true` anymore, the associated resource needs to
+ be deleted because the resource shouldn't exist/have been created.
+6. If a DR's activation condition is not met, it won't be reconciled or deleted. If other DRs depend on it, those will
+ be recursively deleted in a way similar to reconcile pre-conditions. Event sources for a dependent resource with
+ activation condition are registered/de-registered dynamically, thus during the reconciliation.
+7. For a DR to be deleted by a workflow, it needs to implement the `Deleter` interface, in which
+ case its `delete` method will be called, unless it also implements the `GarbageCollected`
+ interface. If a DR doesn't implement `Deleter` it is considered as automatically deleted. If
+ a delete post-condition exists for this DR, it needs to become `true` for the workflow to
+ consider the DR as successfully deleted.
+
+### Samples
+
+Notation: The arrows depict reconciliation ordering, thus following the reverse direction of the
+`depends-on` relation:
+`1 --> 2` means `DR 2` depends-on `DR 1`.
+
+#### Reconcile Sample
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+2 --> 4
+3 --> 4
+```
+
+
+- Root nodes (i.e. nodes that don't depend on any others) are reconciled first. In this example,
+ DR `1` is reconciled first since it doesn't depend on others.
+ After that both DR `2` and `3` are reconciled concurrently, then DR `4` once both are
+ reconciled successfully.
+- If DR `2` had a ready condition and if it evaluated to `false`, DR `4` would not be reconciled.
+ However `1`,`2` and `3` would be.
+- If `1` had a `false` ready condition, neither `2`,`3` or `4` would be reconciled.
+- If `2`'s reconciliation resulted in an error, `4` would not be reconciled, but `3`
+ would be (and `1` as well, of course).
+
+#### Sample with Reconcile Precondition
+
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+3 --> 4
+3 --> 5
+```
+
+
+- If `3` has a reconcile pre-condition that is not met, `1` and `2` would be reconciled. However,
+ DR `3`,`4`,`5` would be deleted: `4` and `5` would be deleted concurrently but `3` would only
+ be deleted if `4` and `5` were deleted successfully (i.e. without error) and all existing
+ delete post-conditions were met.
+- If `5` had a delete post-condition that was `false`, `3` would not be deleted but `4`
+ would still be because they don't depend on one another.
+- Similarly, if `5`'s deletion resulted in an error, `3` would not be deleted but `4` would be.
+
+## Cleanup
+
+Cleanup works identically to the deletion of resources during reconciliation when a reconcile
+pre-condition is not met, just applied to the whole workflow.
+
+### Rules
+
+1. Delete is called on a DR if there is no DR that depends on it
+2. If a DR has DRs that depend on it, it will only be deleted if all these DRs are successfully
+ deleted without error and any delete post-condition is `true`.
+3. A DR is "manually" deleted (i.e. its `Deleter.delete` method is called) if it implements the
+ `Deleter` interface but does not implement `GarbageCollected`. If a DR does not implement
+ `Deleter` interface, it is considered as deleted automatically.
+
+### Sample
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+2 --> 4
+3 --> 4
+```
+
+- The DRs are deleted in the following order: `4` is deleted first, then `2` and `3` are deleted
+ concurrently, and, only after both are successfully deleted, `1` is deleted.
+- If `2` had a delete post-condition that was `false`, `1` would not be deleted. `4` and `3`
+ would be deleted.
+- If `2` was in error, DR `1` would not be deleted. DR `4` and `3` would be deleted.
+- if `4` was in error, no other DR would be deleted.
+
+## Error Handling
+
+As mentioned before, if an error happens during a reconciliation, the reconciliation of other
+dependent resources will still happen, assuming they don't depend on the one that failed. In
+case multiple DRs fail, the workflow would throw an
+['AggregatedOperatorException'](https://github.com/java-operator-sdk/java-operator-sdk/blob/86e5121d56ed4ecb3644f2bc8327166f4f7add72/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/AggregatedOperatorException.java)
+containing all the related exceptions.
+
+The exceptions can be handled
+by [`ErrorStatusHandler`](https://github.com/java-operator-sdk/java-operator-sdk/blob/14620657fcacc8254bb96b4293eded84c20ba685/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/ErrorStatusHandler.java)
+
+## Waiting for the actual deletion of Kubernetes Dependent Resources
+
+Let's consider a case when a Kubernetes Dependent Resource (KDR) depends on another resource; on cleanup,
+the resources will be deleted in reverse order, thus the KDR will be deleted first.
+However, the workflow implementation currently simply asks the Kubernetes API server to delete the resource. This is,
+however, an asynchronous process, meaning that the deletion might not occur immediately, in particular if the resource
+uses finalizers that block the deletion or if the deletion itself takes some time. From the SDK's perspective, though,
+the deletion has been requested and it moves on to other tasks without waiting for the resource to be actually deleted
+from the server (which might never occur if it uses finalizers which are not removed).
+In situations like these, if your logic depends on resources being actually removed from the cluster before a
+cleanup workflow can proceed correctly, you need to block the workflow progression using a delete post-condition that
+checks that the resource is actually removed or that it, at least, doesn't have any finalizers any longer. JOSDK
+provides such a delete post-condition implementation in the form of
+[`KubernetesResourceDeletedCondition`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/KubernetesResourceDeletedCondition.java)
+
+Also, check usage in an [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/manageddependentdeletecondition/ManagedDependentDefaultDeleteConditionReconciler.java).
+
+In such cases the Kubernetes Dependent Resource should extend `CRUDNoGCKubernetesDependentResource`
+and NOT `CRUDKubernetesDependentResource` since otherwise the Kubernetes Garbage Collector would delete the resources.
+In other words, if a Kubernetes Dependent Resource depends on another dependent resource, it should not implement
+the `GarbageCollected` interface, otherwise the deletion order won't be guaranteed.
+
+
+## Explicit Managed Workflow Invocation
+
+Managed workflows, i.e. ones that are declared via annotations and therefore completely managed by JOSDK, are reconciled
+before the primary resource. Each dependent resource that can be reconciled (according to the workflow configuration)
+will therefore be reconciled before the primary reconciler is called to reconcile the primary resource. There are,
+however, situations where it would be useful to perform additional steps before the workflow is reconciled, for
+example to validate the current state, execute arbitrary logic or even skip reconciliation altogether. Explicit
+invocation of managed workflow was therefore introduced to solve these issues.
+
+To use this feature, you need to set the `explicitInvocation` field to `true` on the `@Workflow` annotation and then
+call the `reconcileManagedWorkflow` method from the `
+ManagedWorkflowAndDependentResourceContext` retrieved from the reconciliation `Context` provided as part of your primary
+resource reconciler `reconcile` method arguments.
+
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitinvocation)
+for more details.
+
+For `cleanup`, if the `Cleaner` interface is implemented, the `cleanupManageWorkflow()` needs to be called explicitly.
+However, if `Cleaner` interface is not implemented, it will be called implicitly.
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitcleanup).
+
+While nothing prevents calling the workflow multiple times in a reconciler, it isn't typical or even recommended to do
+so. Conversely, if explicit invocation is requested but `reconcileManagedWorkflow` is not called in the primary resource
+reconciler, the workflow won't be reconciled at all.
+
+## Notes and Caveats
+
+- Delete is almost always called on every resource during the cleanup. However, it might be the case
+ that the resources were already deleted in a previous run, or not even created. This should
+ not be a problem, since dependent resources usually cache the state of the resource, so are
+ already aware that the resource does not exist and that nothing needs to be done if delete is
+ called.
+- If a resource has owner references, it will be automatically deleted by the Kubernetes garbage
+  collector if the owner resource is marked for deletion. This might not be desirable; to make
+  sure that deletion is handled by the workflow, don't use a garbage-collected Kubernetes dependent
+  resource — use, for
+ example [`CRUDNoGCKubernetesDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/86e5121d56ed4ecb3644f2bc8327166f4f7add72/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/CRUDNoGCKubernetesDependentResource.java)
+ .
+- No state is persisted regarding the workflow execution. Every reconciliation causes all the
+ resources to be reconciled again, in other words the whole workflow is again evaluated.
+
diff --git a/docs/content/en/docs/documentation/error-handling-retries.md b/docs/content/en/docs/documentation/error-handling-retries.md
new file mode 100644
index 0000000000..eeecf54751
--- /dev/null
+++ b/docs/content/en/docs/documentation/error-handling-retries.md
@@ -0,0 +1,146 @@
+---
+title: Error handling and retries
+weight: 46
+---
+
+## How Automatic Retries Work
+
+JOSDK automatically schedules retries whenever your `Reconciler` throws an exception. This robust retry mechanism helps handle transient issues like network problems or temporary resource unavailability.
+
+### Default Retry Behavior
+
+The default retry implementation covers most typical use cases with exponential backoff:
+
+```java
+GenericRetry.defaultLimitedExponentialRetry()
+ .setInitialInterval(5000) // Start with 5-second delay
+ .setIntervalMultiplier(1.5D) // Increase delay by 1.5x each retry
+ .setMaxAttempts(5); // Maximum 5 attempts
+```
+
+### Configuration Options
+
+**Using the `@GradualRetry` annotation:**
+
+```java
+@ControllerConfiguration
+@GradualRetry(maxAttempts = 3, initialInterval = 2000)
+public class MyReconciler implements Reconciler {
+ // reconciler implementation
+}
+```
+
+**Custom retry implementation:**
+
+Specify a custom retry class in the `@ControllerConfiguration` annotation:
+
+```java
+@ControllerConfiguration(retry = MyCustomRetry.class)
+public class MyReconciler implements Reconciler {
+ // reconciler implementation
+}
+```
+
+Your custom retry class must:
+- Provide a no-argument constructor for automatic instantiation
+- Optionally implement `AnnotationConfigurable` for configuration from annotations. See [`GenericRetry`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/retry/GenericRetry.java#)
+ implementation for more details.
+
+### Accessing Retry Information
+
+The [Context](https://github.com/java-operator-sdk/java-operator-sdk/blob/master/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/Context.java) object provides retry state information:
+
+```java
+@Override
+public UpdateControl reconcile(MyResource resource, Context context) {
+ if (context.isLastAttempt()) {
+ // Handle final retry attempt differently
+ resource.getStatus().setErrorMessage("Failed after all retry attempts");
+ return UpdateControl.patchStatus(resource);
+ }
+
+ // Normal reconciliation logic
+ // ...
+}
+```
+
+### Important Retry Behavior Notes
+
+- **Retry limits don't block new events**: When retry limits are reached, new reconciliations still occur for new events
+- **No retry on limit reached**: If an error occurs after reaching the retry limit, no additional retries are scheduled until new events arrive
+- **Event-driven recovery**: Fresh events can restart the retry cycle, allowing recovery from previously failed states
+
+A successful execution resets the retry state.
+
+### Reconciler Error Handler
+
+In order to facilitate error reporting you can override [`updateErrorStatus`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java#L52)
+method in `Reconciler`:
+
+```java
+public class MyReconciler implements Reconciler {
+
+ @Override
+ public ErrorStatusUpdateControl updateErrorStatus(
+ WebPage resource, Context context, Exception e) {
+ return handleError(resource, e);
+ }
+
+}
+```
+
+The `updateErrorStatus` method is called in case an exception is thrown from the `Reconciler`. It is
+also called even if no retry policy is configured, just after the reconciler execution.
+`RetryInfo.getAttemptCount()` is zero after the first reconciliation attempt, since it is not a
+result of a retry (regardless of whether a retry policy is configured).
+
+`ErrorStatusUpdateControl` tells the SDK what to do and how to perform the status
+update on the primary resource, which is always performed as a status sub-resource request. Note that
+this update request will also produce an event and result in a reconciliation if the
+controller is not generation-aware.
+
+This feature is only available for the `reconcile` method of the `Reconciler` interface, since
+there should not be updates to resources that have been marked for deletion.
+
+Retry can be skipped in cases of unrecoverable errors:
+
+```java
+ ErrorStatusUpdateControl.patchStatus(customResource).withNoRetry();
+```
+
+### Correctness and Automatic Retries
+
+While it is possible to deactivate automatic retries, this is not desirable unless there is a particular reason.
+Errors naturally occur, whether it be transient network errors or conflicts
+when a given resource is handled by a `Reconciler` but modified simultaneously by a user in
+a different process. Automatic retries handle these cases nicely and will eventually result in a
+successful reconciliation.
+
+## Retry, Rescheduling and Event Handling Common Behavior
+
+Retry, reschedule, and standard event processing form a relatively complex system, each of these
+functionalities interacting with the others. In the following, we describe the interplay of
+these features:
+
+1. A successful execution resets a retry and the rescheduled executions that were present before
+ the reconciliation. However, the reconciliation outcome can instruct a new rescheduling (`UpdateControl` or `DeleteControl`).
+
+ For example, if a reconciliation had previously been rescheduled for after some amount of time, but an event triggered
+ the reconciliation (or cleanup) in the meantime, the scheduled execution would be automatically cancelled, i.e.
+ rescheduling a reconciliation does not guarantee that one will occur precisely at that time; it simply guarantees that a reconciliation will occur by that time at the latest.
+ Of course, it's always possible to reschedule a new reconciliation at the end of that "automatic" reconciliation.
+
+ Similarly, if a retry was scheduled, any event from the cluster triggering a successful execution in the meantime
+ would cancel the scheduled retry (because there's now no point in retrying something that already succeeded)
+
+2. In case an exception is thrown, a retry is initiated. However, if an event is received
+ meanwhile, it will be reconciled instantly, and this execution won't count as a retry attempt.
+3. If the retry limit is reached (so no more automatic retry would happen), but a new event is
+ received, the reconciliation will still happen, but won't reset the retry, and will still be
+ marked as the last attempt in the retry info. The point (1) still holds - thus successful reconciliation will reset the retry - but no retry will happen in case of an error.
+
+The thing to remember when it comes to retrying or rescheduling is that JOSDK tries to avoid unnecessary work. When
+you reschedule an operation, you instruct JOSDK to perform that operation by the end of the rescheduling
+delay at the latest. If something occurred on the cluster that triggers that particular operation (reconciliation or cleanup), then
+JOSDK considers that there's no point in attempting that operation again at the end of the specified delay since there
+is no point in doing so anymore. The same idea also applies to retries.
diff --git a/docs/content/en/docs/documentation/eventing.md b/docs/content/en/docs/documentation/eventing.md
new file mode 100644
index 0000000000..77daeb6fa3
--- /dev/null
+++ b/docs/content/en/docs/documentation/eventing.md
@@ -0,0 +1,327 @@
+---
+title: Event sources and related topics
+weight: 47
+---
+
+## Handling Related Events with Event Sources
+
+See also
+this [blog post](https://csviri.medium.com/java-operator-sdk-introduction-to-event-sources-a1aab5af4b7b)
+.
+
+Event sources are a relatively simple yet powerful and extensible concept to trigger controller
+executions, usually based on changes to dependent resources. You typically need an event source
+when you want your `Reconciler` to be triggered when something occurs to secondary resources
+that might affect the state of your primary resource. This is needed because a given
+`Reconciler` will only listen by default to events affecting the primary resource type it is
+configured for. Event sources listen to events affecting these secondary resources so
+that a reconciliation of the associated primary resource can be triggered when needed. Note that
+these secondary resources need not be Kubernetes resources. Typically, when dealing with
+non-Kubernetes objects or services, we can extend our operator to handle webhooks or websockets
+or to react to any event coming from a service we interact with. This allows for very efficient
+controller implementations because reconciliations are then only triggered when something occurs
+on resources affecting our primary resources thus doing away with the need to periodically
+reschedule reconciliations.
+
+
+
+There are a few interesting points here:
+
+The `CustomResourceEventSource` event source is a special one, responsible for handling events
+pertaining to changes affecting our primary resources. This `EventSource` is always registered
+for every controller automatically by the SDK. It is important to note that events always relate
+to a given primary resource. Concurrency is still handled for you, even in the presence of
+`EventSource` implementations, and the SDK still guarantees that there is no concurrent execution of
+the controller for any given primary resource (though, of course, concurrent/parallel executions
+of events pertaining to other primary resources still occur as expected).
+
+### Caching and Event Sources
+
+Kubernetes resources are handled in a declarative manner. The same also holds true for event
+sources. For example, if we define an event source to watch for changes of a Kubernetes Deployment
+object using an `InformerEventSource`, we always receive the whole associated object from the
+Kubernetes API. This object might be needed at any point during our reconciliation process and
+it's best to retrieve it from the event source directly when possible instead of fetching it
+from the Kubernetes API since the event source guarantees that it will provide the latest
+version. Not only that, but many event source implementations also cache resources they handle
+so that it's possible to retrieve the latest version of resources without needing to make any
+calls to the Kubernetes API, thus allowing for very efficient controller implementations.
+
+Note that after an operator starts, caches are already populated by the time the first reconciliation
+is processed for the `InformerEventSource` implementation. However, this does not necessarily
+hold true for all event source implementations (`PerResourcePollingEventSource` for example). The SDK
+provides methods to handle this situation elegantly, allowing you to check if an object is
+cached, retrieving it from a provided supplier if not. See
+related [method](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PerResourcePollingEventSource.java#L146)
+.
+
+### Registering Event Sources
+
+To register event sources, your `Reconciler` has to override the `prepareEventSources` and return
+list of event sources to register. One way to see this in action is
+to look at the
+[WebPage example](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageReconciler.java)
+(irrelevant details omitted):
+
+```java
+
+import java.util.List;
+
+@ControllerConfiguration
+public class WebappReconciler
+    implements Reconciler<Webapp>, Cleaner<Webapp>, EventSourceInitializer<Webapp> {
+  // omitted code
+
+ @Override
+  public List<EventSource<?, Webapp>> prepareEventSources(EventSourceContext<Webapp> context) {
+ InformerEventSourceConfiguration configuration =
+ InformerEventSourceConfiguration.from(Deployment.class, Webapp.class)
+ .withLabelSelector(SELECTOR)
+ .build();
+ return List.of(new InformerEventSource<>(configuration, context));
+ }
+}
+```
+
+In the example above an `InformerEventSource` is configured and registered.
+`InformerEventSource` is one of the bundled `EventSource` implementations that JOSDK provides to
+cover common use cases.
+
+### Managing Relation between Primary and Secondary Resources
+
+Event sources let your operator know when a secondary resource has changed and that your
+operator might need to reconcile this new information. However, in order to do so, the SDK needs
+to somehow retrieve the primary resource associated with whichever secondary resource triggered
+the event. In the `Webapp` example above, when an event occurs on a tracked `Deployment`, the
+SDK needs to be able to identify which `Webapp` resource is impacted by that change.
+
+Seasoned Kubernetes users already know one way to track this parent-child kind of relationship:
+using owner references. Indeed, that's how the SDK deals with this situation by default as well,
+that is, if your controller properly set owner references on your secondary resources, the SDK
+will be able to follow that reference back to your primary resource automatically without you
+having to worry about it.
+
+However, owner references cannot always be used as they are restricted to operating within a
+single namespace (i.e. you cannot have an owner reference to a resource in a different namespace)
+and are, by essence, limited to Kubernetes resources so you're out of luck if your secondary
+resources live outside of a cluster.
+
+This is why JOSDK provides the `SecondaryToPrimaryMapper` interface so that you can provide
+alternative ways for the SDK to identify which primary resource needs to be reconciled when
+something occurs to your secondary resources. We even provide some of these alternatives in the
+[Mappers](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/Mappers.java)
+class.
+
+Note that, while a set of `ResourceID` is returned, this set usually consists only of one
+element. It is however possible to return multiple values or even no value at all to cover some
+rare corner cases. Returning an empty set means that the mapper considered the secondary
+resource event as irrelevant and the SDK will thus not trigger a reconciliation of the primary
+resource in that situation.
+
+Adding a `SecondaryToPrimaryMapper` is typically sufficient when there is a one-to-many relationship
+between primary and secondary resources. The secondary resources can be mapped to its primary
+owner, and this is enough information to also get these secondary resources from the `Context`
+object that's passed to your `Reconciler`.
+
+There are however cases when this isn't sufficient and you need to provide an explicit mapping
+between a primary resource and its associated secondary resources using an implementation of the
+`PrimaryToSecondaryMapper` interface. This is typically needed when there are many-to-one or
+many-to-many relationships between primary and secondary resources, e.g. when the primary resource
+is referencing secondary resources.
+See [PrimaryToSecondaryIT](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/primarytosecondary/PrimaryToSecondaryIT.java)
+integration test for a sample.
+
+### Built-in EventSources
+
+There are multiple event-sources provided out of the box, the following are some more central ones:
+
+#### `InformerEventSource`
+
+[InformerEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/InformerEventSource.java)
+is probably the most important `EventSource` implementation to know about. When you create an
+`InformerEventSource`, JOSDK will automatically create and register a `SharedIndexInformer`, a
+fabric8 Kubernetes client class, that will listen for events associated with the resource type
+you configured your `InformerEventSource` with. If you want to listen to Kubernetes resource
+events, `InformerEventSource` is probably the only thing you need to use. It's highly
+configurable so you can tune it to your needs. Take a look at
+[InformerEventSourceConfiguration](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+and associated classes for more details but some interesting features we can mention here is the
+ability to filter events so that you can only get notified for events you care about. A
+particularly interesting feature of the `InformerEventSource`, as opposed to using your own
+informer-based listening mechanism, is that caches are particularly well optimized, preventing
+reconciliations from being triggered when not needed and allowing efficient operators to be written.
+
+#### `PerResourcePollingEventSource`
+
+[PerResourcePollingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PerResourcePollingEventSource.java)
+is used to poll external APIs, which don't support webhooks or other event notifications. It
+extends the abstract
+[ExternalResourceCachingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/ExternalResourceCachingEventSource.java)
+to support caching.
+See [MySQL Schema sample](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/sample-operators/mysql-schema/src/main/java/io/javaoperatorsdk/operator/sample/MySQLSchemaReconciler.java)
+for usage.
+
+#### `PollingEventSource`
+
+[PollingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PollingEventSource.java)
+is similar to `PerResourcePollingEventSource` except that, contrary to that event source, it
+doesn't poll a specific API separately per resource, but periodically and independently of
+actually observed primary resources.
+
+#### Inbound event sources
+
+[SimpleInboundEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/inbound/SimpleInboundEventSource.java)
+and
+[CachingInboundEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/inbound/CachingInboundEventSource.java)
+are used to handle incoming events from webhooks and messaging systems.
+
+#### `ControllerResourceEventSource`
+
+[ControllerResourceEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/controller/ControllerResourceEventSource.java)
+is a special `EventSource` implementation that you will never have to deal with directly. It is,
+however, at the core of the SDK and is automatically added for you: this is the main event source
+that listens for changes to your primary resources and triggers your `Reconciler` when needed.
+It features smart caching and is really optimized to minimize Kubernetes API accesses and avoid
+triggering unduly your `Reconciler`.
+
+For more on the philosophy of the non-Kubernetes-API-related event sources, see
+issue [#729](https://github.com/java-operator-sdk/java-operator-sdk/issues/729).
+
+
+## InformerEventSource Multi-Cluster Support
+
+It is possible to handle resources on a remote cluster with `InformerEventSource`. To do so,
+simply set a client that connects to a remote cluster:
+
+```java
+
+InformerEventSourceConfiguration configuration =
+ InformerEventSourceConfiguration.from(SecondaryResource.class, PrimaryResource.class)
+ .withKubernetesClient(remoteClusterClient)
+ .withSecondaryToPrimaryMapper(Mappers.fromDefaultAnnotations());
+
+```
+
+You will also need to specify a `SecondaryToPrimaryMapper`, since the default one
+is based on owner references and won't work across cluster instances. You could, for example, use the provided implementation that relies on annotations added to the secondary resources to identify the associated primary resource.
+
+See related [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/informerremotecluster).
+
+
+## Generation Awareness and Event Filtering
+
+A best practice when an operator starts up is to reconcile all the associated resources because
+changes might have occurred to the resources while the operator was not running.
+
+When this first reconciliation is done successfully, the next reconciliation is triggered if either
+dependent resources are changed or the primary resource `.spec` field is changed. If other fields
+like `.metadata` are changed on the primary resource, the reconciliation could be skipped. This
+behavior is supported out of the box and reconciliation is by default not triggered if
+changes to the primary resource do not increase the `.metadata.generation` field.
+Note that changes to `.metadata.generation` are automatically handled by Kubernetes.
+
+To turn off this feature, set `generationAwareEventProcessing` to `false` for the `Reconciler`.
+
+
+## Max Interval Between Reconciliations
+
+When informers / event sources are properly set up, and the `Reconciler` implementation is
+correct, no additional reconciliation triggers should be needed. However, it's
+a [common practice](https://github.com/java-operator-sdk/java-operator-sdk/issues/848#issuecomment-1016419966)
+to have a failsafe periodic trigger in place, just to make sure resources are nevertheless
+reconciled after a certain amount of time. This functionality is in place by default, with a
+rather high time interval (currently 10 hours) after which a reconciliation will be
+automatically triggered even in the absence of other events. See how to override this using the
+standard annotation:
+
+```java
+@ControllerConfiguration(maxReconciliationInterval = @MaxReconciliationInterval(
+ interval = 50,
+ timeUnit = TimeUnit.MILLISECONDS))
+public class MyReconciler implements Reconciler {}
+```
+
+The event is not propagated at a fixed rate, rather it's scheduled after each reconciliation. So the
+next reconciliation will occur at most within the specified interval after the last reconciliation.
+
+This feature can be turned off by setting `maxReconciliationInterval`
+to [`Constants.NO_MAX_RECONCILIATION_INTERVAL`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Constants.java#L20-L20)
+or any non-positive number.
+
+The automatic retries are not affected by this feature so a reconciliation will be re-triggered
+on error, according to the specified retry policy, regardless of this maximum interval setting.
+
+## Rate Limiting
+
+It is possible to rate limit reconciliation on a per-resource basis. The rate limit also takes
+precedence over retry/re-schedule configurations: for example, even if a retry was scheduled for
+the next second but this request would make the resource go over its rate limit, the next
+reconciliation will be postponed according to the rate limiting rules. Note that the
+reconciliation is never cancelled, it will just be executed as early as possible based on rate
+limitations.
+
+Rate limiting is by default turned **off**, since correct configuration depends on the reconciler
+implementation, in particular, on how long a typical reconciliation takes.
+(The parallelism of reconciliation itself can be
+limited in [`ConfigurationService`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L120-L120)
+by configuring the `ExecutorService` appropriately.)
+
+A default rate limiter implementation is provided, see:
+[`PeriodRateLimiter`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/rate/PeriodRateLimiter.java#L14-L14)
+.
+Users can override it by implementing their own
+[`RateLimiter`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/rate/RateLimiter.java)
+and specifying this custom implementation using the `rateLimiter` field of the
+`@ControllerConfiguration` annotation. Similarly to the `Retry` implementations,
+`RateLimiter` implementations must provide an accessible, no-arg constructor for instantiation
+purposes and can further be automatically configured using your own annotation, provided that
+your `RateLimiter` implementation also implements the `AnnotationConfigurable` interface,
+parameterized by your custom annotation type.
+
+To configure the default rate limiter use the `@RateLimited` annotation on your
+`Reconciler` class. The following configuration limits each resource to reconcile at most twice
+within a 3 second interval:
+
+```java
+
+@RateLimited(maxReconciliations = 2, within = 3, unit = TimeUnit.SECONDS)
+@ControllerConfiguration
+public class MyReconciler implements Reconciler {
+
+}
+```
+
+Thus, if a given resource was reconciled twice in one second, no further reconciliation for this
+resource will happen before two seconds have elapsed. Note that, since rate is limited on a
+per-resource basis, other resources can still be reconciled at the same time, as long, of course,
+that they stay within their own rate limits.
+
+## Optimizing Caches
+
+One of the ideas around the operator pattern is that all the relevant resources are cached, thus reconciliation is
+usually very fast (especially if no resources are updated in the process) since the operator is then mostly working with
+in-memory state. However, for large clusters, caching huge amounts of primary and secondary resources might consume lots
+of memory. JOSDK provides ways to mitigate this issue and optimize the memory usage of controllers. While these features
+are working and tested, we need feedback from real production usage.
+
+### Bounded Caches for Informers
+
+Limiting caches for informers - thus for Kubernetes resources - is supported by ensuring that resources are in the cache
+for a limited time, via a cache eviction of least recently used resources. This means that when resources are created
+and frequently reconciled, they stay "hot" in the cache. However, if, over time, a given resource "cools" down, i.e. it
+becomes less and less used to the point that it might not be reconciled anymore, it will eventually get evicted from the
+cache to free up memory. If such an evicted resource were to become reconciled again, the bounded cache implementation
+would then fetch it from the API server and the "hot/cold" cycle would start anew.
+
+Since all resources need to be reconciled when a controller starts, it is not practical to set a maximal cache size as
+it's desirable that all resources be cached as soon as possible to make the initial reconciliation process on start as
+fast and efficient as possible, avoiding undue load on the API server. It's therefore more interesting to gradually
+evict cold resources than try to limit cache sizes.
+
+See usage of the related implementation using [Caffeine](https://github.com/ben-manes/caffeine) cache in integration
+tests
+for [primary resources](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java).
+
+See
+also [CaffeineBoundedItemStores](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java)
+for more details.
\ No newline at end of file
diff --git a/docs/content/en/docs/documentation/features.md b/docs/content/en/docs/documentation/features.md
new file mode 100644
index 0000000000..8c8909c8b2
--- /dev/null
+++ b/docs/content/en/docs/documentation/features.md
@@ -0,0 +1,55 @@
+---
+title: Other Features
+weight: 57
+---
+
+The Java Operator SDK (JOSDK) is a high-level framework and tooling suite for implementing Kubernetes operators. By default, features follow best practices in an opinionated way. However, configuration options and feature flags are available to fine-tune or disable these features.
+
+## Support for Well-Known Kubernetes Resources
+
+Controllers can be registered for standard Kubernetes resources (not just custom resources), such as `Ingress`, `Deployment`, and others.
+
+See the [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/deployment) for an example of reconciling deployments.
+
+```java
+public class DeploymentReconciler
+    implements Reconciler<Deployment>, TestExecutionInfoProvider {
+
+  @Override
+  public UpdateControl<Deployment> reconcile(
+      Deployment resource, Context<Deployment> context) {
+    // omitted code
+  }
+}
+```
+
+## Leader Election
+
+Operators are typically deployed with a single active instance. However, you can deploy multiple instances where only one (the "leader") processes events. This is achieved through "leader election."
+
+While all instances run and start their event sources to populate caches, only the leader processes events. If the leader crashes, other instances are already warmed up and ready to take over when a new leader is elected.
+
+See sample configuration in the [E2E test](https://github.com/java-operator-sdk/java-operator-sdk/blob/8865302ac0346ee31f2d7b348997ec2913d5922b/sample-operators/leader-election/src/main/java/io/javaoperatorsdk/operator/sample/LeaderElectionTestOperator.java#L21-L23).
+
+## Automatic CRD Generation
+
+**Note:** This feature is provided by the [Fabric8 Kubernetes Client](https://github.com/fabric8io/kubernetes-client), not JOSDK itself.
+
+To automatically generate CRD manifests from your annotated Custom Resource classes, add this dependency to your project:
+
+```xml
+<dependency>
+  <groupId>io.fabric8</groupId>
+  <artifactId>crd-generator-apt</artifactId>
+  <scope>provided</scope>
+</dependency>
+```
+
+The CRD will be generated in `target/classes/META-INF/fabric8` (or `target/test-classes/META-INF/fabric8` for test scope) with the CRD name suffixed by the generated spec version.
+
+For example, a CR using the `java-operator-sdk.io` group with a `mycrs` plural form will result in these files:
+- `mycrs.java-operator-sdk.io-v1.yml`
+- `mycrs.java-operator-sdk.io-v1beta1.yml`
+
+**Note for Quarkus users:** If you're using the `quarkus-operator-sdk` extension, you don't need to add any extra dependency for CRD generation - the extension handles this automatically.
diff --git a/docs/content/en/docs/documentation/observability.md b/docs/content/en/docs/documentation/observability.md
new file mode 100644
index 0000000000..27a68086d5
--- /dev/null
+++ b/docs/content/en/docs/documentation/observability.md
@@ -0,0 +1,112 @@
+---
+title: Observability
+weight: 55
+---
+
+## Runtime Info
+
+[RuntimeInfo](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/RuntimeInfo.java#L16-L16)
+is used mainly to check the actual health of event sources. Based on this information it is easy to implement custom
+liveness probes.
+
+[stopOnInformerErrorDuringStartup](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L168-L168)
+setting, where this flag usually needs to be set to false, in order to control the exact liveness properties.
+
+See also an example implementation in the
+[WebPage sample](https://github.com/java-operator-sdk/java-operator-sdk/blob/3e2e7c4c834ef1c409d636156b988125744ca911/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageOperator.java#L38-L43)
+
+## Contextual Info for Logging with MDC
+
+Logging is enhanced with additional contextual information using
+[MDC](http://www.slf4j.org/manual.html#mdc). The following attributes are available in most
+parts of reconciliation logic and during the execution of the controller:
+
+| MDC Key | Value added from primary resource |
+|:---------------------------|:----------------------------------|
+| `resource.apiVersion` | `.apiVersion` |
+| `resource.kind` | `.kind` |
+| `resource.name` | `.metadata.name` |
+| `resource.namespace` | `.metadata.namespace` |
+| `resource.resourceVersion` | `.metadata.resourceVersion` |
+| `resource.generation` | `.metadata.generation` |
+| `resource.uid` | `.metadata.uid` |
+
+For more information about MDC see this [link](https://www.baeldung.com/mdc-in-log4j-2-logback).
+
+## Metrics
+
+JOSDK provides built-in support for metrics reporting on what is happening with your reconcilers in the form of
+the `Metrics` interface which can be implemented to connect to your metrics provider of choice, JOSDK calling the
+methods as it goes about reconciling resources. By default, a no-operation implementation is provided thus providing a
+no-cost sane default. A [micrometer](https://micrometer.io)-based implementation is also provided.
+
+You can use a different implementation by overriding the default one provided by the default `ConfigurationService`, as
+follows:
+
+```java
+Metrics metrics; // initialize your metrics implementation
+Operator operator = new Operator(client, o -> o.withMetrics(metrics));
+```
+
+### Micrometer implementation
+
+The micrometer implementation is typically created using one of the provided factory methods which, depending on which
+is used, will return either a ready to use instance or a builder allowing users to customized how the implementation
+behaves, in particular when it comes to the granularity of collected metrics. It is, for example, possible to collect
+metrics on a per-resource basis via tags that are associated with meters. This is the default, historical behavior but
+this will change in a future version of JOSDK because this dramatically increases the cardinality of metrics, which
+could lead to performance issues.
+
+To create a `MicrometerMetrics` implementation that behaves how it has historically behaved, you can just create an
+instance via:
+
+```java
+MeterRegistry registry; // initialize your registry implementation
+Metrics metrics = new MicrometerMetrics(registry);
+```
+
+Note, however, that this constructor is deprecated and we encourage you to use the factory methods instead, which either
+return a fully pre-configured instance or a builder object that will allow you to configure more easily how the instance
+will behave. You can, for example, configure whether or not the implementation should collect metrics on a per-resource
+basis, whether or not associated meters should be removed when a resource is deleted and how the clean-up is performed.
+See the relevant classes documentation for more details.
+
+For example, the following will create a `MicrometerMetrics` instance configured to collect metrics on a per-resource
+basis, deleting the associated meters after 5 seconds when a resource is deleted, using up to 2 threads to do so.
+
+```java
+MicrometerMetrics.newPerResourceCollectingMicrometerMetricsBuilder(registry)
+ .withCleanUpDelayInSeconds(5)
+ .withCleaningThreadNumber(2)
+ .build();
+```
+
+### Operator SDK metrics
+
+The micrometer implementation records the following metrics:
+
+| Meter name | Type | Tag names | Description |
+|-------------------------------------------------------------|----------------|-------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|
+| operator.sdk.reconciliations.executions.`` | gauge | group, version, kind | Number of executions of the named reconciler |
+| operator.sdk.reconciliations.queue.size.`` | gauge | group, version, kind | How many resources are queued to get reconciled by named reconciler |
+| operator.sdk.`