cache;
diff --git a/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java
index a58d58bd2a..89fbcef70f 100644
--- a/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java
+++ b/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java
@@ -9,9 +9,9 @@
import com.github.benmanes.caffeine.cache.Caffeine;
/**
- * A factory for Caffeine-backed
- * {@link BoundedItemStore}. The implementation uses a {@link CaffeineBoundedCache} to store
- * resources and progressively evict them if they haven't been used in a while. The idea about
+ * A factory for Caffeine-backed {@link
+ * BoundedItemStore}. The implementation uses a {@link CaffeineBoundedCache} to store resources and
+ * progressively evict them if they haven't been used in a while. The idea about
* CaffeinBoundedItemStore-s is that, caffeine will cache the resources which were recently used,
* and will evict resource, which are not used for a while. This is ideal for startup performance
* and efficiency when all resources should be cached to avoid undue load on the API server. This is
@@ -20,11 +20,11 @@
* happen that some / many of these resources are then seldom or even reconciled anymore. In that
* situation, large amounts of memory might be consumed to cache resources that are never used
* again.
- *
- * Note that if a resource is reconciled and is not present anymore in cache, it will transparently
- * be fetched again from the API server. Similarly, since associated secondary resources are usually
- * reconciled too, they might need to be fetched and populated to the cache, and will remain there
- * for some time, for subsequent reconciliations.
+ *
+ * Note that if a resource is reconciled and is not present anymore in cache, it will
+ * transparently be fetched again from the API server. Similarly, since associated secondary
+ * resources are usually reconciled too, they might need to be fetched and populated to the cache,
+ * and will remain there for some time, for subsequent reconciliations.
*/
public class CaffeineBoundedItemStores {
@@ -39,11 +39,8 @@ private CaffeineBoundedItemStores() {}
*/
@SuppressWarnings("unused")
  public static <R extends HasMetadata> BoundedItemStore<R> boundedItemStore(
-      KubernetesClient client, Class<R> rClass,
-      Duration accessExpireDuration) {
-    Cache<String, R> cache = Caffeine.newBuilder()
-        .expireAfterAccess(accessExpireDuration)
-        .build();
+      KubernetesClient client, Class<R> rClass, Duration accessExpireDuration) {
+    Cache<String, R> cache = Caffeine.newBuilder().expireAfterAccess(accessExpireDuration).build();
return boundedItemStore(client, rClass, cache);
}
@@ -51,5 +48,4 @@ public static BoundedItemStore boundedItemStore(
KubernetesClient client, Class rClass, Cache cache) {
return new BoundedItemStore<>(new CaffeineBoundedCache<>(cache), rClass, client);
}
-
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java
index 21adf81cc0..532e5237f8 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/BoundedCacheTestBase.java
@@ -17,7 +17,8 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
-public abstract class BoundedCacheTestBase> {
+public abstract class BoundedCacheTestBase<
+ P extends CustomResource> {
private static final Logger log = LoggerFactory.getLogger(BoundedCacheTestBase.class);
@@ -42,34 +43,46 @@ void reconciliationWorksWithLimitedCache() {
}
private void assertConfigMapsDeleted() {
- await().atMost(Duration.ofSeconds(30))
- .untilAsserted(() -> IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST).forEach(i -> {
- var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
- assertThat(cm).isNull();
- }));
+ await()
+ .atMost(Duration.ofSeconds(120))
+ .untilAsserted(
+ () ->
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
+ assertThat(cm).isNull();
+ }));
}
private void deleteTestResources() {
- IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST).forEach(i -> {
- var cm = extension().get(customResourceClass(), RESOURCE_NAME_PREFIX + i);
- var deleted = extension().delete(cm);
- if (!deleted) {
- log.warn("Custom resource might not be deleted: {}", cm);
- }
- });
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(customResourceClass(), RESOURCE_NAME_PREFIX + i);
+ var deleted = extension().delete(cm);
+ if (!deleted) {
+ log.warn("Custom resource might not be deleted: {}", cm);
+ }
+ });
}
private void updateTestResources() {
- IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST).forEach(i -> {
- var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
- cm.getData().put(DATA_KEY, UPDATED_PREFIX + i);
- extension().replace(cm);
- });
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ var cm = extension().get(ConfigMap.class, RESOURCE_NAME_PREFIX + i);
+ cm.getData().put(DATA_KEY, UPDATED_PREFIX + i);
+ extension().replace(cm);
+ });
}
void assertConfigMapData(String dataPrefix) {
- await().untilAsserted(() -> IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
- .forEach(i -> assertConfigMap(i, dataPrefix)));
+ await()
+ .untilAsserted(
+ () ->
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(i -> assertConfigMap(i, dataPrefix)));
}
private void assertConfigMap(int i, String prefix) {
@@ -79,9 +92,11 @@ private void assertConfigMap(int i, String prefix) {
}
private void createTestResources() {
- IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST).forEach(i -> {
- extension().create(createTestResource(i));
- });
+ IntStream.range(0, NUMBER_OF_RESOURCE_TO_TEST)
+ .forEach(
+ i -> {
+ extension().create(createTestResource(i));
+ });
}
abstract P createTestResource(int index);
@@ -89,7 +104,4 @@ private void createTestResources() {
abstract Class customResourceClass();
abstract LocallyRunOperatorExtension extension();
-
-
-
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheClusterScopeIT.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheClusterScopeIT.java
index 252b20f4a4..0c16c1227b 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheClusterScopeIT.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheClusterScopeIT.java
@@ -19,21 +19,22 @@ public class CaffeineBoundedCacheClusterScopeIT
@RegisterExtension
LocallyRunOperatorExtension extension =
LocallyRunOperatorExtension.builder()
- .withReconciler(new BoundedCacheClusterScopeTestReconciler(), o -> {
- o.withItemStore(boundedItemStore(
- new KubernetesClientBuilder().build(),
- BoundedCacheClusterScopeTestCustomResource.class,
- Duration.ofMinutes(1),
- 1));
- })
+ .withReconciler(
+ new BoundedCacheClusterScopeTestReconciler(),
+ o -> {
+ o.withItemStore(
+ boundedItemStore(
+ new KubernetesClientBuilder().build(),
+ BoundedCacheClusterScopeTestCustomResource.class,
+ Duration.ofMinutes(1),
+ 1));
+ })
.build();
@Override
BoundedCacheClusterScopeTestCustomResource createTestResource(int index) {
var res = new BoundedCacheClusterScopeTestCustomResource();
- res.setMetadata(new ObjectMetaBuilder()
- .withName(RESOURCE_NAME_PREFIX + index)
- .build());
+ res.setMetadata(new ObjectMetaBuilder().withName(RESOURCE_NAME_PREFIX + index).build());
res.setSpec(new BoundedCacheTestSpec());
res.getSpec().setData(INITIAL_DATA_PREFIX + index);
res.getSpec().setTargetNamespace(extension.getNamespace());
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheNamespacedIT.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheNamespacedIT.java
index ae7f8f5873..534d7b2027 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheNamespacedIT.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedCacheNamespacedIT.java
@@ -18,19 +18,22 @@ class CaffeineBoundedCacheNamespacedIT
@RegisterExtension
LocallyRunOperatorExtension extension =
- LocallyRunOperatorExtension.builder().withReconciler(new BoundedCacheTestReconciler(), o -> {
- o.withItemStore(boundedItemStore(
- new KubernetesClientBuilder().build(), BoundedCacheTestCustomResource.class,
- Duration.ofMinutes(1),
- 1));
- })
+ LocallyRunOperatorExtension.builder()
+ .withReconciler(
+ new BoundedCacheTestReconciler(),
+ o -> {
+ o.withItemStore(
+ boundedItemStore(
+ new KubernetesClientBuilder().build(),
+ BoundedCacheTestCustomResource.class,
+ Duration.ofMinutes(1),
+ 1));
+ })
.build();
BoundedCacheTestCustomResource createTestResource(int index) {
var res = new BoundedCacheTestCustomResource();
- res.setMetadata(new ObjectMetaBuilder()
- .withName(RESOURCE_NAME_PREFIX + index)
- .build());
+ res.setMetadata(new ObjectMetaBuilder().withName(RESOURCE_NAME_PREFIX + index).build());
res.setSpec(new BoundedCacheTestSpec());
res.getSpec().setData(INITIAL_DATA_PREFIX + index);
res.getSpec().setTargetNamespace(extension.getNamespace());
@@ -46,5 +49,4 @@ Class customResourceClass() {
LocallyRunOperatorExtension extension() {
return extension;
}
-
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java
index b6e3ba2c8f..b059ac033b 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java
@@ -1,6 +1,7 @@
package io.javaoperatorsdk.operator.processing.event.source.cache.sample;
import java.time.Duration;
+import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
@@ -13,7 +14,7 @@
import io.fabric8.kubernetes.client.CustomResource;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
-import io.javaoperatorsdk.operator.api.config.informer.InformerConfiguration;
+import io.javaoperatorsdk.operator.api.config.informer.InformerEventSourceConfiguration;
import io.javaoperatorsdk.operator.api.reconciler.*;
import io.javaoperatorsdk.operator.processing.event.source.EventSource;
import io.javaoperatorsdk.operator.processing.event.source.cache.BoundedItemStore;
@@ -27,8 +28,9 @@
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
-public abstract class AbstractTestReconciler>
- implements Reconciler
, EventSourceInitializer
{
+public abstract class AbstractTestReconciler<
+ P extends CustomResource>
+ implements Reconciler {
private static final Logger log =
LoggerFactory.getLogger(BoundedCacheClusterScopeTestReconciler.class);
@@ -36,9 +38,7 @@ public abstract class AbstractTestReconciler
reconcile(
- P resource,
- Context
context) {
+ public UpdateControl
reconcile(P resource, Context
context) {
var maybeConfigMap = context.getSecondaryResource(ConfigMap.class);
maybeConfigMap.ifPresentOrElse(
cm -> updateConfigMapIfNeeded(cm, resource, context),
@@ -57,32 +57,41 @@ protected void updateConfigMapIfNeeded(ConfigMap cm, P resource, Context
cont
}
protected void createConfigMap(P resource, Context
context) {
- var cm = new ConfigMapBuilder()
- .withMetadata(new ObjectMetaBuilder()
- .withName(resource.getMetadata().getName())
- .withNamespace(resource.getSpec().getTargetNamespace())
- .build())
- .withData(Map.of(DATA_KEY, resource.getSpec().getData()))
- .build();
+ var cm =
+ new ConfigMapBuilder()
+ .withMetadata(
+ new ObjectMetaBuilder()
+ .withName(resource.getMetadata().getName())
+ .withNamespace(resource.getSpec().getTargetNamespace())
+ .build())
+ .withData(Map.of(DATA_KEY, resource.getSpec().getData()))
+ .build();
cm.addOwnerReference(resource);
context.getClient().configMaps().resource(cm).create();
}
@Override
- public Map prepareEventSources(
- EventSourceContext context) {
+ public List> prepareEventSources(EventSourceContext context) {
var boundedItemStore =
- boundedItemStore(new KubernetesClientBuilder().build(),
- ConfigMap.class, Duration.ofMinutes(1), 1); // setting max size for testing purposes
-
- var es = new InformerEventSource<>(InformerConfiguration.from(ConfigMap.class, context)
- .withItemStore(boundedItemStore)
- .withSecondaryToPrimaryMapper(
- Mappers.fromOwnerReference(this instanceof BoundedCacheClusterScopeTestReconciler))
- .build(), context);
-
- return EventSourceInitializer.nameEventSources(es);
+ boundedItemStore(
+ new KubernetesClientBuilder().build(),
+ ConfigMap.class,
+ Duration.ofMinutes(1),
+ 1); // setting max size for testing purposes
+
+ var es =
+ new InformerEventSource<>(
+ InformerEventSourceConfiguration.from(ConfigMap.class, primaryClass())
+ .withItemStore(boundedItemStore)
+ .withSecondaryToPrimaryMapper(
+ Mappers.fromOwnerReferences(
+ context.getPrimaryResourceClass(),
+ this instanceof BoundedCacheClusterScopeTestReconciler))
+ .build(),
+ context);
+
+ return List.of(es);
}
private void ensureStatus(P resource) {
@@ -92,14 +101,18 @@ private void ensureStatus(P resource) {
}
public static BoundedItemStore boundedItemStore(
- KubernetesClient client, Class rClass,
+ KubernetesClient client,
+ Class rClass,
Duration accessExpireDuration,
// max size is only for testing purposes
long cacheMaxSize) {
- Cache cache = Caffeine.newBuilder()
- .expireAfterAccess(accessExpireDuration)
- .maximumSize(cacheMaxSize)
- .build();
+ Cache cache =
+ Caffeine.newBuilder()
+ .expireAfterAccess(accessExpireDuration)
+ .maximumSize(cacheMaxSize)
+ .build();
return CaffeineBoundedItemStores.boundedItemStore(client, rClass, cache);
}
+
+ protected abstract Class primaryClass();
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java
index a77416715e..6fc9a5babc 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestCustomResource.java
@@ -11,5 +11,4 @@
@Version("v1")
@ShortNames("bccs")
public class BoundedCacheClusterScopeTestCustomResource
- extends CustomResource {
-}
+ extends CustomResource {}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java
index a154659164..93f103cbf2 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/clusterscope/BoundedCacheClusterScopeTestReconciler.java
@@ -4,7 +4,11 @@
import io.javaoperatorsdk.operator.processing.event.source.cache.sample.AbstractTestReconciler;
@ControllerConfiguration
-public class BoundedCacheClusterScopeTestReconciler extends
- AbstractTestReconciler {
+public class BoundedCacheClusterScopeTestReconciler
+ extends AbstractTestReconciler {
+ @Override
+ protected Class primaryClass() {
+ return BoundedCacheClusterScopeTestCustomResource.class;
+ }
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java
index a5e37917ba..9b77aa7bf8 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestCustomResource.java
@@ -10,5 +10,4 @@
@Version("v1")
@ShortNames("bct")
public class BoundedCacheTestCustomResource
- extends CustomResource implements Namespaced {
-}
+ extends CustomResource implements Namespaced {}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java
index 211877b361..6b95665585 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestReconciler.java
@@ -7,4 +7,8 @@
public class BoundedCacheTestReconciler
extends AbstractTestReconciler {
+ @Override
+ protected Class primaryClass() {
+ return BoundedCacheTestCustomResource.class;
+ }
}
diff --git a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java
index 03a311529e..5aa5ca2258 100644
--- a/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java
+++ b/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/namespacescope/BoundedCacheTestStatus.java
@@ -1,6 +1,3 @@
package io.javaoperatorsdk.operator.processing.event.source.cache.sample.namespacescope;
-import io.javaoperatorsdk.operator.api.ObservedGenerationAwareStatus;
-
-public class BoundedCacheTestStatus extends ObservedGenerationAwareStatus {
-}
+public class BoundedCacheTestStatus {}
diff --git a/contributing/eclipse-google-style.xml b/contributing/eclipse-google-style.xml
deleted file mode 100644
index 64340b1054..0000000000
--- a/contributing/eclipse-google-style.xml
+++ /dev/null
@@ -1,337 +0,0 @@
diff --git a/contributing/eclipse.importorder b/contributing/eclipse.importorder
deleted file mode 100644
index 8a156041e9..0000000000
--- a/contributing/eclipse.importorder
+++ /dev/null
@@ -1,7 +0,0 @@
-0=java
-1=javax
-2=org
-3=io
-4=com
-5=
-6=\#
diff --git a/docsy/.gitignore b/docs/.gitignore
similarity index 100%
rename from docsy/.gitignore
rename to docs/.gitignore
diff --git a/docsy/.nvmrc b/docs/.nvmrc
similarity index 100%
rename from docsy/.nvmrc
rename to docs/.nvmrc
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
new file mode 100644
index 0000000000..5ea571c69d
--- /dev/null
+++ b/docs/CONTRIBUTING.md
@@ -0,0 +1,57 @@
+# Contributing to Java Operator SDK Documentation
+
+Thank you for your interest in improving the Java Operator SDK documentation! We welcome contributions from the community and appreciate your help in making our documentation better.
+
+## How to Contribute
+
+### Getting Started
+
+1. **Fork the repository** and clone your fork locally
+2. **Create a new branch** for your changes
+3. **Make your improvements** to the documentation
+4. **Test your changes** locally using `hugo server`
+5. **Submit a pull request** with a clear description of your changes
+
+### Types of Contributions
+
+We welcome various types of contributions:
+
+- **Content improvements**: Fix typos, clarify explanations, add examples
+- **New documentation**: Add missing sections or entirely new guides
+- **Structural improvements**: Better organization, navigation, or formatting
+- **Translation**: Help translate documentation to other languages
+
+## Guidelines
+
+### Writing Style
+
+- Use clear, concise language
+- Write in active voice when possible
+- Define technical terms when first used
+- Include practical examples where helpful
+- Keep sentences and paragraphs reasonably short
+
+### Technical Requirements
+
+- Test all code examples to ensure they work
+- Use proper markdown formatting
+- Follow existing documentation structure and conventions
+- Ensure links work and point to current resources
+
+## Legal Requirements
+
+### Contributor License Agreement
+
+All contributions must be accompanied by a Contributor License Agreement (CLA). You (or your employer) retain the copyright to your contribution; the CLA simply gives us permission to use and redistribute your contributions as part of the project.
+
+Visit the CLA service to see your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one (even for a different project), you probably don't need to do it again.
+
+### Code Review Process
+
+All submissions, including those by project members, require review. We use GitHub pull requests for this purpose. Please consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
diff --git a/docsy/Dockerfile b/docs/Dockerfile
similarity index 100%
rename from docsy/Dockerfile
rename to docs/Dockerfile
diff --git a/docsy/LICENSE b/docs/LICENSE
similarity index 100%
rename from docsy/LICENSE
rename to docs/LICENSE
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000..14f675b53b
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,82 @@
+# Java Operator SDK Documentation
+
+This repository contains the documentation website for the Java Operator SDK (JOSDK), built using Hugo and the Docsy theme.
+
+## About Java Operator SDK
+
+Java Operator SDK is a framework that makes it easy to build Kubernetes operators in Java. It provides APIs designed to feel natural to Java developers and handles common operator challenges automatically, allowing you to focus on your business logic.
+
+## Development Setup
+
+This documentation site uses Hugo v0.125.7 with the Docsy theme.
+
+## Prerequisites
+
+- Hugo v0.125.7 or later (extended version required)
+- Node.js and npm (for PostCSS processing)
+- Git
+
+## Local Development
+
+### Quick Start
+
+1. Clone this repository
+2. Install dependencies:
+ ```bash
+ npm install
+ ```
+3. Start the development server:
+ ```bash
+ hugo server
+ ```
+4. Open your browser to `http://localhost:1313`
+
+### Using Docker
+
+You can also run the documentation site using Docker:
+
+1. Build the container:
+ ```bash
+ docker-compose build
+ ```
+2. Run the container:
+ ```bash
+ docker-compose up
+ ```
+ > **Note**: You can combine both commands with `docker-compose up --build`
+
+3. Access the site at `http://localhost:1313`
+
+To stop the container, press **Ctrl + C** in your terminal.
+
+To clean up Docker resources:
+```bash
+docker-compose rm
+```
+
+## Contributing
+
+We welcome contributions to improve the documentation! Please see our [contribution guidelines](CONTRIBUTING.md) for details on how to get started.
+
+## Troubleshooting
+
+### Module Compatibility Error
+If you see an error about module compatibility, ensure you're using Hugo v0.110.0 or higher:
+```console
+Error: Error building site: failed to extract shortcode: template for shortcode "blocks/cover" not found
+```
+
+### SCSS Processing Error
+If you encounter SCSS-related errors, make sure you have the extended version of Hugo installed:
+```console
+Error: TOCSS: failed to transform "scss/main.scss"
+```
+
+### Go Binary Not Found
+If you see "binary with name 'go' not found", install the Go programming language from [golang.org](https://golang.org).
+
+## Links
+
+- [Hugo Documentation](https://gohugo.io/documentation/)
+- [Docsy Theme Documentation](https://www.docsy.dev/docs/)
+- [Java Operator SDK GitHub Repository](https://github.com/operator-framework/java-operator-sdk)
diff --git a/docsy/assets/icons/logo.svg b/docs/assets/icons/logo.svg
similarity index 100%
rename from docsy/assets/icons/logo.svg
rename to docs/assets/icons/logo.svg
diff --git a/docsy/assets/scss/_variables_project.scss b/docs/assets/scss/_variables_project.scss
similarity index 100%
rename from docsy/assets/scss/_variables_project.scss
rename to docs/assets/scss/_variables_project.scss
diff --git a/docsy/config.yaml b/docs/config.yaml
similarity index 100%
rename from docsy/config.yaml
rename to docs/config.yaml
diff --git a/docs/content/en/_index.md b/docs/content/en/_index.md
new file mode 100644
index 0000000000..f375ebfb97
--- /dev/null
+++ b/docs/content/en/_index.md
@@ -0,0 +1,69 @@
+---
+title: Java Operator SDK Documentation
+---
+
+{{< blocks/cover title="Java Operator SDK" image_anchor="top" height="full" >}}
+
+ Learn More
+
+
+ Download
+
+Kubernetes operators in Java made easy!
+{{< blocks/link-down color="info" >}}
+{{< /blocks/cover >}}
+
+
+{{% blocks/lead color="gray" %}}
+Whether you want to build applications that operate themselves or provision infrastructure from Java code, Kubernetes Operators are the way to go.
+Java Operator SDK is based on the fabric8 Kubernetes client and will make it easy for Java developers to embrace this new way of automation.
+{{% /blocks/lead %}}
+
+
+{{% blocks/section color="secondary" type="row" %}}
+{{% blocks/feature icon="fab fa-slack" title="Contact us on Slack" url="https://kubernetes.slack.com/archives/CAW0GV7A5" %}}
+Feel free to reach out on [Kubernetes Slack](https://kubernetes.slack.com/archives/CAW0GV7A5)
+
+Ask any question, we are happy to answer!
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fab fa-github" title="Contributions welcome!" url="https://github.com/operator-framework/java-operator-sdk" %}}
+We do a [Pull Request](https://github.com/operator-framework/java-operator-sdk/pulls) contributions workflow on **GitHub**. New users are always welcome!
+{{% /blocks/feature %}}
+
+
+{{% blocks/feature icon="fa-brands fa-bluesky" title="Follow us on BlueSky!" url="https://bsky.app/profile/javaoperatorsdk.bsky.social" %}}
+For announcement of latest features etc.
+{{% /blocks/feature %}}
+
+
+{{% /blocks/section %}}
+
+
+{{% blocks/section %}}
+
+Sponsored by:
+{.h1 .text-center}
+
+
&
+{.h1 .text-center}
+
+{{% /blocks/section %}}
+
+
+{{% blocks/section type="row" %}}
+
+{{% blocks/feature icon="no_icon" %}}
+{{% /blocks/feature %}}
+
+{{% blocks/feature icon="no_icon" %}}
+Java Operator SDK is a [Cloud Native Computing Foundation](https://www.cncf.io) incubating project as part of [Operator Framework](https://www.cncf.io/projects/operator-framework/)
+{.h3 .text-center}
+
+
+
+{{% /blocks/feature %}}
+
+{{% /blocks/section %}}
+
diff --git a/docs/content/en/blog/_index.md b/docs/content/en/blog/_index.md
new file mode 100644
index 0000000000..e792e415fe
--- /dev/null
+++ b/docs/content/en/blog/_index.md
@@ -0,0 +1,8 @@
+---
+title: Blog
+menu: {main: {weight: 2}}
+---
+
+This is the **blog** section. It has two categories: News and Releases.
+
+Content is coming soon.
diff --git a/docs/content/en/blog/news/_index.md b/docs/content/en/blog/news/_index.md
new file mode 100644
index 0000000000..aaf1c2adcd
--- /dev/null
+++ b/docs/content/en/blog/news/_index.md
@@ -0,0 +1,4 @@
+---
+title: Posts
+weight: 220
+---
diff --git a/docs/content/en/blog/news/etcd-as-app-db.md b/docs/content/en/blog/news/etcd-as-app-db.md
new file mode 100644
index 0000000000..c6306ddffc
--- /dev/null
+++ b/docs/content/en/blog/news/etcd-as-app-db.md
@@ -0,0 +1,115 @@
+---
+title: Using k8s' ETCD as your application DB
+date: 2025-01-16
+---
+
+# FAQ: Is Kubernetes’ ETCD the Right Database for My Application?
+
+## Answer
+
+While the idea of moving your application data to Custom Resources (CRs) aligns with the "Cloud Native" philosophy, it often introduces more challenges than benefits. Let’s break it down:
+
+---
+
+### Top Reasons Why Storing Data in ETCD Through CRs Looks Appealing
+
+1. **Storing application data as CRs enables treating your application’s data like infrastructure:**
+ - **GitOps compatibility:** Declarative content can be stored in Git repositories, ensuring reproducibility.
+ - **Infrastructure alignment:** Application data can follow the same workflow as other infrastructure components.
+
+---
+
+### Challenges of Using Kubernetes’ ETCD as Your Application’s Database
+
+#### Technical Limitations:
+
+- **Data Size Limitations 🔴:**
+ - Each CR is capped at 1.5 MB by default. Raising this limit is possible but impacts cluster performance.
+ - Kubernetes ETCD has a storage cap of 2 GB by default. Adjusting this limit affects the cluster globally, with potential performance degradation.
+
+- **API Server Load Considerations 🟡:**
+ - The Kubernetes API server is designed to handle infrastructure-level requests.
+ - Storing application data in CRs might add significant load to the API server, requiring it to be scaled appropriately to handle both infrastructure and application demands.
+ - This added load can impact cluster performance and increase operational complexity.
+
+- **Guarantees 🟡:**
+ - Efficient queries are hard to implement, and there is no built-in support for them.
+ - ACID properties are hard to leverage; the guarantees mostly hold only in read-only mode.
+
+#### Operational Impact:
+
+- **Lost Flexibility 🟡:**
+ - Modifying application data requires complex YAML editing and full redeployment.
+ - This contrasts with traditional databases that often feature user-friendly web UIs or APIs for real-time updates.
+
+- **Infrastructure Complexity 🟠:**
+ - Backup, restore, and lifecycle management for application data are typically separate from deployment workflows.
+ - Storing both in ETCD mixes these concerns, complicating operations and standardization.
+
+#### Security:
+
+- **Governance and Security 🔴:**
+ - Sensitive data stored in plain YAML may lack adequate encryption or access controls.
+ - Applying governance policies over text-based files can become a significant challenge.
+
+---
+
+### When Might Using CRs Make Sense?
+
+For small, safe subsets of data—such as application configurations—using CRs might be appropriate. However, this approach requires a detailed evaluation of the trade-offs.
+
+---
+
+### Conclusion
+
+While it’s tempting to unify application data with infrastructure control via CRs, this introduces risks that can outweigh the benefits. For most applications, separating concerns by using a dedicated database is the more robust, scalable, and manageable solution.
+
+---
+
+### A Practical Example
+
+A typical “user” described in JSON:
+
+```json
+{
+ "username": "myname",
+ "enabled": true,
+ "email": "myname@test.com",
+ "firstName": "MyFirstName",
+ "lastName": "MyLastName",
+ "credentials": [
+ {
+ "type": "password",
+ "value": "test"
+ },
+ {
+ "type": "token",
+ "value": "oidc"
+ }
+ ],
+ "realmRoles": [
+ "user",
+ "viewer",
+ "admin"
+ ],
+ "clientRoles": {
+ "account": [
+ "view-profile",
+ "change-group",
+ "manage-account"
+ ]
+ }
+}
+```
+
+This example represents about **0.5 KB of data**, meaning (with standard settings) a maximum of ~2000 users can be defined in the same CR.
+Additionally:
+
+- It contains **sensitive information**, which should be securely stored.
+- Regulatory rules (like GDPR) apply.
+
+---
+
+### References
+
+- [Using etcd as primary store database](https://stackoverflow.com/questions/41063238/using-etcd-as-primary-store-database)
diff --git a/docs/content/en/blog/news/nonssa-vs-ssa.md b/docs/content/en/blog/news/nonssa-vs-ssa.md
new file mode 100644
index 0000000000..8ea7497771
--- /dev/null
+++ b/docs/content/en/blog/news/nonssa-vs-ssa.md
@@ -0,0 +1,117 @@
+---
+title: From legacy approach to server-side apply
+date: 2025-02-25
+author: >-
+ [Attila Mészáros](https://github.com/csviri)
+---
+
+From version 5 of Java Operator SDK [server side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/)
+is a first-class feature and is used by default to update resources.
+As we will see, unfortunately (or fortunately), using it requires changes to your reconciler implementation.
+
+For this reason, we prepared a feature flag, which you can flip if you are not prepared to migrate yet:
+[`ConfigurationService.useSSAToPatchPrimaryResource`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L493)
+
+Setting this flag to `false` makes the operations performed through `UpdateControl` use the former (non-SSA) approach.
+Similarly, the finalizer handling won't use SSA.
+The plan is to keep this flag and continue to allow the former (non-SSA) approach in future releases.
+
+For dependent resources, a separate flag exists (this was true also before v5) to use SSA or not:
+[`ConfigurationService.ssaBasedCreateUpdateMatchForDependentResources`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L373)
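+
+For illustration, flipping both flags when constructing the operator could look roughly like this. This is only a
+sketch: it assumes the configuration overrider exposes the usual `with...` setters for these two properties, so check
+the exact method names in your version.
+
+```java
+import io.javaoperatorsdk.operator.Operator;
+
+public class OperatorMain {
+
+  public static void main(String[] args) {
+    var operator = new Operator(override -> override
+        // keep the legacy (non-SSA) patching of the primary resource
+        .withUseSSAToPatchPrimaryResource(false)
+        // keep the legacy create/update/match handling for dependent resources
+        .withSSABasedCreateUpdateMatchForDependentResources(false));
+
+    operator.register(new WebPageReconciler()); // your reconciler, e.g. one handling WebPage as below
+    operator.start();
+  }
+}
+```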
+
+
+## Resource handling without and with SSA
+
+Until version 5, changing primary resources through `UpdateControl` did not use server-side apply.
+So usually, the implementation of the reconciler looked something like this:
+
+```java
+
+ @Override
+  public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+ webPage.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(webPage);
+ }
+
+```
+
+In other words, after the reconciliation of managed resources, the reconciler updates the status of the
+primary resource passed as an argument to the reconciler.
+Such changes on the primary are fine since we don't work directly with the cached object; the argument is
+already a clone.
+
+So, how does this change with SSA?
+For SSA, the updates should contain (only) the "fully specified intent".
+In other words, we should only fill in the values we care about.
+In practice, it means creating a **fresh copy** of the resource and setting only what is necessary:
+
+```java
+
+@Override
+public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+
+ WebPage statusPatch = new WebPage();
+ statusPatch.setMetadata(new ObjectMetaBuilder()
+ .withName(webPage.getMetadata().getName())
+ .withNamespace(webPage.getMetadata().getNamespace())
+ .build());
+ statusPatch.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(statusPatch);
+}
+```
+
+Note that we only filled in the status here, since we are patching the status (not the resource spec).
+Since the status is a sub-resource in Kubernetes, it will only update the status part.
+
+Every controller you register will have its default [field manager](https://kubernetes.io/docs/reference/using-api/server-side-apply/#managers).
+You can override the field manager name using [`ControllerConfiguration.fieldManager`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ControllerConfiguration.java#L89).
+That will set the field manager for the primary resource and dependent resources as well.
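+
+For example, a sketch using the `fieldManager` attribute of the annotation linked above:
+
+```java
+@ControllerConfiguration(fieldManager = "webpage-operator")
+public class WebPageReconciler implements Reconciler<WebPage> {
+  // ...
+}
+```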
+
+## Migrating to SSA
+
+Using either the legacy or the new SSA-based way of resource management works well.
+However, migrating existing resources to SSA might be a challenge.
+We strongly recommend testing the migration, e.g. by implementing an integration test where
+a custom resource is created using the legacy approach and is then managed using the new approach.
+
+We prepared an integration test to demonstrate how such migration, even in a simple case, can go wrong,
+and how to fix it.
+
+To fix some cases, you might need to [strip managed fields](https://kubernetes.io/docs/reference/using-api/server-side-apply/#clearing-managedfields)
+from the custom resource.
+
+See [`StatusPatchSSAMigrationIT`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/statuspatchnonlocking/StatusPatchSSAMigrationIT.java) for details.
+
+Feel free to report common issues, so we can prepare some utilities to handle them.
+
+## Optimistic concurrency control
+
+When you create a resource for SSA as shown above, the framework will apply the changes even if the underlying resource
+or status subresource changed while the reconciliation was running.
+First, it always forces conflicts in the background, as advised in the [Kubernetes docs](https://kubernetes.io/docs/reference/using-api/server-side-apply/#using-server-side-apply-in-a-controller).
+In addition, since the resource version is not set, the patch won't do optimistic locking. If you still
+want optimistic locking for the patch, set the resource version from the original resource:
+
+```java
+@Override
+public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ reconcileLogicForManagedResources(webPage);
+
+ WebPage statusPatch = new WebPage();
+ statusPatch.setMetadata(new ObjectMetaBuilder()
+ .withName(webPage.getMetadata().getName())
+ .withNamespace(webPage.getMetadata().getNamespace())
+ .withResourceVersion(webPage.getMetadata().getResourceVersion())
+ .build());
+ statusPatch.setStatus(updatedStatusForWebPage(webPage));
+
+ return UpdateControl.patchStatus(statusPatch);
+}
+```
diff --git a/docs/content/en/blog/news/primary-cache-for-next-recon.md b/docs/content/en/blog/news/primary-cache-for-next-recon.md
new file mode 100644
index 0000000000..67326a6f17
--- /dev/null
+++ b/docs/content/en/blog/news/primary-cache-for-next-recon.md
@@ -0,0 +1,92 @@
+---
+title: How to guarantee allocated values for next reconciliation
+date: 2025-05-22
+author: >-
+ [Attila Mészáros](https://github.com/csviri) and [Chris Laprun](https://github.com/metacosm)
+---
+
+We recently released v5.1 of Java Operator SDK (JOSDK). One of the highlights of this release is related to a topic of
+so-called
+[allocated values](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#representing-allocated-values).
+
+To describe the problem, let's say that our controller needs to create a resource that has a generated identifier, i.e.
+a resource whose identifier cannot be directly derived from the custom resource's desired state as specified in its
+`spec` field. To record the fact that the resource was successfully created, and to avoid attempting to
+recreate the resource again in subsequent reconciliations, it is typical for this type of controller to store the
+generated identifier in the custom resource's `status` field.
+
+The Java Operator SDK relies on the informers' cache to retrieve resources. These caches, however, are only guaranteed
+to be eventually consistent. If some other event triggers a new reconciliation **before** the status update we just
+made has had the chance to propagate first to the cluster and then back to the informer cache, the resource in the
+informer cache will **not** contain the latest version as modified by the reconciler. This would result in a new
+reconciliation where the generated identifier is missing from the resource status and, therefore, another attempt by
+the reconciler to create the resource, which is not what we'd like.
+
+Java Operator SDK now provides a utility class [
+`PrimaryUpdateAndCacheUtils`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/PrimaryUpdateAndCacheUtils.java)
+to handle this particular use case. Using that overlay cache, your reconciler is guaranteed to see the most up-to-date
+version of the resource on the next reconciliation:
+
+```java
+
+@Override
+public UpdateControl<StatusPatchCacheCustomResource> reconcile(
+    StatusPatchCacheCustomResource resource,
+    Context<StatusPatchCacheCustomResource> context) {
+
+ // omitted code
+
+ var freshCopy = createFreshCopy(resource); // need fresh copy just because we use the SSA version of update
+ freshCopy
+ .getStatus()
+ .setValue(statusWithAllocatedValue());
+
+ // using the utility instead of update control to patch the resource status
+ var updated =
+ PrimaryUpdateAndCacheUtils.ssaPatchStatusAndCacheResource(resource, freshCopy, context);
+ return UpdateControl.noUpdate();
+}
+```
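+
+The `createFreshCopy` helper above is not part of the SDK; a hypothetical implementation could look like the following
+(the status class name is an assumption):
+
+```java
+private StatusPatchCacheCustomResource createFreshCopy(StatusPatchCacheCustomResource original) {
+  var copy = new StatusPatchCacheCustomResource();
+  // for an SSA-style patch we only set what we own: the identifying metadata and the status
+  copy.setMetadata(new ObjectMetaBuilder()
+      .withName(original.getMetadata().getName())
+      .withNamespace(original.getMetadata().getNamespace())
+      .build());
+  copy.setStatus(new StatusPatchCacheCustomResourceStatus());
+  return copy;
+}
+```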
+
+How does `PrimaryUpdateAndCacheUtils` work?
+There are multiple ways to solve this problem, but ultimately, we only provide the solution described below. If you
+want to dig deeper into the alternatives, see
+this [PR](https://github.com/operator-framework/java-operator-sdk/pull/2800/files).
+
+The trick is to intercept the resource that the reconciler updated and cache that version in an additional cache on top
+of the informer's cache. Subsequently, if the reconciler needs to read the resource, the SDK will first check if it is
+in the overlay cache and read it from there if present, otherwise read it from the informer's cache. If the informer
+receives an event with a fresh resource, we always remove the resource from the overlay cache, since that is a more
+recent resource. But this **works only** if the reconciler updates the resource using **optimistic locking**.
+If the update fails on conflict, because the resource has already been updated on the cluster before we got
+the chance to get our update in, we simply wait and poll the informer cache until the new resource version from the
+server appears in the informer's cache,
+and then try to apply our updates to the resource again using the updated version from the server, again with optimistic
+locking.
+
+So why is optimistic locking required? We hinted at it above, but the gist of it is that if another party updates the
+resource before we get a chance to, we wouldn't be able to handle the resulting situation correctly in all
+cases. The informer would receive that new event before our own update would get a chance to propagate. Without
+optimistic locking, there wouldn't be a fail-proof way to determine which update should prevail (i.e. which occurred
+first), in particular in the event of the informer losing the connection to the cluster or other edge cases (the joys of
+distributed computing!).
+
+Optimistic locking simplifies the situation and provides us with stronger guarantees: if the update succeeds, then we
+can be sure we have the proper resource version in our caches. The next event will contain our update in all cases.
+Because we know that, we can also be sure that we can evict the cached resource in the overlay cache whenever we receive
+a new event. The overlay cache is only used if the SDK detects that the original resource (i.e. the one before we
+applied our status update in the example above) is still in the informer's cache.
+
+The following diagram sums up the process:
+
+```mermaid
+flowchart TD
+ A["Update Resource with Lock"] --> B{"Is Successful"}
+ B -- Fails on conflict --> D["Poll the Informer cache until resource updated"]
+ D --> A
+ B -- Yes --> n2{"Original resource still in informer cache?"}
+ n2 -- Yes --> C["Cache the resource in overlay cache"]
+ n2 -- No --> n3["Informer cache already contains up-to-date version, do not use overlay cache"]
+```
diff --git a/docs/content/en/blog/releases/_index.md b/docs/content/en/blog/releases/_index.md
new file mode 100644
index 0000000000..dbf2ee1729
--- /dev/null
+++ b/docs/content/en/blog/releases/_index.md
@@ -0,0 +1,4 @@
+---
+title: Releases
+weight: 230
+---
diff --git a/docs/content/en/blog/releases/v5-release-beta1.md b/docs/content/en/blog/releases/v5-release-beta1.md
new file mode 100644
index 0000000000..7dd133cc1d
--- /dev/null
+++ b/docs/content/en/blog/releases/v5-release-beta1.md
@@ -0,0 +1,6 @@
+---
+title: Version 5 Released! (beta1)
+date: 2024-12-06
+---
+
+See release notes [here](v5-release.md).
\ No newline at end of file
diff --git a/docs/content/en/blog/releases/v5-release.md b/docs/content/en/blog/releases/v5-release.md
new file mode 100644
index 0000000000..6d14dfb73a
--- /dev/null
+++ b/docs/content/en/blog/releases/v5-release.md
@@ -0,0 +1,397 @@
+---
+title: Version 5 Released!
+date: 2025-01-06
+---
+
+We are excited to announce that Java Operator SDK v5 has been released. This significant effort contains
+various features and enhancements accumulated since the last major release, as well as the required changes in our APIs.
+In this post, we will go through the main changes, help you upgrade to this new version, and provide
+the rationale behind the changes where necessary.
+
+We will omit descriptions of changes that should only require simple code updates; please do contact
+us if you encounter issues anyway.
+
+You can see an introduction to some of the important changes, and the rationale behind them, in this talk from [KubeCon](https://youtu.be/V0NYHt2yjcM?t=1238).
+
+## Various Changes
+
+- From this release, the minimal Java version is 17.
+- Various deprecated APIs are removed. Migration should be easy.
+
+## All Changes
+
+You can see all changes [here](https://github.com/operator-framework/java-operator-sdk/compare/v4.9.7...v5.0.0).
+
+## Changes in low-level APIs
+
+### Server Side Apply (SSA)
+
+[Server Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) is now a first-class citizen in
+the framework and
+the default approach for patching the status resource. This means that patching a resource or its status through
+`UpdateControl` and adding
+the finalizer in the background will both use SSA.
+
+Migration from non-SSA based patching to SSA based patching can be problematic. Make sure you test the transition when
+you migrate from an older version of the framework.
+To continue to use the non-SSA based approach,
+set [ConfigurationService.useSSAToPatchPrimaryResource](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L462)
+to `false`.
+
+See some identified problematic migration cases and how to handle them
+in [StatusPatchSSAMigrationIT](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/statuspatchnonlocking/StatusPatchSSAMigrationIT.java).
+
+For a more detailed description, see our [blog post](../news/nonssa-vs-ssa.md) on SSA.
+
+### Event Sources related changes
+
+#### Multi-cluster support in InformerEventSource
+
+`InformerEventSource` now supports watching remote clusters. You can simply pass a `KubernetesClient` instance
+initialized to connect to a different cluster from the one where the controller runs when configuring your event source.
+See [InformerEventSourceConfiguration.withKubernetesClient](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+
+Such an informer behaves exactly as a regular one. Owner references won't work in this situation, though, so you have to
+specify a `SecondaryToPrimaryMapper` (probably based on labels or annotations).
+
+See related integration
+test [here](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/informerremotecluster)
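+
+A rough sketch of such a configuration (the annotation-based mapper below is just an illustration; `MyCustomResource`
+and the annotation keys are placeholders, and imports are omitted):
+
+```java
+KubernetesClient remoteClient = new KubernetesClientBuilder()
+    .withConfig(Config.fromKubeconfig(remoteKubeconfigContent))
+    .build();
+
+InformerEventSource<ConfigMap, MyCustomResource> remoteConfigMaps =
+    new InformerEventSource<>(
+        InformerEventSourceConfiguration.from(ConfigMap.class, MyCustomResource.class)
+            .withKubernetesClient(remoteClient)
+            // owner references don't work across clusters, so map back to the primary via annotations
+            .withSecondaryToPrimaryMapper(
+                cm -> Set.of(new ResourceID(
+                    cm.getMetadata().getAnnotations().get("example.com/primary-name"),
+                    cm.getMetadata().getAnnotations().get("example.com/primary-namespace"))))
+            .build(),
+        context);
+```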
+
+#### SecondaryToPrimaryMapper now checks resource types
+
+The owner reference based mappers now check the type (`kind` and `apiVersion`) of the resource when resolving the
+mapping. This is important
+since a resource may have owner references to a different resource type with the same name.
+
+See implementation
+details [here](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/Mappers.java#L74-L75)
+
+#### InformerEventSource-related changes
+
+There are multiple smaller changes to `InformerEventSource` and related classes:
+
+1. `InformerConfiguration` is renamed
+ to [
+ `InformerEventSourceConfiguration`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+2. `InformerEventSourceConfiguration` doesn't require `EventSourceContext` to be initialized anymore.
+
+#### All EventSources are now ResourceEventSources
+
+The [
+`EventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java)
+abstraction is now always aware of the resources it handles and
+provides access to the (cached) resources, filtering, and additional capabilities. Before v5, such capabilities were
+present only in a sub-class called `ResourceEventSource`,
+but we decided to merge the two and remove `ResourceEventSource`, which simplified the architecture of other parts of
+the system.
+
+If you still need to create an `EventSource` that only supports triggering of your reconciler,
+see [
+`TimerEventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/timer/TimerEventSource.java)
+for an example of how this can be accomplished.
+
+#### Naming event sources
+
+[
+`EventSource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java#L45)
+instances are now named. This reduces the ambiguity that might have existed when trying to refer to an `EventSource`.
+
+### ControllerConfiguration annotation related changes
+
+You no longer have to annotate the reconciler with the `@ControllerConfiguration` annotation.
+This annotation is (one) way to override the default properties of a controller.
+If the annotation is not present, the default values from the annotation are used.
+
+PR: https://github.com/operator-framework/java-operator-sdk/pull/2203
+
+In addition to that, the informer-related configurations are now extracted into
+a separate [
+`@Informer`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/Informer.java)
+annotation within [
+`@ControllerConfiguration`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/ControllerConfiguration.java#L24).
+Hopefully this makes it explicit which part of the configuration affects the informer associated with the primary resource.
+Similarly, the same `@Informer` annotation is used when configuring the informer associated with a managed
+`KubernetesDependentResource` via the
+[
+`KubernetesDependent`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependent.java#L33)
+annotation.
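+
+For example, a controller and its primary informer might be configured roughly like this (a sketch; check the
+`@Informer` attributes available in your version):
+
+```java
+@ControllerConfiguration(
+    informer = @Informer(namespaces = "webpage-namespace", labelSelector = "app=webpage"))
+public class WebPageReconciler implements Reconciler<WebPage> {
+  // ...
+}
+```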
+
+### EventSourceInitializer and ErrorStatusHandler are removed
+
+Both the `EventSourceInitializer` and `ErrorStatusHandler` interfaces are removed, and their methods moved directly
+under [
+`Reconciler`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java#L30-L56).
+
+If possible, we try to avoid such marker interfaces since it is hard to deduce related usage just by looking at the
+source code.
+You can now simply override those methods when implementing the `Reconciler` interface.
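+
+In practice, this means event sources are now declared by overriding `prepareEventSources` directly on the reconciler,
+roughly like this (a sketch; imports omitted):
+
+```java
+public class WebPageReconciler implements Reconciler<WebPage> {
+
+  @Override
+  public List<EventSource<?, WebPage>> prepareEventSources(EventSourceContext<WebPage> context) {
+    var configMaps = new InformerEventSource<>(
+        InformerEventSourceConfiguration.from(ConfigMap.class, WebPage.class).build(), context);
+    return List.of(configMaps);
+  }
+
+  @Override
+  public UpdateControl<WebPage> reconcile(WebPage resource, Context<WebPage> context) {
+    // ...
+    return UpdateControl.noUpdate();
+  }
+}
+```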
+
+### Cloning when accessing secondary resources
+
+When accessing the secondary resources using [
+`Context.getSecondaryResource(s)(...)`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Context.java#L19-L29),
+the resources are no longer cloned by default, since
+cloning could have an impact on performance. This means that any change you make to such a resource
+is now made directly on the underlying cached instance. This should be avoided, since the same resource instance may be
+used in other reconciliation cycles and would
+no longer represent the state on the server.
+
+If you still want resources to be cloned by default,
+set [
+`ConfigurationService.cloneSecondaryResourcesWhenGettingFromCache`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L484)
+to `true`.
+
+### Removed automated observed generation handling
+
+The automatic observed generation handling feature was removed since it is easy to implement inside the reconciler, but
+it made
+the framework implementation much more complex, especially since the framework would have to support it both for server-side apply
+and client-side apply.
+
+You can check a sample implementation of how to do it manually in
+this [integration test](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/manualobservedgeneration/).
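+
+Manually, this boils down to something like the following inside the reconciler (a sketch; it assumes your status
+class exposes an `observedGeneration` field):
+
+```java
+statusPatch.getStatus().setObservedGeneration(resource.getMetadata().getGeneration());
+return UpdateControl.patchStatus(statusPatch);
+```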
+
+## Dependent Resource related changes
+
+### ResourceDiscriminator is removed and related changes
+
+The primary reason `ResourceDiscriminator` was introduced was to cover the case when there are
+more than one dependent resource of a given type associated with a given primary resource. In this situation, JOSDK
+needed a generic mechanism to
+identify which resources on the cluster should be associated with which dependent resource implementation.
+We improved this association mechanism, thus rendering `ResourceDiscriminator` obsolete.
+
+As a replacement, the dependent resource will select the target resource based on the desired state.
+See the generic implementation in [
+`AbstractDependentResource`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java#L135-L144).
+Calculating the desired state can be costly and might depend on other resources. For `KubernetesDependentResource`
+it is usually enough to provide the name and namespace (if namespace-scoped) of the target resource, which is what the
+`KubernetesDependentResource` implementation does by default. If you can determine which secondary to target without
+computing the desired state via its associated `ResourceID`, then we encourage you to override the
+[
+`ResourceID targetSecondaryResourceID()`](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java#L234-L244)
+method as shown
+in [this example](https://github.com/operator-framework/java-operator-sdk/blob/c7901303c5304e6017d050f05cbb3d4930bdfe44/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledrsametypenodiscriminator/MultipleManagedDependentNoDiscriminatorConfigMap1.java#L24-L35).
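+
+A rough sketch of such an override (the exact method signature may differ slightly):
+
+```java
+public class MyConfigMapDependent extends CRUDKubernetesDependentResource<ConfigMap, MyCustomResource> {
+
+  @Override
+  protected ResourceID targetSecondaryResourceID(MyCustomResource primary,
+      Context<MyCustomResource> context) {
+    // derive the target ConfigMap's name and namespace directly from the primary resource,
+    // without computing the full desired state
+    return new ResourceID(primary.getMetadata().getName() + "-config",
+        primary.getMetadata().getNamespace());
+  }
+}
+```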
+
+### Read-only bulk dependent resources
+
+Read-only bulk dependent resources are now supported; this was a request from multiple users, but it required changes to
+the underlying APIs.
+Please check the documentation for further details.
+
+See also the
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent/readonly).
+
+### Multiple Dependents with Activation Condition
+
+Until now, activation conditions had a limitation: only one condition was allowed for a given resource type.
+For example, two `ConfigMap` dependent resources, both with activation conditions, were not allowed. The underlying
+issue was with the informer registration process. When an activation condition is evaluated as "met", the informer for
+the target resource type is registered dynamically in the background. However, multiple informers of the same kind must
+not be registered. To prevent this, the dependent resource must specify
+the [name of the informer](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/multipledependentwithactivation/ConfigMapDependentResource2.java#L12).
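+
+As a sketch, assuming the `@Informer` annotation's `name` attribute (as used in the linked test), the second
+`ConfigMap` dependent could name its informer explicitly:
+
+```java
+// sketch - the informer name lets the framework deduplicate informers of the same kind
+@KubernetesDependent(informer = @Informer(name = "configMapInformer"))
+public class ConfigMapDependentResource2
+    extends CRUDKubernetesDependentResource<ConfigMap, MyCustomResource> {
+  // desired state omitted
+}
+```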
+
+See the complete
+example [here](https://github.com/operator-framework/java-operator-sdk/blob/1635c9ea338f8e89bacc547808d2b409de8734cf/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/multipledependentwithactivation).
+
+### `getSecondaryResource` is Activation condition aware
+
+When an activation condition for a resource type is not met, there might be no informer registered for that resource
+type. In this situation, calling `Context.getSecondaryResource` and its alternatives would previously throw an
+exception. This was rather confusing; a better user experience is to return an empty value instead of throwing an
+error. We changed this behavior in v5: attempting to retrieve a secondary resource that is gated by an unmet activation
+condition now returns an empty value, as if the associated informer existed.
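+
+In practice, this means the usual `Optional` handling covers both the "not activated" and the "not yet created" cases,
+for example:
+
+```java
+// empty if the ConfigMap dependent's activation condition is not met (no informer registered),
+// or if the resource simply does not exist yet
+Optional<ConfigMap> configMap = context.getSecondaryResource(ConfigMap.class);
+configMap.ifPresentOrElse(
+    cm -> {
+      // use the ConfigMap
+    },
+    () -> {
+      // the feature gated by the activation condition is not available
+    });
+```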
+
+See related [issue](https://github.com/operator-framework/java-operator-sdk/issues/2198) for details.
+
+## Workflow related changes
+
+### `@Workflow` annotation
+
+The managed workflow definition is now a separate `@Workflow` annotation; it is no longer part of
+`@ControllerConfiguration`.
+
+See sample
+usage [here](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageManagedDependentsReconciler.java#L14-L20)
+
+### Explicit workflow invocation
+
+Before v5, the managed dependents of a workflow were always reconciled before the primary `Reconciler`'s `reconcile`
+or `cleanup` methods were called. It is now possible to explicitly ask for a workflow reconciliation in your primary
+`Reconciler`, thus allowing you to control when the workflow is reconciled. This means you can perform all kinds of
+operations - typically validations - before executing the workflow, as shown in the sample below:
+
+```java
+
+@Workflow(explicitInvocation = true,
+    dependents = @Dependent(type = ConfigMapDependent.class))
+@ControllerConfiguration
+public class WorkflowExplicitCleanupReconciler
+    implements Reconciler<WorkflowExplicitCleanupCustomResource>,
+        Cleaner<WorkflowExplicitCleanupCustomResource> {
+
+  @Override
+  public UpdateControl<WorkflowExplicitCleanupCustomResource> reconcile(
+      WorkflowExplicitCleanupCustomResource resource,
+      Context<WorkflowExplicitCleanupCustomResource> context) {
+
+    context.managedWorkflowAndDependentResourceContext().reconcileManagedWorkflow();
+
+    return UpdateControl.noUpdate();
+  }
+
+  @Override
+  public DeleteControl cleanup(WorkflowExplicitCleanupCustomResource resource,
+      Context<WorkflowExplicitCleanupCustomResource> context) {
+
+    context.managedWorkflowAndDependentResourceContext().cleanupManageWorkflow();
+    // the cleanup result can be checked with:
+    // context.managedWorkflowAndDependentResourceContext().getWorkflowCleanupResult()
+    return DeleteControl.defaultDelete();
+  }
+}
+```
+
+To turn on this mode of execution, set the [
+`explicitInvocation`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Workflow.java#L26)
+flag to `true` in the managed workflow definition.
+
+See the following integration tests
+for [
+`invocation`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitinvocation)
+and [
+`cleanup`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitcleanup).
+
+### Explicit exception handling
+
+If an exception happens during a workflow reconciliation, the framework automatically propagates it.
+You can now set [
+`handleExceptionsInReconciler`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Workflow.java#L40)
+to `true` for a workflow and explicitly check the thrown exceptions
+in the execution results.
+
+```java
+
+@Workflow(handleExceptionsInReconciler = true,
+    dependents = @Dependent(type = ConfigMapDependent.class))
+@ControllerConfiguration
+public class HandleWorkflowExceptionsInReconcilerReconciler
+    implements Reconciler<HandleWorkflowExceptionsInReconcilerCustomResource>,
+        Cleaner<HandleWorkflowExceptionsInReconcilerCustomResource> {
+
+  private volatile boolean errorsFoundInReconcilerResult = false;
+  private volatile boolean errorsFoundInCleanupResult = false;
+
+  @Override
+  public UpdateControl<HandleWorkflowExceptionsInReconcilerCustomResource> reconcile(
+      HandleWorkflowExceptionsInReconcilerCustomResource resource,
+      Context<HandleWorkflowExceptionsInReconcilerCustomResource> context) {
+
+    var workflowResult = context.managedWorkflowAndDependentResourceContext()
+        .getWorkflowReconcileResult();
+    errorsFoundInReconcilerResult = workflowResult.erroredDependentsExist();
+
+    // the individual exceptions can be inspected here:
+    var errors = workflowResult.getErroredDependents();
+
+    return UpdateControl.noUpdate();
+  }
+  // cleanup method omitted
+}
+```
+
+See integration
+test [here](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowsilentexceptionhandling).
+
+### CRDPresentActivationCondition
+
+Activation conditions are typically used to check whether the cluster has specific capabilities (e.g., whether
+cert-manager is available).
+Such a check can be done by verifying that a particular custom resource definition (CRD) is present on the cluster. You
+can now use the generic [
+`CRDPresentActivationCondition`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/CRDPresentActivationCondition.java)
+for this purpose: it checks whether the CRD of the dependent resource's target resource type exists on the cluster.
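+
+For instance, a dependent managing a cert-manager resource could be gated on the presence of its CRD; a sketch with a
+hypothetical `CertificateDependentResource`:
+
+```java
+@Workflow(dependents = {
+    @Dependent(
+        type = CertificateDependentResource.class,
+        activationCondition = CRDPresentActivationCondition.class)
+})
+@ControllerConfiguration
+public class MyReconciler implements Reconciler<MyCustomResource> {
+  // reconcile method omitted
+}
+```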
+
+See usage in integration
+test [here](https://github.com/operator-framework/java-operator-sdk/blob/refs/heads/next/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/crdpresentactivation).
+
+## Fabric8 client updated to 7.0
+
+The Fabric8 client has been updated to version 7.0.0. This is a new major version, which implies that some APIs might
+have changed. Please take a look at the [Fabric8 client 7.0.0 migration guide](https://github.com/fabric8io/kubernetes-client/blob/main/doc/MIGRATION-v7.md).
+
+### CRD generator changes
+
+Starting with v5.0 (in accordance with changes made to the Fabric8 client in version 7.0.0), the CRD generator uses a Maven plugin instead of the annotation processor that was previously used.
+In many instances, you can simply configure the plugin by adding the following stanza to your project's POM build configuration:
+
+```xml
+<plugin>
+  <groupId>io.fabric8</groupId>
+  <artifactId>crd-generator-maven-plugin</artifactId>
+  <version>${fabric8-client.version}</version>
+  <executions>
+    <execution>
+      <goals>
+        <goal>generate</goal>
+      </goals>
+    </execution>
+  </executions>
+</plugin>
+```
+*NOTE*: If you use the SDK's JUnit extension for your tests, you might also need to configure the CRD generator plugin to access your test `CustomResource` implementations as follows:
+```xml
+<plugin>
+  <groupId>io.fabric8</groupId>
+  <artifactId>crd-generator-maven-plugin</artifactId>
+  <version>${fabric8-client.version}</version>
+  <executions>
+    <execution>
+      <goals>
+        <goal>generate</goal>
+      </goals>
+      <phase>process-test-classes</phase>
+      <configuration>
+        <!-- configuration element names may vary; see the CRD generator documentation for the exact parameters -->
+        <classesToScan>${project.build.testOutputDirectory}</classesToScan>
+        <classpath>WITH_ALL_DEPENDENCIES_AND_TESTS</classpath>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+```
+
+Please refer to the [CRD generator documentation](https://github.com/fabric8io/kubernetes-client/blob/main/doc/CRD-generator.md) for more details.
+
+
+## Experimental
+
+### Check if the following reconciliation is imminent
+
+You can now check whether a subsequent reconciliation will happen right after the current one because the SDK has
+already received an event that will trigger a new reconciliation.
+This information is available from
+the [
+`Context`](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Context.java#L69).
+
+This could be useful, for example, in situations where a heavy task would be repeated in the follow-up reconciliation:
+in the current reconciliation, you can check this flag and return early to avoid unneeded processing. Note that this is
+a semi-experimental feature, so please let us know if you find it helpful.
+
+```java
+
+@Override
+public UpdateControl<MyCustomResource> reconcile(MyCustomResource resource, Context<MyCustomResource> context) {
+
+  if (context.isNextReconciliationImminent()) {
+    // a follow-up reconciliation is already queued, so skip the heavy work and return early
+    return UpdateControl.noUpdate();
+  }
+  // ... otherwise perform the (potentially heavy) reconciliation logic
+  return UpdateControl.noUpdate();
+}
+```
+
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/664cb7109fe62f9822997d578ae7f57f17ef8c26/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/nextreconciliationimminent).
\ No newline at end of file
diff --git a/docs/content/en/community/_index.md b/docs/content/en/community/_index.md
new file mode 100644
index 0000000000..fa42c2d974
--- /dev/null
+++ b/docs/content/en/community/_index.md
@@ -0,0 +1,6 @@
+---
+title: Community
+menu: {main: {weight: 3}}
+---
+
+
diff --git a/docs/content/en/docs/_index.md b/docs/content/en/docs/_index.md
new file mode 100755
index 0000000000..5c7b74ab4b
--- /dev/null
+++ b/docs/content/en/docs/_index.md
@@ -0,0 +1,6 @@
+---
+title: Documentation
+linkTitle: Docs
+menu: {main: {weight: 1}}
+weight: 1
+---
diff --git a/docs/content/en/docs/contributing/_index.md b/docs/content/en/docs/contributing/_index.md
new file mode 100644
index 0000000000..0ab40d55b1
--- /dev/null
+++ b/docs/content/en/docs/contributing/_index.md
@@ -0,0 +1,68 @@
+---
+title: Contributing
+weight: 110
+---
+
+Thank you for considering contributing to the Java Operator SDK project! We're building a vibrant community and need help from people like you to make it happen.
+
+## Code of Conduct
+
+We're committed to making this a welcoming, inclusive project. We do not tolerate discrimination, aggressive or insulting behavior.
+
+This project and all participants are bound by our [Code of Conduct]({{baseurl}}/coc). By participating, you're expected to uphold this code. Please report unacceptable behavior to any project admin.
+
+## Reporting Bugs
+
+Found a bug? Please [open an issue](https://github.com/java-operator-sdk/java-operator-sdk/issues)! Include all details needed to recreate the problem:
+
+- Operator SDK version being used
+- Exact platform and version you're running on
+- Steps to reproduce the bug
+- Reproducer code (very helpful for quick diagnosis and fixes)
+
+## Contributing Features and Documentation
+
+Looking for something to work on? Check the issue tracker, especially items labeled [good first issue](https://github.com/java-operator-sdk/java-operator-sdk/labels/good%20first%20issue). Please comment on the issue when you start work to avoid duplicated effort.
+
+### Feature Ideas
+
+Have a feature idea? Open an issue labeled "enhancement" even if you can't work on it immediately. We'll discuss it as a community and see what's possible.
+
+**Important**: Some features may not align with project goals. Please discuss new features before starting work to avoid wasted effort. We commit to listening to all proposals and working something out when possible.
+
+### Development Process
+
+Once you have approval to work on a feature:
+1. Communicate progress via issue updates or our [Discord channel](https://discord.gg/DacEhAy)
+2. Ask for feedback and pointers as needed
+3. Open a Pull Request when ready
+
+## Pull Request Process
+
+### Commit Messages
+Format commit messages following [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) format.
+
+### Testing and Review
+- GitHub Actions will run the test suite on your PR
+- All code must pass tests
+- New code must include new tests
+- All PRs require review and sign-off from another developer
+- Expect requests for changes - this is normal and part of the process
+- PRs must comply with Java Google code style
+
+### Licensing
+All Operator SDK code is released under the [Apache 2.0 licence](LICENSE).
+
+## Development Environment Setup
+
+### Code Style
+
+SDK modules and samples follow Java Google code style. Code gets formatted automatically on every `compile`, but to avoid PR rejections due to style issues, set up your IDE:
+
+**IntelliJ IDEA**: Install the [google-java-format](https://plugins.jetbrains.com/plugin/8527-google-java-format) plugin
+
+**Eclipse**: Follow [these instructions](https://github.com/google/google-java-format?tab=readme-ov-file#eclipse)
+
+## Acknowledgments
+
+These guidelines were inspired by [Atom](https://github.com/atom/atom/blob/master/CONTRIBUTING.md), [PurpleBooth's advice](https://gist.github.com/PurpleBooth/b24679402957c63ec426), and the [Contributor Covenant](https://www.contributor-covenant.org/).
diff --git a/docs/content/en/docs/documentation/_index.md b/docs/content/en/docs/documentation/_index.md
new file mode 100644
index 0000000000..59373c6974
--- /dev/null
+++ b/docs/content/en/docs/documentation/_index.md
@@ -0,0 +1,25 @@
+---
+title: Documentation
+weight: 40
+---
+
+# JOSDK Documentation
+
+This section contains detailed documentation for all Java Operator SDK features and concepts. Whether you're building your first operator or need advanced configuration options, you'll find comprehensive guides here.
+
+## Core Concepts
+
+- **[Implementing a Reconciler](reconciler/)** - The heart of any operator
+- **[Architecture](architecture/)** - How JOSDK works under the hood
+- **[Dependent Resources & Workflows](dependent-resource-and-workflows/)** - Managing resource relationships
+- **[Configuration](configuration/)** - Customizing operator behavior
+- **[Error Handling & Retries](error-handling-retries/)** - Managing failures gracefully
+
+## Advanced Features
+
+- **[Eventing](eventing/)** - Understanding the event-driven model
+- **[Accessing Resources in Caches](working-with-es-caches/)** - How to access resources in caches
+- **[Observability](observability/)** - Monitoring and debugging your operators
+- **[Other Features](features/)** - Additional capabilities and integrations
+
+Each guide includes practical examples and best practices to help you build robust, production-ready operators.
diff --git a/docs/content/en/docs/documentation/architecture.md b/docs/content/en/docs/documentation/architecture.md
new file mode 100644
index 0000000000..4108849c04
--- /dev/null
+++ b/docs/content/en/docs/documentation/architecture.md
@@ -0,0 +1,36 @@
+---
+title: Architecture and Internals
+weight: 85
+---
+
+This document provides an overview of the Java Operator SDK's internal structure and components to help developers understand and contribute to the project. While not a comprehensive reference, it introduces core concepts that should make other components easier to understand.
+
+## The Big Picture and Core Components
+
+
+
+An [Operator](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/Operator.java) is a set of independent [controllers](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/Controller.java).
+
+The `Controller` class is an internal class managed by the framework and typically shouldn't be interacted with directly. It manages all processing units involved with reconciling a single type of Kubernetes resource.
+
+### Core Components
+
+- **[Reconciler](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java)** - The primary entry point for developers to implement reconciliation logic
+- **[EventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/EventSource.java)** - Represents a source of events that might trigger reconciliation
+- **[EventSourceManager](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventSourceManager.java)** - Aggregates all event sources for a controller and manages their lifecycle
+- **[ControllerResourceEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/controller/ControllerResourceEventSource.java)** - Central event source that watches primary resources associated with a given controller for changes, propagates events and caches state
+- **[EventProcessor](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventProcessor.java)** - Processes incoming events sequentially per resource while allowing concurrent overall processing. Handles rescheduling and retrying
+- **[ReconcilerDispatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/ReconciliationDispatcher.java)** - Dispatches requests to appropriate `Reconciler` methods and handles reconciliation results, making necessary Kubernetes API calls
+
+## Typical Workflow
+
+A typical workflow follows these steps:
+
+1. **Event Generation**: An `EventSource` produces an event and propagates it to the `EventProcessor`
+2. **Resource Reading**: The resource associated with the event is read from the internal cache
+3. **Reconciliation Submission**: If the resource isn't already being processed, a reconciliation request is submitted to the executor service in a different thread (encapsulated in a `ControllerExecution` instance)
+4. **Dispatching**: The `ReconcilerDispatcher` is called, which dispatches the call to the appropriate `Reconciler` method with all required information
+5. **Reconciler Execution**: Once the `Reconciler` completes, the `ReconcilerDispatcher` makes appropriate Kubernetes API server calls based on the returned result
+6. **Finalization**: The `EventProcessor` is called back to finalize execution and update the controller's state
+7. **Rescheduling Check**: The `EventProcessor` checks if the request needs rescheduling or retrying, and whether subsequent events were received for the same resource
+8. **Completion**: When no further action is needed, event processing is finished
diff --git a/docs/content/en/docs/documentation/configuration.md b/docs/content/en/docs/documentation/configuration.md
new file mode 100644
index 0000000000..888804628f
--- /dev/null
+++ b/docs/content/en/docs/documentation/configuration.md
@@ -0,0 +1,154 @@
+---
+title: Configurations
+weight: 55
+---
+
+The Java Operator SDK (JOSDK) provides abstractions that work great out of the box. However, we recognize that default behavior isn't always suitable for every use case. Numerous configuration options help you tailor the framework to your specific needs.
+
+Configuration options operate at several levels:
+- **Operator-level** using `ConfigurationService`
+- **Reconciler-level** using `ControllerConfiguration`
+- **DependentResource-level** using the `DependentResourceConfigurator` interface
+- **EventSource-level** where some event sources (like `InformerEventSource`) need fine-tuning to identify which events trigger the associated reconciler
+
+## Operator-Level Configuration
+
+Configuration that impacts the entire operator is performed via the `ConfigurationService` class. `ConfigurationService` is an abstract class with different implementations based on which framework flavor you use (e.g., Quarkus Operator SDK replaces the default implementation). Configurations initialize with sensible defaults but can be changed during initialization.
+
+For example, to disable CRD validation on startup and configure leader election:
+
+```java
+Operator operator = new Operator(override -> override
+    .checkingCRDAndValidateLocalModel(false)
+    .withLeaderElectionConfiguration(new LeaderElectionConfiguration("bar", "barNS")));
+```
+
+## Reconciler-Level Configuration
+
+While reconcilers are typically configured using the `@ControllerConfiguration` annotation, you can also override configuration at runtime when registering the reconciler with the operator. You can either:
+- Pass a completely new `ControllerConfiguration` instance
+- Override specific aspects using a `ControllerConfigurationOverrider` `Consumer` (preferred)
+
+```java
+Operator operator;
+Reconciler reconciler;
+...
+operator.register(reconciler, configOverrider ->
+ configOverrider.withFinalizer("my-nifty-operator/finalizer").withLabelSelector("foo=bar"));
+```
+
+## Dynamically Changing Target Namespaces
+
+A controller can be configured to watch a specific set of namespaces in addition to the
+namespace in which it is currently deployed, or the whole cluster. The framework supports
+dynamically changing the list of these namespaces while the operator is running.
+When a reconciler is registered, an instance of
+[`RegisteredController`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ec37025a15046d8f409c77616110024bf32c3416/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/RegisteredController.java#L5)
+is returned, providing access to the methods allowing users to change watched namespaces as the
+operator is running.
+
+A typical scenario would probably involve extracting the list of target namespaces from a
+`ConfigMap` or some other input, but this part is out of the scope of the framework since it is
+use-case specific. For example, reacting to changes to a `ConfigMap` would probably involve
+registering an associated `Informer` and then calling the `changeNamespaces` method on
+`RegisteredController`.
+
+```java
+
+public static void main(String[] args) {
+ KubernetesClient client = new DefaultKubernetesClient();
+ Operator operator = new Operator(client);
+ RegisteredController registeredController = operator.register(new WebPageReconciler(client));
+ operator.installShutdownHook();
+ operator.start();
+
+ // call registeredController further while operator is running
+}
+
+```
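+
+Once the new target namespaces are known (for example, read from a `ConfigMap`), they can be applied through the
+registered controller; a minimal sketch, assuming the `changeNamespaces` overload taking a `Set`:
+
+```java
+// switch the controller to a new set of namespaces at runtime
+registeredController.changeNamespaces(Set.of("team-a", "team-b"));
+```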
+
+If watched namespaces change for a controller, it might be desirable to propagate these changes to
+`InformerEventSources` associated with the controller. In order to express this,
+`InformerEventSource` implementations interested in following such changes need to be
+configured appropriately so that the `followControllerNamespaceChanges` method returns `true`:
+
+```java
+
+@ControllerConfiguration
+public class MyReconciler implements Reconciler<TestCustomResource> {
+
+  @Override
+  public Map<String, EventSource> prepareEventSources(
+      EventSourceContext<TestCustomResource> context) {
+
+    InformerEventSource<ConfigMap, TestCustomResource> configMapES =
+        new InformerEventSource<>(InformerEventSourceConfiguration.from(ConfigMap.class, TestCustomResource.class)
+            .withNamespacesInheritedFromController(context)
+            .build(), context);
+
+    return EventSourceUtils.nameEventSources(configMapES);
+  }
+
+}
+```
+
+As seen in the above code snippet, the informer will have its initial namespaces inherited from the
+controller, but will also adjust its target namespaces if they change for the controller.
+
+See also
+the [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/changenamespace)
+for this feature.
+
+## DependentResource-level configuration
+
+It is possible to define custom annotations to configure custom `DependentResource` implementations. In order to provide
+such a configuration mechanism for your own `DependentResource` implementations, they must be annotated with the
+`@Configured` annotation. This annotation defines 3 fields that tie everything together:
+
+- `by`, which specifies which annotation class will be used to configure your dependents,
+- `with`, which specifies the class holding the configuration object for your dependents and
+- `converter`, which specifies the `ConfigurationConverter` implementation in charge of converting the annotation
+ specified by the `by` field into objects of the class specified by the `with` field.
+
+`ConfigurationConverter` instances implement a single `configFrom` method, which receives the annotation instance
+annotating the dependent resource to be configured. It can also extract information from the `DependentResourceSpec`
+instance associated with the `DependentResource` class, so that metadata from it can be used in the configuration, as
+well as from the parent `ControllerConfiguration`, if needed. The role of `ConfigurationConverter` implementations is
+to extract the annotation information, augment it with metadata from the `DependentResourceSpec` and the configuration
+from the parent controller on which the dependent is defined, and finally create the configuration object that the
+`DependentResource` instances will use.
+
+However, one last element is required to finish the configuration process: the target `DependentResource` class must
+implement the `ConfiguredDependentResource` interface, parameterized with the annotation class defined by the
+`@Configured` annotation `by` field. This interface is called by the framework to inject the configuration at the
+appropriate time and retrieve the configuration, if it's available.
+
+For example, `KubernetesDependentResource`, a core implementation that the framework provides, can be configured via the
+`@KubernetesDependent` annotation. This setup is configured as follows:
+
+```java
+
+@Configured(
+    by = KubernetesDependent.class,
+    with = KubernetesDependentResourceConfig.class,
+    converter = KubernetesDependentConverter.class)
+public abstract class KubernetesDependentResource<R extends HasMetadata, P extends HasMetadata>
+    extends AbstractEventSourceHolderDependentResource<R, P, InformerEventSource<R, P>>
+    implements ConfiguredDependentResource<KubernetesDependentResourceConfig<R>> {
+  // code omitted
+}
+```
+
+The `@Configured` annotation specifies that `KubernetesDependentResource` instances can be configured by using the
+`@KubernetesDependent` annotation, which gets converted into a `KubernetesDependentResourceConfig` object by a
+`KubernetesDependentConverter`. That configuration object is then injected by the framework in the
+`KubernetesDependentResource` instance, after it's been created, because the class implements the
+`ConfiguredDependentResource` interface, properly parameterized.
+
+For more information on how to use this feature, we recommend looking at how this mechanism is implemented for
+`KubernetesDependentResource` in the core framework, `SchemaDependentResource` in the samples or `CustomAnnotationDep`
+in the `BaseConfigurationServiceTest` test class.
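+
+As an illustration, a custom dependent resource configured through a hypothetical `@MyDependentConfig` annotation could
+be wired roughly as follows (all names are made up, and the exact `configFrom` and `ConfiguredDependentResource` method
+signatures may differ slightly from your framework version):
+
+```java
+// hypothetical configuration annotation and configuration holder
+@Retention(RetentionPolicy.RUNTIME)
+public @interface MyDependentConfig {
+  String value() default "";
+}
+
+public record MyConfig(String value) {}
+
+// converts the annotation into the configuration object, optionally using spec/controller metadata
+public class MyConverter implements ConfigurationConverter<MyDependentConfig, MyConfig> {
+  @Override
+  public MyConfig configFrom(MyDependentConfig annotation, DependentResourceSpec<?, ?, MyConfig> spec,
+      ControllerConfiguration<?> parentConfiguration) {
+    return new MyConfig(annotation.value());
+  }
+}
+
+@Configured(by = MyDependentConfig.class, with = MyConfig.class, converter = MyConverter.class)
+public abstract class MyConfiguredDependent
+    implements DependentResource<ExternalResource, MyCustomResource>,
+        ConfiguredDependentResource<MyConfig> {
+
+  private MyConfig config;
+
+  @Override
+  public void configureWith(MyConfig config) {
+    // called by the framework after the instance is created
+    this.config = config;
+  }
+
+  @Override
+  public Optional<MyConfig> configuration() {
+    return Optional.ofNullable(config);
+  }
+
+  // remaining DependentResource methods omitted
+}
+```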
+
+## EventSource-level configuration
+
+TODO
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md
new file mode 100644
index 0000000000..9446f7ceca
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/_index.md
@@ -0,0 +1,9 @@
+---
+title: Dependent resources and workflows
+weight: 70
+---
+
+Dependent resources and workflows are features sometimes referred to as higher-level
+abstractions. These two related concepts provide an abstraction
+over the reconciliation of a single resource (dependent resources) and the
+orchestration of such resources (workflows).
\ No newline at end of file
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md
new file mode 100644
index 0000000000..7416949869
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/dependent-resources.md
@@ -0,0 +1,465 @@
+---
+title: Dependent resources
+weight: 75
+---
+
+## Motivations and Goals
+
+Most operators need to deal with secondary resources when trying to realize the desired state
+described by the primary resource they are in charge of. For example, the Kubernetes-native
+`Deployment` controller needs to manage `ReplicaSet` instances as part of a `Deployment`'s
+reconciliation process. In this instance, `ReplicaSet` is considered a secondary resource for
+the `Deployment` controller.
+
+Controllers that deal with secondary resources typically need to perform the following steps, for
+each secondary resource:
+
+```mermaid
+flowchart TD
+
+compute[Compute desired secondary resource based on primary state] --> A
+A{Secondary resource exists?}
+A -- Yes --> match
+A -- No --> Create --> Done
+
+match{Matches desired state?}
+match -- Yes --> Done
+match -- No --> Update --> Done
+```
+
+While these steps are not difficult in and of themselves, there are some subtleties that can lead to
+bugs or sub-optimal code if not done right. As this process is pretty much similar for each
+dependent resource, it makes sense for the SDK to offer some level of support to remove the
+boilerplate code associated with encoding these repetitive actions. It should
+be possible to handle common cases (such as dealing with Kubernetes-native secondary resources) in a
+semi-declarative way with only a minimal amount of code, JOSDK taking care of wiring everything
+accordingly.
+
+Moreover, in order for your reconciler to get informed of events on these secondary resources, you
+need to configure and create event sources and maintain them. JOSDK already makes it rather easy
+to deal with these, but dependent resources make it even simpler.
+
+Finally, there are also opportunities for the SDK to transparently add features that are even
+trickier to get right, such as immediate caching of updated or created resources (so that your
+reconciler doesn't need to wait for a cluster roundtrip to continue its work) and associated
+event filtering (so that something your reconciler just changed doesn't re-trigger a
+reconciliation, for example).
+
+## Design
+
+### `DependentResource` vs. `AbstractDependentResource`
+
+The new
+[`DependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/dependent/DependentResource.java)
+interface lies at the core of the design and strives to encapsulate the logic that is required
+to reconcile the state of the associated secondary resource based on the state of the primary
+one. For most cases, this logic will follow the flow expressed above and JOSDK provides a very
+convenient implementation of this logic in the form of the
+[`AbstractDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java)
+class. If your logic doesn't fit this pattern, though, you can still provide your
+own `reconcile` method implementation. While the benefits of using dependent resources are less
+obvious in that case, this allows you to separate the logic necessary to deal with each
+secondary resource in its own class that can then be tested in isolation via unit tests. You can
+also use the declarative support with your own implementations as we shall see later on.
+
+`AbstractDependentResource` is designed so that classes extending it specify which functionality
+they support by implementing trait interfaces. This design has been selected to express the fact
+that not all secondary resources are completely under the control of the primary reconciler:
+some dependent resources are only ever created or updated for example and we needed a way to let
+JOSDK know when that is the case. We therefore provide trait interfaces: `Creator`,
+`Updater` and `Deleter` to express that the `DependentResource` implementation will provide custom
+functionality to create, update and delete its associated secondary resources, respectively. If
+these traits are not implemented, then parts of the logic described above are never triggered: if
+your implementation doesn't implement `Creator`, for example, `AbstractDependentResource` will
+never try to create the associated secondary resource, even if it doesn't exist. It is even
+possible to not implement any of these traits and therefore create read-only dependent resources
+that will trigger your reconciler whenever a user interacts with them but that are never
+modified by your reconciler itself - however note that read-only dependent resources rarely make
+sense, as it is usually simpler to register an event source for the target resource.
+
+All subclasses
+of [`AbstractDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractDependentResource.java)
+can also implement
+the [`Matcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/Matcher.java)
+interface to customize how the SDK decides whether or not the actual state of the dependent
+matches the desired state. This makes it convenient to use these abstract base classes for your
+implementation, only customizing the matching logic. Note that in many cases, there is no need
+to customize that logic as the SDK already provides convenient default implementations in the
+form
+of [`DesiredEqualsMatcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/DesiredEqualsMatcher.java)
+and
+[`GenericKubernetesResourceMatcher`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesResourceMatcher.java)
+implementations, respectively. If you want to provide custom logic, you only need your
+`DependentResource` implementation to implement the `Matcher` interface as below, which shows
+how to customize the default matching logic for Kubernetes resources to also consider annotations
+and labels, which are ignored by default:
+
+```java
+public class MyDependentResource extends KubernetesDependentResource<MyDependent, MyPrimary>
+    implements Matcher<MyDependent, MyPrimary> {
+  // your implementation
+
+  public Result<MyDependent> match(MyDependent actualResource, MyPrimary primary,
+      Context<MyPrimary> context) {
+    return GenericKubernetesResourceMatcher.match(this, actualResource, primary, context, true);
+  }
+}
+```
+
+### Batteries included: convenient DependentResource implementations!
+
+JOSDK also offers several other convenient implementations building on top of
+`AbstractDependentResource` that you can use as starting points for your own implementations.
+
+One such implementation is the `KubernetesDependentResource` class that makes it really easy to work
+with Kubernetes-native resources. In this case, you usually only need to provide an implementation
+for the `desired` method to tell JOSDK what the desired state of your secondary resource should
+be based on the specified primary resource state.
+
+JOSDK takes care of everything else using default implementations that you can override in case you
+need more precise control of what's going on.
+
+We also provide implementations that make it easy to cache
+(`AbstractExternalDependentResource`) or poll for changes in external resources
+(`PollingDependentResource`, `PerResourcePollingDependentResource`). All the provided
+implementations can be found in the `io/javaoperatorsdk/operator/processing/dependent` package of
+the `operator-framework-core` module.
+
+### Sample Kubernetes Dependent Resource
+
+A typical use case is when a Kubernetes resource is fully managed - created, read, updated and
+deleted (or set to be garbage collected). The following example shows how to create a
+`Deployment` dependent resource:
+
+```java
+
+@KubernetesDependent(informer = @Informer(labelSelector = SELECTOR))
+class DeploymentDependentResource extends CRUDKubernetesDependentResource<Deployment, WebPage> {
+
+  @Override
+  protected Deployment desired(WebPage webPage, Context<WebPage> context) {
+    var deploymentName = deploymentName(webPage);
+    Deployment deployment = loadYaml(Deployment.class, getClass(), "deployment.yaml");
+    deployment.getMetadata().setName(deploymentName);
+    deployment.getMetadata().setNamespace(webPage.getMetadata().getNamespace());
+    deployment.getSpec().getSelector().getMatchLabels().put("app", deploymentName);
+
+    deployment.getSpec().getTemplate().getMetadata().getLabels()
+        .put("app", deploymentName);
+    deployment.getSpec().getTemplate().getSpec().getVolumes().get(0)
+        .setConfigMap(new ConfigMapVolumeSourceBuilder().withName(configMapName(webPage)).build());
+    return deployment;
+  }
+}
+```
+
+The only thing that you need to do is to extend the `CRUDKubernetesDependentResource` and
+specify the desired state for your secondary resources based on the state of the primary one. In
+the example above, we're handling the state of a `Deployment` secondary resource associated with
+a `WebPage` custom (primary) resource.
+
+The `@KubernetesDependent` annotation can be used to further configure **managed** dependent
+resources that extend `KubernetesDependentResource`.
+
+See the full source
+code [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/dependentresource/DeploymentDependentResource.java).
+
+## Managed Dependent Resources
+
+As mentioned previously, one goal of this implementation is to make it possible to declaratively
+create and wire dependent resources. You can annotate your reconciler with `@Dependent`
+annotations that specify which `DependentResource` implementation it depends upon.
+JOSDK will take the appropriate steps to wire everything together and call your
+`DependentResource` implementations' `reconcile` method before your primary resource is reconciled.
+This makes sense in most use cases where the logic associated with the primary resource is
+usually limited to status handling based on the state of the secondary resources and the
+resources are not dependent on each other. As an alternative, you can also invoke reconciliation explicitly,
+even for managed workflows.
+
+See [Workflows](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/workflows/) for more details on how the dependent
+resources are reconciled.
+
+This behavior and automated handling is referred to as "managed" because the `DependentResource`
+instances are managed by JOSDK, an example of which can be seen below:
+
+```java
+
+@Workflow(
+    dependents = {
+      @Dependent(type = ConfigMapDependentResource.class),
+      @Dependent(type = DeploymentDependentResource.class),
+      @Dependent(type = ServiceDependentResource.class),
+      @Dependent(
+          type = IngressDependentResource.class,
+          reconcilePrecondition = ExposedIngressCondition.class)
+    })
+public class WebPageManagedDependentsReconciler
+    implements Reconciler<WebPage>, ErrorStatusHandler<WebPage> {
+
+  // omitted code
+
+  @Override
+  public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+    final var name = context.getSecondaryResource(ConfigMap.class).orElseThrow()
+        .getMetadata().getName();
+    webPage.setStatus(createStatus(name));
+    return UpdateControl.patchStatus(webPage);
+  }
+}
+```
+
+See the full source code of the
+sample [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageManagedDependentsReconciler.java).
+
+## Standalone Dependent Resources
+
+It is also possible to wire dependent resources programmatically. In practice this means that the
+developer is responsible for initializing and managing the dependent resources as well as calling
+their `reconcile` method. However, this makes it possible for developers to fully customize the
+reconciliation process. Standalone dependent resources should be used in cases when the managed use
+case does not fit. You can, of course, also use [Workflows](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/workflows/) when managing
+resources programmatically.
+
+You can see a commented example of how to do
+so [here](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageStandaloneDependentsReconciler.java).
+
+## Creating/Updating Kubernetes Resources
+
+Since version 4.4 of the framework, resources are created and updated
+using [Server Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/):
+the desired state is simply sent to the cluster using this approach to update the actual resource.
+
+## Comparing desired and actual state (matching)
+
+During the reconciliation of a dependent resource, the desired state is matched with the actual
+state from the caches. The dependent resource only gets updated on the server if the actual,
+observed state differs from the desired one. Comparing these two states is a complex problem
+when dealing with Kubernetes resources because a strict equality check is usually not what is
+wanted due to the fact that multiple fields might be automatically updated or added by
+the platform (
+by [dynamic admission controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
+or validation webhooks, for example). Solving this problem in a generic way is therefore a tricky
+proposition.
+
+JOSDK provides such a generic matching implementation, which is used by default:
+[SSABasedGenericKubernetesResourceMatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/SSABasedGenericKubernetesResourceMatcher.java).
+This implementation relies on the managed fields used by the Server Side Apply feature to
+compare only the values of the fields that the controller manages. This ensures that only
+semantically relevant fields are compared. See the javadoc for further details.
+
+JOSDK versions prior to 4.4 were using a different matching algorithm as implemented in
+[GenericKubernetesResourceMatcher](https://github.com/java-operator-sdk/java-operator-sdk/blob/e16559fd41bbb8bef6ce9d1f47bffa212a941b09/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesResourceMatcher.java).
+
+Since SSA is a complex feature, JOSDK implements a feature flag allowing users to switch between
+these implementations. See
+in [ConfigurationService](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L332-L358).
+
+It is, however, important to note that these are default, generic
+implementations, provided so that the framework can offer the expected behavior out of the box. In many
+situations, these will work just fine, but it is also possible to provide matching algorithms
+optimized for specific use cases. This is easily done by simply overriding
+the `match(...)` [method](https://github.com/java-operator-sdk/java-operator-sdk/blob/e16559fd41bbb8bef6ce9d1f47bffa212a941b09/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java#L156-L156).
+
+It is also possible to bypass the matching logic altogether and simply rely on the server-side
+apply mechanism, if always sending potentially unchanged resources to the cluster is not an issue.
+JOSDK's matching mechanism makes it possible to spare some potentially useless calls to the Kubernetes API
+server. To bypass the matching feature completely, simply override the `match` method to always
+return `false`, thus telling JOSDK that the actual state never matches the desired one, making
+it always update the resources using SSA, as sketched below.
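+
+A minimal sketch of such a bypass for a Kubernetes dependent resource (types are placeholders):
+
+```java
+public class AlwaysApplyDependent extends CRUDKubernetesDependentResource<ConfigMap, WebPage> {
+
+  @Override
+  public Result<ConfigMap> match(ConfigMap actualResource, WebPage primary, Context<WebPage> context) {
+    // never consider the actual state as matching, so the desired state is always applied via SSA
+    return Result.nonComputed(false);
+  }
+}
+```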
+
+WARNING: Older versions of Kubernetes before 1.25 would create an additional resource version for every SSA update
+performed with certain resources - even though there were no actual changes in the stored resource - leading to infinite
+reconciliations. This behavior was seen with Secrets using `stringData`, Ingresses using empty string fields, and
+StatefulSets using volume claim templates. The operator framework has added built-in handling for the StatefulSet issue.
+If you encounter this issue on an older Kubernetes version, consider changing your desired state, turning off SSA for
+that resource, or even upgrading your Kubernetes version. If you encounter it on a newer Kubernetes version, please log
+an issue with the JOSDK and with upstream Kubernetes.
+
+## Telling JOSDK how to find which secondary resources are associated with a given primary resource
+
+[`KubernetesDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java)
+automatically maps secondary resources to their primary via owner references. This behavior can be
+customized by implementing
+[`SecondaryToPrimaryMapper`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/SecondaryToPrimaryMapper.java)
+in the dependent resource itself, as in the sketch below.
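+
+A rough sketch of a custom mapper for secondary resources that reference their primary via a label rather than an owner
+reference (the label key and types are made up):
+
+```java
+public class ConfigMapDependent extends CRUDKubernetesDependentResource<ConfigMap, MyCustomResource>
+    implements SecondaryToPrimaryMapper<ConfigMap> {
+
+  @Override
+  public Set<ResourceID> toPrimaryResourceIDs(ConfigMap configMap) {
+    // map the ConfigMap back to its primary using a label instead of an owner reference
+    var primaryName = configMap.getMetadata().getLabels().get("example.com/primary-name");
+    return Set.of(new ResourceID(primaryName, configMap.getMetadata().getNamespace()));
+  }
+}
+```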
+
+See sample in one of the integration
+tests [here](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/primaryindexer).
+
+## Multiple Dependent Resources of Same Type
+
+When dealing with multiple dependent resources of the same type, the dependent resource implementation
+needs to know which specific resource should be targeted when reconciling a given dependent
+resource, since there could be multiple instances of that type which could possibly be used, each
+associated with the same primary resource. In this situation, JOSDK automatically selects the appropriate secondary
+resource matching the desired state associated with the primary resource. This makes sense because the desired
+state computation already needs to be able to discriminate among multiple related secondary resources to tell JOSDK how
+they should be reconciled.
+
+There might be cases, though, where it is problematic to call the `desired` method several times (for example, because
+it is costly to do so). It is always possible to override this automated discrimination using several means (consider them in this priority order):
+
+- Override the `targetSecondaryResourceID` method, if your `DependentResource` extends `KubernetesDependentResource`,
+ where it's very often possible to easily determine the `ResourceID` of the secondary resource. This would probably be
+ the easiest solution if you're working with Kubernetes resources.
+- Override the `selectTargetSecondaryResource` method, if your `DependentResource` extends `AbstractDependentResource`.
+ It should be relatively simple to override this method to optimize the matching to your needs. You can see an
+ example of such an implementation in
+ the [`ExternalWithStateDependentResource`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalWithStateDependentResource.java)
+ class.
+- As a last resort, you can implement your own `getSecondaryResource` method on your `DependentResource` implementation from scratch.
+
+### Sharing an Event Source Between Dependent Resources
+
+Dependent resources usually also provide event sources. When dealing with multiple dependents of
+the same type, one needs to decide whether these dependent resources should track the same
+resources and therefore share a common event source, or, to the contrary, track completely
+separate resources, in which case using separate event sources is advised.
+
+Dependents can therefore reuse existing, named event sources by referring to their name. In the
+declarative case, assuming a `configMapSource` `EventSource` has already been declared, this
+would look as follows:
+
+```
+ @Dependent(type = MultipleManagedDependentResourceConfigMap1.class,
+ useEventSourceWithName = "configMapSource")
+```
+
+A sample is provided as an integration test for both
+[managed](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledrsametypenodiscriminator)
+and
+[standalone](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/multipledependentresource)
+cases.
+
+## Bulk Dependent Resources
+
+So far, all the cases we've considered were dealing with situations where the number of
+dependent resources needed to reconcile the state expressed by the primary resource is known
+when writing the code for the operator. There are, however, cases where the number of dependent
+resources to be created depends on information found in the primary resource.
+
+These cases are covered by the "bulk" dependent resources feature. To create such dependent
+resources, your implementation should extend `AbstractDependentResource` (at least indirectly) and
+implement the
+[`BulkDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/BulkDependentResource.java)
+interface.
+
+Various examples are provided
+as [integration tests](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent).
+
+To see how bulk dependent resources interact with workflow conditions, please refer to this
+[integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/bulkdependent/conidition).
+
+## External State Tracking Dependent Resources
+
+It is sometimes necessary for a controller to track external (i.e. non-Kubernetes) state to
+properly manage some dependent resources. For example, your controller might need to track the
+state of a REST API resource, which, after being created, would be referred to by its identifier.
+Such an identifier would need to be tracked by your controller to properly retrieve the state of
+the associated resource and/or assess whether such a resource exists. While there are several ways to
+support this use case, we recommend storing such information in a dedicated Kubernetes resource
+(usually a `ConfigMap` or a `Secret`), so that it can be manipulated with common Kubernetes
+mechanisms.
+
+This particular use case is supported by the
+[`AbstractExternalDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/AbstractExternalDependentResource.java)
+class that you can extend to suit your needs, as well as implement the
+[`DependentResourceWithExplicitState`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/DependentResourceWithExplicitState.java)
+interface. Note that most of the JOSDK-provided dependent resource implementations such as
+`PollingDependentResource` or `PerResourcePollingDependentResource` already extend
+`AbstractExternalDependentResource`, thus supporting external state tracking out of the box.
+
+See [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalStateDependentIT.java)
+as a sample.
+
+For a better understanding, it might be worth studying
+a [sample implementation](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/ExternalStateReconciler.java)
+without dependent resources.
+
+Please also refer to the [docs](/docs/patterns-and-best-practices#managing-state) for managing state in
+general.
+
+## Combining Bulk and External State Tracking Dependent Resources
+
+Both bulk and external state tracking features can be combined. In that
+case, a separate, state-tracking resource will be created for each bulk dependent resource
+created. For example, if three bulk dependent resources associated with external state are created,
+three associated `ConfigMaps` (assuming `ConfigMaps` are used as a state-tracking resource) will
+also be created, one per dependent resource.
+
+See [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/externalstate/externalstatebulkdependent)
+as a sample.
+
+## GenericKubernetesResource based Dependent Resources
+
+In rare circumstances, resource handling where there is no class representation, or simply typeless handling, might be
+needed.
+Fabric8 Client
+provides [GenericKubernetesResource](https://github.com/fabric8io/kubernetes-client/blob/main/doc/CHEATSHEET.md#resource-typeless-api)
+to support that.
+
+For dependent resources, this is supported
+by [GenericKubernetesDependentResource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/GenericKubernetesDependentResource.java#L8-L8).
+See
+samples [here](https://github.com/java-operator-sdk/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/sample/generickubernetesresource).
+
+## Other Dependent Resource Features
+
+### Caching and Event Handling in [KubernetesDependentResource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/KubernetesDependentResource.java)
+
+1. When a Kubernetes resource is created or updated, the related informer (more precisely
+   the `InformerEventSource`) will eventually receive an event and cache the up-to-date
+   resource. Typically, though, there might be a small time window during which calling the
+   `getResource()` method of the dependent resource, or getting the resource from the `EventSource`
+   itself, won't return the just-updated resource because the associated event hasn't
+   been received from the Kubernetes API yet. The `KubernetesDependentResource` implementation,
+   however, addresses this issue, so you don't have to worry about it: it makes sure that it or
+   the related `InformerEventSource` always returns the up-to-date resource.
+
+2. Another feature of `KubernetesDependentResource` is to make sure that if a resource is created or
+   updated during the reconciliation, this particular change, which normally would trigger the
+   reconciliation again (since the resource has changed on the server), will, in fact, not
+   trigger the reconciliation again since we already know the state is as expected. This is a small
+   optimization. For example, if a `ConfigMap` is updated using dependent resources during a
+   reconciliation, this won't trigger a new reconciliation. Such a reconciliation is indeed not
+   needed since the change originated from our reconciler. For this system to work properly,
+   though, it is required that changes are received by only one event source (this is a best
+   practice in general) - so, for example, if there are two config map dependents, there should
+   either be a shared event source between them, or a label selector on each event source
+   to select only the relevant events, as shown in
+   the [related integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/orderedmanageddependent/ConfigMapDependentResource2.java).
+
+## "Read-only" Dependent Resources vs. Event Source
+
+See Integration test for a read-only
+dependent [here](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/dependent/primarytosecondaydependent/ConfigMapDependent.java).
+
+Some secondary resources only exist as input for the reconciliation process and are never
+updated *by a controller* (they might, and actually usually do, get updated by users interacting
+with the resources directly, however). This might be the case, for example, of a `ConfigMap` that is
+used to configure common characteristics of multiple resources in one convenient place.
+
+In such situations, one might wonder whether it makes sense to create a dependent resource in
+this case or simply use an `EventSource` so that the primary resource gets reconciled whenever a
+user changes the resource. Typical dependent resources provide a desired state that the
+reconciliation process attempts to match. In the case of so-called read-only dependents, though,
+there is no such desired state because the operator / controller will never update the resource
+itself, just react to external changes to it. An `EventSource` would achieve the same result.
+
+Using a dependent resource for that purpose instead of a simple `EventSource`, however, provides
+several benefits:
+
+- dependents can be created declaratively, while an event source would need to be manually created
+- if dependents are already used in a controller, it makes sense to unify the handling of all
+ secondary resources as dependents from a code organization perspective
+- dependent resources can also interact with the workflow feature, thus allowing the read-only
+  resource to participate in conditions: in particular, to decide whether the primary
+  resource needs/can be reconciled using reconcile pre-conditions, to block the progression of the
+  workflow altogether with ready post-conditions, or to have other dependents depend on them. In
+  essence, read-only dependents can participate in workflows just like any other dependents (see
+  the sketch below).
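+
+As a minimal, hedged sketch (the primary resource type `MyPrimary` is illustrative), such a
+read-only dependent can simply extend `KubernetesDependentResource` without implementing the
+`Creator`/`Updater`/`Deleter` traits, so it only watches the `ConfigMap` and exposes it to the
+reconciler and to workflow conditions without ever mutating it:
+
+```java
+// Read-only dependent: no Creator/Updater/Deleter implemented, so it never modifies the ConfigMap.
+// MyPrimary is a hypothetical custom resource used only for illustration.
+public class ConfigMapReadOnlyDependent
+    extends KubernetesDependentResource<ConfigMap, MyPrimary> {
+
+  public ConfigMapReadOnlyDependent() {
+    super(ConfigMap.class);
+  }
+}
+```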
diff --git a/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md b/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md
new file mode 100644
index 0000000000..c5ee83a446
--- /dev/null
+++ b/docs/content/en/docs/documentation/dependent-resource-and-workflows/workflows.md
@@ -0,0 +1,403 @@
+---
+title: Workflows
+weight: 80
+---
+
+## Overview
+
+Kubernetes (k8s) does not have the notion of a resource "depending on" another k8s resource,
+at least not in terms of the order in which these resources should be reconciled. Kubernetes
+operators typically need to reconcile resources in order because these resources' state often
+depends on the state of other resources or cannot be processed until these other resources reach
+a given state or some condition holds true for them. Dealing with such scenarios is therefore
+rather common for operators and the purpose of the workflow feature of the Java Operator SDK
+(JOSDK) is to simplify supporting such cases in a declarative way. Workflows build on top of the
+[dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/) feature.
+While dependent resources focus on how a given secondary resource should be reconciled,
+workflows focus on orchestrating how these dependent resources should be reconciled.
+
+Workflows describe how a set of
+[dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/) (DR) depend on one
+another, along with the conditions that need to hold true at certain stages of the
+reconciliation process.
+
+## Elements of Workflow
+
+- **Dependent resource** (DR) - are the resources being managed in a given reconciliation logic.
+- **Depends-on relation** - a `B` DR depends on another `A` DR if `B` needs to be reconciled
+ after `A`.
+- **Reconcile precondition** - is a condition on a given DR that needs to become true before the
+  DR is reconciled. This also makes it possible to define optional resources that would, for example, only be
+  created if a flag in a custom resource `.spec` has some specific value.
+- **Ready postcondition** - is a condition on a given DR to prevent the workflow from
+ proceeding until the condition checking whether the DR is ready holds true
+- **Delete postcondition** - is a condition on a given DR to check if the reconciliation of
+ dependents can proceed after the DR is supposed to have been deleted
+- **Activation condition** - is a special condition meant to specify under which condition the DR is used in the
+ workflow. A typical use-case for this feature is to only activate some dependents depending on the presence of
+ optional resources / features on the target cluster. Without this activation condition, JOSDK would attempt to
+ register an informer for these optional resources, which would cause an error in the case where the resource is
+ missing. With this activation condition, you can now conditionally register informers depending on whether the
+ condition holds or not. This is a very useful feature when your operator needs to handle different flavors of the
+ platform (e.g. OpenShift vs plain Kubernetes) and/or change its behavior based on the availability of optional
+ resources / features (e.g. CertManager, a specific Ingress controller, etc.).
+
+ A generic activation condition is provided out of the box, called
+ [CRDPresentActivationCondition](https://github.com/operator-framework/java-operator-sdk/blob/ba5e33527bf9e3ea0bd33025ccb35e677f9d44b4/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/CRDPresentActivationCondition.java)
+ that will prevent the associated dependent resource from being activated if the Custom Resource Definition associated
+ with the dependent's resource type is not present on the cluster.
+ See related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/crdpresentactivation).
+
+  Having multiple resources of the same type with an activation condition is a bit tricky: since you
+  don't want to have multiple `InformerEventSource`s for the same type, you have to explicitly
+  name the informer for the Dependent Resource (`@KubernetesDependent(informerConfig = @InformerConfig(name = "configMapInformer"))`)
+  for all resources of the same type with an activation condition. This makes sure that only one informer is registered
+  (see the sketch after this list).
+ See details at [low level api](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/EventSourceRetriever.java#L20-L52).
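+
+As a hedged sketch of that setup (class and primary resource names are illustrative, the
+annotation usage mirrors the snippet above), both dependents reference the same named informer so
+that only one `InformerEventSource` is registered for the `ConfigMap` type:
+
+```java
+// Typically defined in separate files; both dependents share the informer named "configMapInformer".
+@KubernetesDependent(informerConfig = @InformerConfig(name = "configMapInformer"))
+class FirstConfigMapDependent extends CRUDKubernetesDependentResource<ConfigMap, MyPrimary> {
+  public FirstConfigMapDependent() {
+    super(ConfigMap.class);
+  }
+}
+
+@KubernetesDependent(informerConfig = @InformerConfig(name = "configMapInformer"))
+class SecondConfigMapDependent extends CRUDKubernetesDependentResource<ConfigMap, MyPrimary> {
+  public SecondConfigMapDependent() {
+    super(ConfigMap.class);
+  }
+}
+```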
+
+### Result conditions
+
+While simple conditions are usually enough, it might happen you want to convey extra information as a result of the
+evaluation of the conditions (e.g., to report error messages or because the result of the condition evaluation might be
+interesting for other purposes). In this situation, you should implement `DetailedCondition` instead of `Condition` and
+provide an implementation of the `detailedIsMet` method, which allows you to return a more detailed `Result` object via
+which you can provide extra information. The `DetailedCondition.Result` interface provides factory methods for your
+convenience, but you can also provide your own implementation if required.
+
+You can access the results for conditions from the `WorkflowResult` instance that is returned whenever a workflow is
+evaluated. You can access that result from the `ManagedWorkflowAndDependentResourceContext` accessible from the
+reconciliation `Context`. You can then access individual condition results using the
+`getDependentConditionResult` methods. You can see an example of this
+in [this integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowallfeature/WorkflowAllFeatureReconciler.java).
+
+## Defining Workflows
+
+Similarly to dependent resources, there are two ways to define workflows, in managed and standalone
+manner.
+
+### Managed
+
+Annotations can be used to declaratively define a workflow for a `Reconciler`. Similarly to how
+things are done for dependent resources, managed workflows execute before the `reconcile` method
+is called. The result of the reconciliation can be accessed via the `Context` object that is
+passed to the `reconcile` method.
+
+The following sample shows a hypothetical use case to showcase all the elements: the primary
+`TestCustomResource` resource handled by our `Reconciler` defines two dependent resources, a
+`Deployment` and a `ConfigMap`. The `ConfigMap` depends on the `Deployment` so will be
+reconciled after it. Moreover, the `Deployment` dependent resource defines a ready
+post-condition, meaning that the `ConfigMap` will not be reconciled until the condition defined
+by the `Deployment` becomes `true`. Additionally, the `ConfigMap` dependent also defines a
+reconcile pre-condition, so it also won't be reconciled until that condition becomes `true`. The
+`ConfigMap` also defines a delete post-condition, which means that the workflow implementation
+will only consider the `ConfigMap` deleted once that post-condition becomes `true`.
+
+```java
+
+@Workflow(dependents = {
+ @Dependent(name = DEPLOYMENT_NAME, type = DeploymentDependentResource.class,
+ readyPostcondition = DeploymentReadyCondition.class),
+ @Dependent(type = ConfigMapDependentResource.class,
+ reconcilePrecondition = ConfigMapReconcileCondition.class,
+ deletePostcondition = ConfigMapDeletePostCondition.class,
+ activationCondition = ConfigMapActivationCondition.class,
+ dependsOn = DEPLOYMENT_NAME)
+})
+@ControllerConfiguration
+public class SampleWorkflowReconciler implements Reconciler<WorkflowAllFeatureCustomResource>,
+    Cleaner<WorkflowAllFeatureCustomResource> {
+
+ public static final String DEPLOYMENT_NAME = "deployment";
+
+ @Override
+  public UpdateControl<WorkflowAllFeatureCustomResource> reconcile(
+      WorkflowAllFeatureCustomResource resource,
+      Context<WorkflowAllFeatureCustomResource> context) {
+
+ resource.getStatus()
+ .setReady(
+ context.managedWorkflowAndDependentResourceContext() // accessing workflow reconciliation results
+ .getWorkflowReconcileResult()
+ .allDependentResourcesReady());
+ return UpdateControl.patchStatus(resource);
+ }
+
+ @Override
+  public DeleteControl cleanup(WorkflowAllFeatureCustomResource resource,
+      Context<WorkflowAllFeatureCustomResource> context) {
+    // omitted code
+
+ return DeleteControl.defaultDelete();
+ }
+}
+
+```
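+
+The conditions referenced above are regular `Condition` implementations. As a minimal, hedged
+sketch (the actual sample classes may differ), the `Deployment` ready post-condition could simply
+compare the desired and ready replica counts of the cached `Deployment`:
+
+```java
+public class DeploymentReadyCondition
+    implements Condition<Deployment, WorkflowAllFeatureCustomResource> {
+
+  @Override
+  public boolean isMet(
+      DependentResource<Deployment, WorkflowAllFeatureCustomResource> dependentResource,
+      WorkflowAllFeatureCustomResource primary,
+      Context<WorkflowAllFeatureCustomResource> context) {
+    // the Deployment is "ready" once all desired replicas are reported ready
+    return context.getSecondaryResource(Deployment.class)
+        .map(d -> d.getStatus() != null
+            && Objects.equals(d.getSpec().getReplicas(), d.getStatus().getReadyReplicas()))
+        .orElse(false);
+  }
+}
+```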
+
+### Standalone
+
+In this mode the workflow is built manually
+using [standalone dependent resources](https://javaoperatorsdk.io/docs/documentation/dependent-resource-and-workflows/dependent-resources/#standalone-dependent-resources).
+The workflow is created using a builder that is explicitly called in the reconciler (from the web
+page sample):
+
+```java
+
+@ControllerConfiguration(
+ labelSelector = WebPageDependentsWorkflowReconciler.DEPENDENT_RESOURCE_LABEL_SELECTOR)
+public class WebPageDependentsWorkflowReconciler
+    implements Reconciler<WebPage>, ErrorStatusHandler<WebPage> {
+
+ public static final String DEPENDENT_RESOURCE_LABEL_SELECTOR = "!low-level";
+ private static final Logger log =
+ LoggerFactory.getLogger(WebPageDependentsWorkflowReconciler.class);
+
+  private KubernetesDependentResource<ConfigMap, WebPage> configMapDR;
+  private KubernetesDependentResource<Deployment, WebPage> deploymentDR;
+  private KubernetesDependentResource<Service, WebPage> serviceDR;
+  private KubernetesDependentResource<Ingress, WebPage> ingressDR;
+
+  private final Workflow<WebPage> workflow;
+
+ public WebPageDependentsWorkflowReconciler(KubernetesClient kubernetesClient) {
+ initDependentResources(kubernetesClient);
+    workflow = new WorkflowBuilder<WebPage>()
+ .addDependentResource(configMapDR)
+ .addDependentResource(deploymentDR)
+ .addDependentResource(serviceDR)
+ .addDependentResource(ingressDR).withReconcilePrecondition(new ExposedIngressCondition())
+ .build();
+ }
+
+ @Override
+  public Map<String, EventSource> prepareEventSources(EventSourceContext<WebPage> context) {
+ return EventSourceUtils.nameEventSources(
+ configMapDR.initEventSource(context),
+ deploymentDR.initEventSource(context),
+ serviceDR.initEventSource(context),
+ ingressDR.initEventSource(context));
+ }
+
+ @Override
+  public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+
+ var result = workflow.reconcile(webPage, context);
+
+ webPage.setStatus(createStatus(result));
+ return UpdateControl.patchStatus(webPage);
+ }
+ // omitted code
+}
+
+```
+
+## Workflow Execution
+
+This section describes in detail how a workflow is executed, how the ordering is determined and
+how conditions and errors affect the behavior. The workflow execution is divided into two parts,
+similarly to how the `Reconciler` and `Cleaner` behaviors are separated.
+[Cleanup](https://javaoperatorsdk.io/docs/documentation/reconciler/#implementing-a-reconciler-and-cleaner-interfaces) is
+executed if a resource is marked for deletion.
+
+## Common Principles
+
+- **As complete as possible execution** - when a workflow is reconciled, it tries to reconcile as
+  many resources as possible. Thus, if an error happens or a ready condition is not met for a
+  resource, all the other independent resources will still be reconciled. This is the opposite
+  of a fail-fast approach. The assumption is that this way the overall state will eventually
+  converge faster towards the desired state than would be the case if the reconciliation was
+  aborted as soon as an error occurred.
+- **Concurrent reconciliation of independent resources** - the resources which don't depend on
+  others are processed concurrently. The level of concurrency is customizable and could be set to
+  one if required. By default, workflows use the executor service
+  from [ConfigurationService](https://github.com/java-operator-sdk/java-operator-sdk/blob/6f2a252952d3a91f6b0c3c38e5e6cc28f7c0f7b3/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L120-L120).
+
+## Reconciliation
+
+This section describes how a workflow is executed, considering first which rules apply, then
+demonstrating them using examples:
+
+### Rules
+
+1. A workflow is a Directed Acyclic Graph (DAG) built from the DRs and their associated
+   `depends-on` relations.
+2. Root nodes, i.e. nodes in the graph that do not depend on other nodes, are reconciled first,
+ in a parallel manner.
+3. A DR is reconciled if it does not depend on any other DRs, or *ALL* the DRs it depends on are
+ reconciled and ready. If a DR defines a reconcile pre-condition and/or an activation condition,
+   then these conditions must become `true` before the DR is reconciled.
+4. A DR is considered *ready* if it got successfully reconciled and any ready post-condition it
+ might define is `true`.
+5. If a DR's reconcile pre-condition is not met, this DR is deleted. All the DRs that depend
+   on the dependent resource are also recursively deleted. This implies that
+   DRs are deleted in reverse order compared to the one in which they are reconciled.
+   The reasoning behind this behavior is as follows: a DR with a reconcile pre-condition is only
+   reconciled if the condition holds `true`. This means that if the condition is `false` and the
+   resource didn't exist already, then the associated resource would not be created. To ensure
+   idempotency (i.e. with the same input state, we should have the same output state), it
+   follows that if the condition doesn't hold `true` anymore, the associated resource needs to
+   be deleted because the resource shouldn't exist/have been created.
+6. If a DR's activation condition is not met, it won't be reconciled or deleted. If other DRs depend on it, those will
+   be recursively deleted in a way similar to reconcile pre-conditions. Event sources for a dependent resource with
+   an activation condition are registered/de-registered dynamically, thus during the reconciliation.
+7. For a DR to be deleted by a workflow, it needs to implement the `Deleter` interface, in which
+ case its `delete` method will be called, unless it also implements the `GarbageCollected`
+ interface. If a DR doesn't implement `Deleter` it is considered as automatically deleted. If
+ a delete post-condition exists for this DR, it needs to become `true` for the workflow to
+ consider the DR as successfully deleted.
+
+### Samples
+
+Notation: The arrows depict reconciliation ordering, thus following the reverse direction of the
+`depends-on` relation:
+`1 --> 2` means `DR 2` depends-on `DR 1`.
+
+#### Reconcile Sample
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+2 --> 4
+3 --> 4
+```
+
+
+- Root nodes (i.e. nodes that don't depend on any others) are reconciled first. In this example,
+ DR `1` is reconciled first since it doesn't depend on others.
+ After that both DR `2` and `3` are reconciled concurrently, then DR `4` once both are
+ reconciled successfully.
+- If DR `2` had a ready condition and it evaluated to `false`, DR `4` would not be reconciled.
+  However `1`, `2` and `3` would be.
+- If `1` had a `false` ready condition, neither `2`, `3` nor `4` would be reconciled.
+- If `2`'s reconciliation resulted in an error, `4` would not be reconciled, but `3`
+ would be (and `1` as well, of course).
+
+#### Sample with Reconcile Precondition
+
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+3 --> 4
+3 --> 5
+```
+
+
+- If `3` has a reconcile pre-condition that is not met, `1` and `2` would be reconciled. However,
+ DR `3`,`4`,`5` would be deleted: `4` and `5` would be deleted concurrently but `3` would only
+ be deleted if `4` and `5` were deleted successfully (i.e. without error) and all existing
+ delete post-conditions were met.
+- If `5` had a delete post-condition that was `false`, `3` would not be deleted but `4`
+ would still be because they don't depend on one another.
+- Similarly, if `5`'s deletion resulted in an error, `3` would not be deleted but `4` would be.
+
+## Cleanup
+
+Cleanup works the same way as the deletion of resources during reconciliation when a reconcile
+pre-condition is not met, except that it applies to the whole workflow.
+
+### Rules
+
+1. Delete is called on a DR if there is no DR that depends on it
+2. If a DR has DRs that depend on it, it will only be deleted if all these DRs are successfully
+ deleted without error and any delete post-condition is `true`.
+3. A DR is "manually" deleted (i.e. it's `Deleter.delete` method is called) if it implements the
+ `Deleter` interface but does not implement `GarbageCollected`. If a DR does not implement
+ `Deleter` interface, it is considered as deleted automatically.
+
+### Sample
+
+```mermaid
+stateDiagram-v2
+1 --> 2
+1 --> 3
+2 --> 4
+3 --> 4
+```
+
+- The DRs are deleted in the following order: `4` is deleted first, then `2` and `3` are deleted
+ concurrently, and, only after both are successfully deleted, `1` is deleted.
+- If `2` had a delete post-condition that was `false`, `1` would not be deleted. `4` and `3`
+ would be deleted.
+- If `2` was in error, DR `1` would not be deleted. DR `4` and `3` would be deleted.
+- If `4` was in error, no other DR would be deleted.
+
+## Error Handling
+
+As mentioned before, if an error happens during a reconciliation, the reconciliation of other
+dependent resources will still happen, assuming they don't depend on the one that failed. In
+case multiple DRs fail, the workflow would throw an
+[`AggregatedOperatorException`](https://github.com/java-operator-sdk/java-operator-sdk/blob/86e5121d56ed4ecb3644f2bc8327166f4f7add72/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/AggregatedOperatorException.java)
+containing all the related exceptions.
+
+The exceptions can be handled
+by [`ErrorStatusHandler`](https://github.com/java-operator-sdk/java-operator-sdk/blob/14620657fcacc8254bb96b4293eded84c20ba685/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/ErrorStatusHandler.java).
+
+## Waiting for the actual deletion of Kubernetes Dependent Resources
+
+Let's consider a case when a Kubernetes Dependent Resource (KDR) depends on another resource. On cleanup,
+the resources will be deleted in reverse order, thus the KDR will be deleted first.
+However, the workflow implementation currently simply asks the Kubernetes API server to delete the resource. This is,
+however, an asynchronous process, meaning that the deletion might not occur immediately, in particular if the resource
+uses finalizers that block the deletion or if the deletion itself takes some time. From the SDK's perspective, though,
+the deletion has been requested and it moves on to other tasks without waiting for the resource to be actually deleted
+from the server (which might never occur if it uses finalizers which are not removed).
+In situations like these, if your logic depends on resources being actually removed from the cluster before a
+cleanup workflow can proceed correctly, you need to block the workflow progression using a delete post-condition that
+checks that the resource is actually removed or that it, at least, doesn't have any finalizers any longer. JOSDK
+provides such a delete post-condition implementation in the form of
+[`KubernetesResourceDeletedCondition`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/workflow/KubernetesResourceDeletedCondition.java)
+
+Also, check usage in an [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/manageddependentdeletecondition/ManagedDependentDefaultDeleteConditionReconciler.java).
+
+In such cases the Kubernetes Dependent Resource should extend `CRUDNoGCKubernetesDependentResource`
+and NOT `CRUDKubernetesDependentResource`, since otherwise the Kubernetes garbage collector would delete the resources.
+In other words, if a Kubernetes Dependent Resource depends on another dependent resource, it should not implement the
+`GarbageCollected` interface, otherwise the deletion order won't be guaranteed.
+
+
+## Explicit Managed Workflow Invocation
+
+Managed workflows, i.e. ones that are declared via annotations and therefore completely managed by JOSDK, are reconciled
+before the primary resource. Each dependent resource that can be reconciled (according to the workflow configuration)
+will therefore be reconciled before the primary reconciler is called to reconcile the primary resource. There are,
+however, situations where it would be useful to perform additional steps before the workflow is reconciled, for
+example to validate the current state, execute arbitrary logic or even skip reconciliation altogether. Explicit
+invocation of managed workflow was therefore introduced to solve these issues.
+
+To use this feature, you need to set the `explicitInvocation` field to `true` on the `@Workflow` annotation and then
+call the `reconcileManagedWorkflow` method from the `ManagedWorkflowAndDependentResourceContext`
+retrieved from the reconciliation `Context` provided as part of your primary
+resource reconciler's `reconcile` method arguments.
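+
+As a minimal, hedged sketch (the primary resource type `MyResource` and the reconciler name are
+illustrative; `ConfigMapDependentResource` is reused from the earlier example), explicit
+invocation looks like this:
+
+```java
+@Workflow(explicitInvocation = true,
+    dependents = @Dependent(type = ConfigMapDependentResource.class))
+@ControllerConfiguration
+public class ExplicitInvocationReconciler implements Reconciler<MyResource> {
+
+  @Override
+  public UpdateControl<MyResource> reconcile(MyResource resource, Context<MyResource> context) {
+    // arbitrary validation or preparation logic may run here; skipping the call below skips the workflow
+    context.managedWorkflowAndDependentResourceContext().reconcileManagedWorkflow();
+    return UpdateControl.noUpdate();
+  }
+}
+```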
+
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitinvocation)
+for more details.
+
+For `cleanup`, if the `Cleaner` interface is implemented, `cleanupManageWorkflow()` needs to be called explicitly.
+However, if the `Cleaner` interface is not implemented, the workflow cleanup will be invoked implicitly.
+See
+related [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/workflow/workflowexplicitcleanup).
+
+While nothing prevents calling the workflow multiple times in a reconciler, it isn't typical or even recommended to do
+so. Conversely, if explicit invocation is requested but `reconcileManagedWorkflow` is not called in the primary resource
+reconciler, the workflow won't be reconciled at all.
+
+## Notes and Caveats
+
+- Delete is almost always called on every resource during the cleanup. However, it might be the case
+  that the resources were already deleted in a previous run, or were never even created. This should
+  not be a problem, since dependent resources usually cache the state of the resource, so they are
+  already aware that the resource does not exist and that nothing needs to be done if delete is
+  called.
+- If a resource has owner references, it will be automatically deleted by the Kubernetes garbage
+  collector if the owner resource is marked for deletion. This might not be desirable; to make
+  sure that delete is handled by the workflow, don't use a garbage-collected Kubernetes dependent
+  resource, use for
+  example [`CRUDNoGCKubernetesDependentResource`](https://github.com/java-operator-sdk/java-operator-sdk/blob/86e5121d56ed4ecb3644f2bc8327166f4f7add72/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/dependent/kubernetes/CRUDNoGCKubernetesDependentResource.java).
+- No state is persisted regarding the workflow execution. Every reconciliation causes all the
+ resources to be reconciled again, in other words the whole workflow is again evaluated.
+
diff --git a/docs/content/en/docs/documentation/error-handling-retries.md b/docs/content/en/docs/documentation/error-handling-retries.md
new file mode 100644
index 0000000000..eeecf54751
--- /dev/null
+++ b/docs/content/en/docs/documentation/error-handling-retries.md
@@ -0,0 +1,146 @@
+---
+title: Error handling and retries
+weight: 46
+---
+
+## How Automatic Retries Work
+
+JOSDK automatically schedules retries whenever your `Reconciler` throws an exception. This robust retry mechanism helps handle transient issues like network problems or temporary resource unavailability.
+
+### Default Retry Behavior
+
+The default retry implementation covers most typical use cases with exponential backoff:
+
+```java
+GenericRetry.defaultLimitedExponentialRetry()
+ .setInitialInterval(5000) // Start with 5-second delay
+ .setIntervalMultiplier(1.5D) // Increase delay by 1.5x each retry
+ .setMaxAttempts(5); // Maximum 5 attempts
+```
+
+### Configuration Options
+
+**Using the `@GradualRetry` annotation:**
+
+```java
+@ControllerConfiguration
+@GradualRetry(maxAttempts = 3, initialInterval = 2000)
+public class MyReconciler implements Reconciler {
+ // reconciler implementation
+}
+```
+
+**Custom retry implementation:**
+
+Specify a custom retry class in the `@ControllerConfiguration` annotation:
+
+```java
+@ControllerConfiguration(retry = MyCustomRetry.class)
+public class MyReconciler implements Reconciler {
+ // reconciler implementation
+}
+```
+
+Your custom retry class must:
+- Provide a no-argument constructor for automatic instantiation
+- Optionally implement `AnnotationConfigurable` for configuration from annotations. See [`GenericRetry`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/retry/GenericRetry.java#)
+ implementation for more details.
+
+### Accessing Retry Information
+
+The [Context](https://github.com/java-operator-sdk/java-operator-sdk/blob/master/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/Context.java) object provides retry state information:
+
+```java
+@Override
+public UpdateControl<MyResource> reconcile(MyResource resource, Context<MyResource> context) {
+ if (context.isLastAttempt()) {
+ // Handle final retry attempt differently
+ resource.getStatus().setErrorMessage("Failed after all retry attempts");
+ return UpdateControl.patchStatus(resource);
+ }
+
+  // Normal reconciliation logic
+  // ...
+  return UpdateControl.noUpdate();
+}
+```
+
+### Important Retry Behavior Notes
+
+- **Retry limits don't block new events**: When retry limits are reached, new reconciliations still occur for new events
+- **No retry on limit reached**: If an error occurs after reaching the retry limit, no additional retries are scheduled until new events arrive
+- **Event-driven recovery**: Fresh events can restart the retry cycle, allowing recovery from previously failed states
+
+A successful execution resets the retry state.
+
+### Reconciler Error Handler
+
+In order to facilitate error reporting, you can override the [`updateErrorStatus`](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Reconciler.java#L52)
+method in `Reconciler`:
+
+```java
+public class MyReconciler implements Reconciler<WebPage> {
+
+ @Override
+  public ErrorStatusUpdateControl<WebPage> updateErrorStatus(
+      WebPage resource, Context<WebPage> context, Exception e) {
+ return handleError(resource, e);
+ }
+
+}
+```
+
+The `updateErrorStatus` method is called in case an exception is thrown from the `Reconciler`. It is
+also called even if no retry policy is configured, just after the reconciler execution.
+`RetryInfo.getAttemptCount()` is zero after the first reconciliation attempt, since it is not a
+result of a retry (regardless of whether a retry policy is configured).
+
+`ErrorStatusUpdateControl` tells the SDK what to do and how to perform the status
+update on the primary resource, which is always performed as a status sub-resource request. Note that
+this update request will also produce an event and result in a reconciliation if the
+controller is not generation-aware.
+
+This feature is only available for the `reconcile` method of the `Reconciler` interface, since
+there should not be updates to resources that have been marked for deletion.
+
+Retry can be skipped in cases of unrecoverable errors:
+
+```java
+ ErrorStatusUpdateControl.patchStatus(customResource).withNoRetry();
+```
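+
+Putting the pieces together, a hedged sketch of a complete `updateErrorStatus` implementation
+(assuming a hypothetical `errorMessage` status field and a hypothetical `ValidationException`
+marking unrecoverable errors) might look like this:
+
+```java
+@Override
+public ErrorStatusUpdateControl<WebPage> updateErrorStatus(
+    WebPage resource, Context<WebPage> context, Exception e) {
+  // record the failure on the status; this is patched as a status sub-resource request
+  resource.getStatus().setErrorMessage("Error: " + e.getMessage());
+  var control = ErrorStatusUpdateControl.patchStatus(resource);
+  // skip further retries for errors that retrying cannot fix
+  return e instanceof ValidationException ? control.withNoRetry() : control;
+}
+```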
+
+### Correctness and Automatic Retries
+
+While it is possible to deactivate automatic retries, this is not desirable unless there is a particular reason.
+Errors naturally occur, whether it be transient network errors or conflicts
+when a given resource is handled by a `Reconciler` but modified simultaneously by a user in
+a different process. Automatic retries handle these cases nicely and will eventually result in a
+successful reconciliation.
+
+## Retry, Rescheduling and Event Handling Common Behavior
+
+Retry, reschedule, and standard event processing form a relatively complex system, each of these
+functionalities interacting with the others. In the following, we describe the interplay of
+these features:
+
+1. A successful execution resets a retry and the rescheduled executions that were present before
+ the reconciliation. However, the reconciliation outcome can instruct a new rescheduling (`UpdateControl` or `DeleteControl`).
+
+ For example, if a reconciliation had previously been rescheduled for after some amount of time, but an event triggered
+ the reconciliation (or cleanup) in the meantime, the scheduled execution would be automatically cancelled, i.e.
+   rescheduling a reconciliation does not guarantee that one will occur precisely at that time; it simply guarantees that one will occur by that time at the latest.
+ Of course, it's always possible to reschedule a new reconciliation at the end of that "automatic" reconciliation.
+
+ Similarly, if a retry was scheduled, any event from the cluster triggering a successful execution in the meantime
+   would cancel the scheduled retry (because there's now no point in retrying something that already succeeded).
+
+2. In case an exception is thrown, a retry is initiated. However, if an event is received
+ meanwhile, it will be reconciled instantly, and this execution won't count as a retry attempt.
+3. If the retry limit is reached (so no more automatic retries would happen), but a new event is
+   received, the reconciliation will still happen. It won't reset the retry, though, and will still be
+   marked as the last attempt in the retry info. Point (1) still holds - a successful reconciliation will reset the retry - but no retry will be scheduled in case of an error.
+
+The thing to remember when it comes to retrying or rescheduling is that JOSDK tries to avoid unnecessary work. When
+you reschedule an operation, you instruct JOSDK to perform that operation by the end of the rescheduling
+delay at the latest. If something occurs on the cluster that triggers that particular operation (reconciliation or
+cleanup) in the meantime, then JOSDK considers that there's no point in attempting that operation again at the end of
+the specified delay. The same idea also applies to retries.
diff --git a/docs/content/en/docs/documentation/eventing.md b/docs/content/en/docs/documentation/eventing.md
new file mode 100644
index 0000000000..77daeb6fa3
--- /dev/null
+++ b/docs/content/en/docs/documentation/eventing.md
@@ -0,0 +1,327 @@
+---
+title: Event sources and related topics
+weight: 47
+---
+
+## Handling Related Events with Event Sources
+
+See also
+this [blog post](https://csviri.medium.com/java-operator-sdk-introduction-to-event-sources-a1aab5af4b7b).
+
+Event sources are a relatively simple yet powerful and extensible concept to trigger controller
+executions, usually based on changes to dependent resources. You typically need an event source
+when you want your `Reconciler` to be triggered when something occurs to secondary resources
+that might affect the state of your primary resource. This is needed because a given
+`Reconciler` will only listen by default to events affecting the primary resource type it is
+configured for. Event sources act as listeners for events affecting these secondary resources so
+that a reconciliation of the associated primary resource can be triggered when needed. Note that
+these secondary resources need not be Kubernetes resources. Typically, when dealing with
+non-Kubernetes objects or services, we can extend our operator to handle webhooks or websockets
+or to react to any event coming from a service we interact with. This allows for very efficient
+controller implementations because reconciliations are then only triggered when something occurs
+on resources affecting our primary resources, thus doing away with the need to periodically
+reschedule reconciliations.
+
+
+
+There are a few interesting points here:
+
+The `CustomResourceEventSource` event source is a special one, responsible for handling events
+pertaining to changes affecting our primary resources. This `EventSource` is always registered
+for every controller automatically by the SDK. It is important to note that events always relate
+to a given primary resource. Concurrency is still handled for you, even in the presence of
+`EventSource` implementations, and the SDK still guarantees that there is no concurrent execution of
+the controller for any given primary resource (though, of course, concurrent/parallel executions
+of events pertaining to other primary resources still occur as expected).
+
+### Caching and Event Sources
+
+Kubernetes resources are handled in a declarative manner. The same also holds true for event
+sources. For example, if we define an event source to watch for changes of a Kubernetes Deployment
+object using an `InformerEventSource`, we always receive the whole associated object from the
+Kubernetes API. This object might be needed at any point during our reconciliation process and
+it's best to retrieve it from the event source directly when possible instead of fetching it
+from the Kubernetes API since the event source guarantees that it will provide the latest
+version. Not only that, but many event source implementations also cache resources they handle
+so that it's possible to retrieve the latest version of resources without needing to make any
+calls to the Kubernetes API, thus allowing for very efficient controller implementations.
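+
+As a small, hedged sketch (resource types are taken from the examples in this document), the
+cached secondary resource can be read from the `Context`, which is backed by the registered event
+sources, instead of querying the Kubernetes API directly:
+
+```java
+@Override
+public UpdateControl<WebPage> reconcile(WebPage webPage, Context<WebPage> context) {
+  // returns the cached, up-to-date Deployment associated with this primary resource, if any
+  Optional<Deployment> deployment = context.getSecondaryResource(Deployment.class);
+  deployment.ifPresent(d -> {
+    // use the cached Deployment here, no direct API server call needed
+  });
+  return UpdateControl.noUpdate();
+}
+```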
+
+Note that after an operator starts, caches are already populated by the time the first reconciliation
+is processed for the `InformerEventSource` implementation. However, this does not necessarily
+hold true for all event source implementations (`PerResourcePollingEventSource` for example). The SDK
+provides methods to handle this situation elegantly, allowing you to check if an object is
+cached and to retrieve it from a provided supplier if not. See the
+related [method](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PerResourcePollingEventSource.java#L146).
+
+### Registering Event Sources
+
+To register event sources, your `Reconciler` has to override the `prepareEventSources` method and
+return the list of event sources to register. One way to see this in action is
+to look at the
+[WebPage example](https://github.com/operator-framework/java-operator-sdk/blob/main/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageReconciler.java)
+(irrelevant details omitted):
+
+```java
+
+import java.util.List;
+
+@ControllerConfiguration
+public class WebappReconciler
+    implements Reconciler<Webapp>, Cleaner<Webapp>, EventSourceInitializer<Webapp> {
+  // omitted code
+
+ @Override
+  public List<EventSource<?, Webapp>> prepareEventSources(EventSourceContext<Webapp> context) {
+    InformerEventSourceConfiguration<Deployment> configuration =
+        InformerEventSourceConfiguration.from(Deployment.class, Webapp.class)
+ .withLabelSelector(SELECTOR)
+ .build();
+ return List.of(new InformerEventSource<>(configuration, context));
+ }
+}
+```
+
+In the example above an `InformerEventSource` is configured and registered.
+`InformerEventSource` is one of the bundled `EventSource` implementations that JOSDK provides to
+cover common use cases.
+
+### Managing Relation between Primary and Secondary Resources
+
+Event sources let your operator know when a secondary resource has changed and that your
+operator might need to reconcile this new information. However, in order to do so, the SDK needs
+to somehow retrieve the primary resource associated with whichever secondary resource triggered
+the event. In the `Webapp` example above, when an event occurs on a tracked `Deployment`, the
+SDK needs to be able to identify which `Webapp` resource is impacted by that change.
+
+Seasoned Kubernetes users already know one way to track this parent-child kind of relationship:
+using owner references. Indeed, that's how the SDK deals with this situation by default as well,
+that is, if your controller properly set owner references on your secondary resources, the SDK
+will be able to follow that reference back to your primary resource automatically without you
+having to worry about it.
+
+However, owner references cannot always be used as they are restricted to operating within a
+single namespace (i.e. you cannot have an owner reference to a resource in a different namespace)
+and are, by essence, limited to Kubernetes resources so you're out of luck if your secondary
+resources live outside of a cluster.
+
+This is why JOSDK provides the `SecondaryToPrimaryMapper` interface so that you can provide
+alternative ways for the SDK to identify which primary resource needs to be reconciled when
+something occurs to your secondary resources. We even provide some of these alternatives in the
+[Mappers](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/Mappers.java)
+class.
+
+Note that, while a set of `ResourceID` is returned, this set usually consists only of one
+element. It is however possible to return multiple values or even no value at all to cover some
+rare corner cases. Returning an empty set means that the mapper considered the secondary
+resource event as irrelevant and the SDK will thus not trigger a reconciliation of the primary
+resource in that situation.
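+
+As a hedged sketch (the annotation keys are purely illustrative), a custom
+`SecondaryToPrimaryMapper` could map a secondary `ConfigMap` back to its primary resource via
+annotations the reconciler is assumed to have set on it:
+
+```java
+SecondaryToPrimaryMapper<ConfigMap> mapper = configMap -> {
+  var annotations = configMap.getMetadata().getAnnotations();
+  var name = annotations.get("example.com/owner-name");
+  var namespace = annotations.get("example.com/owner-namespace");
+  if (name == null) {
+    // irrelevant event: returning an empty set means no primary resource gets reconciled
+    return Set.of();
+  }
+  return Set.of(new ResourceID(name, namespace));
+};
+```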
+
+Adding a `SecondaryToPrimaryMapper` is typically sufficient when there is a one-to-many relationship
+between primary and secondary resources. The secondary resources can be mapped to their primary
+owner, and this is enough information to also get these secondary resources from the `Context`
+object that's passed to your `Reconciler`.
+
+There are however cases when this isn't sufficient and you need to provide an explicit mapping
+between a primary resource and its associated secondary resources using an implementation of the
+`PrimaryToSecondaryMapper` interface. This is typically needed when there are many-to-one or
+many-to-many relationships between primary and secondary resources, e.g. when the primary resource
+is referencing secondary resources.
+See [PrimaryToSecondaryIT](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/primarytosecondary/PrimaryToSecondaryIT.java)
+integration test for a sample.
+
+### Built-in EventSources
+
+There are multiple event sources provided out of the box; the following are some of the more central ones:
+
+#### `InformerEventSource`
+
+[InformerEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/informer/InformerEventSource.java)
+is probably the most important `EventSource` implementation to know about. When you create an
+`InformerEventSource`, JOSDK will automatically create and register a `SharedIndexInformer`, a
+fabric8 Kubernetes client class, that will listen for events associated with the resource type
+you configured your `InformerEventSource` with. If you want to listen to Kubernetes resource
+events, `InformerEventSource` is probably the only thing you need to use. It's highly
+configurable so you can tune it to your needs. Take a look at
+[InformerEventSourceConfiguration](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/informer/InformerEventSourceConfiguration.java)
+and associated classes for more details, but one interesting feature we can mention here is the
+ability to filter events so that you only get notified for events you care about. A
+particularly interesting advantage of the `InformerEventSource`, as opposed to using your own
+informer-based listening mechanism, is that its caches are particularly well optimized, preventing
+reconciliations from being triggered when not needed and allowing efficient operators to be written.
+
+#### `PerResourcePollingEventSource`
+
+[PerResourcePollingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PerResourcePollingEventSource.java)
+is used to poll external APIs, which don't support webhooks or other event notifications. It
+extends the abstract
+[ExternalResourceCachingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/ExternalResourceCachingEventSource.java)
+to support caching.
+See [MySQL Schema sample](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/sample-operators/mysql-schema/src/main/java/io/javaoperatorsdk/operator/sample/MySQLSchemaReconciler.java)
+for usage.
+
+#### `PollingEventSource`
+
+[PollingEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/polling/PollingEventSource.java)
+is similar to `PerResourcePollingEventSource` except that, contrary to that event source, it
+doesn't poll a specific API separately per resource, but periodically and independently of
+actually observed primary resources.
+
+#### Inbound event sources
+
+[SimpleInboundEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/inbound/SimpleInboundEventSource.java)
+and
+[CachingInboundEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/inbound/CachingInboundEventSource.java)
+are used to handle incoming events from webhooks and messaging systems.
+
+#### `ControllerResourceEventSource`
+
+[ControllerResourceEventSource](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/source/controller/ControllerResourceEventSource.java)
+is a special `EventSource` implementation that you will never have to deal with directly. It is,
+however, at the core of the SDK and is automatically added for you: this is the main event source
+that listens for changes to your primary resources and triggers your `Reconciler` when needed.
+It features smart caching and is really optimized to minimize Kubernetes API accesses and avoid
+unduly triggering your `Reconciler`.
+
+For more on the philosophy of event sources not related to the Kubernetes API, see
+issue [#729](https://github.com/java-operator-sdk/java-operator-sdk/issues/729).
+
+
+## InformerEventSource Multi-Cluster Support
+
+It is possible to handle resources for a remote cluster with `InformerEventSource`. To do so,
+simply set a client that connects to the remote cluster:
+
+```java
+
+InformerEventSourceConfiguration<SecondaryResource> configuration =
+    InformerEventSourceConfiguration.from(SecondaryResource.class, PrimaryResource.class)
+        .withKubernetesClient(remoteClusterClient)
+        .withSecondaryToPrimaryMapper(Mappers.fromDefaultAnnotations())
+        .build();
+
+```
+
+You will also need to specify a `SecondaryToPrimaryMapper`, since the default one
+is based on owner references and won't work across cluster instances. You could, for example, use the provided implementation that relies on annotations added to the secondary resources to identify the associated primary resource.
+
+See related [integration test](https://github.com/operator-framework/java-operator-sdk/tree/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/informerremotecluster).
+
+
+## Generation Awareness and Event Filtering
+
+A best practice when an operator starts up is to reconcile all the associated resources because
+changes might have occurred to the resources while the operator was not running.
+
+When this first reconciliation is done successfully, the next reconciliation is triggered if either
+dependent resources are changed or the primary resource `.spec` field is changed. If other fields
+like `.metadata` are changed on the primary resource, the reconciliation could be skipped. This
+behavior is supported out of the box and reconciliation is by default not triggered if
+changes to the primary resource do not increase the `.metadata.generation` field.
+Note that changes to `.metadata.generation` are automatically handled by Kubernetes.
+
+To turn off this feature, set `generationAwareEventProcessing` to `false` for the `Reconciler`.
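+
+A minimal sketch of turning it off (the reconciler and resource names are illustrative):
+
+```java
+// With generation-aware processing disabled, metadata-only changes also trigger reconciliations.
+@ControllerConfiguration(generationAwareEventProcessing = false)
+public class MyReconciler implements Reconciler<MyCustomResource> {
+  // omitted code
+}
+```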
+
+
+## Max Interval Between Reconciliations
+
+When informers / event sources are properly set up, and the `Reconciler` implementation is
+correct, no additional reconciliation triggers should be needed. However, it's
+a [common practice](https://github.com/java-operator-sdk/java-operator-sdk/issues/848#issuecomment-1016419966)
+to have a failsafe periodic trigger in place, just to make sure resources are nevertheless
+reconciled after a certain amount of time. This functionality is in place by default, with a
+rather high time interval (currently 10 hours) after which a reconciliation will be
+automatically triggered even in the absence of other events. See how to override this using the
+standard annotation:
+
+```java
+@ControllerConfiguration(maxReconciliationInterval = @MaxReconciliationInterval(
+ interval = 50,
+ timeUnit = TimeUnit.MILLISECONDS))
+public class MyReconciler implements Reconciler {}
+```
+
+The event is not propagated at a fixed rate; rather, it's scheduled after each reconciliation. So the
+next reconciliation will occur at most within the specified interval after the last reconciliation.
+
+This feature can be turned off by setting `maxReconciliationInterval`
+to [`Constants.NO_MAX_RECONCILIATION_INTERVAL`](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/reconciler/Constants.java#L20-L20)
+or any non-positive number.
+
+The automatic retries are not affected by this feature so a reconciliation will be re-triggered
+on error, according to the specified retry policy, regardless of this maximum interval setting.
+
+## Rate Limiting
+
+It is possible to rate limit reconciliation on a per-resource basis. The rate limit also takes
+precedence over retry/re-schedule configurations: for example, even if a retry was scheduled for
+the next second but this request would make the resource go over its rate limit, the next
+reconciliation will be postponed according to the rate limiting rules. Note that the
+reconciliation is never cancelled, it will just be executed as early as possible based on rate
+limitations.
+
+Rate limiting is by default turned **off**, since correct configuration depends on the reconciler
+implementation, in particular, on how long a typical reconciliation takes.
+(The parallelism of reconciliation itself can be limited by configuring the `ExecutorService` of
+[`ConfigurationService`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L120-L120)
+appropriately.)
+
+A default rate limiter implementation is provided, see
+[`PeriodRateLimiter`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/rate/PeriodRateLimiter.java#L14-L14).
+Users can override it by implementing their own
+[`RateLimiter`](https://github.com/java-operator-sdk/java-operator-sdk/blob/ce4d996ee073ebef5715737995fc3d33f4751275/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/processing/event/rate/RateLimiter.java)
+and specifying this custom implementation using the `rateLimiter` field of the
+`@ControllerConfiguration` annotation. Similarly to the `Retry` implementations,
+`RateLimiter` implementations must provide an accessible, no-arg constructor for instantiation
+purposes and can further be automatically configured from your own annotation, provided
+your `RateLimiter` implementation also implements the `AnnotationConfigurable` interface,
+parameterized by your custom annotation type.
+
+To configure the default rate limiter use the `@RateLimited` annotation on your
+`Reconciler` class. The following configuration limits each resource to reconcile at most twice
+within a 3 second interval:
+
+```java
+
+@RateLimited(maxReconciliations = 2, within = 3, unit = TimeUnit.SECONDS)
+@ControllerConfiguration
+public class MyReconciler implements Reconciler {
+
+}
+```
+
+Thus, if a given resource was reconciled twice in one second, no further reconciliation for this
+resource will happen before two seconds have elapsed. Note that, since the rate is limited on a
+per-resource basis, other resources can still be reconciled at the same time, as long, of course,
+as they stay within their own rate limits.
+
+## Optimizing Caches
+
+One of the ideas around the operator pattern is that all the relevant resources are cached, thus reconciliation is
+usually very fast (especially if no resources are updated in the process) since the operator is then mostly working with
+in-memory state. However, for large clusters, caching huge amounts of primary and secondary resources might consume lots
+of memory. JOSDK provides ways to mitigate this issue and optimize the memory usage of controllers. While these features
+are working and tested, we need feedback from real production usage.
+
+### Bounded Caches for Informers
+
+Limiting caches for informers - thus for Kubernetes resources - is supported by ensuring that resources are in the cache
+for a limited time, via a cache eviction of least recently used resources. This means that when resources are created
+and frequently reconciled, they stay "hot" in the cache. However, if, over time, a given resource "cools" down, i.e. it
+becomes less and less used to the point that it might not be reconciled anymore, it will eventually get evicted from the
+cache to free up memory. If such an evicted resource were to become reconciled again, the bounded cache implementation
+would then fetch it from the API server and the "hot/cold" cycle would start anew.
+
+Since all resources need to be reconciled when a controller starts, it is not practical to set a maximal cache size as
+it's desirable that all resources be cached as soon as possible to make the initial reconciliation process on start as
+fast and efficient as possible, avoiding undue load on the API server. It's therefore more interesting to gradually
+evict cold resources than try to limit cache sizes.
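+
+As a minimal, hedged sketch (the primary resource type and expiry duration are illustrative), a
+Caffeine-backed bounded item store can be created via the provided factory and then plugged into
+the informer configuration of the controller or event source:
+
+```java
+// Resources not accessed for 5 minutes are evicted and transparently re-fetched when needed again.
+BoundedItemStore<MyCustomResource> boundedItemStore =
+    CaffeineBoundedItemStores.boundedItemStore(
+        kubernetesClient, MyCustomResource.class, Duration.ofMinutes(5));
+```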
+
+See usage of the related implementation using [Caffeine](https://github.com/ben-manes/caffeine) cache in integration
+tests
+for [primary resources](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/caffeine-bounded-cache-support/src/test/java/io/javaoperatorsdk/operator/processing/event/source/cache/sample/AbstractTestReconciler.java).
+
+See
+also [CaffeineBoundedItemStores](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/caffeine-bounded-cache-support/src/main/java/io/javaoperatorsdk/operator/processing/event/source/cache/CaffeineBoundedItemStores.java)
+for more details.
\ No newline at end of file
diff --git a/docs/content/en/docs/documentation/features.md b/docs/content/en/docs/documentation/features.md
new file mode 100644
index 0000000000..8c8909c8b2
--- /dev/null
+++ b/docs/content/en/docs/documentation/features.md
@@ -0,0 +1,55 @@
+---
+title: Other Features
+weight: 57
+---
+
+The Java Operator SDK (JOSDK) is a high-level framework and tooling suite for implementing Kubernetes operators. By default, features follow best practices in an opinionated way. However, configuration options and feature flags are available to fine-tune or disable these features.
+
+## Support for Well-Known Kubernetes Resources
+
+Controllers can be registered for standard Kubernetes resources (not just custom resources), such as `Ingress`, `Deployment`, and others.
+
+See the [integration test](https://github.com/operator-framework/java-operator-sdk/blob/main/operator-framework/src/test/java/io/javaoperatorsdk/operator/baseapi/deployment) for an example of reconciling deployments.
+
+```java
+public class DeploymentReconciler
+    implements Reconciler<Deployment>, TestExecutionInfoProvider {
+
+  @Override
+  public UpdateControl<Deployment> reconcile(
+      Deployment resource, Context<Deployment> context) {
+    // omitted code
+  }
+}
+```
+
+## Leader Election
+
+Operators are typically deployed with a single active instance. However, you can deploy multiple instances where only one (the "leader") processes events. This is achieved through "leader election."
+
+While all instances run and start their event sources to populate caches, only the leader processes events. If the leader crashes, other instances are already warmed up and ready to take over when a new leader is elected.
+
+See sample configuration in the [E2E test](https://github.com/java-operator-sdk/java-operator-sdk/blob/8865302ac0346ee31f2d7b348997ec2913d5922b/sample-operators/leader-election/src/main/java/io/javaoperatorsdk/operator/sample/LeaderElectionTestOperator.java#L21-L23).
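+
+As a rough sketch (the lease name and namespace below are placeholders; see the E2E test linked above for a complete
+setup), leader election can be enabled when constructing the operator:
+
+```java
+import io.javaoperatorsdk.operator.Operator;
+import io.javaoperatorsdk.operator.api.config.LeaderElectionConfiguration;
+
+// Only the instance that currently holds the named Lease in the given namespace processes events;
+// the others keep their caches warm and take over if the leader goes away.
+Operator operator = new Operator(o -> o.withLeaderElectionConfiguration(
+    new LeaderElectionConfiguration("my-operator-lease", "my-operator-namespace")));
+```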
+
+## Automatic CRD Generation
+
+**Note:** This feature is provided by the [Fabric8 Kubernetes Client](https://github.com/fabric8io/kubernetes-client), not JOSDK itself.
+
+To automatically generate CRD manifests from your annotated Custom Resource classes, add this dependency to your project:
+
+```xml
+<dependency>
+  <groupId>io.fabric8</groupId>
+  <artifactId>crd-generator-apt</artifactId>
+  <scope>provided</scope>
+</dependency>
+```
+
+The CRD will be generated in `target/classes/META-INF/fabric8` (or `target/test-classes/META-INF/fabric8` for test scope) with the CRD name suffixed by the generated spec version.
+
+For example, a CR using the `java-operator-sdk.io` group with a `mycrs` plural form will result in these files:
+- `mycrs.java-operator-sdk.io-v1.yml`
+- `mycrs.java-operator-sdk.io-v1beta1.yml`
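+
+For illustration, a matching custom resource class might look like the following sketch (the `MyCR`, `MyCRSpec` and
+`MyCRStatus` names are hypothetical, with the spec and status classes defined elsewhere; the group and plural form
+determine the generated file names):
+
+```java
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.Plural;
+import io.fabric8.kubernetes.model.annotation.Version;
+
+@Group("java-operator-sdk.io")
+@Version("v1")
+@Plural("mycrs")
+public class MyCR extends CustomResource<MyCRSpec, MyCRStatus> implements Namespaced {}
+```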
+
+**Note for Quarkus users:** If you're using the `quarkus-operator-sdk` extension, you don't need to add any extra dependency for CRD generation - the extension handles this automatically.
diff --git a/docs/content/en/docs/documentation/observability.md b/docs/content/en/docs/documentation/observability.md
new file mode 100644
index 0000000000..27a68086d5
--- /dev/null
+++ b/docs/content/en/docs/documentation/observability.md
@@ -0,0 +1,112 @@
+---
+title: Observability
+weight: 55
+---
+
+## Runtime Info
+
+[RuntimeInfo](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/RuntimeInfo.java#L16-L16)
+is used mainly to check the actual health of event sources. Based on this information it is easy to implement custom
+liveness probes.
+
+This is related to the
+[stopOnInformerErrorDuringStartup](https://github.com/java-operator-sdk/java-operator-sdk/blob/main/operator-framework-core/src/main/java/io/javaoperatorsdk/operator/api/config/ConfigurationService.java#L168-L168)
+setting: this flag usually needs to be set to `false` in order to control the exact liveness properties.
+
+See also an example implementation in the
+[WebPage sample](https://github.com/java-operator-sdk/java-operator-sdk/blob/3e2e7c4c834ef1c409d636156b988125744ca911/sample-operators/webpage/src/main/java/io/javaoperatorsdk/operator/sample/WebPageOperator.java#L38-L43).
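+
+A rough sketch of such a check, based on `RuntimeInfo` (HTTP probe wiring omitted; accessor names are assumed from the
+linked class and may differ between versions):
+
+```java
+import io.javaoperatorsdk.operator.Operator;
+import io.javaoperatorsdk.operator.RuntimeInfo;
+
+Operator operator = new Operator();
+// ... register reconcilers, then operator.start() ...
+
+RuntimeInfo runtimeInfo = operator.getRuntimeInfo();
+// Report the process as live only when the operator is started and all event sources are healthy.
+boolean live = runtimeInfo.isStarted() && runtimeInfo.allEventSourcesAreHealthy();
+```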
+
+## Contextual Info for Logging with MDC
+
+Logging is enhanced with additional contextual information using
+[MDC](http://www.slf4j.org/manual.html#mdc). The following attributes are available in most
+parts of reconciliation logic and during the execution of the controller:
+
+| MDC Key | Value added from primary resource |
+|:---------------------------|:----------------------------------|
+| `resource.apiVersion` | `.apiVersion` |
+| `resource.kind` | `.kind` |
+| `resource.name` | `.metadata.name` |
+| `resource.namespace` | `.metadata.namespace` |
+| `resource.resourceVersion` | `.metadata.resourceVersion` |
+| `resource.generation` | `.metadata.generation` |
+| `resource.uid` | `.metadata.uid` |
+
+For more information about MDC see this [link](https://www.baeldung.com/mdc-in-log4j-2-logback).
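+
+Since the framework populates these keys during reconciliation, they are typically referenced from the logging
+pattern (e.g. `%X{resource.name}` with Logback), but they can also be read back directly, as in this small sketch:
+
+```java
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+Logger log = LoggerFactory.getLogger("reconciler");
+
+// Inside reconcile(), the MDC already carries the primary resource coordinates.
+log.info("Reconciling {}/{}", MDC.get("resource.namespace"), MDC.get("resource.name"));
+```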
+
+## Metrics
+
+JOSDK provides built-in support for reporting metrics on what is happening with your reconcilers, in the form of
+the `Metrics` interface, which can be implemented to connect to your metrics provider of choice; JOSDK calls its
+methods as it goes about reconciling resources. By default, a no-operation implementation is used, providing a
+no-cost sane default. A [Micrometer](https://micrometer.io)-based implementation is also provided.
+
+You can use a different implementation by overriding the default one provided by the default `ConfigurationService`, as
+follows:
+
+```java
+Metrics metrics; // initialize your metrics implementation
+Operator operator = new Operator(client, o -> o.withMetrics(metrics));
+```
+
+### Micrometer implementation
+
+The Micrometer implementation is typically created using one of the provided factory methods which, depending on which
+one is used, return either a ready-to-use instance or a builder allowing users to customize how the implementation
+behaves, in particular when it comes to the granularity of collected metrics. It is, for example, possible to collect
+metrics on a per-resource basis via tags that are associated with meters. This is the default, historical behavior, but
+this will change in a future version of JOSDK because it dramatically increases the cardinality of metrics, which
+could lead to performance issues.
+
+To create a `MicrometerMetrics` instance that behaves as it has historically, you can simply create it via:
+
+```java
+MeterRegistry registry; // initialize your registry implementation
+Metrics metrics = new MicrometerMetrics(registry);
+```
+
+Note, however, that this constructor is deprecated; we encourage you to use the factory methods instead, which either
+return a fully pre-configured instance or a builder object that lets you configure more easily how the instance
+will behave. You can, for example, configure whether the implementation should collect metrics on a per-resource
+basis, whether associated meters should be removed when a resource is deleted, and how the clean-up is performed.
+See the relevant classes' documentation for more details.
+
+For example, the following will create a `MicrometerMetrics` instance configured to collect metrics on a per-resource
+basis, deleting the associated meters 5 seconds after a resource is deleted, using up to 2 threads to do so.
+
+```java
+MicrometerMetrics.newPerResourceCollectingMicrometerMetricsBuilder(registry)
+ .withCleanUpDelayInSeconds(5)
+ .withCleaningThreadNumber(2)
+ .build();
+```
+
+### Operator SDK metrics
+
+The micrometer implementation records the following metrics:
+
+| Meter name | Type | Tag names | Description |
+|-------------------------------------------------------------|----------------|-------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|
+| operator.sdk.reconciliations.executions.`<reconciler name>` | gauge          | group, version, kind                                                                  | Number of executions of the named reconciler                                                            |
+| operator.sdk.reconciliations.queue.size.`<reconciler name>` | gauge          | group, version, kind                                                                  | How many resources are queued to get reconciled by the named reconciler                                 |
+| operator.sdk.`