diff --git a/CHANGELOG.md b/CHANGELOG.md index 1331dbbc..22cc8b6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ All notable changes to this project will be documented in this file. ## [Unreleased] +## [23.7.0] - 2023-07-14 + ### Added - Generate OLM bundle for Release 23.4.0 ([#585]). diff --git a/Cargo.lock b/Cargo.lock index 592f3c11..f2cb486d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1904,7 +1904,7 @@ dependencies = [ [[package]] name = "stackable-kafka-crd" -version = "0.0.0-dev" +version = "23.7.0" dependencies = [ "rstest", "semver", @@ -1919,7 +1919,7 @@ dependencies = [ [[package]] name = "stackable-kafka-operator" -version = "0.0.0-dev" +version = "23.7.0" dependencies = [ "futures", "serde", @@ -1935,7 +1935,7 @@ dependencies = [ [[package]] name = "stackable-kafka-operator-binary" -version = "0.0.0-dev" +version = "23.7.0" dependencies = [ "built", "clap", diff --git a/deploy/helm/kafka-operator/Chart.yaml b/deploy/helm/kafka-operator/Chart.yaml index 3af8f3fb..f2c32438 100644 --- a/deploy/helm/kafka-operator/Chart.yaml +++ b/deploy/helm/kafka-operator/Chart.yaml @@ -1,8 +1,8 @@ --- apiVersion: v2 name: kafka-operator -version: "0.0.0-dev" -appVersion: "0.0.0-dev" +version: "23.7.0" +appVersion: "23.7.0" description: The Stackable Operator for Apache Kafka home: https://github.com/stackabletech/kafka-operator maintainers: diff --git a/docs/antora.yml b/docs/antora.yml index d522680b..156ae9bf 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,3 +1,4 @@ --- name: home -version: "nightly" +version: "23.7" +prerelease: false diff --git a/docs/modules/kafka/examples/getting_started/getting_started.sh b/docs/modules/kafka/examples/getting_started/getting_started.sh index 019c423d..e5eab8d3 100755 --- a/docs/modules/kafka/examples/getting_started/getting_started.sh +++ b/docs/modules/kafka/examples/getting_started/getting_started.sh @@ -21,28 +21,28 @@ cd "$(dirname "$0")" case "$1" in "helm") -echo "Adding 'stackable-dev' Helm Chart 
repository" +echo "Adding 'stackable-stable' Helm Chart repository" # tag::helm-add-repo[] -helm repo add stackable-dev https://repo.stackable.tech/repository/helm-dev/ +helm repo add stackable-stable https://repo.stackable.tech/repository/helm-stable/ # end::helm-add-repo[] echo "Updating Helm repositories" helm repo update echo "Installing Operators with Helm" # tag::helm-install-operators[] -helm install --wait commons-operator stackable-dev/commons-operator --version 0.0.0-dev -helm install --wait secret-operator stackable-dev/secret-operator --version 0.0.0-dev -helm install --wait zookeeper-operator stackable-dev/zookeeper-operator --version 0.0.0-dev -helm install --wait kafka-operator stackable-dev/kafka-operator --version 0.0.0-dev +helm install --wait commons-operator stackable-stable/commons-operator --version 23.7.0 +helm install --wait secret-operator stackable-stable/secret-operator --version 23.7.0 +helm install --wait zookeeper-operator stackable-stable/zookeeper-operator --version 23.7.0 +helm install --wait kafka-operator stackable-stable/kafka-operator --version 23.7.0 # end::helm-install-operators[] ;; "stackablectl") echo "installing Operators with stackablectl" # tag::stackablectl-install-operators[] stackablectl operator install \ - commons=0.0.0-dev \ - secret=0.0.0-dev \ - zookeeper=0.0.0-dev \ - kafka=0.0.0-dev + commons=23.7.0 \ + secret=23.7.0 \ + zookeeper=23.7.0 \ + kafka=23.7.0 # end::stackablectl-install-operators[] ;; *) diff --git a/docs/modules/kafka/examples/getting_started/install-operator-output.txt b/docs/modules/kafka/examples/getting_started/install-operator-output.txt index d5c18d55..65c7260c 100644 --- a/docs/modules/kafka/examples/getting_started/install-operator-output.txt +++ b/docs/modules/kafka/examples/getting_started/install-operator-output.txt @@ -1,6 +1,6 @@ # tag::stackablectl-install-operators-output[] -[INFO ] Installing commons operator in version 0.0.0-dev -[INFO ] Installing secret operator in version 
0.0.0-dev -[INFO ] Installing zookeeper operator in version 0.0.0-dev -[INFO ] Installing kafka operator in version 0.0.0-dev +[INFO ] Installing commons operator in version 23.7.0 +[INFO ] Installing secret operator in version 23.7.0 +[INFO ] Installing zookeeper operator in version 23.7.0 +[INFO ] Installing kafka operator in version 23.7.0 # end::stackablectl-install-operators-output[] diff --git a/docs/modules/kafka/pages/getting_started/installation.adoc b/docs/modules/kafka/pages/getting_started/installation.adoc index d494c884..fb61cd7c 100644 --- a/docs/modules/kafka/pages/getting_started/installation.adoc +++ b/docs/modules/kafka/pages/getting_started/installation.adoc @@ -1,20 +1,22 @@ = Installation -On this page you will install the Stackable Operator for Apache Kafka and operators for its dependencies - ZooKeeper - as well as the commons and secret operator which are required by all Stackable Operators. +On this page you will install the Stackable Operator for Apache Kafka and operators for its dependencies - ZooKeeper - +as well as the commons and secret operator which are required by all Stackable Operators. == Stackable Operators There are 2 ways to install Stackable Operators: -1. Using xref:stackablectl::index.adoc[stackablectl] - -2. Using Helm +. Using xref:management:stackablectl:index.adoc[stackablectl] +. Using Helm === stackablectl -The stackablectl command line tool is the recommended way to interact with operators and dependencies. Follow the xref:stackablectl::installation.adoc[installation steps] for your platform if you choose to work with stackablectl. +The `stackablectl` command line tool is the recommended way to interact with operators and dependencies. Follow the +xref:management:stackablectl:installation.adoc[installation steps] for your platform if you choose to work with +`stackablectl`. 
-After you have installed stackablectl, run the following command to install all operators necessary for Kafka: +After you have installed `stackablectl`, run the following command to install all operators necessary for Kafka: [source,bash] ---- @@ -28,7 +30,7 @@ The tool will show include::example$getting_started/install-operator-output.txt[tag=stackablectl-install-operators-output] ---- -TIP: Consult the xref:stackablectl::quickstart.adoc[] to learn more about how to use stackablectl. +TIP: Consult the xref:management:stackablectl:quickstart.adoc[] to learn more about how to use `stackablectl`. === Helm @@ -46,7 +48,8 @@ Then install the Stackable Operators: include::example$getting_started/getting_started.sh[tag=helm-install-operators] ---- -Helm will deploy the operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. +Helm will deploy the operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as +the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. == What's next diff --git a/docs/modules/kafka/pages/index.adoc b/docs/modules/kafka/pages/index.adoc index 6693b701..0d962c15 100644 --- a/docs/modules/kafka/pages/index.adoc +++ b/docs/modules/kafka/pages/index.adoc @@ -2,45 +2,66 @@ :description: The Stackable Operator for Apache Superset is a Kubernetes operator that can manage Apache Kafka clusters. Learn about its features, resources, dependencies and demos, and see the list of supported Kafka versions. :keywords: Stackable Operator, Apache Kafka, Kubernetes, operator, SQL, engineer, broker, big data, CRD, StatefulSet, ConfigMap, Service, Druid, ZooKeeper, NiFi, S3, demo, version -The Stackable Operator for Apache Kafka is an operator that can deploy and manage https://kafka.apache.org/[Apache Kafka] clusters on Kubernetes. 
+:metadata-quorum: https://cwiki.apache.org/confluence/display/KAFKA/KIP-500%3A+Replace+ZooKeeper+with+a+Self-Managed+Metadata+Quorum + +The Stackable Operator for Apache Kafka is an operator that can deploy and manage https://kafka.apache.org/[Apache Kafka] +clusters on Kubernetes. // what is Kafka? -Apache Kafka is a distributed streaming platform designed to handle large volumes of data in real-time. It is commonly used for real-time data processing, data ingestion, event streaming, and messaging between applications. +Apache Kafka is a distributed streaming platform designed to handle large volumes of data in real-time. It is commonly +used for real-time data processing, data ingestion, event streaming, and messaging between applications. == Getting started -Follow the xref:kafka:getting_started/index.adoc[] which will guide you through installing The Stackable Kafka and ZooKeeper Operators, setting up ZooKeeper and Kafka and testing your Kafka using kcat. +Follow the xref:kafka:getting_started/index.adoc[] which will guide you through installing The Stackable Kafka and +ZooKeeper Operators, setting up ZooKeeper and Kafka and testing your Kafka using kcat. == Resources -The _KafkaCluster_ custom resource contains your Kafka cluster configuration. It defines a single `broker` xref:concepts:roles-and-role-groups.adoc[role]. +The _KafkaCluster_ custom resource contains your Kafka cluster configuration. It defines a single `broker` +xref:concepts:roles-and-role-groups.adoc[role]. image::kafka_overview.drawio.svg[A diagram depicting the Kubernetes resources created by the operator.] -For every xref:concepts:roles-and-role-groups.adoc#_role_groups[role group] in the `broker` role the Operator creates a StatefulSet. Multiple Services are created - one at role level, one per role group as well as one for every individual Pod - to allow access to the entire Kafka cluster, parts of it or just individual brokers. 
+For every xref:concepts:roles-and-role-groups.adoc#_role_groups[role group] in the `broker` role the Operator creates a +StatefulSet. Multiple Services are created - one at role level, one per role group as well as one for every individual +Pod - to allow access to the entire Kafka cluster, parts of it or just individual brokers. -For every StatefulSet (role group) a ConfigMap is deployed containing a `log4j.properties` file for xref:usage-guide/logging.adoc[logging] configuration and a `server.properties` file containing the whole Kafka configuration which is derived from the KafkaCluster resource. +For every StatefulSet (role group) a ConfigMap is deployed containing a `log4j.properties` file for +xref:usage-guide/logging.adoc[logging] configuration and a `server.properties` file containing the whole Kafka +configuration which is derived from the KafkaCluster resource. -The Operator creates a xref:concepts:service_discovery.adoc[] for the whole KafkaCluster which references the Service for the whole cluster. Other operators use this ConfigMap to connect to a Kafka cluster simply by name and it can also be used by custom third party applications to find the connection endpoint. +The Operator creates a xref:concepts:service_discovery.adoc[] for the whole KafkaCluster which references the Service +for the whole cluster. Other operators use this ConfigMap to connect to a Kafka cluster simply by name and it can also +be used by custom third party applications to find the connection endpoint. == Dependencies -Kafka requires xref:zookeeper:index.adoc[Apache ZooKeeper] for coordination purposes (it will not be needed in the future as it will be replaced with a https://cwiki.apache.org/confluence/display/KAFKA/KIP-500%3A+Replace+ZooKeeper+with+a+Self-Managed+Metadata+Quorum[built-in solution]). 
+Kafka requires xref:zookeeper:index.adoc[Apache ZooKeeper] for coordination purposes (it will not be needed in the +future as it will be replaced with a {metadata-quorum}[built-in solution]). == Connections to other products -Since Kafka often takes on a bridging role, many other products connect to it. In the <> below you will find example data pipelines that use xref:nifi:index.adoc[Apache NiFi with the Stackable Operator] to write to Kafka and xref:nifi:index.adoc[Apache Druid with the Stackable Operator] to read from Kafka. But you can also connect using xref:spark-k8s:index.adoc[Apache Spark] or with a custom Job written in various languages. +Since Kafka often takes on a bridging role, many other products connect to it. In the <> below you will +find example data pipelines that use xref:nifi:index.adoc[Apache NiFi with the Stackable Operator] to write to Kafka and +xref:nifi:index.adoc[Apache Druid with the Stackable Operator] to read from Kafka. But you can also connect using +xref:spark-k8s:index.adoc[Apache Spark] or with a custom Job written in various languages. == [[demos]]Demos -xref:stackablectl::index.adoc[] supports installing xref:stackablectl::demos/index.adoc[] with a single command. The demos are complete data piplines which showcase multiple components of the Stackable platform working together and which you can try out interactively. Both demos below inject data into Kafka using NiFi and read from the Kafka topics using Druid. +xref:management:stackablectl:index.adoc[] supports installing xref:demos:index.adoc[] with a single command. The demos +are complete data pipelines which showcase multiple components of the Stackable platform working together and which you +can try out interactively. Both demos below inject data into Kafka using NiFi and read from the Kafka topics using Druid. 
=== Waterlevel Demo -The xref:stackablectl::demos/nifi-kafka-druid-water-level-data.adoc[] demo uses data from https://www.pegelonline.wsv.de/webservice/ueberblick[PEGELONLINE] to visualize water levels in rivers and coastal regions of Germany from historic and real time data. +The xref:demos:nifi-kafka-druid-water-level-data.adoc[] demo uses data from +https://www.pegelonline.wsv.de/webservice/ueberblick[PEGELONLINE] to visualize water levels in rivers and coastal +regions of Germany from historic and real time data. === Earthquake Demo -The xref:stackablectl::demos/nifi-kafka-druid-earthquake-data.adoc[] demo ingests https://earthquake.usgs.gov/[earthquake data] into a similar pipeline as is used in the waterlevel demo. +The xref:demos:nifi-kafka-druid-earthquake-data.adoc[] demo ingests https://earthquake.usgs.gov/[earthquake data] into +a similar pipeline as is used in the waterlevel demo. == Supported Versions diff --git a/docs/modules/kafka/pages/usage-guide/logging.adoc b/docs/modules/kafka/pages/usage-guide/logging.adoc index a19ccd95..9d0dee2e 100644 --- a/docs/modules/kafka/pages/usage-guide/logging.adoc +++ b/docs/modules/kafka/pages/usage-guide/logging.adoc @@ -15,4 +15,4 @@ spec: ---- Further information on how to configure logging, can be found in -xref:home:concepts:logging.adoc[]. +xref:concepts:logging.adoc[]. diff --git a/docs/modules/kafka/pages/usage-guide/monitoring.adoc b/docs/modules/kafka/pages/usage-guide/monitoring.adoc index 6be233ea..efcc5c76 100644 --- a/docs/modules/kafka/pages/usage-guide/monitoring.adoc +++ b/docs/modules/kafka/pages/usage-guide/monitoring.adoc @@ -1,4 +1,4 @@ = Monitoring The managed Kafka instances are automatically configured to export Prometheus metrics. See -xref:home:operators:monitoring.adoc[] for more details. +xref:operators:monitoring.adoc[] for more details. 
diff --git a/docs/modules/kafka/pages/usage-guide/security.adoc b/docs/modules/kafka/pages/usage-guide/security.adoc index 82ef2bd6..673b627e 100644 --- a/docs/modules/kafka/pages/usage-guide/security.adoc +++ b/docs/modules/kafka/pages/usage-guide/security.adoc @@ -2,7 +2,8 @@ == Encryption -The internal and client communication can be encrypted TLS. This requires the xref:secret-operator::index.adoc[Secret Operator] to be present in order to provide certificates. The utilized certificates can be changed in a top-level config. +The internal and client communication can be encrypted TLS. This requires the xref:secret-operator:index.adoc[Secret +Operator] to be present in order to provide certificates. The utilized certificates can be changed in a top-level config. [source,yaml] ---- @@ -28,7 +29,7 @@ spec: <1> The `spec.clusterConfig.tls.serverSecretClass` refers to the client-to-server encryption. Defaults to the `tls` secret. Can be deactivated by setting `serverSecretClass` to `null`. <2> The `spec.clusterConfig.tls.internalSecretClass` refers to the broker-to-broker internal encryption. This must be explicitly set or defaults to `tls`. May be disabled by setting `internalSecretClass` to `null`. -The `tls` secret is deployed from the xref:secret-operator::index.adoc[Secret Operator] and looks like this: +The `tls` secret is deployed from the xref:secret-operator:index.adoc[Secret Operator] and looks like this: [source,yaml] ---- @@ -47,11 +48,14 @@ spec: autoGenerate: true ---- -You can create your own secrets and reference them e.g. in the `spec.clusterConfig.tls.serverSecretClass` or `spec.clusterConfig.tls.internalSecretClass` to use different certificates. +You can create your own secrets and reference them e.g. in the `spec.clusterConfig.tls.serverSecretClass` or +`spec.clusterConfig.tls.internalSecretClass` to use different certificates. == Authentication -The internal or broker-to-broker communication is authenticated via TLS. 
In order to enforce TLS authentication for client-to-server communication, you can set an `AuthenticationClass` reference in the custom resource provided by the xref:commons-operator::index.adoc[Commons Operator]. +The internal or broker-to-broker communication is authenticated via TLS. In order to enforce TLS authentication for +client-to-server communication, you can set an `AuthenticationClass` reference in the custom resource provided by the +xref:commons-operator:index.adoc[Commons Operator]. [source,yaml] ---- @@ -103,7 +107,9 @@ spec: == [[authorization]]Authorization -If you wish to include integration with xref:opa::index.adoc[Open Policy Agent] and already have an OPA cluster, then you can include an `opa` field pointing to the OPA cluster discovery `ConfigMap` and the required package. The package is optional and will default to the `metadata.name` field: +If you wish to include integration with xref:opa:index.adoc[Open Policy Agent] and already have an OPA cluster, then you +can include an `opa` field pointing to the OPA cluster discovery `ConfigMap` and the required package. 
The package is +optional and will default to the `metadata.name` field: [source,yaml] ---- diff --git a/docs/modules/kafka/pages/usage-guide/storage-resources.adoc b/docs/modules/kafka/pages/usage-guide/storage-resources.adoc index 299d6305..214e39d7 100644 --- a/docs/modules/kafka/pages/usage-guide/storage-resources.adoc +++ b/docs/modules/kafka/pages/usage-guide/storage-resources.adoc @@ -22,7 +22,7 @@ If nothing is configured in the custom resource for a certain role group, then b == Resource Requests -include::home:concepts:stackable_resource_requests.adoc[] +include::concepts:stackable_resource_requests.adoc[] A minimal HA setup consisting of 2 Brokers has the following https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[resource requirements]: diff --git a/docs/templating_vars.yaml b/docs/templating_vars.yaml index 66cb405d..5ed56e0c 100644 --- a/docs/templating_vars.yaml +++ b/docs/templating_vars.yaml @@ -1,9 +1,9 @@ --- helm: - repo_name: stackable-dev - repo_url: https://repo.stackable.tech/repository/helm-dev/ + repo_name: stackable-stable + repo_url: https://repo.stackable.tech/repository/helm-stable/ versions: - commons: 0.0.0-dev - secret: 0.0.0-dev - zookeeper: 0.0.0-dev - kafka: 0.0.0-dev + commons: "23.7.0" + secret: "23.7.0" + zookeeper: "23.7.0" + kafka: "23.7.0" diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml index db48fb8a..3d16f5e2 100644 --- a/rust/crd/Cargo.toml +++ b/rust/crd/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "OSL-3.0" name = "stackable-kafka-crd" repository = "https://github.com/stackabletech/kafka-operator" -version = "0.0.0-dev" +version = "23.7.0" publish = false [dependencies] diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index 61e3a0d5..29a889c5 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" license = "OSL-3.0" name = "stackable-kafka-operator-binary" repository = 
"https://github.com/stackabletech/kafka-operator" -version = "0.0.0-dev" +version = "23.7.0" publish = false [dependencies] diff --git a/rust/operator/Cargo.toml b/rust/operator/Cargo.toml index 2361533d..3dd7db01 100644 --- a/rust/operator/Cargo.toml +++ b/rust/operator/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "OSL-3.0" name = "stackable-kafka-operator" repository = "https://github.com/stackabletech/kafka-operator" -version = "0.0.0-dev" +version = "23.7.0" publish = false [dependencies] diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index d60a2631..7935de8b 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -6,30 +6,30 @@ dimensions: - name: kafka values: - - 2.8.1-stackable0.0.0-dev - - 3.1.0-stackable0.0.0-dev - - 3.2.0-stackable0.0.0-dev - - 3.3.1-stackable0.0.0-dev - - 3.4.0-stackable0.0.0-dev + - 2.8.1-stackable23.7 + - 3.1.0-stackable23.7 + - 3.2.0-stackable23.7 + - 3.3.1-stackable23.7 + - 3.4.0-stackable23.7 - name: kafka-latest values: - - 3.4.0-stackable0.0.0-dev + - 3.4.0-stackable23.7 - name: zookeeper values: - - 3.6.3-stackable0.0.0-dev - - 3.7.0-stackable0.0.0-dev - - 3.8.0-stackable0.0.0-dev + - 3.6.3-stackable23.7 + - 3.7.0-stackable23.7 + - 3.8.0-stackable23.7 - name: zookeeper-latest values: - - 3.8.0-stackable0.0.0-dev + - 3.8.0-stackable23.7 - name: upgrade_old values: - - 2.8.1-stackable0.0.0-dev - - 3.2.0-stackable0.0.0-dev - - 3.3.1-stackable0.0.0-dev + - 2.8.1-stackable23.7 + - 3.2.0-stackable23.7 + - 3.3.1-stackable23.7 - name: upgrade_new values: - - 3.4.0-stackable0.0.0-dev + - 3.4.0-stackable23.7 - name: use-client-tls values: - "true"