diff --git a/.gitignore b/.gitignore
index 3010bccd5..40e8fa2cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
# IDE specific files
.idea/
+.history/
# Hugo
.hugo_build.lock
diff --git a/.prow.yaml b/.prow.yaml
index 7ac9b0d80..d44f6dc0d 100644
--- a/.prow.yaml
+++ b/.prow.yaml
@@ -5,7 +5,7 @@ presubmits:
clone_uri: "ssh://git@github.com/kubermatic/docs.git"
spec:
containers:
- - image: quay.io/kubermatic/build:go-1.22-node-18-kind-0.21-2
+ - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4
command:
- make
args:
@@ -21,7 +21,7 @@ presubmits:
clone_uri: "ssh://git@github.com/kubermatic/docs.git"
spec:
containers:
- - image: quay.io/kubermatic/build:go-1.22-node-18-kind-0.21-2
+ - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4
command:
- "./hack/verify-filenames.sh"
resources:
@@ -35,7 +35,7 @@ presubmits:
clone_uri: "ssh://git@github.com/kubermatic/docs.git"
spec:
containers:
- - image: quay.io/kubermatic/hugo:0.119.0-0
+ - image: quay.io/kubermatic/hugo:0.150.0-0
command:
- "./hack/ci/verify-hugo.sh"
resources:
@@ -50,12 +50,12 @@ presubmits:
clone_uri: "ssh://git@github.com/kubermatic/docs.git"
spec:
containers:
- - image: quay.io/kubermatic/remark-lint:1.0.0
+ - image: quay.io/kubermatic/remark-lint:2.0.0
command:
- "./hack/ci/lint-markdown.sh"
resources:
requests:
cpu: 200m
- memory: 128Mi
+ memory: 512Mi
limits:
- memory: 1Gi
+ memory: 2Gi
diff --git a/Makefile b/Makefile
index c3fd54ed5..edf975bdc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-CODESPELL_IMAGE ?= quay.io/kubermatic/build:go-1.24-node-20-kind-0.27-1
+CODESPELL_IMAGE ?= quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4
CODESPELL_BIN := $(shell which codespell)
DOCKER_BIN := $(shell which docker)
@@ -8,7 +8,7 @@ preview:
--name kubermatic-docs \
-p 1313:1313 \
-w /docs \
- -v `pwd`:/docs quay.io/kubermatic/hugo:0.119.0-0 \
+ -v `pwd`:/docs quay.io/kubermatic/hugo:0.150.0-0 \
hugo server -D -F --bind 0.0.0.0
.PHONY: runbook
diff --git a/OWNERS b/OWNERS
index 45207248d..0f3eb8bd0 100644
--- a/OWNERS
+++ b/OWNERS
@@ -8,6 +8,7 @@ approvers:
- toschneck
- themue
- scheeles
+ - csengerszabo
reviewers:
- sig-api
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 653d04ddb..7340aecdf 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -21,14 +21,18 @@ aliases:
sig-app-management:
- ahmedwaleedmalik
- archups
+ - buraksekili
- cnvergence
- dharapvj
- julioc-p
+ - mohamed-rafraf
- simontheleg
- soer3n
- xrstf
sig-cluster-management:
+ - adoi
- ahmedwaleedmalik
+ - buraksekili
- cnvergence
- embik
- julioc-p
@@ -41,6 +45,7 @@ aliases:
- xmudrii
- xrstf
sig-networking:
+ - adoi
- buraksekili
- cnvergence
- moadqassem
diff --git a/config.toml b/config.toml
index b35a32d8d..6388bc385 100644
--- a/config.toml
+++ b/config.toml
@@ -54,7 +54,7 @@ themeVariant = "kubermatic"
# Force to have /en/my-page and /fr/my-page routes, even for default language.
# defaultContentLanguageInSubdir = true
baseWebsiteUrl = "https://www.kubermatic.com/"
-cookiefirstBanner = "e938558d-78b5-4f1b-b574-cd5dfde73684"
+cookiefirstScript = "https://consent.cookiefirst.com/sites/docs.kubermatic.com-e938558d-78b5-4f1b-b574-cd5dfde73684/consent.js"
titleSuffix = "Kubermatic Documentation"
slackJoinLink = "https://join.slack.com/t/kubermatic-community/shared_invite/zt-1jtex2o9f-fpaDZ2ytX7FmDaNOHqljIg"
enableTOC = true
diff --git a/content/developer-platform/platform-users/consuming-services/_index.en.md b/content/developer-platform/platform-users/consuming-services/_index.en.md
index cfa251cab..cb289ae77 100644
--- a/content/developer-platform/platform-users/consuming-services/_index.en.md
+++ b/content/developer-platform/platform-users/consuming-services/_index.en.md
@@ -29,10 +29,10 @@ offered to add it to the organization.
Note that every Service shows:
-* its main title (the human-readable name of a Service, like "Certificate Management")
-* its internal name (ultimately the name of the Kubernetes `Service` object you would need to
+- its main title (the human-readable name of a Service, like "Certificate Management")
+- its internal name (ultimately the name of the Kubernetes `Service` object you would need to
manually enable the service using `kubectl`)
-* a short description
+- a short description
Check out [Your First Service]({{< relref "../../tutorials/your-first-service/" >}}) if you want to publish services by yourself.
@@ -102,5 +102,5 @@ spec:
Rejecting a claim will severely impact a Service, if not even break it. Consult with the Service's
documentation or the service owner if rejecting a claim is supported.
-When you _change into_ (`kubctl ws …`) a different workspace, kubectl will inform you if there are
+When you *change into* (`kubectl ws …`) a different workspace, kubectl will inform you if there are
outstanding permission claims that you need to accept or reject.
diff --git a/content/developer-platform/service-providers/api-syncagent/_index.en.md b/content/developer-platform/service-providers/api-syncagent/_index.en.md
index 8d0f84edf..4e5a5c7fc 100644
--- a/content/developer-platform/service-providers/api-syncagent/_index.en.md
+++ b/content/developer-platform/service-providers/api-syncagent/_index.en.md
@@ -15,28 +15,28 @@ The intended usecase follows roughly these steps:
workspace. This service (not to be confused with Kubernetes services) reserves an API group
in the organization for itself, like `databases.example.corp` (two `Services` must not register
the same API group).
-2. After the `Service` is created, KDP will reconcile it, create an `APIExport` object and provide
+1. After the `Service` is created, KDP will reconcile it, create an `APIExport` object and provide
appropriate credentials for the api-syncagent (e.g. by creating a Kubernetes Secret with a
preconfigured kubeconfig in it).
-3. A service owner will now take these credentials and the configured API group and use them
+1. A service owner will now take these credentials and the configured API group and use them
to setup the api-syncagent. It is assumed that the service owner (i.e. the cluster-admin in a
service cluster) wants to make some resources (usually CRDs) available to use inside of KDP.
-4. The service owner uses the api-syncagent Helm chart (or similar deployment technique) to install
+1. The service owner uses the api-syncagent Helm chart (or similar deployment technique) to install
the agent in their cluster.
-5. To actually make resources available in the platform, the service owner now has to create a
+1. To actually make resources available in the platform, the service owner now has to create a
set of `PublishedResource` objects. The configuration happens from their point of view, meaning
they define how to publish a CRD in the platform, defining renaming rules and other projection
settings.
-6. Once a `PublishedResource` is created in the service cluster, the agent will pick it up,
+1. Once a `PublishedResource` is created in the service cluster, the agent will pick it up,
find the referenced CRD, convert/project this CRD into an `APIResourceSchema` (ARS) for kcp and
then create the ARS in org workspace.
-7. Finally the api-syncagent will take all `PublishedResources` and bundle them into the pre-existing
+1. Finally the api-syncagent will take all `PublishedResources` and bundle them into the pre-existing
`APIExport` in the org workspace. This APIExport can then be bound in the org workspace itself
(or later any sub workspaces (depending on permissions)) and be used there. The `APIExport` has
the same name as the KDP `Service` the agent is working with.
-8. kcp automatically provides a virtual workspace for the `APIExport` and this is what the agent
+1. kcp automatically provides a virtual workspace for the `APIExport` and this is what the agent
then uses to watch all objects for the relevant resources in the platform (i.e. in all workspaces).
-9. The api-syncagent will now begin to synchronize objects back and forth between the service cluster
+1. The api-syncagent will now begin to synchronize objects back and forth between the service cluster
and KDP.
## Details
@@ -49,7 +49,7 @@ with making their CRDs available in KDP (i.e. "publish" them).
However the actual data flow later will work in the opposite direction: users creating objects inside
their kcp workspaces serve as the source of truth. From there they are synced down to the service
-cluster, which is doing the projection of the `PublishedResource` _in reverse_.
+cluster, which is doing the projection of the `PublishedResource` *in reverse*.
Of course additional, auxiliary (related) objects could originate on the service cluster. For example
if you create a Certificate object in a kcp workspace and it's synced down, cert-manager will then
diff --git a/content/developer-platform/service-providers/crossplane/_index.en.md b/content/developer-platform/service-providers/crossplane/_index.en.md
index 4a51d4f42..19f1b4174 100644
--- a/content/developer-platform/service-providers/crossplane/_index.en.md
+++ b/content/developer-platform/service-providers/crossplane/_index.en.md
@@ -4,419 +4,6 @@ linkTitle = "Using Crossplane"
weight = 2
+++
-The guide describes the process of making a resource (usually defined by a CustomResourceDefinition)
-of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in the KDP
-platform (the "platform cluster" or "KDP workspaces"). This involves setting up a KDP Service and
-then installing the kcp api-syncagent and defining `PublishedResources` in the local cluster.
-
-All of the documentation and API types are worded and named from the perspective of a service owner,
-the person(s) who own a service and want to make it available to consumers in the KDP platform.
-
-## High-level Overview
-
-A "service" in KDP comprises a set of resources within a single Kubernetes API group. It doesn't
-need to be _all_ of the resources in that group, service owners are free and encouraged to only make
-a subset of resources (i.e. a subset of CRDs) available for use in the platform.
-
-For each of the CRDs on the service cluster that should be published, the service owner creates a
-`PublishedResource` object, which will contain both which CRD to publish, as well as numerous other
-important settings that influence the behaviour around handling the CRD.
-
-When publishing a resource (CRD), exactly one version is published. All others are ignored from the
-standpoint of the resource synchronization logic.
-
-All published resources together form the KDP Service. When a service is enabled in a workspace
-(i.e. it is bound to it), users can manage objects for the projected resources described by the
-published resources. These objects will be synced from the workspace onto the service cluster,
-where they are meant to be processed in whatever way the service owners desire. Any possible
-status information (in the `status` subresource) will in turn be synced back up into the workspace
-where the user can inspect it.
-
-Additionally, a published resource can describe additional so-called "related resources". These
-usually originate on the service cluster and could be for example connection detail secrets created
-by Crossplane, but could also originate in the user workspace and just be additional, auxiliary
-resources that need to be synced down to the service cluster.
-
-### `PublishedResource`
-
-In its simplest form (which is rarely practical) a `PublishedResource` looks like this:
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs # name can be freely chosen
-spec:
- resource:
- kind: Certificate
- apiGroup: cert-manager.io
- version: v1
-```
-
-However, you will most likely apply more configuration and use features described below.
-
-### Filtering
-
-The api-syncagent can be instructed to only work on a subset of resources in the KDP platform. This
-can be restricted by namespace and/or label selector.
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs # name can be freely chosen
-spec:
- resource: ...
- filter:
- namespace: my-app
- resource:
- matchLabels:
- foo: bar
-```
-
-### Schema
-
-**Warning:** The actual CRD schema is always copied verbatim. All projections
-etc. have to take into account that the resource contents must be expressible without changes to the
-schema.
-
-### Projection
-
-For stronger separation of concerns and to enable whitelabelling of services, the type meta for
-can be projected, i.e. changed between the local service cluster and the KDP platform. You could
-for example rename `Certificate` from cert-manager to `Zertifikat` inside the platform.
-
-Note that the API group of all published resources is always changed to the one defined in the
-KDP `Service` object (meaning 1 api-syncagent serves all the published resources under the same API
-group). That is why changing the API group cannot be configured in the projection.
-
-Besides renaming the Kind and Version, dependent fields like Plural, ShortNames and Categories
-can be adjusted to fit the desired naming scheme in the platform. The Plural name is computed
-automatically, but can be overridden. ShortNames and Categories are copied unless overwritten in the
-`PublishedResource`.
-
-It is also possible to change the scope of resources, i.e. turning a namespaced resource into a
-cluster-wide. This should be used carefully and might require extensive mutations.
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs # name can be freely chosen
-spec:
- resource: ...
- projection:
- version: v1beta1
- kind: Zertifikat
- plural: Zertifikate
- shortNames: [zerts]
- # categories: [management]
- # scope: Namespaced # change only when you know what you're doing
-```
-
-Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK
-projection applies only to the synced object itself and has no effect on the contents of these
-objects. To change the contents, use external solutions like Crossplane to transform objects.
-
-
-### Naming
-
-Since the api-syncagent ingests resources from many different Kubernetes clusters (workspaces) and
-combines them onto a single cluster, resources have to be renamed to prevent collisions and also
-follow the conventions of whatever tooling ultimately processes the resources locally.
-
-The renaming is configured in `spec.naming`. In there, renaming patterns are configured, where
-pre-defined placeholders can be used, for example `foo-$placeholder`. The following placeholders
-are available:
-
-* `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2")
-* `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace
-* `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace`
-* `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct
- local namespace names)
-* `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName`
-
-If nothing is configured, the default ensures that no collisions will happen: Each workspace in
-the platform will create a namespace on the local cluster, with a combination of namespace and
-name hashes used for the actual resource names.
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs # name can be freely chosen
-spec:
- resource: ...
- naming:
- namespace: "$remoteClusterName"
- name: "cert-$remoteNamespaceHash-$remoteNameHash"
-```
-
-### Related Resources
-
-The processing of resources on the service cluster often leads to additional resources being
-created, like a `Secret` for each cert-manager `Certificate` or a connection detail secret created
-by Crossplane. These need to be made available to the user in their workspaces.
-
-Likewise it's possible for auxiliary resources having to be created by the user, for example when
-the user has to provide credentials.
-
-To handle these cases, a `PublishedResource` can define multiple "related resources". Each related
-resource currently represents exactly one object to synchronize between user workspace and service
-cluster (i.e. you cannot express "sync all Secrets"). While the main published resource sync is
-always workspace->service cluster, related resources can originate on either side and so either can
-work as the source of truth.
-
-At the moment, only `ConfigMaps` and `Secrets` are allowed related resource kinds.
-
-For each related resource, the api-syncagent needs to be told the name/namespace. This is done by
-selecting a field in the main resource (for a `Certificate` this would mean `spec.secretName`). Both
-name and namespace need to be part of the main object (or be fixed values, like a hardcoded
-`kube-system` namespace).
-
-The path expressions for name and namespace are evaluated against the main object on either side
-to determine their values. So if you had a `Certificate` in your workspace with
-`spec.secretName = "my-cert"` and after syncing it down, the copy on the service cluster has a
-rewritten/mutated `spec.secretName = "jk23h4wz47329rz2r72r92-cert"` (e.g. to prevent naming
-collisions), the expression `spec.secretName` would yield `"my-cert"` for the name in the workspace
-and `"jk...."` as the name on the service cluster. Once the object exists with that name on the
-originating side, the api-syncagent will begin to sync it to the other side.
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs
-spec:
- resource:
- kind: Certificate
- apiGroup: cert-manager.io
- version: v1
-
- naming:
- # this is where our CA and Issuer live in this example
- namespace: kube-system
- # need to adjust it to prevent collions (normally clustername is the namespace)
- name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash"
-
- related:
- - origin: service # service or platform
- kind: Secret # for now, only "Secret" and "ConfigMap" are supported;
- # there is no GVK projection for related resources
-
- # configure where in the parent object we can find
- # the name/namespace of the related resource (the child)
- reference:
- name:
- # This path is evaluated in both the local and remote objects, to figure out
- # the local and remote names for the related object. This saves us from having
- # to remember mutated fields before their mutation (similar to the last-known
- # annotation).
- path: spec.secretName
-
- # namespace part is optional; if not configured,
- # api-syncagent assumes the same namespace as the owning resource
- #
- # namespace:
- # path: spec.secretName
- # regex:
- # pattern: '...'
- # replacement: '...'
- #
- # to inject static values, select a meaningless string value
- # and leave the pattern empty
- #
- # namespace:
- # path: metadata.uid
- # regex:
- # replacement: kube-system
-```
-
-## Examples
-
-### Provide Certificates
-
-This combination of `Service` and `PublishedResource` make cert-manager certificates available in
-kcp. The `Service` needs to be created in a workspace, most likely in an organization workspace.
-The `PublishedResource` is created wherever the api-syncagent and cert-manager are running.
-
-```yaml
-apiVersion: core.kdp.k8c.io/v1alpha1
-kind: Service
-metadata:
- name: certificate-management
-spec:
- apiGroup: certificates.example.corp
- catalogMetadata:
- title: Certificate Management
- description: Acquire certificates signed by Example Corp's internal CA.
-```
-
-```yaml
-apiVersion: services.kdp.k8c.io/v1alpha1
-kind: PublishedResource
-metadata:
- name: publish-certmanager-certs
-spec:
- resource:
- kind: Certificate
- apiGroup: cert-manager.io
- version: v1
-
- naming:
- # this is where our CA and Issuer live in this example
- namespace: kube-system
- # need to adjust it to prevent collions (normally clustername is the namespace)
- name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash"
-
- related:
- - origin: service # service or platform
- kind: Secret # for now, only "Secret" and "ConfigMap" are supported;
- # there is no GVK projection for related resources
-
- # configure where in the parent object we can find
- # the name/namespace of the related resource (the child)
- reference:
- name:
- # This path is evaluated in both the local and remote objects, to figure out
- # the local and remote names for the related object. This saves us from having
- # to remember mutated fields before their mutation (similar to the last-known
- # annotation).
- path: spec.secretName
- # namespace part is optional; if not configured,
- # api-syncagent assumes the same namespace as the owning resource
- # namespace:
- # path: spec.secretName
- # regex:
- # pattern: '...'
- # replacement: '...'
-```
-
-## Technical Details
-
-The following sections go into more details of the behind the scenes magic.
-
-### Synchronization
-
-Even though the whole configuration is written from the standpoint of the service owner, the actual
-synchronization logic considers the platform side as the canonical source of truth. The api-syncagent
-continuously tries to make the local objects look like the ones in the platform, while pushing
-status updates back into the platform (if the given `PublishedResource` (i.e. CRD) has a `status`
-subresource enabled).
-
-### Local <-> Remote Connection
-
-The api-syncagent tries to keep KDP-related metadata on the service cluster, away from the consumers.
-This is both to prevent vandalism and to hide implementation details.
-
-To ensure stability against future changes, once KDP has determined how a local object should be
-named, it will remember this decision in its metadata. This is so that on future reconciliations,
-the (potentially costly, but probably not) renaming logic does not need to be applied again. This
-allows the api-syncagent to change defaults and also allows the service owner to make changes to the
-naming rules without breaking existing objects.
-
-Since we do not want to store metadata on the platform side, we instead rely on label selectors on
-the local objects. Each local object has a label for the remote cluster name, namespace and object
-name, and when trying to find the matching local object, the api-syncagent simply does a label-based
-search.
-
-There is currently no sync-related metadata available on source objects, as this would either be
-annotations (untyped strings...) or require schema changes to allow additional fields in basically
-random CRDs.
-
-Note that fields like `generation` or `resourceVersion` are not relevant for any of the sync logic.
-
-### Reconcile Loop
-
-The sync loop can be divided into 5 parts:
-
-1. find the local object
-2. handle deletion
-3. ensure the destination object exists
-4. ensure the destination object's content matches the source object
-5. synchronize related resources the same way (repeat 1-4 for each related resource)
-
-#### Phase 1: Find the Local Object
-
-For this, as mentioned in the connection chapter above, the api-syncagent tries to follow label
-selectors on the local cluster. This helps prevent cluttering with consumer workspaces with KDP
-metadata. If no object is found to match the labels, that's fine and the loop will continue with
-phase 2, in which a possible Conflict error (if labels broke) is handled gracefully.
-
-The remote object in the workspace becomes the `source object` and its local equivalent is called
-the `destination object`.
-
-#### Phase 2: Handle Deletion
-
-A finalizer is used in the platform workspaces to prevent orphans in the service cluster side. This
-is the only real evidence in the platform side that the api-syncagent is even doing things. When a
-remote (source) object is deleted, the corresponding local object is deleted as well. Once the local
-object is gone, the finalizer is removed from the source object.
-
-#### Phase 3: Ensure Object Existence
-
-We have a source object and now need to create the destination. This chart shows what's happening.
-
-```mermaid
-graph TB
- A(source object):::state --> B([cleanup if in deletion]):::step
- B --> C([ensure finalizer on source object]):::step
- C --> D{exists local object?}
-
- D -- yes --> I("continue with next phase…"):::state
- D -- no --> E([apply projection]):::step
-
- subgraph "ensure dest object exists"
- E --> G([ensure resulting namespace exists]):::step
- G --> H([create local object]):::step
- H --> H_err{Errors?}
- H_err -- Conflict --> J([attempt to adopt existing object]):::step
- end
-
- H_err -- success --> I
- J --> I
-
- classDef step color:#77F
- classDef state color:#F77
-```
-
-After we followed through with these steps, both the source and destination objects exists and we
-can continue with phase 4.
-
-Resource adoption happens when creation of the initial local object fails. This can happen when labels
-get mangled. If such a conflict happens, the api-syncagent will "adopt" the existing local object by
-adding / fixing the labels on it, so that for the next reconciliation it will be found and updated.
-
-#### Phase 4: Content Synchronization
-
-Content synchronization is rather simple, really.
-
-First the source "spec" is used to patch the local object. Note that this step is called "spec", but
-should actually be called "all top-level elements besides `apiVersion`, `kind`, `status` and
-`metadata`, but still including some labels and annotations"; so if you were to publish RBAC objects,
-the syncer would include `roleRef` field, for example).
-
-To allow proper patch generation, the last known state of an object is stored in a dedicated Secret.
-This functions just like the one kubectl uses and is required for the api-syncagent to properly detect
-changes made by mutation webhooks, but uses a Secret instead of annotations because state needs to
-be kept for more objects (like related resources) and not always on the destination objects.
-
-If the published resource (CRD) has a `status` subresource enabled (not just a `status` field in its
-scheme, it must be a real subresource), then the api-syncagent will copy the status from the local
-object back up to the remote (source) object.
-
-#### Phase 5: Sync Related Resources
-
-The same logic for synchronizing the main published resource applies to their related resources as
-well. The only difference is that the source side can be either remote (workspace) or local
-(service cluster).
-
-This currently also means that sync-related metadata, which is always kept on the object's copy,
-will end up in the user workspace when a related object originates on the service cluster (the
-most common usecase). In a future version it could be nice to keep the sync state only on the
-service cluster side, away from the users.
-# Publishing resources with Crossplane
-
This guide describes the process of leveraging Crossplane as a service provider to make Crossplane
claims available as `PublishedResources` for use in KDP. This involves installing Crossplane -
including all required Crossplane [providers][crossplane/docs/providers] and
@@ -435,11 +22,11 @@ platform users.
> While this guide is not intended to be a comprehensive Crossplane guide, it is useful to be aware
> of the most common terms:
>
-> * **Providers** are pluggable building blocks to provision and manage resources via a third-party API (e.g. AWS provider)
-> * **Managed resources** (MRs) are representations of actual, provider-specific resources (e.g. EC2 instance)
-> * **Composite resource definitions** (XRDs) are Crossplane-specific definitions of API resources (similar to CRDs)
-> * **Composite resources** (XRs) and **Claims** are Crossplane-specific custom resources created from XRD objects (similar to CRs)
-> * **Compositions** are Crossplane-specific templates for transforming a XR object into one or more MR object(s)
+> - **Providers** are pluggable building blocks to provision and manage resources via a third-party API (e.g. AWS provider)
+> - **Managed resources** (MRs) are representations of actual, provider-specific resources (e.g. EC2 instance)
+> - **Composite resource definitions** (XRDs) are Crossplane-specific definitions of API resources (similar to CRDs)
+> - **Composite resources** (XRs) and **Claims** are Crossplane-specific custom resources created from XRD objects (similar to CRs)
+> - **Compositions** are Crossplane-specific templates for transforming a XR object into one or more MR object(s)
This guide will show you how to install Crossplane and all required providers on a service cluster
and provide a stripped-down `Certificate` resource in KDP. While we ultimately use cert-manager to
@@ -472,7 +59,7 @@ helm upgrade crossplane crossplane \
Once the installation is done, verify the status with the following command:
```bash
-$ kubectl get pods --namespace=crossplane-system
+kubectl get pods --namespace=crossplane-system
NAME READY STATUS RESTARTS AGE
crossplane-6494656b8b-bflcf 1/1 Running 0 45s
crossplane-rbac-manager-8458557cdd-sls58 1/1 Running 0 45s
@@ -516,7 +103,7 @@ EOF
Once the provider is installed, verify the provider status with the following command:
```bash
-$ kubectl get providers crossplane-provider-kubernetes
+kubectl get providers crossplane-provider-kubernetes
NAME INSTALLED HEALTHY PACKAGE AGE
crossplane-provider-kubernetes True True xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.11.1 104s
```
@@ -577,9 +164,9 @@ Crossplane specific `Certificate` object.
Create and apply the following three manifests to your service cluster (you can safely ignore the
misleading warnings from Crossplane regarding the validation of the composition). This will
-* bootstrap a cert-manager `ClusterIssuer` named "default-ca",
-* create a Crossplane `CompositeResourceDefinition` that defines our `Certificate` resource (which exposes only the requested common name),
-* create a Crossplane `Composition` that uses cert-manager and the created "default-ca" to issue the requested certificate
+- bootstrap a cert-manager `ClusterIssuer` named "default-ca",
+- create a Crossplane `CompositeResourceDefinition` that defines our `Certificate` resource (which exposes only the requested common name),
+- create a Crossplane `Composition` that uses cert-manager and the created "default-ca" to issue the requested certificate
```bash
kubectl apply --filename=cluster-issuer.yaml
@@ -625,6 +212,7 @@ spec:
ca:
secretName: default-ca
```
+
@@ -670,6 +258,7 @@ spec:
type: string
minLength: 1
```
+
@@ -778,13 +367,14 @@ spec:
fromConnectionSecretKey: tls.key
writeConnectionSecretsToNamespace: crossplane-system
```
+
Afterwards verify the status of the composite resource definition and the composition with the
following command:
```bash
-$ kubectl get compositeresourcedefinitions,compositions
+kubectl get compositeresourcedefinitions,compositions
NAME ESTABLISHED OFFERED AGE
xcertificates.pki.xaas.k8c.io True True 10s
@@ -859,7 +449,7 @@ graph RL
If everything worked out, you should get all relevant objects with the following command:
```bash
-$ kubectl get claim,composite,managed,certificate
+kubectl get claim,composite,managed,certificate
NAME SYNCED READY CONNECTION-SECRET AGE
certificate.pki.xaas.k8c.io/www-example-com True True www-example-com 21m
diff --git a/content/developer-platform/service-providers/publish-resources/_index.en.md b/content/developer-platform/service-providers/publish-resources/_index.en.md
index df81e2412..0b94f54d0 100644
--- a/content/developer-platform/service-providers/publish-resources/_index.en.md
+++ b/content/developer-platform/service-providers/publish-resources/_index.en.md
@@ -14,7 +14,7 @@ the person(s) who own a service and want to make it available to consumers in th
## High-level Overview
A "service" in KDP comprises a set of resources within a single Kubernetes API group. It doesn't
-need to be _all_ of the resources in that group, service owners are free and encouraged to only make
+need to be *all* of the resources in that group, service owners are free and encouraged to only make
a subset of resources (i.e. a subset of CRDs) available for use in the platform.
For each of the CRDs on the service cluster that should be published, the service owner creates a
@@ -117,6 +117,7 @@ spec:
Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK
projection applies only to the synced object itself and has no effect on the contents of these
objects. To change the contents, use external solutions like Crossplane to transform objects.
+
### (Re-)Naming
@@ -129,12 +130,12 @@ The renaming is configured in `spec.naming`. In there, renaming patterns are con
pre-defined placeholders can be used, for example `foo-$placeholder`. The following placeholders
are available:
-* `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2")
-* `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace
-* `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace`
-* `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct
+- `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2")
+- `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace
+- `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace`
+- `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct
local namespace names)
-* `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName`
+- `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName`
If nothing is configured, the default ensures that no collisions will happen: Each workspace in
the platform will create a namespace on the local cluster, with a combination of namespace and
@@ -160,10 +161,10 @@ These can be configured in a number of way in the `PublishedResource`.
Configuration happens `spec.mutation` and there are two fields:
-* `spec` contains the mutation rules when syncing the desired state (often in `spec`, but can also
+- `spec` contains the mutation rules when syncing the desired state (often in `spec`, but can also
be other top-level fields) from the remote side to the local side. Use this to apply defaulting,
normalising, and enforcing rules.
-* `status` contains the mutation rules when syncing the `status` subresource back from the local
+- `status` contains the mutation rules when syncing the `status` subresource back from the local
cluster up into the platform. Use this to normalize names and values (e.g. if you rewrote
`.spec.secretName` from `"foo"` to `"dfkbssbfh"`, make sure the status does not "leak" this name
by accident).
@@ -401,10 +402,10 @@ Note that fields like `generation` or `resourceVersion` are not relevant for any
The sync loop can be divided into 5 parts:
1. find the local object
-2. handle deletion
-3. ensure the destination object exists
-4. ensure the destination object's content matches the source object
-5. synchronize related resources the same way (repeat 1-4 for each related resource)
+1. handle deletion
+1. ensure the destination object exists
+1. ensure the destination object's content matches the source object
+1. synchronize related resources the same way (repeat 1-4 for each related resource)
#### Phase 1: Find the Local Object
@@ -481,6 +482,6 @@ well. The only difference is that the source side can be either remote (workspac
(service cluster).
Since the Sync Agent tries its best to keep sync-related data out of kcp workspaces, the last known
-state for related resources is _not_ kept together with the destination object in the kcp workspaces.
+state for related resources is *not* kept together with the destination object in the kcp workspaces.
Instead all known states (from the main object and all related resources) is kept in a single Secret
on the service cluster side.
diff --git a/content/developer-platform/setup/_index.en.md b/content/developer-platform/setup/_index.en.md
new file mode 100644
index 000000000..716d3a8f2
--- /dev/null
+++ b/content/developer-platform/setup/_index.en.md
@@ -0,0 +1,4 @@
++++
+title = "Setup"
+weight = 3
++++
diff --git a/content/developer-platform/setup/ai-agent/_index.en.md b/content/developer-platform/setup/ai-agent/_index.en.md
new file mode 100644
index 000000000..4447f91d9
--- /dev/null
+++ b/content/developer-platform/setup/ai-agent/_index.en.md
@@ -0,0 +1,139 @@
++++
+title = "AI Agent"
+weight = 3
++++
+
+## Overview
+
+The Kubermatic Developer Platform AI Agent is a specialized assistant that helps users generate Kubernetes resource YAML files through natural language within KDP workspaces. It converts requests in natural language into properly formatted Kubernetes manifests, eliminating the need to manually write lengthy YAML files from scratch.
+
+## Prerequisites
+
+Before installing the AI Agent, ensure you have:
+
+- A running KDP installation on your Kubernetes cluster
+- An OpenAI API key for the language model capabilities
+- An OIDC provider configured (the same one used by KDP)
+
+## Installation
+
+The AI Agent is deployed using Helm. Follow these steps to install it:
+
+### Prepare the Configuration
+
+Create an `ai-agent.values.yaml` file with your specific configuration:
+
+```yaml
+aiAgent:
+ imagePullSecret: |
+ {
+ "auths": {
+ "quay.io": {
+ "auth": "",
+ "email": ""
+ }
+ }
+ }
+
+ config:
+ oidc:
+ clientID: kdp-kubelogin
+      clientSecret: <CLIENT_SECRET>
+      issuerURL: https://login.<DOMAIN>
+    kubernetes_api_url: https://api.<DOMAIN>
+ openai_api_key: "" # OpenAI API key for the language model
+
+ ingress:
+ create: true
+    host: dashboard.<DOMAIN> # Use same domain as the frontend to avoid CORS errors
+ prefix: /ai-agent(/|$)(.*)
+ certIssuer:
+ kind: ClusterIssuer
+ name: letsencrypt-prod
+```
+
+Before deploying the AI Agent, you need to replace the following placeholder variables in the `ai-agent.values.yaml` file with your own values:
+
+- `<QUAY_AUTH>`
+- `<DOMAIN>`
+- `<CLIENT_SECRET>`
+
+The `<CLIENT_SECRET>` placeholder **must** be replaced with the value set in Dex and configured in the `dex.values.yaml` file.
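+
+For reference, this is the matching static client entry from the quickstart's `dex.values.yaml`; the `secret` there and the `clientSecret` above must hold the same value:
+
+```yaml
+staticClients:
+  - id: kdp-kubelogin
+    name: kdp-kubelogin
+    secret: <CLIENT_SECRET> # same value as aiAgent.config.oidc.clientSecret
+```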
+
+### Install with Helm
+
+Now that all placeholders are replaced, deploy the KDP AI Agent Helm chart.
+To log into the Helm registry, use your email address as the username and the license key you received as the password.
+
+```bash
+helm registry login quay.io
+helm upgrade --install kdp-ai-agent \
+ oci://quay.io/kubermatic/helm-charts/developer-platform-ai-agent \
+ --version=0.9.0 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=ai-agent.values.yaml
+```
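+
+To check that the agent came up, you can list the pods in the `kdp-system` namespace:
+
+```bash
+kubectl --namespace=kdp-system get pods
+```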
+
+### Configure the Dashboard
+
+To make the AI Agent accessible from the KDP Dashboard, you need to update the `values.yaml` file for your **dashboard deployment**. Assuming you followed the quickstart, this file would be `kdp-dashboard.values.yaml`.
+
+You will need to edit it to activate the AI Agent feature and set the backend URL.
+
+```yaml
+dashboard:
+ config:
+ features:
+ aiAgent:
+ enabled: true
+ generatorURL: /ai-agent/ # same domain as the host
+```
+
+You'll need to replace `<DOMAIN>` with your own domain.
+
+Then update the release of your KDP dashboard. If you followed the quickstart, it is called `kdp-dashboard` in the `kdp-system` namespace, so the commands to first log in and then upgrade look like this:
+
+```bash
+helm registry login quay.io
+helm upgrade --install kdp-dashboard \
+ oci://quay.io/kubermatic/helm-charts/developer-platform-dashboard \
+ --version=0.9.0 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=kdp-dashboard.values.yaml
+```
+
+After this, you need to delete the dashboard pods manually so they are redeployed and pick up the new values. You can find them by the label `app.kubernetes.io/name: kdp-dashboard` and delete them with:
+
+```bash
+kubectl delete pods -l app.kubernetes.io/name=kdp-dashboard -n kdp-system
+```
+
+### Verify the Installation
+
+Once the pods start, you can use the AI Agent in the frontend.
+
+A purple button should be visible in the form to create a new service object within a workspace.
+
+![AI Agent button](ai-agent-button.png)
+
+Then, once clicked, a text field will be visible where you can describe the resource you want.
+
+Here is an example after writing a prompt and clicking on `Generate`:
+
+![AI Agent prompt example](ai-agent-prompt-example.png)
+
+After a few seconds you should get the result:
+
+![AI Agent example response](ai-agent-example-response.png)
+
+You can then edit and modify the result as you like, either in the form or directly in the YAML.
+
+You also do not have to worry about an invalid schema, since the generated manifest is validated in the backend: there will be no hallucinated fields and no missing required fields.
+
+**Note:** Please be sure to check the values and the YAML in general before submitting. AI can make mistakes.
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-button.png b/content/developer-platform/setup/ai-agent/ai-agent-button.png
new file mode 100644
index 000000000..8e5fe331a
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-button.png differ
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-example-response.png b/content/developer-platform/setup/ai-agent/ai-agent-example-response.png
new file mode 100644
index 000000000..d5bf9d542
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-example-response.png differ
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png b/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png
new file mode 100644
index 000000000..a9e1993cf
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png differ
diff --git a/content/developer-platform/setup/quickstart/_index.en.md b/content/developer-platform/setup/quickstart/_index.en.md
new file mode 100644
index 000000000..cdbfbf8cc
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/_index.en.md
@@ -0,0 +1,262 @@
++++
+title = "Quickstart"
+weight = 1
++++
+
+This quickstart provides the steps to install the Kubermatic Developer Platform (KDP) on an existing Kubernetes cluster.
+You'll use Helm to deploy KDP and its core components, including Dex for user authentication and kcp as the central control plane.
+You will also set up automated TLS certificate management with cert-manager and Let's Encrypt.
+By the end, you will have a fully functional KDP installation, accessible through the KDP dashboard as well as directly with kubectl.
+
+## Prerequisites
+
+{{% notice note %}}
+At the moment, you need to be invited to get access to Kubermatic's Docker repository before you can install the Kubermatic Developer Platform.
+Please [contact sales](mailto:sales@kubermatic.com) to receive your credentials.
+{{% /notice %}}
+
+To follow this guide, you need:
+
+- an existing Kubernetes cluster with at least 3 nodes
+- a running CSI driver with a default storage class
+- a running [cert-manager][cert-manager/docs/installation] installation
+- a running ingress controller (for this guide, the [NGINX ingress controller][ingress-nginx/docs/installation] is required)
+- [kubectl][k8s/docs/tools/installation] and [Helm][helm/docs/installation] (version 3) installed locally
+
+## Installation
+
+The installation is divided into five main steps, each deploying a core component of KDP.
+You will perform the following tasks:
+
+- **Set up certificates**: First, you will configure a cert-manager issuer to automatically obtain and renew TLS certificates from Let's Encrypt.
+
+- **Deploy an identity provider**: Next, you will deploy Dex to handle user authentication, creating a central login service for both the KDP dashboard and command-line access.
+
+- **Deploy kcp**: You will deploy kcp, the core engine that enables multi-tenancy by providing isolated, secure workspaces for your users.
+
+- **Deploy KDP**: Afterwards, you will install the main KDP controllers that connect to kcp and manage the platform's resources.
+
+- **Launch the KDP dashboard**: Finally, you will deploy the KDP dashboard, the primary graphical interface for developers to interact with the platform and manage their service objects.
+
+Throughout this guide, you will need to replace several placeholder variables in the Helm values files.
+Below is a description of each value you need to provide.
+
+- `<EMAIL>`: Your email address, used by Let's Encrypt to send notifications about your TLS certificate status.
+- `<QUAY_AUTH>`: A base64-encoded password or token for the quay.io container registry. This is required to access the KDP Helm charts and container images.
+- `<DOMAIN>`: The primary public domain name you will use to access your KDP installation (e.g., kdp.my-company.com). You must own this domain and be able to configure its DNS records.
+- `<ADMIN_PASSWORD_HASH>`: A generated bcrypt hash of the password you choose for the initial admin user.
+- `<CLIENT_SECRET>`: A randomly generated, secure string that acts as a password for the KDP dashboard to authenticate with the Dex identity provider.
+- `<ENCRYPTION_KEY>`: A second, unique random string used by the KDP dashboard itself to encrypt user session cookies, adding another layer of security.
+
+### Create ClusterIssuer
+
+First, you need to create a *ClusterIssuer* named `letsencrypt-prod` for cert-manager.
+This automates the process of obtaining and renewing TLS certificates from Let's Encrypt, ensuring all web-facing components like the Dex login page and the KDP dashboard are served securely over HTTPS.
+
+Save the following content to a file named `cluster-issuer.yaml`, and change the value of the `email` field to your email address:
+
+```yaml
+{{< readfile "developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml" >}}
+```
+
+Create the *ClusterIssuer* by applying the manifest:
+
+```bash
+kubectl apply -f ./cluster-issuer.yaml
+```
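+
+cert-manager registers an ACME account with Let's Encrypt for this issuer. You can verify that the registration succeeded and the issuer is ready with:
+
+```bash
+kubectl get clusterissuer letsencrypt-prod
+```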
+
+### Deploy Dex
+
+Now, you'll deploy Dex as the platform's central identity provider.
+It handles all user logins and authentication.
+The provided configuration creates an initial admin user and prepares Dex for integration with the KDP dashboard and [kubelogin][kubelogin/src/readme] for seamless user authentication.
+
+Save the following content to a file named `dex.values.yaml`:
+
+```yaml
+{{< readfile "developer-platform/setup/quickstart/data/dex.values.yaml" >}}
+```
+
+Before deploying Dex, you need to replace the following placeholder variables in the `dex.values.yaml` file with your own values:
+
+- `<DOMAIN>`
+- `<ADMIN_PASSWORD_HASH>`
+- `<CLIENT_SECRET>`
+
+For the initial admin user, you must provide your own password as a bcrypt hash in `<ADMIN_PASSWORD_HASH>`.
+To create this hash, you can use the `htpasswd` utility, which is part of the Apache web server tools and available on most Linux distributions (you may need to install a package like "apache2-utils" or "httpd-tools").
+
+Choose a strong password and run the following command in your terminal, replacing YOUR_PASSWORD with the password you've selected:
+
+```bash
+echo 'YOUR_PASSWORD' | htpasswd -inBC 10 admin | cut -d: -f2
+```
+
+Copy the entire output string (it will start with `$2a$` or `$2y$`) and paste it as the value for `<ADMIN_PASSWORD_HASH>` in your `dex.values.yaml` file.
+Remember to save the plain-text password you chose in a secure location, as you will need it to log in to the KDP dashboard.
+
+The `<CLIENT_SECRET>` placeholder must be replaced with a long, random string that the KDP dashboard and kubelogin use to securely communicate with Dex.
+You can generate a secure, random string with the following command:
+
+```bash
+cat /dev/urandom | base64 | tr -dc 'A-Za-z0-9' | head -c32
+```
+
+This will output a random string that you can copy and paste as the value for `<CLIENT_SECRET>`.
+Save the value for later use when you deploy the KDP dashboard.
+
+Once you've replaced all placeholders, deploy the Dex Helm chart:
+
+```bash
+helm upgrade --install dex dex \
+ --repo=https://charts.dexidp.io \
+ --version=0.23.0 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=dex.values.yaml
+```
+
+### Deploy kcp
+
+Next, you'll install kcp.
+It acts as the central control plane for KDP that provides and manages the isolated workspaces for each user or team, ensuring resources are kept separate and secure.
+It's configured to use Dex for authenticating user requests.
+
+Save the following content to a file named `kcp.values.yaml`:
+
+```yaml
+{{< readfile "developer-platform/setup/quickstart/data/kcp.values.yaml" >}}
+```
+
+Before deploying kcp, you need to replace the following placeholder variables in the `kcp.values.yaml` file with your own values:
+
+- `<DOMAIN>`
+
+After you've replaced all the placeholders, deploy the kcp Helm chart:
+
+```bash
+helm upgrade --install kcp kcp \
+ --repo=https://kcp-dev.github.io/helm-charts \
+ --version=0.11.1 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=kcp.values.yaml
+```
+
+### Deploy KDP
+
+Finally, you'll deploy the main KDP application.
+It connects to the kcp control plane and includes a one-time bootstrap job that grants the admin user full administrative rights, allowing them to manage the entire platform.
+
+Save the following content to a file named `kdp.values.yaml`:
+
+```yaml
+{{< readfile "developer-platform/setup/quickstart/data/kdp.values.yaml" >}}
+```
+
+Before deploying KDP, you need to replace the following placeholder variables in the `kdp.values.yaml` file with your own values:
+
+- `<QUAY_AUTH>`
+- `<DOMAIN>`
+
+With all placeholders replaced, deploy the KDP Helm chart.
+Use your email address as the username and the license key you received as the password to log into the Helm registry.
+
+```bash
+helm registry login quay.io
+helm upgrade --install kdp \
+ oci://quay.io/kubermatic/helm-charts/developer-platform \
+ --version=0.9.0 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=kdp.values.yaml
+```
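+
+The bootstrap hook enabled in the values file runs as a one-time job; you can watch it complete with:
+
+```bash
+kubectl --namespace=kdp-system get jobs
+```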
+
+### Deploy KDP dashboard
+
+Last but not least, you'll deploy KDP's web-based dashboard, which serves as the primary user interface.
+It's configured to use Dex for user login and connects to kcp, providing developers with a graphical interface to create and manage their service objects.
+
+Save the following content to a file named `kdp-dashboard.values.yaml`:
+
+```yaml
+{{< readfile "developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml" >}}
+```
+
+Before deploying the KDP dashboard, you need to replace the following placeholder variables in the `kdp-dashboard.values.yaml` file with your own values:
+
+- `<QUAY_AUTH>`
+- `<DOMAIN>`
+- `<CLIENT_SECRET>`
+- `<ENCRYPTION_KEY>`
+
+The `<CLIENT_SECRET>` placeholder **must** be replaced with the value generated in step "Deploy Dex" and configured in the `dex.values.yaml` file.
+
+The `<ENCRYPTION_KEY>` placeholder must, similar to the OIDC client secret, be replaced with a long, random string that the KDP dashboard uses to protect user sessions.
+You can use the same command to generate a secure, random string:
+
+```bash
+cat /dev/urandom | base64 | tr -dc 'A-Za-z0-9' | head -c32
+```
+
+Copy and paste the output as the value for `<ENCRYPTION_KEY>`.
+
+Now that all placeholders are replaced, deploy the KDP dashboard Helm chart.
+To log into the Helm registry, again use your email address as the username and the license key you received as the password.
+
+```bash
+helm registry login quay.io
+helm upgrade --install kdp-dashboard \
+ oci://quay.io/kubermatic/helm-charts/developer-platform-dashboard \
+ --version=0.9.0 \
+ --create-namespace \
+ --namespace=kdp-system \
+ --values=kdp-dashboard.values.yaml
+```
+
+### Configure DNS records
+
+In order to finalize the installation and make your KDP instance accessible, you must create four records in your DNS provider.
+These records point the hostnames you configured earlier to the correct load balancers of your Kubernetes cluster.
+
+First, create three DNS records that direct traffic for the Dex login page (`login.<DOMAIN>`), the public API endpoint (`api.<DOMAIN>`), and the KDP dashboard (`dashboard.<DOMAIN>`) to your cluster's NGINX ingress controller.
+
+Assuming you installed the NGINX ingress controller into the `ingress-nginx` namespace, use the following command to retrieve the external IP address or DNS name of the load balancer (in column "EXTERNAL-IP"):
+
+```bash
+kubectl --namespace=ingress-nginx get service ingress-nginx-controller
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ingress-nginx-controller LoadBalancer 10.47.248.232 4cdd93dfab834ed9a78858c7f2633380.eu-west-1.elb.amazonaws.com 80:30807/TCP,443:30184/TCP 449d
+```
+
+Second, create a DNS record specifically for kcp (`internal.<DOMAIN>`) that points to the external IP address or DNS name of the dedicated load balancer for the kcp *Service*.
+Use the following command to retrieve the external IP address or DNS name of kcp's load balancer:
+
+```bash
+kubectl --namespace=kdp-system get service kcp-front-proxy
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+kcp-front-proxy LoadBalancer 10.240.20.65 99f1093e45d6482d95a0c22c4a2bd056.eu-west-1.elb.amazonaws.com 8443:30295/TCP 381d
+```
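+
+Assuming `<DOMAIN>` is `kdp.my-company.com` and both load balancers expose DNS names (as in the examples above), the resulting four records would look like this; use A records instead if your load balancers expose plain IP addresses:
+
+```text
+login.kdp.my-company.com.     CNAME  <EXTERNAL-IP of ingress-nginx-controller>
+api.kdp.my-company.com.       CNAME  <EXTERNAL-IP of ingress-nginx-controller>
+dashboard.kdp.my-company.com. CNAME  <EXTERNAL-IP of ingress-nginx-controller>
+internal.kdp.my-company.com.  CNAME  <EXTERNAL-IP of kcp-front-proxy>
+```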
+
+### Access the dashboard
+
+Congratulations, your KDP installation is now complete! Once your DNS records have propagated, you can access the dashboard by navigating your web browser to the URL you configured (`https://dashboard.<DOMAIN>`).
+
+You will be redirected to the Dex login page and you can use the administrative credentials that were created during the setup:
+
+- **Username**: `admin`
+- **Password**: The password you chose in step [Deploy Dex](#deploy-dex)
+
+After logging in, you will be taken to the KDP dashboard, where you can begin exploring your platform. Welcome to KDP!
+
+[cert-manager/docs/installation]: https://cert-manager.io/docs/installation/helm/
+[helm/docs/installation]: https://helm.sh/docs/intro/install/
+[ingress-nginx/docs/installation]: https://kubernetes.github.io/ingress-nginx/deploy/
+[k8s/docs/tools/installation]: https://kubernetes.io/docs/tasks/tools/#kubectl
+[kcp/chart/readme]: https://github.com/kcp-dev/helm-charts/tree/main/charts/kcp
+[kubelogin/src/readme]: https://github.com/int128/kubelogin
+
+### Extensions
+
+If you want to install the KDP AI Agent, which helps you generate YAML manifests for resources from natural-language descriptions, follow [these instructions](../ai-agent/_index.en.md).
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/dex.values.yaml b/content/developer-platform/setup/quickstart/data/dex.values.yaml
new file mode 100644
index 000000000..c2788eed4
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/dex.values.yaml
@@ -0,0 +1,36 @@
+# dex.values.yaml
+config:
+  issuer: https://login.<DOMAIN>
+ storage:
+ type: kubernetes
+ config:
+ inCluster: true
+ staticClients:
+ - id: kdp-kubelogin
+ name: kdp-kubelogin
+      secret: <CLIENT_SECRET>
+ RedirectURIs:
+ - http://localhost:8000
+ - http://localhost:18000
+        - https://dashboard.<DOMAIN>/api/auth/callback/oidc
+ enablePasswordDB: true
+ staticPasswords:
+ - email: admin
+      hash: "<ADMIN_PASSWORD_HASH>"
+ username: admin
+ userID: 08a8684b-db88-4b73-90a9-3cd1661f5466
+
+ingress:
+ enabled: true
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-prod
+ className: nginx
+ hosts:
+    - host: login.<DOMAIN>
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: dex-tls
+ hosts:
+        - login.<DOMAIN>
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/kcp.values.yaml b/content/developer-platform/setup/quickstart/data/kcp.values.yaml
new file mode 100644
index 000000000..ea4b480a7
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/kcp.values.yaml
@@ -0,0 +1,30 @@
+# kcp.values.yaml
+externalHostname: "internal.<DOMAIN>"
+externalPort: "8443"
+
+kcpFrontProxy:
+ service:
+ type: LoadBalancer
+ additionalPathMappings:
+ - path: /services/organization/
+ backend: https://kdp-virtual-workspaces:6444
+ backend_server_ca: /etc/kcp/tls/ca/tls.crt
+ proxy_client_cert: /etc/kcp-front-proxy/requestheader-client/tls.crt
+ proxy_client_key: /etc/kcp-front-proxy/requestheader-client/tls.key
+ - path: /services/service/
+ backend: https://kdp-virtual-workspaces:6444
+ backend_server_ca: /etc/kcp/tls/ca/tls.crt
+ proxy_client_cert: /etc/kcp-front-proxy/requestheader-client/tls.crt
+ proxy_client_key: /etc/kcp-front-proxy/requestheader-client/tls.key
+ extraFlags:
+    - '--cors-allowed-origins=localhost,dashboard.<DOMAIN>$'
+ - '--authentication-drop-groups=system:kcp:logical-cluster-admin'
+
+oidc:
+ enabled: true
+  issuerUrl: https://login.<DOMAIN>
+ clientId: kdp-kubelogin
+ groupClaim: groups
+ usernameClaim: email
+ usernamePrefix: 'oidc:'
+ groupsPrefix: 'oidc:'
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml b/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml
new file mode 100644
index 000000000..83112c2ce
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml
@@ -0,0 +1,45 @@
+# kdp-dashboard.values.yaml
+dashboard:
+ imagePullSecret: |-
+ {
+ "auths": {
+ "quay.io": {
+ "auth": "",
+ "email": ""
+ }
+ }
+ }
+
+ config:
+ app:
+ baseURL: https://dashboard.
+ authentication:
+ encryptionKey:
+ oidc:
+ clientID: kdp-kubelogin
+ clientSecret:
+ issuerURL: https://login.
+ backend:
+ frontProxyURL: https://api.
+ features:
+ aiAgent:
+ enabled: false
+ kubeconfigDownload:
+ enabled: true
+ serverCA: /app/_config/user-kubeconfig/ca.crt
+ serverURL: https://internal.:8443
+
+ ingress:
+ create: true
+ host: dashboard.
+ certIssuer:
+ kind: ClusterIssuer
+ name: letsencrypt-prod
+
+ extraVolumeMounts:
+ - name: user-kubeconfig-ca
+ mountPath: /app/_config/user-kubeconfig
+ secretName: kcp-ca
+ items:
+ - key: tls.crt
+ path: ca.crt
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/kdp.values.yaml b/content/developer-platform/setup/quickstart/data/kdp.values.yaml
new file mode 100644
index 000000000..04e537801
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/kdp.values.yaml
@@ -0,0 +1,36 @@
+# kdp.values.yaml
+kdp:
+ imagePullSecret: |-
+ {
+ "auths": {
+ "quay.io": {
+ "auth": "",
+ "email": ""
+ }
+ }
+ }
+
+ frontProxy:
+ internalDomain: internal.
+ publicDomain: api.
+ url: https://internal.:8443
+
+ virtualWorkspaces:
+ shardExternalURL: https://internal.:8443
+
+ hooks:
+ bootstrap:
+ enabled: true
+ extraManifests:
+ rbac.yaml: |
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: admin:cluster-admin
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+ subjects:
+ - kind: User
+ name: oidc:admin
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml b/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml
new file mode 100644
index 000000000..e5fb3a241
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml
@@ -0,0 +1,15 @@
+# cluster-issuer.yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-prod
+spec:
+ acme:
+ email:
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: letsencrypt-prod-credentials
+ solvers:
+ - http01:
+ ingress:
+ class: nginx
\ No newline at end of file
diff --git a/content/developer-platform/tutorials/agent-without-kdp/_index.en.md b/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
index bbddd8303..e3704e595 100644
--- a/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
+++ b/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
@@ -31,15 +31,15 @@ Create a file with a similar content (you most likely want to change the name, a
group under which your published resources will be made available) and create it in a kcp workspace
of your choice:
-```sh
+```bash
# use the kcp kubeconfig
-$ export KUBECONFIG=/path/to/kcp.kubeconfig
+export KUBECONFIG=/path/to/kcp.kubeconfig
# navigate to the workspace where the APIExport should exist
-$ kubectl ws :workspace:you:want:to:create:it
+kubectl ws :workspace:you:want:to:create:it
# create it
-$ kubectl create --filename apiexport.yaml
+kubectl create --filename apiexport.yaml
apiexport/test.example.com created
```
@@ -57,8 +57,8 @@ Make sure that the kubeconfig points to the right workspace (not necessarily the
This can be done via a command like this:
-```sh
-$ kubectl create secret generic kcp-kubeconfig \
+```bash
+kubectl create secret generic kcp-kubeconfig \
--namespace kcp-system \
--from-file "kubeconfig=admin.kubeconfig"
```
@@ -88,7 +88,7 @@ kcpKubeconfig: kcp-kubeconfig
Once this `values.yaml` file is prepared, install a recent development build of the Sync Agent:
-```sh
+```bash
helm repo add kcp https://kcp-dev.github.io/helm-charts
helm repo update
@@ -156,11 +156,11 @@ the RBAC rules that grant the Agent access.
The Sync Agent needs to
-* manage its `APIExport`,
-* manage `APIResourceSchemas` and
-* access the virtual workspace for its `APIExport`.
+- manage its `APIExport`,
+- manage `APIResourceSchemas` and
+- access the virtual workspace for its `APIExport`.
-This can be achieved by applying RBAC like this _in the workspace where the `APIExport` resides_:
+This can be achieved by applying RBAC like this *in the workspace where the `APIExport` resides*:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
diff --git a/content/developer-platform/tutorials/kcp-command-line/_index.en.md b/content/developer-platform/tutorials/kcp-command-line/_index.en.md
index 90e449af0..1d46feb83 100644
--- a/content/developer-platform/tutorials/kcp-command-line/_index.en.md
+++ b/content/developer-platform/tutorials/kcp-command-line/_index.en.md
@@ -57,7 +57,7 @@ kubectl ws my-subworkspace
## API Management
-A KDP Service is reconciled into an `APIExport`. To use this API, you have to _bind to_ it. Binding
+A KDP Service is reconciled into an `APIExport`. To use this API, you have to *bind to* it. Binding
involves creating a matching (= same name) `APIBinding` in the workspace where the API should be
made available.
diff --git a/content/developer-platform/tutorials/your-first-service/_index.en.md b/content/developer-platform/tutorials/your-first-service/_index.en.md
index d65236ca5..58cbe3f51 100644
--- a/content/developer-platform/tutorials/your-first-service/_index.en.md
+++ b/content/developer-platform/tutorials/your-first-service/_index.en.md
@@ -64,15 +64,15 @@ spec:
This can be applied with `kubectl` in your organization workspace.
-```sh
-$ kubectl ws :root:my-org # switch to your workspace
-$ kubectl apply -f service.yaml
+```bash
+kubectl ws :root:my-org # switch to your workspace
+kubectl apply -f service.yaml
```
Use the following command to explore the full schema for `Service` objects:
-```sh
-$ kubectl explain --api-version=core.kdp.k8c.io/v1alpha1 service
+```bash
+kubectl explain --api-version=core.kdp.k8c.io/v1alpha1 service
```
This concludes all required steps to define the new service. Click on the confirm button to create
@@ -96,7 +96,7 @@ In `spec.kubeconfig` you will find the name of the kubeconfig Secret that you ca
api-syncagent.
Now switch your focus to your own cluster, where your business logic happens (for example where
-Crossplane runs). For your Service you need to provide exactly _one_ api-syncagent in _one_ Kubernetes
+Crossplane runs). For your Service you need to provide exactly *one* api-syncagent in *one* Kubernetes
cluster. This agent can have multiple replicas as it uses leader election, but you must not have two
or more independent agents processing the same Service. There is currently no mechanism to spread
load between multiple Service clusters and two or more agents will most likely conflict with each
@@ -107,7 +107,6 @@ for more information. You basically need to provide the kubeconfig generated by
"kcp kubeconfig", the service's name (not its API Group) and a unique name for the agent itself. Put
all the information in a `values.yaml` and run `helm install` to deploy your agent.
-
{{% notice warning %}}
Currently only api-syncagent version 0.2.x is supported. Make sure you pass `--version 0.2.0` when installing the api-syncagent chart.
{{% /notice %}}
diff --git a/content/kubelb/main/_index.en.md b/content/kubelb/main/_index.en.md
index f4f4b0276..9f15e962f 100644
--- a/content/kubelb/main/_index.en.md
+++ b/content/kubelb/main/_index.en.md
@@ -35,7 +35,7 @@ KubeLB solves this problem by providing a centralized management solution that c
- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
-- [KubeLB CE](https://github.com/kubermatic/kubelb)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
diff --git a/content/kubelb/main/ce-ee-matrix/_index.en.md b/content/kubelb/main/ce-ee-matrix/_index.en.md
index f6917b9ac..9d14bcee8 100644
--- a/content/kubelb/main/ce-ee-matrix/_index.en.md
+++ b/content/kubelb/main/ce-ee-matrix/_index.en.md
@@ -20,6 +20,7 @@ KubeLB is available in two versions: Community and Enterprise.
| Ingress | ✔️ | ✔️ |
| Gateway API v1 | ✔️ | ✔️ |
| Bring your own secrets(certificates) | ✔️ | ✔️ |
+| Tunneling support through CLI | ✔️ | ❌ |
| Gateway API beta/alpha(TLS/TCP/UDP routes) | ✔️ | ❌ |
| Multiple Gateways | ✔️ | ❌ |
| DNS automation | ✔️ | ❌ |
@@ -27,7 +28,12 @@ KubeLB is available in two versions: Community and Enterprise.
| Limits for LoadBalancers, Gateways | ✔️ | ❌ |
{{% notice note %}}
-KubeLB support [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources. [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for **Gateway API** resources. While other products might work for Ingress and Gateway API resources, we are not testing them and can't guarantee the compatibility.
+KubeLB supports the following products for Ingress and Gateway API resources:
+
+- [Ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources.
+- [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for **Gateway API** resources.
+
+While other products might work for Ingress and Gateway API resources, we are not testing them and can't guarantee the compatibility.
{{% /notice %}}
## Support Policy
diff --git a/content/kubelb/main/cli/_index.en.md b/content/kubelb/main/cli/_index.en.md
new file mode 100644
index 000000000..1058a4084
--- /dev/null
+++ b/content/kubelb/main/cli/_index.en.md
@@ -0,0 +1,62 @@
++++
+title = "KubeLB CLI"
+date = 2025-08-27T10:07:15+02:00
+weight = 30
+description = "Learn how you can use KubeLB CLI to provision Load Balancers and tunnels to expose local workloads"
++++
+
+## KubeLB CLI
+
+KubeLB CLI is a command-line tool that complements KubeLB and makes it easier to manage load balancing configurations for multiple tenants in both Kubernetes and non-Kubernetes environments.
+
+The source code is open source and available at [kubermatic/kubelb-cli](https://github.com/kubermatic/kubelb-cli).
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and is not yet ready for production use. We are actively working on the feature set and incorporating feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+## Installation
+
+### Manual Installation
+
+Users can download the pre-compiled binary for their system from the [releases page](https://github.com/kubermatic/kubelb-cli/releases) and copy it to the desired location.
+
+{{% notice note %}}
+KubeLB CLI is currently available for Linux, macOS, and Windows.
+{{% /notice %}}
+
+### Install using `go install`
+
+If you have Go installed, you can also build the binary from the source code using the following command:
+
+```bash
+go install github.com/kubermatic/kubelb-cli@v0.1.0
+```
+
+### Configuration
+
+KubeLB CLI needs the tenant-scoped kubeconfig and the tenant name, configured either via environment variables or CLI flags. Environment variables are preferred since you don't have to specify them for each command.
+
+```bash
+export KUBECONFIG=/path/to/kubeconfig
+export TENANT_NAME=my-tenant
+```
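+
+Alternatively, the same settings can be passed per command via the global flags, as shown here:
+
+```bash
+kubelb status --kubeconfig /path/to/kubeconfig --tenant my-tenant
+```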
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
+
+## Further Information
+
+- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
+- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
+
+Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
+
+{{% notice tip %}}
+For latest updates follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic)
+{{% /notice %}}
diff --git a/content/kubelb/main/cli/compatibility-matrix/_index.en.md b/content/kubelb/main/cli/compatibility-matrix/_index.en.md
new file mode 100644
index 000000000..a40e2097b
--- /dev/null
+++ b/content/kubelb/main/cli/compatibility-matrix/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Compatibility Matrix"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+KubeLB CLI uses the Kubernetes management cluster that has KubeLB installed as its source of truth for load balancing configurations.
+
+Since the CLI was introduced alongside KubeLB v1.2, it requires the KubeLB management cluster to run at least v1.2.
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and is not yet ready for production use. We are actively working on the feature set and incorporating feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+| KubeLB CLI | KubeLB Management Cluster |
+|------------|---------------------------|
+| v0.1.0 | v1.2+ |
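+
+To check which version of the CLI you are running, use the `kubelb version` command:
+
+```bash
+kubelb version --short
+```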
+
+## Support Policy
+
+For the support policy, please refer to the [KubeLB Support Policy](../../support-policy/).
diff --git a/content/kubelb/main/cli/loadbalancing/_index.en.md b/content/kubelb/main/cli/loadbalancing/_index.en.md
new file mode 100644
index 000000000..36f51a059
--- /dev/null
+++ b/content/kubelb/main/cli/loadbalancing/_index.en.md
@@ -0,0 +1,36 @@
++++
+title = "Load Balancing"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+KubeLB CLI can be used to quickly provision load balancers that are public or private, depending on your load balancing configuration and needs. KubeLB then takes care of securing your endpoint with TLS certificates, automatically creating DNS records, and managing the load balancing configuration.
+
+## Prerequisites
+
+Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation to configure the Gateway or Ingress to manage DNS for the load balancer.
+
+## Create a Load Balancer
+
+To create a load balancer, you can use the `kubelb loadbalancer create` command.
+
+For example:
+
+```bash
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --hostname my-app.example.com
+```
+
+This will create a `LoadBalancer` resource that forwards traffic to the endpoints `10.0.1.1:8080` and `10.0.1.2:8080` and is accessible at `https://my-app.example.com`.
+
+Specifying a hostname is optional; if it is not provided, KubeLB generates a random hostname, provided a wildcard domain is enabled for the tenant or globally.
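+
+For instance, omitting `--hostname` lets KubeLB assign a random hostname from the wildcard domain:
+
+```bash
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080
+```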
+
+## Further actions
+
+Further actions include:
+
+- Updating the load balancer configuration
+- Deleting the load balancer
+- Getting the load balancer details
+- Listing all the load balancers
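+
+These map to the corresponding subcommands, for example:
+
+```bash
+kubelb loadbalancer get my-app
+kubelb loadbalancer list
+kubelb loadbalancer delete my-app
+```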
diff --git a/content/kubelb/main/cli/references/_index.en.md b/content/kubelb/main/cli/references/_index.en.md
new file mode 100644
index 000000000..44f9eae92
--- /dev/null
+++ b/content/kubelb/main/cli/references/_index.en.md
@@ -0,0 +1,40 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference of the Kubermatic KubeLB CLI commands and flags.
+
+## kubelb
+
+KubeLB CLI - Manage load balancers and create secure tunnels
+
+### Synopsis
+
+KubeLB CLI provides tools to manage KubeLB load balancers and create secure tunnels
+to expose local services through the KubeLB infrastructure.
+
+### Options
+
+```
+ -h, --help help for kubelb
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](commands/kubelb_completion) - Generate the autocompletion script for the specified shell
+* [kubelb docs](commands/kubelb_docs) - Generate markdown documentation for all commands
+* [kubelb expose](commands/kubelb_expose) - Expose a local port via tunnel
+* [kubelb loadbalancer](commands/kubelb_loadbalancer) - Manage KubeLB load balancers
+* [kubelb status](commands/kubelb_status) - Display current status of KubeLB
+* [kubelb tunnel](commands/kubelb_tunnel) - Manage secure tunnels to expose local services
+* [kubelb version](commands/kubelb_version) - Print the version information
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion.md b/content/kubelb/main/cli/references/commands/kubelb_completion.md
new file mode 100644
index 000000000..2ff39c182
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion.md
@@ -0,0 +1,41 @@
++++
+title = "kubelb completion"
+date = 2025-08-27T00:00:00+01:00
+weight = 200
++++
+
+## kubelb completion
+
+Generate the autocompletion script for the specified shell
+
+### Synopsis
+
+Generate the autocompletion script for kubelb for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+
+### Options
+
+```
+ -h, --help help for completion
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb completion bash](../kubelb_completion_bash) - Generate the autocompletion script for bash
+* [kubelb completion fish](../kubelb_completion_fish) - Generate the autocompletion script for fish
+* [kubelb completion powershell](../kubelb_completion_powershell) - Generate the autocompletion script for powershell
+* [kubelb completion zsh](../kubelb_completion_zsh) - Generate the autocompletion script for zsh
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md b/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md
new file mode 100644
index 000000000..fa713d587
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md
@@ -0,0 +1,60 @@
++++
+title = "kubelb completion bash"
+date = 2025-08-27T00:00:00+01:00
+weight = 210
++++
+
+## kubelb completion bash
+
+Generate the autocompletion script for bash
+
+### Synopsis
+
+Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(kubelb completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux
+
+ kubelb completion bash > /etc/bash_completion.d/kubelb
+
+#### macOS
+
+ kubelb completion bash > $(brew --prefix)/etc/bash_completion.d/kubelb
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion bash
+```
+
+### Options
+
+```
+ -h, --help help for bash
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md b/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md
new file mode 100644
index 000000000..81cd45c0b
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md
@@ -0,0 +1,51 @@
++++
+title = "kubelb completion fish"
+date = 2025-08-27T00:00:00+01:00
+weight = 220
++++
+
+## kubelb completion fish
+
+Generate the autocompletion script for fish
+
+### Synopsis
+
+Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ kubelb completion fish | source
+
+To load completions for every new session, execute once:
+
+ kubelb completion fish > ~/.config/fish/completions/kubelb.fish
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion fish [flags]
+```
+
+### Options
+
+```
+ -h, --help help for fish
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md b/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md
new file mode 100644
index 000000000..f01116ed0
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md
@@ -0,0 +1,48 @@
++++
+title = "kubelb completion powershell"
+date = 2025-08-27T00:00:00+01:00
+weight = 230
++++
+
+## kubelb completion powershell
+
+Generate the autocompletion script for powershell
+
+### Synopsis
+
+Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ kubelb completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+
+```
+kubelb completion powershell [flags]
+```
+
+### Options
+
+```
+ -h, --help help for powershell
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md b/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md
new file mode 100644
index 000000000..4f8ab1f41
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md
@@ -0,0 +1,62 @@
++++
+title = "kubelb completion zsh"
+date = 2025-08-27T00:00:00+01:00
+weight = 240
++++
+
+## kubelb completion zsh
+
+Generate the autocompletion script for zsh
+
+### Synopsis
+
+Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(kubelb completion zsh)
+
+To load completions for every new session, execute once:
+
+#### Linux
+
+ kubelb completion zsh > "${fpath[1]}/_kubelb"
+
+#### macOS
+
+ kubelb completion zsh > $(brew --prefix)/share/zsh/site-functions/_kubelb
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion zsh [flags]
+```
+
+### Options
+
+```
+ -h, --help help for zsh
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/main/cli/references/commands/kubelb_docs.md b/content/kubelb/main/cli/references/commands/kubelb_docs.md
new file mode 100644
index 000000000..b41a983d4
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_docs.md
@@ -0,0 +1,42 @@
++++
+title = "kubelb docs"
+date = 2025-08-27T00:00:00+01:00
+weight = 40
++++
+
+## kubelb docs
+
+Generate markdown documentation for all commands
+
+### Synopsis
+
+Generate markdown documentation for all CLI commands and their parameters.
+This creates individual markdown files for each command with complete usage information.
+
+```
+kubelb docs [flags]
+```
+
+### Options
+
+```
+ -h, --help help for docs
+ -o, --output string Output directory for generated documentation (default "./docs")
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/main/cli/references/commands/kubelb_expose.md b/content/kubelb/main/cli/references/commands/kubelb_expose.md
new file mode 100644
index 000000000..6b435de09
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_expose.md
@@ -0,0 +1,62 @@
++++
+title = "kubelb expose"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+## kubelb expose
+
+Expose a local port via tunnel
+
+### Synopsis
+
+Expose a local port via secure tunnel with auto-generated name.
+
+This is a convenience command that creates a tunnel with an auto-generated
+name and immediately connects to it.
+
+Examples:
+
+# Expose port 8080 with auto-generated tunnel name
+
+ kubelb expose 8080
+
+# Expose port 3000 with custom hostname
+
+ kubelb expose 3000 --hostname api.example.com
+
+```
+kubelb expose PORT [flags]
+```
+
+### Examples
+
+```
+kubelb expose 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -h, --help help for expose
+ --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain)
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ --wait Wait for tunnel to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md
new file mode 100644
index 000000000..ea12542a3
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md
@@ -0,0 +1,40 @@
++++
+title = "kubelb loadbalancer"
+date = 2025-08-27T00:00:00+01:00
+weight = 60
++++
+
+## kubelb loadbalancer
+
+Manage KubeLB load balancers
+
+### Synopsis
+
+Manage KubeLB load balancer configurations
+
+### Options
+
+```
+ -h, --help help for loadbalancer
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb loadbalancer create](../kubelb_loadbalancer_create) - Create a load balancer
+* [kubelb loadbalancer delete](../kubelb_loadbalancer_delete) - Delete a load balancer
+* [kubelb loadbalancer get](../kubelb_loadbalancer_get) - Get a load balancer
+* [kubelb loadbalancer list](../kubelb_loadbalancer_list) - List load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md
new file mode 100644
index 000000000..e542a0a56
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md
@@ -0,0 +1,69 @@
++++
+title = "kubelb loadbalancer create"
+date = 2025-08-27T00:00:00+01:00
+weight = 70
++++
+
+## kubelb loadbalancer create
+
+Create a load balancer
+
+### Synopsis
+
+Create a new HTTP load balancer with the specified endpoints.
+
+The load balancer supports HTTP routing and hostname-based access.
+
+Examples:
+
+# Create HTTP load balancer with random hostname
+
+ kubelb lb create my-app --endpoints 10.0.1.1:8080
+
+# Create HTTP load balancer with custom hostname
+
+ kubelb lb create my-app --endpoints 10.0.1.1:8080 --hostname app.example.com
+
+# Create HTTP load balancer without a route
+
+ kubelb lb create my-app --endpoints 10.0.1.1:8080 --route=false
+
+```
+kubelb loadbalancer create NAME [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -e, --endpoints string Comma-separated list of IP:port pairs (required)
+ -h, --help help for create
+ --hostname string Custom hostname for the route
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ -p, --protocol string Protocol (http only) (default "http")
+ --route Create a route for HTTP traffic (default true)
+ --type string LoadBalancer type (ClusterIP, LoadBalancer), defaults to ClusterIP (default "ClusterIP")
+ --wait Wait for load balancer to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md
new file mode 100644
index 000000000..26535b8fa
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md
@@ -0,0 +1,54 @@
++++
+title = "kubelb loadbalancer delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 90
++++
+
+## kubelb loadbalancer delete
+
+Delete a load balancer
+
+### Synopsis
+
+Delete a load balancer by ID.
+
+This command will:
+- Check if the load balancer was created by the CLI
+- Display a warning if it wasn't created by the CLI
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the load balancer resource
+
+
+```
+kubelb loadbalancer delete ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer delete nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -f, --force Force deletion without confirmation
+ -h, --help help for delete
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md
new file mode 100644
index 000000000..c8259ea3f
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md
@@ -0,0 +1,46 @@
++++
+title = "kubelb loadbalancer get"
+date = 2025-08-27T00:00:00+01:00
+weight = 80
++++
+
+## kubelb loadbalancer get
+
+Get a load balancer
+
+### Synopsis
+
+Retrieve a load balancer by ID and output its complete YAML specification.
+
+```
+kubelb loadbalancer get ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer get nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for get
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md
new file mode 100644
index 000000000..385ca74e8
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb loadbalancer list"
+date = 2025-08-27T00:00:00+01:00
+weight = 85
++++
+
+## kubelb loadbalancer list
+
+List load balancers
+
+### Synopsis
+
+List all load balancers for the tenant.
+
+
+```
+kubelb loadbalancer list [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_status.md b/content/kubelb/main/cli/references/commands/kubelb_status.md
new file mode 100644
index 000000000..b1bebd066
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_status.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb status"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+## kubelb status
+
+Display current status of KubeLB
+
+### Synopsis
+
+Display the current status of KubeLB including version information, configuration, and state
+
+```
+kubelb status [flags]
+```
+
+### Examples
+
+```
+ # Display status for current tenant
+ kubelb status
+```
+
+### Options
+
+```
+ -h, --help help for status
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel.md
new file mode 100644
index 000000000..89eb79aec
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel.md
@@ -0,0 +1,41 @@
++++
+title = "kubelb tunnel"
+date = 2025-08-27T00:00:00+01:00
+weight = 100
++++
+
+## kubelb tunnel
+
+Manage secure tunnels to expose local services
+
+### Synopsis
+
+Create and manage secure tunnels to expose local services through the KubeLB infrastructure
+
+### Options
+
+```
+ -h, --help help for tunnel
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb tunnel connect](../kubelb_tunnel_connect) - Connect to an existing tunnel
+* [kubelb tunnel create](../kubelb_tunnel_create) - Create a tunnel
+* [kubelb tunnel delete](../kubelb_tunnel_delete) - Delete a tunnel
+* [kubelb tunnel get](../kubelb_tunnel_get) - Get a tunnel
+* [kubelb tunnel list](../kubelb_tunnel_list) - List tunnels
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md
new file mode 100644
index 000000000..7427539ac
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md
@@ -0,0 +1,50 @@
++++
+title = "kubelb tunnel connect"
+date = 2025-08-27T00:00:00+01:00
+weight = 115
++++
+
+## kubelb tunnel connect
+
+Connect to an existing tunnel
+
+### Synopsis
+
+Connect to an existing tunnel to start forwarding traffic.
+
+This command establishes a secure connection to the tunnel and forwards
+traffic from the tunnel to your local service.
+
+```
+kubelb tunnel connect NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel connect my-app --port 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -h, --help help for connect
+ -p, --port int Local port to forward to (required)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md
new file mode 100644
index 000000000..bd164bdce
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md
@@ -0,0 +1,64 @@
++++
+title = "kubelb tunnel create"
+date = 2025-08-27T00:00:00+01:00
+weight = 110
++++
+
+## kubelb tunnel create
+
+Create a tunnel
+
+### Synopsis
+
+Create a new secure tunnel to expose a local service.
+
+The tunnel provides secure access to your local service through the KubeLB infrastructure.
+
+Examples:
+ # Create tunnel for local app on port 8080
+ kubelb tunnel create my-app --port 8080
+
+ # Create tunnel with custom hostname
+ kubelb tunnel create my-app --port 8080 --hostname app.example.com
+
+ # Create tunnel and connect immediately
+ kubelb tunnel create my-app --port 8080 --connect
+
+
+```
+kubelb tunnel create NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel create my-app --port 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ --connect Connect to tunnel after creation
+ -h, --help help for create
+ --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain)
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ -p, --port int Local port to tunnel (required)
+ --wait Wait for tunnel to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md
new file mode 100644
index 000000000..e9a9cee37
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md
@@ -0,0 +1,53 @@
++++
+title = "kubelb tunnel delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 130
++++
+
+## kubelb tunnel delete
+
+Delete a tunnel
+
+### Synopsis
+
+Delete a tunnel by name.
+
+This command will:
+- Check if the tunnel exists
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the tunnel resource
+
+
+```
+kubelb tunnel delete NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel delete my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -f, --force Force deletion without confirmation
+ -h, --help help for delete
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md
new file mode 100644
index 000000000..662ac2f3f
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel get"
+date = 2025-08-27T00:00:00+01:00
+weight = 120
++++
+
+## kubelb tunnel get
+
+Get a tunnel
+
+### Synopsis
+
+Retrieve a tunnel by name and output its complete YAML specification.
+
+
+```
+kubelb tunnel get NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel get my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for get
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md
new file mode 100644
index 000000000..e46291576
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel list"
+date = 2025-08-27T00:00:00+01:00
+weight = 125
++++
+
+## kubelb tunnel list
+
+List tunnels
+
+### Synopsis
+
+List all tunnels for the tenant.
+
+
+```
+kubelb tunnel list [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_version.md b/content/kubelb/main/cli/references/commands/kubelb_version.md
new file mode 100644
index 000000000..3a5a117fa
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_version.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb version"
+date = 2025-08-27T00:00:00+01:00
+weight = 50
++++
+
+## kubelb version
+
+Print the version information
+
+### Synopsis
+
+Print the version information of the KubeLB CLI
+
+```
+kubelb version [flags]
+```
+
+### Examples
+
+```
+kubelb version
+```
+
+### Options
+
+```
+ -h, --help help for version
+ --short Print only the version in short format
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/main/cli/release-notes/_index.en.md b/content/kubelb/main/cli/release-notes/_index.en.md
new file mode 100644
index 000000000..4d95a6cab
--- /dev/null
+++ b/content/kubelb/main/cli/release-notes/_index.en.md
@@ -0,0 +1,69 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+{{% notice warning %}}
+This document is a work in progress and might not be correct or up to date.
+{{% /notice %}}
+
+## Kubermatic KubeLB v1.x.x
+
+- [v1.x.x](#v1xx)
+ - [Community Edition](#community-edition)
+ - [Enterprise Edition](#enterprise-edition)
+
+## v1.x.x
+
+**GitHub release: [v1.x.x](https://github.com/kubermatic/kubelb/releases/tag/v1.x.x)**
+
+### Highlights
+
+#### Community Edition(CE)
+
+_content_
+
+#### Enterprise Edition(EE)
+
+_content_
+
+### Community Edition
+
+#### Urgent Upgrade Notes
+
+_content_
+
+#### Deprecation
+
+_content_
+
+#### API Changes
+
+_content_
+
+#### Features
+
+_content_
+
+#### Design
+
+_content_
+
+#### Bug or Regression
+
+_content_
+
+#### Other (Cleanup, Flake, or Chore)
+
+_content_
+
+**Full Changelog**:
+
+### Enterprise Edition
+
+**Enterprise Edition includes everything from Community Edition and more. The release notes below are for changes specific to just the Enterprise Edition.**
+
+#### EE Features
+
+_content_
diff --git a/content/kubelb/main/cli/tunneling/_index.en.md b/content/kubelb/main/cli/tunneling/_index.en.md
new file mode 100644
index 000000000..329c1ff2d
--- /dev/null
+++ b/content/kubelb/main/cli/tunneling/_index.en.md
@@ -0,0 +1,127 @@
++++
+title = "Tunneling"
+date = 2025-08-27T00:00:00+01:00
+weight = 10
+enterprise = true
++++
+
+Tunneling allows users to tunnel applications running locally on their workstations or inside VMs and expose them over the internet without worrying about firewalls, NAT, DNS, and certificate issues. It is a great way to expose local services to the internet without the complexity of setting up a load balancer and a DNS record yourself.
+
+KubeLB CLI exposes the workload over a secure tunnel with TLS certificates and a DNS record.
+
+These tunnels are designed to be reusable and hence have their own dedicated API type in KubeLB, the `Tunnel` resource. Once a tunnel is created, it is registered with the KubeLB management cluster and can be connected to using the `kubelb tunnel connect` command.
+
+## Tunnels
+
+### Tunnel Configuration
+
+To enable tunneling, you need to configure the KubeLB management cluster to expose the connection management API. The `values.yaml` file can be modified like this:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ debug: true
+ envoyProxy:
+ # -- Topology defines the deployment topology for Envoy Proxy. Valid values are: shared, dedicated, and global.
+ topology: shared
+ # -- The number of replicas for the Envoy Proxy deployment.
+ replicas: 1
+ # -- Propagate all annotations from the LB resource to the LB service.
+ propagateAllAnnotations: true
+
+ # Tunnel configuration
+ tunnel:
+ enabled: true
+ connectionManager:
+ httpRoute:
+ enabled: true
+ domain: "connection-manager.example.com"
+ gatewayName: "default"
+ gatewayNamespace: "kubelb"
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: "*.apps.example.com,connection-manager.example.com"
+ external-dns.alpha.kubernetes.io/ttl: "300"
+ cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+ ingress:
+ enabled: false
+ className: "nginx"
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+ external-dns.alpha.kubernetes.io/hostname: connection-manager-ingress.example.com
+ external-dns.alpha.kubernetes.io/ttl: "10"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+ hosts:
+ - host: connection-manager-ingress.example.com
+ paths:
+ - path: /tunnel
+ pathType: Prefix
+ - path: /health
+ pathType: Prefix
+ tls:
+ - secretName: connection-manager-tls
+ hosts:
+ - connection-manager-ingress.example.com
+```
+
+You can use either an Ingress or an HTTPRoute to expose the connection management API; Gateway API is the preferred option. In this example `*.apps.example.com` is used as the wildcard domain for these tunnels, but you can use any other domain you want.
+
+Afterwards, you need to configure the connection manager URL at the Config or Tenant level:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ ingress:
+ class: "nginx"
+ gatewayAPI:
+ class: "eg"
+ loadBalancer:
+ limit: 15
+ certificates:
+ defaultClusterIssuer: letsencrypt-staging-dns
+ tunnel:
+ connectionManagerURL: "https://connection-manager.example.com/"
+```
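+
+For a per-tenant override, a similar field can be set on the `Tenant` resource; a minimal sketch, assuming the Tenant spec exposes the same `tunnel.connectionManagerURL` field as the Config:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: mytenant
+spec:
+  tunnel:
+    connectionManagerURL: "https://connection-manager.example.com/"
+```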
+
+**NOTE: Apart from this, the Gateway or Ingress should be configured to manage DNS for the tunnel. Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation for more details.**
+
+### Provisioning Tunnels
+
+Tunnels are created using either the `kubelb expose` command or the `kubelb tunnel create` command.
+
+```bash
+kubelb expose 1313
+```
+
+This will create a tunnel with a generated hostname and forward traffic to port `1313` on the local machine. The ingress point for this traffic is KubeLB's management cluster, so the traffic is secure and encrypted.
+
+An alternative way to create a tunnel is to use the `kubelb tunnel create` command.
+
+```bash
+kubelb tunnel create my-app --port 1313
+```
+
+This will create a tunnel with a generated hostname that can be used through the `kubelb tunnel connect` command.
+
+```bash
+kubelb tunnel connect my-app --port 1313
+```
+
+This will connect to the tunnel and forward traffic to port `1313` on the local machine.
+
+## Further actions
+
+Further actions include:
+
+- Deleting the tunnel
+- Getting the tunnel details
+- Listing all the tunnels
+
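+These map to the corresponding subcommands, for example:
+
+```bash
+kubelb tunnel get my-app
+kubelb tunnel list
+kubelb tunnel delete my-app
+```
+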
+For more information, please refer to the [Tunnel API](../../references/api/tunnel/) documentation.
diff --git a/content/kubelb/main/compatibility-matrix/_index.en.md b/content/kubelb/main/compatibility-matrix/_index.en.md
index f5356090b..11195dcff 100644
--- a/content/kubelb/main/compatibility-matrix/_index.en.md
+++ b/content/kubelb/main/compatibility-matrix/_index.en.md
@@ -12,6 +12,7 @@ We are only testing our software with specific versions of the components, we ar
| KubeLB | Kubermatic Kubernetes Platform | Gateway API | Envoy Gateway | NGINX Ingress | Kubernetes |
|--------|-------------------------------|-------------|---------------|-------------------------|------------|
+| v1.2 | v2.27, v2.28 | v1.3.0 | v1.3.0 | v1.10.0+ | v1.27+ |
| v1.1 | v2.26, v2.27 | v1.1.0 | v1.1.0 | v1.10.0+ | v1.27+ |
| v1.0 | v2.24, v2.25 | Not Supported| Not Supported | v1.10.0+ | v1.27+ |
diff --git a/content/kubelb/main/installation/management-cluster/_index.en.md b/content/kubelb/main/installation/management-cluster/_index.en.md
index b9fb323db..6a37e7052 100644
--- a/content/kubelb/main/installation/management-cluster/_index.en.md
+++ b/content/kubelb/main/installation/management-cluster/_index.en.md
@@ -32,7 +32,9 @@ imagePullSecrets:
### Install the helm chart
```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager-ee/crds/
## Create and update values.yaml with the required values.
helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f kubelb-manager-ee/values.yaml --create-namespace
```
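+
+As a quick sanity check after the installation, you can verify that the controller is running and that the default `Config` resource was generated (assuming `kubelb.skipConfigGeneration` was left at its default):
+
+```sh
+kubectl --namespace kubelb get pods
+# Plural resource name assumed; adjust if the CRD differs.
+kubectl --namespace kubelb get configs.kubelb.k8c.io
+```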
@@ -52,15 +54,16 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/kubermatic/kubelb-manager-ee"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | |
+| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. |
| kubelb.debug | bool | `true` | |
| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
| kubelb.enableLeaderElection | bool | `true` | |
| kubelb.enableTenantMigration | bool | `true` | |
| kubelb.envoyProxy.affinity | object | `{}` | |
| kubelb.envoyProxy.nodeSelector | object | `{}` | |
-| kubelb.envoyProxy.replicas | int | `3` | The number of replicas for the Envoy Proxy deployment. |
+| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. |
| kubelb.envoyProxy.resources | object | `{}` | |
| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. |
| kubelb.envoyProxy.tolerations | list | `[]` | |
@@ -69,6 +72,31 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku
| kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. |
| kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. |
| kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. |
+| kubelb.tunnel.connectionManager.affinity | object | `{}` | |
+| kubelb.tunnel.connectionManager.healthCheck.enabled | bool | `true` | |
+| kubelb.tunnel.connectionManager.healthCheck.livenessInitialDelay | int | `30` | |
+| kubelb.tunnel.connectionManager.healthCheck.readinessInitialDelay | int | `10` | |
+| kubelb.tunnel.connectionManager.httpAddr | string | `":8080"` | Server addresses |
+| kubelb.tunnel.connectionManager.httpRoute.annotations | object | `{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}"}` | Annotations for HTTPRoute |
+| kubelb.tunnel.connectionManager.httpRoute.domain | string | `"connection-manager.${DOMAIN}"` | Domain for the HTTPRoute NOTE: Replace ${DOMAIN} with your domain name. |
+| kubelb.tunnel.connectionManager.httpRoute.enabled | bool | `false` | |
+| kubelb.tunnel.connectionManager.httpRoute.gatewayName | string | `"gateway"` | Gateway name to attach to |
+| kubelb.tunnel.connectionManager.httpRoute.gatewayNamespace | string | `""` | Gateway namespace |
+| kubelb.tunnel.connectionManager.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/kubermatic/kubelb-connection-manager-ee","tag":""}` | Connection manager image configuration |
+| kubelb.tunnel.connectionManager.ingress | object | `{"annotations":{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}","nginx.ingress.kubernetes.io/backend-protocol":"HTTP","nginx.ingress.kubernetes.io/proxy-read-timeout":"3600","nginx.ingress.kubernetes.io/proxy-send-timeout":"3600"},"className":"nginx","enabled":false,"hosts":[{"host":"connection-manager.${DOMAIN}","paths":[{"path":"/tunnel","pathType":"Prefix"},{"path":"/health","pathType":"Prefix"}]}],"tls":[{"hosts":["connection-manager.${DOMAIN}"],"secretName":"connection-manager-tls"}]}` | Ingress configuration for external HTTP/2 access |
+| kubelb.tunnel.connectionManager.nodeSelector | object | `{}` | |
+| kubelb.tunnel.connectionManager.podAnnotations | object | `{}` | Pod configuration |
+| kubelb.tunnel.connectionManager.podLabels | object | `{}` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.fsGroup | int | `65534` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.runAsNonRoot | bool | `true` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.runAsUser | int | `65534` | |
+| kubelb.tunnel.connectionManager.replicaCount | int | `1` | Number of connection manager replicas |
+| kubelb.tunnel.connectionManager.requestTimeout | string | `"30s"` | |
+| kubelb.tunnel.connectionManager.resources | object | `{"limits":{"cpu":"500m","memory":"256Mi"},"requests":{"cpu":"250m","memory":"128Mi"}}` | Resource limits |
+| kubelb.tunnel.connectionManager.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":65534}` | Security context |
+| kubelb.tunnel.connectionManager.service | object | `{"httpPort":8080,"type":"ClusterIP"}` | Service configuration |
+| kubelb.tunnel.connectionManager.tolerations | list | `[]` | |
+| kubelb.tunnel.enabled | bool | `false` | Enable tunnel functionality |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
@@ -102,7 +130,9 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku
### Install the helm chart
```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager/crds/
## Create and update values.yaml with the required values.
helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubelb-manager/values.yaml --create-namespace
```
@@ -120,15 +150,16 @@ helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubel
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/kubermatic/kubelb-manager"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
| imagePullSecrets | list | `[]` | |
+| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. |
| kubelb.debug | bool | `true` | |
| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
| kubelb.enableLeaderElection | bool | `true` | |
| kubelb.enableTenantMigration | bool | `true` | |
| kubelb.envoyProxy.affinity | object | `{}` | |
| kubelb.envoyProxy.nodeSelector | object | `{}` | |
-| kubelb.envoyProxy.replicas | int | `3` | The number of replicas for the Envoy Proxy deployment. |
+| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. |
| kubelb.envoyProxy.resources | object | `{}` | |
| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. |
| kubelb.envoyProxy.tolerations | list | `[]` | |
@@ -173,17 +204,98 @@ helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubel
The examples and tools shared below are for demonstration purposes; you can use any other tools or configurations as per your requirements.
{{% /notice %}}
-Management cluster is the place where all the components required for Layer 4 and Layer 7 load balancing are installed. The management cluster is responsible for managing the tenant clusters and their load balancing requests/configurations.
+The management cluster acts as the dataplane and central control plane for all your load balancing configurations. It is where all the components required for Layer 4 and Layer 7 load balancing, AI Gateways, MCP Gateways, Agent2Agent Gateways, API Gateways, and more are deployed. The management cluster is multi-tenant by design, which makes it a perfect fit for managing a fleet of clusters in a scalable, robust, and secure way.
-### Layer 4 Load Balancing
+KubeLB has introduced an addons chart to simplify the installation of the required components for the management cluster. The chart is already part of the KubeLB manager chart and can be installed by setting `kubelb-addons.enabled` to `true` in the values.yaml:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ debug: true
+
+## Addon configuration
+kubelb-addons:
+ enabled: true
+
+ gatewayClass:
+ create: true
+
+ # Ingress Nginx
+ ingress-nginx:
+ enabled: false
+ controller:
+ service:
+ externalTrafficPolicy: Local
+
+ # Envoy Gateway
+ envoy-gateway:
+ enabled: true
+
+ # Cert Manager
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: true
+
+ # External DNS
+ external-dns:
+ domainFilters:
+ - example.com
+ extraVolumes:
+ - name: credentials
+ secret:
+ secretName: route53-credentials
+ extraVolumeMounts:
+ - name: credentials
+ mountPath: /.aws
+ readOnly: true
+ env:
+ - name: AWS_SHARED_CREDENTIALS_FILE
+ value: /.aws/credentials
+ txtOwnerId: kubelb-example-aws
+ registry: txt
+ provider: aws
+ policy: sync
+ sources:
+ - service
+ - ingress
+ - gateway-httproute
+ - gateway-grpcroute
+ - gateway-tlsroute
+ - gateway-tcproute
+ - gateway-udproute
+
+ ## AI and Agent2Agent Gateways Integration
+ # KGateway CRDs
+ kgateway-crds:
+ enabled: true
+
+ # KGateway
+ kgateway:
+ enabled: true
+ gateway:
+ aiExtension:
+ enabled: true
+ agentgateway:
+ enabled: true
+```
+
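+The snippet above belongs in the manager chart's values; as a sketch, applying it reuses the install command from earlier (assuming the values were saved to `values.yaml`):
+
+```sh
+helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f values.yaml --create-namespace
+```
+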
+### TCP/UDP Load Balancing (Layer 4)
Refer to [Layer 4 Load Balancing Setup]({{< relref "../../tutorials/loadbalancer#setup" >}}) for more details.
-### Layer 7 Load Balancing
+### Application Layer Load Balancing (Layer 7)
+
+For application layer load balancing, **KubeLB supports both Ingress and Gateway API resources**.
-For Layer 7 load balancing, kubeLB supports both Ingress and Gateway API resources.
+Our default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Most current and upcoming KubeLB features prioritize Gateway API over Ingress, and Envoy Gateway is the implementation that we actively support, test, and base our features on.
-Our default recommendation is to use Gateway API and use [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. The features specific to Gateway API that will be built and consumed in KubeLB will be based on Envoy Gateway. While KubeLB supports integration with any Ingress or Gateway API implementation, the only limitation is that we only support native Kubernetes APIs i.e. Ingress and Gateway APIs. Provider specific APIs are not supported by KubeLB and will be completely ignored. Also, we are only testing KubeLB with Envoy Gateway and Nginx Ingress, we can't guarantee the compatibility with other Gateway API or Ingress implementations.
+While KubeLB supports integration with any Ingress or Gateway API implementation, the only limitation is that we only support the native Kubernetes APIs, i.e. Ingress and the Gateway APIs. Provider-specific APIs are not supported by KubeLB and will be completely ignored. Also, we only test KubeLB with Envoy Gateway and Nginx Ingress, so we can't guarantee compatibility with other Gateway API or Ingress implementations.
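+
+For reference, a minimal sketch of an Envoy Gateway setup in the management cluster is shown below; the `eg` class and `default` Gateway names match the examples above, while the listener layout is only illustrative:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: GatewayClass
+metadata:
+  name: eg
+spec:
+  controllerName: gateway.envoyproxy.io/gatewayclass-controller
+---
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  gatewayClassName: eg
+  listeners:
+    # Illustrative HTTP listener; add HTTPS listeners with
+    # cert-manager managed certificates as needed.
+    - name: http
+      protocol: HTTP
+      port: 80
+```
+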
#### Ingress
diff --git a/content/kubelb/main/installation/tenant-cluster/_index.en.md b/content/kubelb/main/installation/tenant-cluster/_index.en.md
index 3e935b3f4..8b9971cb8 100644
--- a/content/kubelb/main/installation/tenant-cluster/_index.en.md
+++ b/content/kubelb/main/installation/tenant-cluster/_index.en.md
@@ -82,7 +82,15 @@ kubelb:
## Installation for KubeLB CCM
-{{% notice warning %}} In case if Gateway API needs to be enabled for the cluster. Please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. This is required otherwise due to missing CRDs, kubelb will not be able to start. {{% /notice %}}
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, please set the following fields in the `values.yaml`. This is required because, without the CRDs installed, kubelb will not be able to start.
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ installGatewayAPICRDs: true
+```
+
+{{% /notice %}}
{{< tabs name="KubeLB CCM" >}}
{{% tab name="Enterprise Edition" %}}
@@ -105,7 +113,9 @@ kubelb:
### Install the helm chart
```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm-ee/crds/
## Create and update values.yaml with the required values.
helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm-ee/values.yaml --create-namespace
```
@@ -125,7 +135,7 @@ helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/kubermatic/kubelb-ccm-ee"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | |
| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. |
@@ -178,7 +188,9 @@ helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm
### Install the helm chart
```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm/crds/
## Create and update values.yaml with the required values.
helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/values.yaml --create-namespace
```
@@ -198,7 +210,7 @@ helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/va
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/kubermatic/kubelb-ccm"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
| imagePullSecrets | list | `[]` | |
| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. |
diff --git a/content/kubelb/main/references/ce/_index.en.md b/content/kubelb/main/references/ce/_index.en.md
index f0e3dcd5c..3c2159c27 100644
--- a/content/kubelb/main/references/ce/_index.en.md
+++ b/content/kubelb/main/references/ce/_index.en.md
@@ -5,6 +5,8 @@ date = 2024-03-06T12:00:00+02:00
weight = 60
+++
+**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ce/kubelb.k8c.io/v1alpha1)**
+
## Packages
- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1)
@@ -27,6 +29,8 @@ Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1
- [SyncSecretList](#syncsecretlist)
- [Tenant](#tenant)
- [TenantList](#tenantlist)
+- [TenantState](#tenantstate)
+- [TenantStateList](#tenantstatelist)
#### Addresses
@@ -75,6 +79,32 @@ _Appears in:_
- [Addresses](#addresses)
+#### AnnotatedResource
+
+_Underlying type:_ _string_
+
+_Validation:_
+
+- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute]
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description |
+| --- | --- |
+| `all` | |
+| `service` | |
+| `ingress` | |
+| `gateway` | |
+| `httproute` | |
+| `grpcroute` | |
+| `tcproute` | |
+| `udproute` | |
+| `tlsroute` | |
+
#### AnnotationSettings
_Appears in:_
@@ -84,8 +114,30 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+
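+As an illustration of how `defaultAnnotations` composes with the special `all` key, a hedged sketch follows (annotation keys and values are placeholders, not KubeLB defaults):
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  defaultAnnotations:
+    # Applied to every load balancing resource.
+    all:
+      team: platform
+    # Applied only to Services; assumed to be combined with `all`.
+    service:
+      metallb.universe.tf/address-pool: default
+```
+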
+#### Annotations
+
+_Underlying type:_ _object_
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+#### CertificatesSettings
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is only used for load balancer hostname. | | |
#### Config
@@ -102,6 +154,21 @@ _Appears in:_
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ConfigSpec](#configspec)_ | | | |
+#### ConfigDNSSettings
+
+ConfigDNSSettings defines the global settings for DNS management and automation.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
+
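+A hedged example of the global DNS settings on the `Config` resource (the domain and flag values are illustrative):
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  dns:
+    # Base domain for generated wildcard records (format assumed).
+    wildcardDomain: apps.example.com
+    allowExplicitHostnames: true
+    useDNSAnnotations: true
+    useCertificateAnnotations: true
+```
+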
#### ConfigList
ConfigList contains a list of Config
@@ -123,12 +190,30 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | |
| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | |
+| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
+
+#### DNSSettings
+
+DNSSettings defines the settings for DNS management and automation.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | |
#### EndpointAddress
@@ -203,8 +288,21 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | |
+| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname. | | |
| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | |
+#### HostnameStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | |
+| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | |
+| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | |
+
#### IngressSettings
IngressSettings defines the settings for the ingress.
@@ -325,8 +423,19 @@ _Appears in:_
| --- | --- | --- | --- |
| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
|
| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
only needed for layer 4 | | |
+| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route(ingress or httproute) for the service, and expose it with TLS on the given hostname. Currently, only HTTP protocol is supported | | |
| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | |
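+
+To illustrate the new `hostname` field, a hedged `LoadBalancer` sketch follows; the endpoint addresses and tenant namespace are placeholders, and the field shapes are abbreviated from the tables above:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: LoadBalancer
+metadata:
+  name: my-app
+  namespace: tenant-my-tenant
+spec:
+  endpoints:
+    - addresses:
+        - ip: 10.0.10.11
+      ports:
+        - port: 31234
+          protocol: TCP
+  ports:
+    - port: 80
+      protocol: TCP
+  # KubeLB creates a route (Ingress or HTTPRoute) with TLS for this hostname.
+  hostname: app.example.com
+```
+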
+#### LoadBalancerState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+
#### LoadBalancerStatus
LoadBalancerStatus defines the observed state of LoadBalancer
@@ -339,6 +448,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | |
| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | |
+| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | |
#### ResourceState
@@ -458,7 +568,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | |
| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | |
-| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in
_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
+| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in
_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
| `port` _integer_ | The port that will be exposed by this service. | | |
| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | |
| `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | |
@@ -540,11 +650,64 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[DNSSettings](#dnssettings)_ | | | |
+| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
+
+#### TenantState
+
+TenantState is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantStateList](#tenantstatelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantState` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | |
+| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | |
+
+#### TenantStateList
+
+TenantStateList contains a list of TenantState
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantStateList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[TenantState](#tenantstate) array_ | | | |
+
+#### TenantStateSpec
+
+TenantStateSpec defines the desired state of TenantState.
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+#### TenantStateStatus
+
+TenantStateStatus defines the observed state of TenantState
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `version` _[Version](#version)_ | | | |
+| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | |
#### TenantStatus
@@ -570,3 +733,16 @@ _Appears in:_
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | |
| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | |
+
+#### Version
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `gitVersion` _string_ | | | |
+| `gitCommit` _string_ | | | |
+| `buildDate` _string_ | | | |
+| `edition` _string_ | | | |
diff --git a/content/kubelb/main/references/ee/_index.en.md b/content/kubelb/main/references/ee/_index.en.md
index 852720608..1a71a06e4 100644
--- a/content/kubelb/main/references/ee/_index.en.md
+++ b/content/kubelb/main/references/ee/_index.en.md
@@ -6,6 +6,8 @@ weight = 50
enterprise = true
+++
+**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ee/kubelb.k8c.io/v1alpha1)**
+
## Packages
- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1)
@@ -28,6 +30,10 @@ Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1
- [SyncSecretList](#syncsecretlist)
- [Tenant](#tenant)
- [TenantList](#tenantlist)
+- [TenantState](#tenantstate)
+- [TenantStateList](#tenantstatelist)
+- [Tunnel](#tunnel)
+- [TunnelList](#tunnellist)
#### Addresses
@@ -76,6 +82,32 @@ _Appears in:_
- [Addresses](#addresses)
+#### AnnotatedResource
+
+_Underlying type:_ _string_
+
+_Validation:_
+
+- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute]
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description |
+| --- | --- |
+| `all` | |
+| `service` | |
+| `ingress` | |
+| `gateway` | |
+| `httproute` | |
+| `grpcroute` | |
+| `tcproute` | |
+| `udproute` | |
+| `tlsroute` | |
+
#### AnnotationSettings
_Appears in:_
@@ -85,8 +117,19 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+
+#### Annotations
+
+_Underlying type:_ _object_
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
#### CertificatesSettings
@@ -100,7 +143,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation for a tenant. | | |
| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | |
-| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
+| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
#### Config
@@ -132,7 +175,7 @@ _Appears in:_
#### ConfigDNSSettings
-ConfigDNSSettings defines the global settings for the DNS.
+ConfigDNSSettings defines the global settings for DNS management and automation.
_Appears in:_
@@ -141,6 +184,10 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation globally for all the tenants. | | |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
#### ConfigList
@@ -163,18 +210,20 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | |
| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | |
| `certificates` _[ConfigCertificatesSettings](#configcertificatessettings)_ | | | |
+| `tunnel` _[TunnelSettings](#tunnelsettings)_ | | | |
#### DNSSettings
-DNSSettings defines the settings for the DNS.
+DNSSettings defines the tenant specific settings for DNS management and automation.
_Appears in:_
@@ -183,7 +232,11 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation for a tenant. | | |
-| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
+| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
#### EndpointAddress
@@ -259,6 +312,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | |
| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | |
+| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname and tunneling. | | |
| `gateway` _[GatewaySettings](#gatewaysettings)_ | | | |
| `disableHTTPRoute` _boolean_ | | | |
| `disableGRPCRoute` _boolean_ | | | |
@@ -292,6 +346,18 @@ _Appears in:_
| --- | --- | --- | --- |
| `limit` _integer_ | Limit is the maximum number of gateways to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | |
+#### HostnameStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | |
+| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | |
+| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | |
+
#### IngressSettings
IngressSettings defines the settings for the ingress.
@@ -413,8 +479,20 @@ _Appears in:_
| --- | --- | --- | --- |
| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
|
| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
only needed for layer 4 | | |
+| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route(ingress or httproute) for the service, and expose it with TLS on the given hostname. | | |
| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | |
+#### LoadBalancerState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+| `limit` _integer_ | | | |
+
#### LoadBalancerStatus
LoadBalancerStatus defines the observed state of LoadBalancer
@@ -427,6 +505,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | |
| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | |
+| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | |
#### ResourceState
@@ -546,7 +625,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | |
| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | |
-| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in
_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
+| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in
_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
| `port` _integer_ | The port that will be exposed by this service. | | |
| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | |
| `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | |
@@ -628,14 +707,68 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | |
-| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations (key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
| `dns` _[DNSSettings](#dnssettings)_ | | | |
| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
-| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, applies on all the components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | |
+| `tunnel` _[TenantTunnelSettings](#tenanttunnelsettings)_ | | | |
+| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, applies on all the components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | |
+
+#### TenantState
+
+TenantState is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantStateList](#tenantstatelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantState` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | |
+| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | |
+
+#### TenantStateList
+
+TenantStateList contains a list of TenantState
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantStateList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[TenantState](#tenantstate) array_ | | | |
+
+#### TenantStateSpec
+
+TenantStateSpec defines the desired state of TenantState.
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+#### TenantStateStatus
+
+TenantStateStatus defines the observed state of TenantState
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `version` _[Version](#version)_ | | | |
+| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `tunnel` _[TunnelState](#tunnelstate)_ | | | |
+| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | |
+| `allowedDomains` _string array_ | | | |
#### TenantStatus
@@ -645,6 +778,131 @@ _Appears in:_
- [Tenant](#tenant)
+#### TenantTunnelSettings
+
+TenantTunnelSettings defines the settings for the tunnel.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, it will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable tunneling for a tenant. | | |
+
+#### Tunnel
+
+Tunnel is the Schema for the tunnels API
+
+_Appears in:_
+
+- [TunnelList](#tunnellist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Tunnel` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TunnelSpec](#tunnelspec)_ | | | |
+| `status` _[TunnelStatus](#tunnelstatus)_ | | | |
+
+#### TunnelList
+
+TunnelList contains a list of Tunnel
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TunnelList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Tunnel](#tunnel) array_ | | | |
+
+#### TunnelPhase
+
+_Underlying type:_ _string_
+
+TunnelPhase represents the phase of tunnel
+
+_Appears in:_
+
+- [TunnelStatus](#tunnelstatus)
+
+| Field | Description |
+| --- | --- |
+| `Pending` | TunnelPhasePending means the tunnel is being provisioned |
+| `Ready` | TunnelPhaseReady means the tunnel is ready to accept connections |
+| `Failed` | TunnelPhaseFailed means the tunnel provisioning failed |
+| `Terminating` | TunnelPhaseTerminating means the tunnel is being terminated |
+
+#### TunnelResources
+
+TunnelResources contains references to resources created for the tunnel
+
+_Appears in:_
+
+- [TunnelStatus](#tunnelstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `serviceName` _string_ | ServiceName is the name of the service created for this tunnel | | |
+| `routeRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | RouteRef is a reference to the route (HTTPRoute or Ingress) created for this tunnel | | |
+
+#### TunnelSettings
+
+TunnelSettings defines the global settings for Tunnel resources.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, it will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | |
+| `connectionManagerURL` _string_ | ConnectionManagerURL is the URL of the connection manager service that handles tunnel connections.
This is required if tunneling is enabled.
For example: "" | | |
+| `disable` _boolean_ | Disable indicates whether tunneling feature should be disabled. | | |
+
+#### TunnelSpec
+
+TunnelSpec defines the desired state of Tunnel
+
+_Appears in:_
+
+- [Tunnel](#tunnel)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname is the hostname of the tunnel. If not specified, the hostname will be generated by KubeLB. | | |
+
+#### TunnelState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+| `limit` _integer_ | | | |
+| `connectionManagerURL` _string_ | | | |
+
+#### TunnelStatus
+
+TunnelStatus defines the observed state of Tunnel
+
+_Appears in:_
+
+- [Tunnel](#tunnel)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the actual hostname assigned to the tunnel | | |
+| `url` _string_ | URL contains the full URL to access the tunnel | | |
+| `connectionManagerURL` _string_ | ConnectionManagerURL contains the URL that clients should use to establish tunnel connections | | |
+| `phase` _[TunnelPhase](#tunnelphase)_ | Phase represents the current phase of the tunnel | | |
+| `resources` _[TunnelResources](#tunnelresources)_ | Resources contains references to the resources created for this tunnel | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | Conditions represents the current conditions of the tunnel | | |
+
#### UpstreamService
UpstreamService is a wrapper over the corev1.Service object.
@@ -661,3 +919,16 @@ _Appears in:_
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | |
| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | |
+
+#### Version
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `gitVersion` _string_ | | | |
+| `gitCommit` _string_ | | | |
+| `buildDate` _string_ | | | |
+| `edition` _string_ | | | |
diff --git a/content/kubelb/main/tutorials/aigateway/_index.en.md b/content/kubelb/main/tutorials/aigateway/_index.en.md
new file mode 100644
index 000000000..148cb8985
--- /dev/null
+++ b/content/kubelb/main/tutorials/aigateway/_index.en.md
@@ -0,0 +1,239 @@
++++
+title = "AI & MCP Gateway"
+linkTitle = "AI & MCP Gateway"
+date = 2023-10-27T10:07:15+02:00
+weight = 7
++++
+
+This tutorial will guide you through setting up an AI and MCP Gateway using KubeLB with KGateway to securely manage Large Language Model (LLM) requests and MCP tool servers.
+
+## Overview
+
+KubeLB leverages [KGateway](https://kgateway.dev/), a CNCF Sandbox project (accepted March 2025), to provide advanced AI Gateway capabilities. KGateway is built on Envoy and implements the Kubernetes Gateway API specification, offering:
+
+- **AI Workload Protection**: Secure applications, models, and data from inappropriate access
+- **LLM Traffic Management**: Intelligent routing to LLM providers with load balancing based on model metrics
+- **Prompt Engineering**: System-level prompt enrichment and guards
+- **Multi-Provider Support**: Works with OpenAI, Anthropic, Google Gemini, Mistral, and local models like Ollama
+- **Model Context Protocol (MCP) Gateway**: Federates MCP tool servers into a single, secure endpoint
+- **Advanced Security**: Authentication, authorization, rate limiting tailored for AI workloads
+
+### Key Features
+
+#### AI-Specific Capabilities
+
+- **Prompt Guards**: Protect against prompt injection and data leakage
+- **Model Failover**: Automatic failover between LLM providers
+- **Function Calling**: Support for LLM function/tool calling
+- **AI Observability**: Detailed metrics and tracing for AI requests
+- **Semantic Caching**: Cache responses based on semantic similarity
+- **Token-Based Rate Limiting**: Control costs with token consumption limits
+
+#### Gateway API Inference Extension
+
+KGateway supports the Gateway API Inference Extension, which introduces:
+
+- `InferenceModel` CRD: Define LLM models and their endpoints
+- `InferencePool` CRD: Group models for load balancing and failover
+- Intelligent endpoint picking based on model performance metrics
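+
+For illustration only, here is a sketch of how these two CRDs relate. All names are placeholders for a hypothetical vLLM deployment, and the API group and field names follow the inference extension's v1alpha2 spec, which may change between releases:
+
+```yaml
+apiVersion: inference.networking.x-k8s.io/v1alpha2
+kind: InferencePool
+metadata:
+  name: vllm-pool
+spec:
+  selector:
+    app: vllm              # pods serving the model
+  targetPortNumber: 8000   # port the model server listens on
+  extensionRef:
+    name: vllm-endpoint-picker # endpoint picker service (placeholder name)
+---
+apiVersion: inference.networking.x-k8s.io/v1alpha2
+kind: InferenceModel
+metadata:
+  name: llama
+spec:
+  modelName: llama-3-8b    # model name that clients request
+  criticality: Standard
+  poolRef:
+    name: vllm-pool        # pool that serves this model
+```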
+
+## Setup
+
+### Step 1: Enable KGateway AI Extension
+
+Update the values.yaml of the KubeLB manager chart to enable KGateway with AI capabilities:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+
+kubelb-addons:
+ enabled: true
+
+ kgateway:
+ enabled: true
+ gateway:
+ aiExtension:
+ enabled: true
+```
+
+### Step 2: Create Gateway-Specific Resources
+
+1. Deploy a Gateway resource to handle AI traffic:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: ai-gateway
+ namespace: kubelb
+ labels:
+ app: ai-gateway
+spec:
+ gatewayClassName: kgateway
+ infrastructure:
+ parametersRef:
+ name: ai-gateway
+ group: gateway.kgateway.dev
+ kind: GatewayParameters
+ listeners:
+ - protocol: HTTP
+ port: 8080
+ name: http
+ allowedRoutes:
+ namespaces:
+ from: All
+```
+
+2. Deploy a GatewayParameters resource to enable the AI extension:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: GatewayParameters
+metadata:
+ name: ai-gateway
+ namespace: kubelb
+ labels:
+ app: ai-gateway
+spec:
+ kube:
+ aiExtension:
+ enabled: true
+ ports:
+ - name: ai-monitoring
+ containerPort: 9092
+ image:
+ registry: cr.kgateway.dev/kgateway-dev
+ repository: kgateway-ai-extension
+ tag: v2.1.0-main
+ service:
+ type: LoadBalancer
+```
+
+## OpenAI Integration Example
+
+This example shows how to set up secure access to OpenAI through the AI Gateway.
+
+### Step 1: Store OpenAI API Key
+
+Create a Kubernetes secret with your OpenAI API key:
+
+```bash
+export OPENAI_API_KEY="sk-..."
+
+kubectl create secret generic openai-secret \
+ --from-literal=Authorization="Bearer ${OPENAI_API_KEY}" \
+ --namespace kubelb
+```
+
+### Step 2: Create Backend Configuration
+
+Define an AI Backend that uses the secret for authentication:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: Backend
+metadata:
+ name: openai
+ namespace: kubelb
+spec:
+ type: AI
+ ai:
+ llm:
+ provider:
+ openai:
+ authToken:
+ kind: SecretRef
+ secretRef:
+ name: openai-secret
+ namespace: kubelb
+ model: "gpt-3.5-turbo"
+```
+
+### Step 3: Create HTTPRoute
+
+Route traffic to the OpenAI backend:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: openai-route
+ namespace: kubelb
+spec:
+ parentRefs:
+ - name: ai-gateway
+ namespace: kubelb
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /openai
+ filters:
+ - type: URLRewrite
+ urlRewrite:
+ path:
+ type: ReplaceFullPath
+ replaceFullPath: /v1/chat/completions
+ backendRefs:
+ - name: openai
+ namespace: kubelb
+ group: gateway.kgateway.dev
+ kind: Backend
+```
+
+### Step 4: Test the Configuration
+
+Get the Gateway's external IP:
+
+```bash
+kubectl get gateway ai-gateway -n kubelb
+export GATEWAY_IP=$(kubectl get svc -n kubelb ai-gateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+```
+
+Send a test request:
+
+```bash
+curl -X POST "http://$%7Bgateway_ip%7D/openai" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "messages": [
+ {"role": "user", "content": "Hello, how are you?"}
+ ]
+ }'
+```
+
+## Rate Limiting (Optional)
+
+Add rate limiting to control costs and prevent abuse:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: RateLimitPolicy
+metadata:
+ name: openai-ratelimit
+ namespace: kubelb
+spec:
+ targetRef:
+ kind: HTTPRoute
+ name: openai-route
+ namespace: kubelb
+ limits:
+ - requests: 100
+ unit: hour
+```
+
+## MCP Gateway
+
+Similar to the AI Gateway, you can also use agentgateway to connect to one or multiple MCP servers in any environment.
+
+Please follow this guide to set up the MCP Gateway: [MCP Gateway](https://kgateway.dev/docs/agentgateway/mcp/)
+
+## Further Reading
+
+For advanced configurations and features:
+
+- [KGateway AI Setup Documentation](https://kgateway.dev/docs/ai/setup/)
+- [KGateway Authentication Guide](https://kgateway.dev/docs/ai/auth/)
+- [Prompt Guards and Security](https://kgateway.dev/docs/ai/prompt-guards/)
+- [Multiple LLM Providers](https://kgateway.dev/docs/ai/cloud-providers/)
diff --git a/content/kubelb/main/tutorials/bgp/_index.en.md b/content/kubelb/main/tutorials/bgp/_index.en.md
new file mode 100644
index 000000000..0852763cc
--- /dev/null
+++ b/content/kubelb/main/tutorials/bgp/_index.en.md
@@ -0,0 +1,53 @@
++++
+title = "Layer 4 Load balancing with BGP"
+linkTitle = "BGP Support"
+date = 2025-08-27T10:07:15+02:00
+weight = 6
++++
+
+In the management cluster, KubeLB offloads the provisioning of the actual load balancers to the load balancing appliance that is being used. This can be the CCM in the case of a cloud provider or a self-managed solution like [MetalLB](https://metallb.universe.tf), [Cilium Load Balancer](https://cilium.io/use-cases/load-balancer/) or any other solution.
+
+Due to this generic nature, KubeLB can be used with any load balancing appliance, and underlying route advertisement protocols such as BGP, OSPF, and L2 are all supported. This tutorial will focus on [BGP](https://networklessons.com/bgp/introduction-to-bgp), but it assumes that the underlying infrastructure of your Kubernetes cluster is already configured to support BGP.
+
+## Setup
+
+We'll use [MetalLB](https://metallb.universe.tf) with BGP for this tutorial. Update the values.yaml of the KubeLB manager chart to enable MetalLB:
+
+```yaml
+kubelb-addons:
+ metallb:
+ enabled: true
+```
+
+A minimal configuration for MetalLB for demonstration purposes is as follows:
+
+```yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ addresses:
+ - 10.10.255.200-10.10.255.250
+ autoAssign: true
+ avoidBuggyIPs: true
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ ipAddressPools:
+ - extern
+```
+
+This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type.
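+
+Note that MetalLB only advertises these addresses over BGP once at least one `BGPPeer` is configured. A minimal sketch, assuming a hypothetical upstream router at `10.10.0.1` and placeholder ASNs:
+
+```yaml
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: upstream-router
+  namespace: metallb-system
+spec:
+  myASN: 64512     # ASN that MetalLB speaks as (assumption)
+  peerASN: 64513   # ASN of the upstream router (assumption)
+  peerAddress: 10.10.0.1
+```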
+
+Afterwards, you can follow the [Layer 4 Load balancing](../loadbalancer#usage-with-kubelb) tutorial to create a `LoadBalancer` service in the tenant cluster.
+
+### Further reading
+
+- [MetalLB BGP Configuration](https://metallb.universe.tf/configuration/_advanced_bgp_configuration/)
+- [MetalLB BGP Usage](https://metallb.universe.tf/usage/#bgp)
diff --git a/content/kubelb/main/tutorials/config/_index.en.md b/content/kubelb/main/tutorials/config/_index.en.md
index a676e2068..9ee2a661f 100644
--- a/content/kubelb/main/tutorials/config/_index.en.md
+++ b/content/kubelb/main/tutorials/config/_index.en.md
@@ -60,7 +60,7 @@ These configurations are available at a global level and also at a tenant level.
2. **GatewayAPI.Class**: The class to use for Gateway API resources for tenants in the management cluster.
3. **Certificates.DefaultClusterIssuer(EE)**: The default cluster issuer to use for certificate management.
-### Propagate annotations
+### Annotation Settings
KubeLB can propagate annotations from services, ingresses, gateway API objects etc. in the tenant cluster to the corresponding LoadBalancer or Route resources in the management cluster. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.
@@ -68,7 +68,7 @@ Annotations are not propagated by default since tenants can make unwanted change
The annotation configuration set on the tenant level will override the global annotation configuration for that tenant.
-1. Propagate all annotations
+#### 1. Propagate all annotations
```yaml
apiVersion: kubelb.k8c.io/v1alpha1
@@ -80,7 +80,7 @@ spec:
propagateAllAnnotations: true
```
-2. Propagate specific annotations
+#### 2. Propagate specific annotations
```yaml
apiVersion: kubelb.k8c.io/v1alpha1
@@ -96,6 +96,29 @@ spec:
metallb.universe.tf/loadBalancerIPs: "8.8.8.8"
```
+#### 3. Default annotations
+
+Default annotations for resources that KubeLB generates in the management cluster can also be configured. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ defaultAnnotations:
+ service:
+      service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ ingress:
+ kubernetes.io/ingress.class: "nginx"
+ gatewayapi:
+ kubernetes.io/ingress.class: "eg"
+ # Will be applied to all resources such as Ingress, Gateway API resources, services, etc.
+ all:
+      internal: "true"
+```
+
### Configure Envoy Proxy
Sample configuration, inflated with values for demonstration purposes only. All of the values are optional and have sane defaults. For more details check [CRD References]({{< relref "../../references">}})
@@ -182,17 +205,34 @@ spec:
gatewayAPI:
class: "eg"
disable: false
- # Enterprise Edition Only
+ defaultGateway:
+ name: "default"
+ namespace: "envoy-gateway"
+    # Enterprise Edition Only (all the options below are only available in the Enterprise Edition)
gateway:
limits: 10
disableHTTPRoute: false
disableGRPCRoute: false
- # Enterprise Edition Only
disableTCPRoute: false
- # Enterprise Edition Only
disableUDPRoute: false
- # Enterprise Edition Only
disableTLSRoute: false
```
+### Configure DNS Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ dns:
+    # The wildcard domain to use for auto-generated hostnames for load balancers.
+    # In the EE edition, this is also used to generate dynamic hostnames for tunnels.
+    wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for load balancers and tunnels (EE edition).
+    allowExplicitHostnames: false
+```
+
**For more details and options, please go through [CRD References]({{< relref "../../references">}})**
diff --git a/content/kubelb/main/tutorials/gatewayapi/_index.en.md b/content/kubelb/main/tutorials/gatewayapi/_index.en.md
index dff03c946..9bd0d2c4b 100644
--- a/content/kubelb/main/tutorials/gatewayapi/_index.en.md
+++ b/content/kubelb/main/tutorials/gatewayapi/_index.en.md
@@ -2,7 +2,7 @@
title = "Gateway API"
linkTitle = "Gateway API"
date = 2023-10-27T10:07:15+02:00
-weight = 5
+weight = 4
+++
This tutorial will guide you through the process of setting up Layer 7 load balancing with Gateway API.
@@ -19,19 +19,23 @@ In KubeLB, we treat the admins of management cluster as the Platform provider. H
### Setup
-{{% notice warning %}} Ensure that Gateway API is enabled for the cluster. Please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. Gateway API has been disabled by default as due to missing Gateway API CRDs the controller will crash and won't start. {{% /notice %}}
-
Kubermatic's default recommendation is to use Gateway API and use [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Install Envoy Gateway by following this [guide](https://gateway.envoyproxy.io/docs/install/install-helm/) or any other Gateway API implementation of your choice.
-Ensure that `GatewayClass` exists in the management cluster. A minimal configuration for GatewayClass is as follows:
+Update the values.yaml of the KubeLB manager chart to enable the Gateway API addon.
```yaml
-apiVersion: gateway.networking.k8s.io/v1
-kind: GatewayClass
-metadata:
- name: eg
-spec:
- controllerName: gateway.envoyproxy.io/gatewayclass-controller
+kubelb:
+ enableGatewayAPI: true
+
+## Addon configuration
+kubelb-addons:
+ enabled: true
+ # Create the GatewayClass resource in the management cluster.
+ gatewayClass:
+ create: true
+
+ envoy-gateway:
+ enabled: true
```
#### KubeLB Manager Configuration
diff --git a/content/kubelb/main/tutorials/ingress/_index.en.md b/content/kubelb/main/tutorials/ingress/_index.en.md
index 7265f2467..51a5d84b4 100644
--- a/content/kubelb/main/tutorials/ingress/_index.en.md
+++ b/content/kubelb/main/tutorials/ingress/_index.en.md
@@ -2,7 +2,7 @@
title = "Ingress"
linkTitle = "Ingress"
date = 2023-10-27T10:07:15+02:00
-weight = 4
+weight = 5
+++
This tutorial will guide you through the process of setting up Layer 7 load balancing with Ingress.
@@ -50,12 +50,16 @@ spec:
#### Shared
-Install your controller with default configuration.
+Update the values.yaml of the KubeLB manager chart to enable the ingress-nginx addon.
-```sh
-helm upgrade --install ingress-nginx ingress-nginx \
- --repo https://kubernetes.github.io/ingress-nginx \
- --namespace ingress-nginx --create-namespace
+```yaml
+kubelb-addons:
+ enabled: true
+ ingress-nginx:
+ enabled: true
+ controller:
+ service:
+ externalTrafficPolicy: Local
```
For details:
diff --git a/content/kubelb/main/tutorials/kkp/_index.en.md b/content/kubelb/main/tutorials/kkp/_index.en.md
index 3704c116a..a2c4c4e04 100644
--- a/content/kubelb/main/tutorials/kkp/_index.en.md
+++ b/content/kubelb/main/tutorials/kkp/_index.en.md
@@ -1,7 +1,7 @@
+++
title = "Kubermatic Kubernetes Platform Integration"
date = 2023-10-27T10:07:15+02:00
-weight = 15
+weight = 9
enterprise = true
+++
diff --git a/content/kubelb/main/tutorials/loadbalancer/_index.en.md b/content/kubelb/main/tutorials/loadbalancer/_index.en.md
index 4672fd443..7a2d58459 100644
--- a/content/kubelb/main/tutorials/loadbalancer/_index.en.md
+++ b/content/kubelb/main/tutorials/loadbalancer/_index.en.md
@@ -99,6 +99,38 @@ spec:
This will create a service of type `LoadBalancer` and a deployment. KubeLB CCM will then propagate the request to management cluster, create a LoadBalancer CR there and retrieve the IP address allocated in the management cluster. Eventually the IP address will be assigned to the service in the tenant cluster.
+### Load Balancer Hostname Support
+
+KubeLB now supports assigning a hostname directly to the LoadBalancer resource. This is helpful for simpler configurations where no special routing rules are required for your Ingress or HTTPRoute resources.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: LoadBalancer
+metadata:
+ name: test-lb-hostname
+ namespace: tenant-dkrqjswsgk
+ annotations:
+ kubelb.k8c.io/request-wildcard-domain: "true"
+spec:
+ # hostname: test.example.com
+ endpoints:
+ - addresses:
+ - ip: 91.99.112.254
+ ports:
+ - name: 8080-tcp
+ port: 31632
+ protocol: TCP
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ type: ClusterIP
+```
+
+This will create a LoadBalancer resource that forwards traffic to the IP address `91.99.112.254` on port `31632`. The `kubelb.k8c.io/request-wildcard-domain: "true"` annotation requests an auto-generated hostname under the configured wildcard domain. Alternatively, uncomment `spec.hostname` to explicitly set a hostname such as `test.example.com`.
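+
+To verify the outcome, you can inspect the resource in the management cluster; a quick check (resource name and namespace taken from the example above):
+
+```bash
+# Print the LoadBalancer object, including the hostname that KubeLB recorded in its status
+kubectl get loadbalancers.kubelb.k8c.io test-lb-hostname \
+  -n tenant-dkrqjswsgk -o yaml
+```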
+
+Please take a look at [DNS Automation](../security/dns/#enable-dns-automation) for more details on how to configure DNS for the hostname.
+
### Configurations
KubeLB CCM helm chart can be used to further configure the CCM. Some essential options are:
diff --git a/content/kubelb/main/tutorials/observability/_index.en.md b/content/kubelb/main/tutorials/observability/_index.en.md
index c4f1867d4..743871860 100644
--- a/content/kubelb/main/tutorials/observability/_index.en.md
+++ b/content/kubelb/main/tutorials/observability/_index.en.md
@@ -2,7 +2,7 @@
title = "Observability"
linkTitle = "Observability"
date = 2023-10-27T10:07:15+02:00
-weight = 7
+weight = 8
+++
KubeLB is a mission-critical component in the Kubernetes ecosystem, and its observability is crucial for ensuring the stability and reliability of the platform. This guide will walk you through the steps to enable and configure observability for KubeLB.
diff --git a/content/kubelb/main/tutorials/security/_index.en.md b/content/kubelb/main/tutorials/security/_index.en.md
index 81b527730..310780651 100644
--- a/content/kubelb/main/tutorials/security/_index.en.md
+++ b/content/kubelb/main/tutorials/security/_index.en.md
@@ -2,7 +2,7 @@
title = "Security"
linkTitle = "Security"
date = 2023-10-27T10:07:15+02:00
-weight = 6
+weight = 7
+++
This is a guide towards managing DNS, TLS, and other security-related configurations in KubeLB.
diff --git a/content/kubelb/main/tutorials/security/cert-management/_index.en.md b/content/kubelb/main/tutorials/security/cert-management/_index.en.md
index 2e0331e39..9d28a7b16 100644
--- a/content/kubelb/main/tutorials/security/cert-management/_index.en.md
+++ b/content/kubelb/main/tutorials/security/cert-management/_index.en.md
@@ -17,32 +17,37 @@ These are minimal examples to get you started quickly. Please refer to the docum
{{< tabs name="cert-manager" >}}
{{% tab name="Gateway API" %}}
-For Gateway API, the feature gate to use Gateway APIs needs to be enabled:
-
-```bash
-helm repo add jetstack https://charts.jetstack.io --force-update
-helm upgrade --install \
- cert-manager jetstack/cert-manager \
- --namespace cert-manager \
- --create-namespace \
- --version v1.15.2 \
- --set crds.enabled=true \
- --set config.apiVersion="controller.config.cert-manager.io/v1alpha1" \
- --set config.kind="ControllerConfiguration" \
- --set config.enableGatewayAPI=true
+Update the values.yaml of the KubeLB manager chart to enable the cert-manager addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: true
```
{{% /tab %}}
{{% tab name="Ingress" %}}
-```bash
-helm repo add jetstack https://charts.jetstack.io --force-update
-helm upgrade --install \
- cert-manager jetstack/cert-manager \
- --namespace cert-manager \
- --create-namespace \
- --version v1.15.2 \
- --set crds.enabled=true
+Update the values.yaml of the KubeLB manager chart to enable the cert-manager addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: false
```
{{% /tab %}}
@@ -76,18 +81,76 @@ Users can then either use [cert-manager annotations](https://cert-manager.io/doc
### Cluster Issuer example
+{{% notice info %}}
+Due to multi-tenancy, it's recommended to use the DNS challenge for certificate management. Gateway API has a limitation and doesn't support wildcard domains with the HTTP01 challenge. Similarly, for Ingress, unless you are using a single ingress installation for all tenants, you will need to create a separate ClusterIssuer for each tenant. The same is the case for Gateway API, since it needs the Gateway name to resolve the certificate challenges.
+{{% /notice %}}
+
+#### Example for DNS challenge with AWS Route53
+
```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
- name: letsencrypt-staging
+ name: letsencrypt-production-dns
+spec:
+ acme:
+ email: user@example.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: letsencrypt-production-dns
+ solvers:
+ - dns01:
+ route53:
+ region: eu-central-1
+ accessKeyIDSecretRef:
+ name: route53-credentials
+ key: access-key-id
+ secretAccessKeySecretRef:
+ name: route53-credentials
+ key: secret-access-key
+```
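+
+The issuer above references a `route53-credentials` secret. A minimal sketch for creating it, with key names matching the `accessKeyIDSecretRef`/`secretAccessKeySecretRef` above (by default, ClusterIssuers read secrets from the cert-manager namespace):
+
+```bash
+kubectl create secret generic route53-credentials \
+  --namespace cert-manager \
+  --from-literal=access-key-id="<AWS_ACCESS_KEY_ID>" \
+  --from-literal=secret-access-key="<AWS_SECRET_ACCESS_KEY>"
+```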
+
+#### Example for HTTP01 challenge
+
+{{< tabs name="cert-manager" >}}
+{{% tab name="Gateway API" %}}
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production
+spec:
+ acme:
+ email: user@example.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: example-issuer-account-key
+ solvers:
+ - http01:
+ gatewayHTTPRoute:
+ parentRefs:
+ - kind: Gateway
+ name: default
+ namespace: tenant-default
+ sectionName: http
+```
+
+{{% /tab %}}
+{{% tab name="Ingress" %}}
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: user@example.com
- server: https://acme-staging-v02.api.letsencrypt.org/directory
+ server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: example-issuer-account-key
@@ -98,6 +161,9 @@ spec:
ingressClassName: nginx
```
+{{% /tab %}}
+{{< /tabs >}}
+
The additional validation at the tenant level allows us to use a single instance of cert-manager for multiple tenants. Multiple cert-manager installations are not recommended; it's better to have a single cert-manager instance for all tenants and, if required, different ClusterIssuers/Issuers per tenant.
## Usage
diff --git a/content/kubelb/main/tutorials/security/dns/_index.en.md b/content/kubelb/main/tutorials/security/dns/_index.en.md
index b58536adc..952a5002a 100644
--- a/content/kubelb/main/tutorials/security/dns/_index.en.md
+++ b/content/kubelb/main/tutorials/security/dns/_index.en.md
@@ -10,41 +10,44 @@ enterprise = true
### Install External-dns
-Install [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters. A sample configuration to use external-dns with AWS Route53 and domain is shown below.
+We leverage [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters.
**This is just an example to give you a headstart. For more details on setting up external-dns for different providers, visit [Official Documentation](https://kubernetes-sigs.github.io/external-dns).**
-#### Values.yaml
+Update the values.yaml of the KubeLB manager chart to enable the external-dns addon.
```yaml
-# do not allow any domain that are now below these base domains
-domainFilters:
- - example.com
-
-# mount the credential secret we created outside of helm
-extraVolumes:
- - name: credentials
- secret:
- secretName: route53-credentials
-
-extraVolumeMounts:
- - name: credentials
- mountPath: /.aws
- readOnly: true
-
-env:
- - name: AWS_SHARED_CREDENTIALS_FILE
- value: /.aws/credentials
-
-# NOTE: Enable/Disable based on your requirements
-sources:
- - service
- - ingress
- - gateway-httproute
- - gateway-grpcroute
- - gateway-tlsroute
- - gateway-tcproute
- - gateway-udproute
+kubelb-addons:
+ enabled: true
+
+ external-dns:
+ enabled: true
+ domainFilters:
+ - example.com
+ extraVolumes:
+ - name: credentials
+ secret:
+ secretName: route53-credentials
+ extraVolumeMounts:
+ - name: credentials
+ mountPath: /.aws
+ readOnly: true
+ env:
+ - name: AWS_SHARED_CREDENTIALS_FILE
+ value: /.aws/credentials
+ txtOwnerId: kubelb-example-aws
+ registry: txt
+ provider: aws
+ policy: sync
+ sources:
+ - service
+ - ingress
+ # Comment out the below resources if you are not using Gateway API.
+ - gateway-httproute
+ - gateway-grpcroute
+ - gateway-tlsroute
+ - gateway-tcproute
+ - gateway-udproute
```
#### Credentials secret
@@ -65,12 +68,6 @@ metadata:
type: Opaque
```
-#### Install helm chart
-
-```sh
-helm install external-dns oci://registry-1.docker.io/bitnamicharts/external-dns -n external-dns --values values.yaml
-```
-
### Enable DNS automation
DNS can be enabled/disabled at global or tenant level. For automation purposes, you can configure allowed domains for DNS per tenant.
@@ -90,12 +87,71 @@ spec:
# If not empty, only the domains specified here will have automation for DNS. Everything else will be ignored.
allowedDomains:
- "*.shroud.example.com"
+    # The wildcard domain to use for auto-generated hostnames for load balancers.
+    # In the EE edition, this is also used to generate dynamic hostnames for tunnels.
+    wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for load balancers and tunnels (EE edition).
+    allowExplicitHostnames: false
+ gatewayAPI:
+ class: "eg"
+ defaultGateway:
+ name: "default"
+ namespace: "kubelb"
```
Users can then either use [external-dns annotations](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/annotations/annotations.md) or the annotation `kubelb.k8c.io/manage-dns: true` on their resources to automate DNS management.
The additional validation at the tenant level allows us to use a single instance of external-dns for multiple tenants. Although, if required, external-dns can be installed per tenant as well.
+#### Configure Gateway
+
+Gateway resource needs to be configured for this automation to work. For example, if you are using Gateway API, you can configure the Gateway resource to manage DNS as follows:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: default
+ namespace: kubelb
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-production
+spec:
+ gatewayClassName: eg
+ listeners:
+ ## HTTP listener to solve DNS challenge for cert-manager
+ - name: http
+ protocol: HTTP
+ port: 80
+ allowedRoutes:
+ namespaces:
+ from: All
+ - protocol: HTTPS
+ port: 443
+ name: https
+ hostname: "*.apps.example.com"
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - kind: Secret
+ name: eg-https
+ # Required in EE for tunneling
+ - protocol: HTTPS
+ port: 443
+ name: https-connection-manager
+ hostname: "connection-manager.example.com"
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - kind: Secret
+ name: eg-https-connection-manager
+```
+
## Usage
1. Using external-dns annotations:
diff --git a/content/kubelb/v1.2/_index.en.md b/content/kubelb/v1.2/_index.en.md
new file mode 100644
index 000000000..9f15e962f
--- /dev/null
+++ b/content/kubelb/v1.2/_index.en.md
@@ -0,0 +1,44 @@
++++
+title = "Kubermatic KubeLB"
+date = 2023-10-27T10:07:15+02:00
+weight = 6
+description = "Learn how you can use Kubermatic KubeLB to centrally provision and manage load balancers across multiple cloud and on-premise environments."
++++
+
+
+
+## What is KubeLB?
+
+KubeLB is a Kubernetes-native tool by Kubermatic that is responsible for centrally managing Layer 4 and 7 load balancing configurations for Kubernetes clusters across multi-cloud and on-premise environments.
+
+## Motivation and Background
+
+Kubernetes does not offer any implementation for load balancers and in turn relies on the in-tree or out-of-tree cloud provider implementations to take care of provisioning and managing load balancers. This means that if you are not running on a supported cloud provider, your services of type `LoadBalancer` will never be allotted a load balancer IP address. This is an obstacle for bare-metal Kubernetes environments.
+
+There are solutions available like [MetalLB][2], [Cilium][3], etc. that solve this issue. However, these solutions are focused on a single cluster where you have to deploy the application in the same cluster where you want the load balancers. This is not ideal for multi-cluster environments, since you have to configure load balancing for each cluster separately, which makes IP address management non-trivial.
+
+For application load balancing, we have the same case where an external application like [nginx-ingress][4] or [envoy gateway][5] needs to be deployed in the cluster. To further secure traffic, additional tools are required for managing DNS, TLS certificates, Web Application Firewall, etc.
+
+KubeLB solves this problem by providing a centralized management solution that can manage the data plane for multiple Kubernetes clusters across multi-cloud and on-premise environments. This enables you to manage a fleet of Kubernetes clusters in a centralized way, ensuring security compliance, enforcing policies, and providing a consistent experience for developers.
+
+[2]: https://metallb.universe.tf
+[3]: https://cilium.io/use-cases/load-balancer/
+[4]: https://kubernetes.github.io/ingress-nginx/
+[5]: https://gateway.envoyproxy.io/
+
+## Table of Content
+
+{{% children depth=5 %}}
+{{% /children %}}
+
+## Further Information
+
+- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
+- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
+
+Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
+
+{{% notice tip %}}
+For latest updates follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic)
+{{% /notice %}}
diff --git a/content/kubelb/v1.2/architecture/_index.en.md b/content/kubelb/v1.2/architecture/_index.en.md
new file mode 100644
index 000000000..951b94bb1
--- /dev/null
+++ b/content/kubelb/v1.2/architecture/_index.en.md
@@ -0,0 +1,82 @@
++++
+title = "Architecture"
+date = 2023-10-27T10:07:15+02:00
+weight = 5
++++
+
+KubeLB is an elastically scalable load balancer with a distributed data plane that can span, serve, and scale with apps across various on-premise and cloud locations. The distributed data plane empowers customers to obtain application affinity at the application microservice levels, thus significantly enhancing the overall application performance. In addition, the clean separation of planes also enables the creation of a unified, centralized control plane that significantly alleviates the operational complexity associated with integrating, operating, and managing each ADC appliance across locations individually.
+
+## Terminology
+
+In this chapter, you will find the following KubeLB specific terms:
+
+1. **Management Cluster/Load balancing Cluster** -- A Kubernetes cluster which is responsible for management of all the tenants and their data plane components. Requests for Layer 4 and Layer 7 load balancing are handled by the management cluster.
+2. **Tenant Cluster** -- A Kubernetes cluster which acts as a consumer of the load balancer services. Workloads that need Layer 4 or Layer 7 load balancing are created in the tenant cluster. The tenant cluster hosts the KubeLB Cloud Controller Manager (CCM) component which is responsible for propagating the load balancer configurations to the management cluster. Each Kubernetes cluster where the KubeLB CCM is running is considered a unique tenant. This demarcation is based on the fact that the endpoints, simply the Node IPs and node ports, are unique for each Kubernetes cluster.
+
+## Design and Architecture
+
+KubeLB follows the **hub and spoke** model in which the "Management Cluster" acts as the hub and the "Tenant Clusters" act as the spokes. The information flow is from the tenant clusters to the management cluster. The agent running in the tenant cluster watches for nodes, services, ingresses, Gateway API resources, etc. and then propagates the configuration to the management cluster. The management cluster then deploys the load balancer and configures it according to the desired specification. It uses Envoy Proxy to route traffic to the appropriate endpoints, i.e. the node ports open on the nodes of the tenant cluster.
+
+For security and isolation, the tenants have no access to any native Kubernetes resources in the management cluster. The tenants can only interact with the management cluster via the KubeLB CRDs. This ensures that they cannot exceed their access level and can only perform controlled operations in the management cluster.
+
+
+
+## Components
+
+KubeLB comprises two components:
+
+### Cloud Controller Manager
+
+The **KubeLB CCM** is deployed in the tenant clusters and acts as an `agent` that watches for changes in Layer 4 and Layer 7 load balancing components in the tenant cluster, such as nodes, secrets, services, ingresses, and Gateway API resources. Based on its configuration and what's allowed, it processes and propagates the required resources to the `manager` cluster.
+
+The `LoadBalancer` CRD is used for Layer 4 load balancing and the `Route` CRD for Layer 7 load balancing.
+
+### Manager
+
+The **KubeLB manager** is responsible for managing the data plane of its tenants. The manager **registers** the tenant clusters as tenants, and then it receives the load balancer configurations from the CCM(s) in the form of `LoadBalancer` or `Route` CRDs. It then deploys the necessary workloads according to the desired specification.
+
+At its core, the KubeLB manager relies on [envoy proxy][1] to load balance the traffic. The manager is responsible for deploying the envoy proxy and configuring it for each load balancer service per tenant, based on the envoy proxy deployment topology.
+
+## Personas
+
+KubeLB targets the following personas:
+
+1. Platform Provider: The Platform Provider is responsible for the overall environment that the cluster runs in, i.e. the cloud provider. The Platform Provider will interact with GatewayClass resources.
+2. Platform Operator: The Platform Operator is responsible for overall cluster administration. They manage policies, network access, application permissions and will interact with Gateway resources.
+3. Service Operator: The Service Operator is responsible for defining application configuration and service composition. They will interact with HTTPRoute and TLSRoute resources and other typical Kubernetes resources.
+
+Inspired from [Gateway API Personas](https://gateway-api.sigs.k8s.io/#personas).
+
+Service Operator and Platform Operator are more or less the same persona in KubeLB and they are responsible for defining the load balancer configurations in the tenant cluster. Platform Provider is the "KubeLB provider" and manages the management cluster.
+
+## Concepts
+
+### Envoy Proxy Deployment Topology
+
+KubeLB manager supports two different deployment topologies for envoy proxy:
+
+1. **Shared (default)**: In this topology, a single envoy proxy is deployed per tenant cluster. All load balancer services in a particular tenant cluster are configured to use this envoy proxy. This is the default topology.
+2. **Global**: In this topology, a single envoy proxy is deployed per KubeLB manager. All load balancer services in all tenant clusters are configured to use this envoy proxy. Pitfalls: Due to a single envoy proxy deployment, service-level network access is required from the tenant namespace to the controller namespace.
+
+The consumers are not aware of, or affected by, the topology. This is only an internal detail of the management cluster.
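+
+The topology is selected in the manager's `Config` resource; a minimal sketch, assuming the `envoyProxy.topology` field documented in the CRD references:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  envoyProxy:
+    # One of: shared (default), global
+    topology: shared
+```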
+
+### User experience
+
+One of the most vital considerations while designing KubeLB was the user experience. There should be as little friction and divergence as possible from how the workflows for managing Layer 4 and Layer 7 workloads looked before KubeLB.
+
+All the end users need to do is configure the CCM with their desired configuration and the CCM will take care of the rest. With the default configuration, all you need is to use the class **kubelb** for your resources instead of the provider-specific class that was used before.
+
+### Kubernetes Class
+
+Class is a concept in Kubernetes that is used to mark the ownership of a resource. For example, an Ingress with `class: nginx` will be owned by a controller that implements the IngressClass named `nginx`. A similar concept exists for services, ingresses, Gateway API resources, etc. KubeLB leverages this concept to provide a seamless experience by, by default, filtering out and processing only the resources that are owned by KubeLB. This behavior can be changed by overriding the CCM configuration.
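+
+As a minimal illustration (hostname and service name are placeholders), handing an Ingress over to KubeLB is just a matter of setting the class:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: app
+spec:
+  ingressClassName: kubelb # marks the Ingress as owned by KubeLB
+  rules:
+    - host: app.example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: app
+                port:
+                  number: 80
+```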
+
+## Installation
+
+See the [installation documentation]({{< relref "../installation/">}}) for more details on how to setup and install KubeLB.
+
+[1]: https://github.com/envoyproxy/envoy
+
+## Table of Content
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md b/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md
new file mode 100644
index 000000000..2c5a171cf
--- /dev/null
+++ b/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md
@@ -0,0 +1,43 @@
++++
+title = "Application Load Balancing"
+date = 2023-10-27T10:07:15+02:00
+weight = 10
++++
+
+This document explains the architecture for Layer 7 or Application Layer Load Balancing support in KubeLB.
+
+## Background
+
+With KubeLB, we want to build a product that can manage the data plane of a fleet of clusters (tenants) from a centralized point, providing Layer 4 and Layer 7 load balancing capabilities through a single platform.
+
+KubeLB already had support for L4 load balancing and provisioning/managing load balancers for Kubernetes clusters from a central cluster. With v1.1, we want to extend this functionality to managing application-level load balancing, including DNS management, TLS management and termination, and other aspects.
+
+### Challenges
+
+Every Kubernetes cluster operates within its isolated network namespace, which offers several advantages. For instance, individual pods can be effortlessly accessed via unique IP addresses. Deploying your load balancing appliance, such as the nginx-ingress controller or Envoy Gateway, would work seamlessly within the cluster because it runs as a pod inside your cluster and, in essence, has access to the same pod-level network as the rest. This enables the load balancing appliance to route and load balance traffic within the cluster.
+
+However, external clusters, the management cluster in our case, cannot have direct access to the pod network of the tenant Kubernetes clusters. This introduces a limitation in KubeLB: the management cluster cannot directly route traffic from the load balancing appliance hosted on the management cluster to the tenant clusters. To achieve something like this, the LB cluster would need pod-level network access to ALL the consumer clusters. The options to achieve this are:
+
+- Share the network routes of consumer clusters with the ingress controller server via BGP peering.
+- Leverage tools like Submariner, Cilium Cluster Mesh, to create stretched clusters.
+
+These are the options that we want to look into in the future, but they do require significant effort and might not be possible to achieve in some cases, since KubeLB is simply an "application" that runs in a Kubernetes cluster. It doesn't, for now, depend on or dictate the infrastructural requirements for that Kubernetes cluster.
+
+### Solution
+
+Considering the limitations, we settled on using services of type `NodePort` to route traffic from the management cluster to the tenants. This offers a high level of isolation since the only infrastructural requirement is network access to the tenant cluster nodes within the node port range (default: 30000-32767). This is required for the envoy proxy to be able to connect to the tenant cluster nodes.
+
+This is already a requirement for Layer 4 load balancing so we are not adding any new requirements specifically for this use case. This also means that no additional infrastructural level or network level modifications need to be made to your existing management or tenant clusters.
+
+For layer 7 requests, KubeLB will automatically create a `NodePort` service against your `ClusterIP` service, so no manual action is required from the user's perspective. The user experience remains exactly the same as if they had the load balancing appliance installed within their own cluster.
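+
+As an illustration (a hypothetical `backend` app), the service stays a plain `ClusterIP`; the CCM derives the `NodePort` counterpart on its own:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: backend
+spec:
+  type: ClusterIP # KubeLB CCM automatically creates a NodePort service for this
+  selector:
+    app: backend
+  ports:
+    - port: 80
+      targetPort: 8080
+```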
+
+### Lifecycle of a request
+
+1. Developer creates a deployment, service, and Ingress.
+2. KubeLB evaluates if the service is of type ClusterIP and generates a NodePort service against it.
+3. After validation, KubeLB CCM will propagate these resources from the tenant to LB cluster using the `Route` CRD.
+4. KubeLB manager then copies/creates the corresponding resources in the tenant namespace in the management cluster.
+5. KubeLB CCM polls for the updated status of the Ingress, updates the status when available.
+6. KubeLB manager starts routing the traffic for your resource.
+
+
diff --git a/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md b/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md
new file mode 100644
index 000000000..0095a8f97
--- /dev/null
+++ b/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md
@@ -0,0 +1,29 @@
++++
+title = "Layer 4 Load Balancing"
+date = 2023-10-27T10:07:15+02:00
+weight = 5
++++
+
+This document explains the architecture for Layer 4 or TCP/UDP Load Balancing support in KubeLB. This feature is used to provision LoadBalancers for a fleet of clusters (tenants) from a centralized platform.
+
+## Background
+
+Kubernetes does not offer an out-of-the-box implementation of load balancers for clusters. Network and application level load balancing is delegated to the IaaS platform (GCP, AWS, Azure, etc.). If you're using a cloud provider that doesn't offer load balancing capabilities, then you can't provision services of type `LoadBalancer`.
+
+Available solutions, e.g. MetalLB, focus on a single cluster. This has significant downsides, since each individual cluster admin needs to understand how networking works in their cluster to be able to configure an appliance such as MetalLB.
+
+Another common use case was using something like F5 for load balancing; managing it and delegating it to individual clusters came with massive administrative overhead.
+
+### Solution
+
+KubeLB focuses on managing the load balancers from a centralized point. Instead of having appliances running on each individual cluster, an agent, the `Cloud Controller Manager`, runs on the tenant cluster and propagates all load balancing requests to the management cluster. The KubeLB manager running in the management cluster is then responsible for provisioning the actual load balancers and routing traffic back to the tenant workloads.
+
+### Lifecycle of a request
+
+1. Developer creates a service of type LoadBalancer.
+2. After validation, KubeLB CCM will propagate these resources from the tenant to LB cluster using the `LoadBalancer` CRD.
+3. KubeLB manager then copies/creates the corresponding resources in the tenant namespace in the management cluster.
+4. KubeLB CCM polls for the updated status of the service, updates the status when available.
+5. KubeLB manager starts routing the traffic for your resource.
+
+
diff --git a/content/kubelb/v1.2/ce-ee-matrix/_index.en.md b/content/kubelb/v1.2/ce-ee-matrix/_index.en.md
new file mode 100644
index 000000000..9d14bcee8
--- /dev/null
+++ b/content/kubelb/v1.2/ce-ee-matrix/_index.en.md
@@ -0,0 +1,41 @@
++++
+title = "Community vs Enterprise Edition"
+date = 2024-03-15T00:00:00+01:00
+weight = 10
++++
+
+KubeLB is available in two versions: Community and Enterprise.
+
+- **Community Edition (CE)**: Free, open-source version that is available to the public. The CE is stable, production-ready software, available at [kubermatic/kubelb](https://github.com/kubermatic/kubelb).
+- **Enterprise Edition (EE)**: Only available through an active subscription. In addition to commercial support and SLAs for the product, the EE version contains a larger feature set than the CE version.
+
+{{% notice note %}}
+[Get in touch with Kubermatic](mailto:sales@kubermatic.com) to find out more about the KubeLB Enterprise offering.
+{{% /notice %}}
+
+## Feature Matrix
+
+| Feature | EE (Enterprise Edition) | CE (Community Edition) |
+|-------------------------------|--------------------------|-------------------------|
+| Ingress | ✔️ | ✔️ |
+| Gateway API v1 | ✔️ | ✔️ |
+| Bring your own secrets (certificates) | ✔️ | ✔️ |
+| Tunneling support through CLI | ✔️ | ❌ |
+| Gateway API beta/alpha(TLS/TCP/UDP routes) | ✔️ | ❌ |
+| Multiple Gateways | ✔️ | ❌ |
+| DNS automation | ✔️ | ❌ |
+| Certificate Management | ✔️ | ❌ |
+| Limits for LoadBalancers, Gateways | ✔️ | ❌ |
+
+{{% notice note %}}
+KubeLB supports the following products for Ingress and Gateway API resources:
+
+- [Ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources.
+- [Envoy Gateway](https://gateway.envoyproxy.io/) for **Gateway API** resources.
+
+While other products might work for Ingress and Gateway API resources, we do not test them and can't guarantee compatibility.
+{{% /notice %}}
+
+## Support Policy
+
+For the support policy, please refer to the [KubeLB Support Policy](../support-policy/).
diff --git a/content/kubelb/v1.2/cli/_index.en.md b/content/kubelb/v1.2/cli/_index.en.md
new file mode 100644
index 000000000..1058a4084
--- /dev/null
+++ b/content/kubelb/v1.2/cli/_index.en.md
@@ -0,0 +1,62 @@
++++
+title = "KubeLB CLI"
+date = 2025-08-27T10:07:15+02:00
+weight = 30
+description = "Learn how you can use KubeLB CLI to provision Load Balancers and tunnels to expose local workloads"
++++
+
+
+
+## KubeLB CLI
+
+KubeLB CLI is a command line tool introduced to complement KubeLB and make it easier to manage load balancing configurations for multiple tenants in Kubernetes and non-Kubernetes environments.
+
+The source code is open source and available at [kubermatic/kubelb-cli](https://github.com/kubermatic/kubelb-cli).
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and not yet ready for production use. We are actively working on the feature set and incorporating feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+## Installation
+
+### Manual Installation
+
+Users can download the pre-compiled binaries from the [releases page](https://github.com/kubermatic/kubelb-cli/releases) for their system and copy them to the desired location.
+
+{{% notice note %}}
+KubeLB CLI is currently available for Linux, macOS, and Windows.
+{{% /notice %}}
+
+### Install using `go install`
+
+If you have Go installed, you can also build and install the binary from source using the following command:
+
+```bash
+go install github.com/kubermatic/kubelb-cli@v0.1.0
+```
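+
+Afterwards, you can verify the installation (assuming the binary is on your `PATH`):
+
+```bash
+# Prints only the CLI version
+kubelb version --short
+```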
+
+### Configuration
+
+KubeLB CLI needs the tenant-scoped kubeconfig and the tenant name to be configured either via environment variables or CLI flags. Environment variables are preferred as you don't have to specify them for each command.
+
+```bash
+export KUBECONFIG=/path/to/kubeconfig
+export TENANT_NAME=my-tenant
+```
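+
+Alternatively, the same values can be passed as flags on each command, for example:
+
+```bash
+# Per-command equivalent of the environment variables above
+kubelb status --tenant my-tenant --kubeconfig /path/to/kubeconfig
+```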
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
+
+## Further Information
+
+- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
+- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
+
+Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
+
+{{% notice tip %}}
+For the latest updates, follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic)
+{{% /notice %}}
diff --git a/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md b/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md
new file mode 100644
index 000000000..a40e2097b
--- /dev/null
+++ b/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Compatibility Matrix"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+KubeLB CLI uses the Kubernetes management cluster that has KubeLB installed as its source of truth for load balancing configurations.
+
+Since it was introduced alongside KubeLB v1.2, it has a hard dependency on the KubeLB management cluster being at least v1.2.
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and not yet ready for production use. We are actively working on the feature set and incorporating feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+| KubeLB CLI | KubeLB Management Cluster |
+|------------|---------------------------|
+| v0.1.0 | v1.2+ |
+
+## Support Policy
+
+For the support policy, please refer to the [KubeLB Support Policy](../../support-policy/).
diff --git a/content/kubelb/v1.2/cli/loadbalancing/_index.en.md b/content/kubelb/v1.2/cli/loadbalancing/_index.en.md
new file mode 100644
index 000000000..36f51a059
--- /dev/null
+++ b/content/kubelb/v1.2/cli/loadbalancing/_index.en.md
@@ -0,0 +1,36 @@
++++
+title = "Load Balancing"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+KubeLB CLI can be used to quickly provision load balancers that can be public or private, based on your load balancing configuration and needs. KubeLB then takes care of securing your endpoint with TLS certificates, automatically creating DNS records, and managing the load balancing configuration.
+
+## Prerequisites
+
+Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation to configure the Gateway or Ingress to manage DNS for the load balancer.
+
+## Create a Load Balancer
+
+To create a load balancer, you can use the `kubelb loadbalancer create` command.
+
+For example:
+
+```bash
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --hostname my-app.example.com
+```
+
+This will create a LoadBalancer resource that forwards traffic to the endpoints `10.0.1.1:8080` and `10.0.1.2:8080` and is accessible at `https://my-app.example.com`.
+
+Specifying a hostname is optional; if none is provided, KubeLB generates a random hostname for you, provided the wildcard domain is enabled for the tenant or globally.
+
+
+
+## Further actions
+
+Further actions include:
+
+- Updating the load balancer configuration
+- Deleting the load balancer
+- Getting the load balancer details
+- Listing all the load balancers
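+
+Most of these actions map directly to CLI commands documented in the [references](../references/); for example:
+
+```bash
+# Show the complete specification of a load balancer
+kubelb loadbalancer get my-app
+
+# List all load balancers for the tenant
+kubelb loadbalancer list
+
+# Delete a load balancer (--force skips the confirmation prompt)
+kubelb loadbalancer delete my-app
+```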
diff --git a/content/kubelb/v1.2/cli/references/_index.en.md b/content/kubelb/v1.2/cli/references/_index.en.md
new file mode 100644
index 000000000..44f9eae92
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/_index.en.md
@@ -0,0 +1,40 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference of the Kubermatic KubeLB CLI commands and flags.
+
+## kubelb
+
+KubeLB CLI - Manage load balancers and create secure tunnels
+
+### Synopsis
+
+KubeLB CLI provides tools to manage KubeLB load balancers and create secure tunnels
+to expose local services through the KubeLB infrastructure.
+
+### Options
+
+```
+ -h, --help help for kubelb
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](commands/kubelb_completion) - Generate the autocompletion script for the specified shell
+* [kubelb docs](commands/kubelb_docs) - Generate markdown documentation for all commands
+* [kubelb expose](commands/kubelb_expose) - Expose a local port via tunnel
+* [kubelb loadbalancer](commands/kubelb_loadbalancer) - Manage KubeLB load balancers
+* [kubelb status](commands/kubelb_status) - Display current status of KubeLB
+* [kubelb tunnel](commands/kubelb_tunnel) - Manage secure tunnels to expose local services
+* [kubelb version](commands/kubelb_version) - Print the version information
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md
new file mode 100644
index 000000000..2ff39c182
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md
@@ -0,0 +1,41 @@
++++
+title = "kubelb completion"
+date = 2025-08-27T00:00:00+01:00
+weight = 200
++++
+
+## kubelb completion
+
+Generate the autocompletion script for the specified shell
+
+### Synopsis
+
+Generate the autocompletion script for kubelb for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+
+### Options
+
+```
+ -h, --help help for completion
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb completion bash](../kubelb_completion_bash) - Generate the autocompletion script for bash
+* [kubelb completion fish](../kubelb_completion_fish) - Generate the autocompletion script for fish
+* [kubelb completion powershell](../kubelb_completion_powershell) - Generate the autocompletion script for powershell
+* [kubelb completion zsh](../kubelb_completion_zsh) - Generate the autocompletion script for zsh
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md
new file mode 100644
index 000000000..fa713d587
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md
@@ -0,0 +1,60 @@
++++
+title = "kubelb completion bash"
+date = 2025-08-27T00:00:00+01:00
+weight = 210
++++
+
+## kubelb completion bash
+
+Generate the autocompletion script for bash
+
+### Synopsis
+
+Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(kubelb completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux
+
+ kubelb completion bash > /etc/bash_completion.d/kubelb
+
+#### macOS
+
+ kubelb completion bash > $(brew --prefix)/etc/bash_completion.d/kubelb
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion bash
+```
+
+### Options
+
+```
+ -h, --help help for bash
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md
new file mode 100644
index 000000000..81cd45c0b
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md
@@ -0,0 +1,51 @@
++++
+title = "kubelb completion fish"
+date = 2025-08-27T00:00:00+01:00
+weight = 220
++++
+
+## kubelb completion fish
+
+Generate the autocompletion script for fish
+
+### Synopsis
+
+Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ kubelb completion fish | source
+
+To load completions for every new session, execute once:
+
+ kubelb completion fish > ~/.config/fish/completions/kubelb.fish
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion fish [flags]
+```
+
+### Options
+
+```
+ -h, --help help for fish
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md
new file mode 100644
index 000000000..f01116ed0
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md
@@ -0,0 +1,48 @@
++++
+title = "kubelb completion powershell"
+date = 2025-08-27T00:00:00+01:00
+weight = 230
++++
+
+## kubelb completion powershell
+
+Generate the autocompletion script for powershell
+
+### Synopsis
+
+Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ kubelb completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+
+```
+kubelb completion powershell [flags]
+```
+
+### Options
+
+```
+ -h, --help help for powershell
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md
new file mode 100644
index 000000000..4f8ab1f41
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md
@@ -0,0 +1,62 @@
++++
+title = "kubelb completion zsh"
+date = 2025-08-27T00:00:00+01:00
+weight = 240
++++
+
+## kubelb completion zsh
+
+Generate the autocompletion script for zsh
+
+### Synopsis
+
+Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(kubelb completion zsh)
+
+To load completions for every new session, execute once:
+
+#### Linux
+
+ kubelb completion zsh > "${fpath[1]}/_kubelb"
+
+#### macOS
+
+ kubelb completion zsh > $(brew --prefix)/share/zsh/site-functions/_kubelb
+
+You will need to start a new shell for this setup to take effect.
+
+```
+kubelb completion zsh [flags]
+```
+
+### Options
+
+```
+ -h, --help help for zsh
+ --no-descriptions disable completion descriptions
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md b/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md
new file mode 100644
index 000000000..b41a983d4
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md
@@ -0,0 +1,42 @@
++++
+title = "kubelb docs"
+date = 2025-08-27T00:00:00+01:00
+weight = 40
++++
+
+## kubelb docs
+
+Generate markdown documentation for all commands
+
+### Synopsis
+
+Generate markdown documentation for all CLI commands and their parameters.
+This creates individual markdown files for each command with complete usage information.
+
+```
+kubelb docs [flags]
+```
+
+### Options
+
+```
+ -h, --help help for docs
+ -o, --output string Output directory for generated documentation (default "./docs")
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md b/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md
new file mode 100644
index 000000000..6b435de09
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md
@@ -0,0 +1,62 @@
++++
+title = "kubelb expose"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+## kubelb expose
+
+Expose a local port via tunnel
+
+### Synopsis
+
+Expose a local port via secure tunnel with auto-generated name.
+
+This is a convenience command that creates a tunnel with an auto-generated
+name and immediately connects to it.
+
+Examples:
+
+    # Expose port 8080 with auto-generated tunnel name
+    kubelb expose 8080
+
+    # Expose port 3000 with custom hostname
+    kubelb expose 3000 --hostname api.example.com
+
+```
+kubelb expose PORT [flags]
+```
+
+### Examples
+
+```
+kubelb expose 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -h, --help help for expose
+ --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain)
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ --wait Wait for tunnel to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md
new file mode 100644
index 000000000..ea12542a3
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md
@@ -0,0 +1,40 @@
++++
+title = "kubelb loadbalancer"
+date = 2025-08-27T00:00:00+01:00
+weight = 60
++++
+
+## kubelb loadbalancer
+
+Manage KubeLB load balancers
+
+### Synopsis
+
+Manage KubeLB load balancer configurations
+
+### Options
+
+```
+ -h, --help help for loadbalancer
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb loadbalancer create](../kubelb_loadbalancer_create) - Create a load balancer
+* [kubelb loadbalancer delete](../kubelb_loadbalancer_delete) - Delete a load balancer
+* [kubelb loadbalancer get](../kubelb_loadbalancer_get) - Get a load balancer
+* [kubelb loadbalancer list](../kubelb_loadbalancer_list) - List load balancers
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md
new file mode 100644
index 000000000..e542a0a56
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md
@@ -0,0 +1,69 @@
++++
+title = "kubelb loadbalancer create"
+date = 2025-08-27T00:00:00+01:00
+weight = 70
++++
+
+## kubelb loadbalancer create
+
+Create a load balancer
+
+### Synopsis
+
+Create a new HTTP load balancer with the specified endpoints.
+
+The load balancer supports HTTP routing and hostname-based access.
+
+Examples:
+
+    # Create HTTP load balancer with random hostname
+    kubelb lb create my-app --endpoints 10.0.1.1:8080
+
+    # Create HTTP load balancer with custom hostname
+    kubelb lb create my-app --endpoints 10.0.1.1:8080 --hostname app.example.com
+
+    # Create HTTP load balancer without a route
+    kubelb lb create my-app --endpoints 10.0.1.1:8080 --route=false
+
+```
+kubelb loadbalancer create NAME [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -e, --endpoints string Comma-separated list of IP:port pairs (required)
+ -h, --help help for create
+ --hostname string Custom hostname for the route
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ -p, --protocol string Protocol (http only) (default "http")
+ --route Create a route for HTTP traffic (default true)
+ --type string LoadBalancer type (ClusterIP, LoadBalancer), defaults to ClusterIP (default "ClusterIP")
+ --wait Wait for load balancer to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md
new file mode 100644
index 000000000..26535b8fa
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md
@@ -0,0 +1,54 @@
++++
+title = "kubelb loadbalancer delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 90
++++
+
+## kubelb loadbalancer delete
+
+Delete a load balancer
+
+### Synopsis
+
+Delete a load balancer by ID.
+
+This command will:
+- Check if the load balancer was created by the CLI
+- Display a warning if it wasn't created by the CLI
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the load balancer resource
+
+
+```
+kubelb loadbalancer delete ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer delete nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -f, --force Force deletion without confirmation
+ -h, --help help for delete
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md
new file mode 100644
index 000000000..c8259ea3f
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md
@@ -0,0 +1,46 @@
++++
+title = "kubelb loadbalancer get"
+date = 2025-08-27T00:00:00+01:00
+weight = 80
++++
+
+## kubelb loadbalancer get
+
+Get a load balancer
+
+### Synopsis
+
+Retrieve a load balancer by ID and output its complete YAML specification.
+
+```
+kubelb loadbalancer get ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer get nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for get
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md
new file mode 100644
index 000000000..385ca74e8
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb loadbalancer list"
+date = 2025-08-27T00:00:00+01:00
+weight = 85
++++
+
+## kubelb loadbalancer list
+
+List load balancers
+
+### Synopsis
+
+List all load balancers for the tenant.
+
+
+```
+kubelb loadbalancer list [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_status.md b/content/kubelb/v1.2/cli/references/commands/kubelb_status.md
new file mode 100644
index 000000000..b1bebd066
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_status.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb status"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+## kubelb status
+
+Display current status of KubeLB
+
+### Synopsis
+
+Display the current status of KubeLB including version information, configuration, and state
+
+```
+kubelb status [flags]
+```
+
+### Examples
+
+```
+ # Display status for current tenant
+ kubelb status
+```
+
+### Options
+
+```
+ -h, --help help for status
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md
new file mode 100644
index 000000000..89eb79aec
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md
@@ -0,0 +1,41 @@
++++
+title = "kubelb tunnel"
+date = 2025-08-27T00:00:00+01:00
+weight = 100
++++
+
+## kubelb tunnel
+
+Manage secure tunnels to expose local services
+
+### Synopsis
+
+Create and manage secure tunnels to expose local services through the KubeLB infrastructure
+
+### Options
+
+```
+ -h, --help help for tunnel
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb tunnel connect](../kubelb_tunnel_connect) - Connect to an existing tunnel
+* [kubelb tunnel create](../kubelb_tunnel_create) - Create a tunnel
+* [kubelb tunnel delete](../kubelb_tunnel_delete) - Delete a tunnel
+* [kubelb tunnel get](../kubelb_tunnel_get) - Get a tunnel
+* [kubelb tunnel list](../kubelb_tunnel_list) - List tunnels
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md
new file mode 100644
index 000000000..7427539ac
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md
@@ -0,0 +1,50 @@
++++
+title = "kubelb tunnel connect"
+date = 2025-08-27T00:00:00+01:00
+weight = 115
++++
+
+## kubelb tunnel connect
+
+Connect to an existing tunnel
+
+### Synopsis
+
+Connect to an existing tunnel to start forwarding traffic.
+
+This command establishes a secure connection to the tunnel and forwards
+traffic from the tunnel to your local service.
+
+```
+kubelb tunnel connect NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel connect my-app --port 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ -h, --help help for connect
+ -p, --port int Local port to forward to (required)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md
new file mode 100644
index 000000000..bd164bdce
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md
@@ -0,0 +1,64 @@
++++
+title = "kubelb tunnel create"
+date = 2025-08-27T00:00:00+01:00
+weight = 110
++++
+
+## kubelb tunnel create
+
+Create a tunnel
+
+### Synopsis
+
+Create a new secure tunnel to expose a local service.
+
+The tunnel provides secure access to your local service through the KubeLB infrastructure.
+
+Examples:
+
+    # Create tunnel for local app on port 8080
+    kubelb tunnel create my-app --port 8080
+
+    # Create tunnel with custom hostname
+    kubelb tunnel create my-app --port 8080 --hostname app.example.com
+
+    # Create tunnel and connect immediately
+    kubelb tunnel create my-app --port 8080 --connect
+
+
+```
+kubelb tunnel create NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel create my-app --port 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+ --connect Connect to tunnel after creation
+ -h, --help help for create
+ --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain)
+ -o, --output string Output format (summary, yaml, json) (default "summary")
+ -p, --port int Local port to tunnel (required)
+ --wait Wait for tunnel to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md
new file mode 100644
index 000000000..e9a9cee37
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md
@@ -0,0 +1,53 @@
++++
+title = "kubelb tunnel delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 130
++++
+
+## kubelb tunnel delete
+
+Delete a tunnel
+
+### Synopsis
+
+Delete a tunnel by name.
+
+This command will:
+- Check if the tunnel exists
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the tunnel resource
+
+
+```
+kubelb tunnel delete NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel delete my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -f, --force Force deletion without confirmation
+ -h, --help help for delete
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md
new file mode 100644
index 000000000..662ac2f3f
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel get"
+date = 2025-08-27T00:00:00+01:00
+weight = 120
++++
+
+## kubelb tunnel get
+
+Get a tunnel
+
+### Synopsis
+
+Retrieve a tunnel by name and output its complete YAML specification.
+
+
+```
+kubelb tunnel get NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel get my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for get
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md
new file mode 100644
index 000000000..e46291576
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel list"
+date = 2025-08-27T00:00:00+01:00
+weight = 125
++++
+
+## kubelb tunnel list
+
+List tunnels
+
+### Synopsis
+
+List all tunnels for the tenant.
+
+
+```
+kubelb tunnel list [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_version.md b/content/kubelb/v1.2/cli/references/commands/kubelb_version.md
new file mode 100644
index 000000000..3a5a117fa
--- /dev/null
+++ b/content/kubelb/v1.2/cli/references/commands/kubelb_version.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb version"
+date = 2025-08-27T00:00:00+01:00
+weight = 50
++++
+
+## kubelb version
+
+Print the version information
+
+### Synopsis
+
+Print the version information of the KubeLB CLI
+
+```
+kubelb version [flags]
+```
+
+### Examples
+
+```
+kubelb version
+```
+
+### Options
+
+```
+ -h, --help help for version
+ --short Print only the version in short format
+```
+
+### Options inherited from parent commands
+
+```
+ --kubeconfig string Path to the kubeconfig for the tenant
+ --log-file string Log to file instead of stderr
+ --log-format string Log format (cli, json, text) - defaults to cli
+ --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity
+ -q, --quiet Suppress non-essential output (equivalent to --v=0)
+ -t, --tenant string Name of the tenant
+ --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+ -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/v1.2/cli/release-notes/_index.en.md b/content/kubelb/v1.2/cli/release-notes/_index.en.md
new file mode 100644
index 000000000..3a6fbcf49
--- /dev/null
+++ b/content/kubelb/v1.2/cli/release-notes/_index.en.md
@@ -0,0 +1,31 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+
+## Kubermatic KubeLB CLI v0.1.0
+
+- [Kubermatic KubeLB CLI v0.1.0](#kubermatic-kubelb-cli-v010)
+- [v0.1.0](#v010)
+  - [Highlights](#highlights)
+    - [Community Edition (CE)](#community-edition-ce)
+    - [Enterprise Edition (EE)](#enterprise-edition-ee)
+
+## v0.1.0
+
+**GitHub release: [v0.1.0](https://github.com/kubermatic/kubelb-cli/releases/tag/v0.1.0)**
+
+### Highlights
+
+#### Community Edition (CE)
+
+- Support for provisioning load balancers with hostnames. The hostnames are secured with TLS certificates, and the DNS and traffic policies are managed by KubeLB.
+- A status command has been introduced to get the status of the tenant. This includes the load balancer limit, allowed domains, wildcard domain, etc.
+- A version command can be used to get the version of the CLI.
+- Added supply chain security with SBOMs and cosign signatures for the CLI.
+
+#### Enterprise Edition (EE)
+
+- Tunneling has been introduced to allow users to expose locally running applications, on their workstations or inside VMs, over the internet without worrying about firewalls, NAT, DNS, and certificate issues.
diff --git a/content/kubelb/v1.2/cli/tunneling/_index.en.md b/content/kubelb/v1.2/cli/tunneling/_index.en.md
new file mode 100644
index 000000000..329c1ff2d
--- /dev/null
+++ b/content/kubelb/v1.2/cli/tunneling/_index.en.md
@@ -0,0 +1,127 @@
++++
+title = "Tunneling"
+date = 2025-08-27T00:00:00+01:00
+weight = 10
+enterprise = true
++++
+
+Tunneling allows users to tunnel applications running locally on their workstations or inside VMs and expose them over the internet without worrying about firewalls, NAT, DNS, and certificate issues. It is a great way to expose your local services without the complexity of setting up a load balancer and a DNS record yourself.
+
+KubeLB CLI exposes the workload over a secure tunnel with TLS certificates and a DNS record.
+
+These tunnels are designed to be reusable and hence have their own dedicated API type in KubeLB, the `Tunnel` CRD. Once a tunnel is created, it is registered with the KubeLB management cluster and can be connected to using the `kubelb tunnel connect` command.
+
+## Tunnels
+
+### Tunnel Configuration
+
+To enable tunneling, you need to configure the KubeLB management cluster to expose the connection manager API. The `values.yaml` file can be modified like this:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ debug: true
+ envoyProxy:
+ # -- Topology defines the deployment topology for Envoy Proxy. Valid values are: shared, dedicated, and global.
+ topology: shared
+ # -- The number of replicas for the Envoy Proxy deployment.
+ replicas: 1
+ # -- Propagate all annotations from the LB resource to the LB service.
+ propagateAllAnnotations: true
+
+ # Tunnel configuration
+ tunnel:
+ enabled: true
+ connectionManager:
+ httpRoute:
+ enabled: true
+ domain: "connection-manager.example.com"
+ gatewayName: "default"
+ gatewayNamespace: "kubelb"
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: "*.apps.example.com,connection-manager.example.com"
+ external-dns.alpha.kubernetes.io/ttl: "300"
+ cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+ ingress:
+ enabled: false
+ className: "nginx"
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+ external-dns.alpha.kubernetes.io/hostname: connection-manager-ingress.example.com
+ external-dns.alpha.kubernetes.io/ttl: "10"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+ hosts:
+ - host: connection-manager-ingress.example.com
+ paths:
+ - path: /tunnel
+ pathType: Prefix
+ - path: /health
+ pathType: Prefix
+ tls:
+ - secretName: connection-manager-tls
+ hosts:
+ - connection-manager-ingress.example.com
+```
+
+You can use either Ingress or HTTPRoute to expose the connection manager API; Gateway API is the preferred way. In this example, `*.apps.example.com` is used as a wildcard domain for the tunnels, but you can use any other domain you want.
+
+Afterwards, you need to configure the connection manager URL at the Config or Tenant level:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ ingress:
+ class: "nginx"
+ gatewayAPI:
+ class: "eg"
+ loadBalancer:
+ limit: 15
+ certificates:
+ defaultClusterIssuer: letsencrypt-staging-dns
+ tunnel:
+ connectionManagerURL: "https://connection-manager.example.com/"
+```
+
+**NOTE: Apart from this, the Gateway or Ingress should be configured to manage DNS for the tunnel. Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation for more details.**
+
+### Provisioning Tunnels
+
+Tunnels are created using either the `kubelb expose` command or the `kubelb tunnel create` command.
+
+```bash
+kubelb expose 1313
+```
+
+
+
+This will create a tunnel with a generated hostname and forward traffic to port `1313` on the local machine. The ingress point for this traffic is KubeLB's management cluster, so the traffic is secure and encrypted.
+
+An alternative way to create a tunnel is to use the `kubelb tunnel create` command.
+
+```bash
+kubelb tunnel create my-app --port 1313
+```
+
+This will create a tunnel with a generated hostname; the tunnel can then be used through the `kubelb tunnel connect` command.
+
+```bash
+kubelb tunnel connect my-app --port 1313
+```
+
+This will connect to the tunnel and forward traffic to port `1313` on the local machine, with KubeLB's management cluster again acting as the ingress point for the secure, encrypted traffic.
+
+## Further actions
+
+Further actions include:
+
+- Deleting the tunnel
+- Getting the tunnel details
+- Listing all the tunnels
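+
+For example, using the commands documented in the CLI references:
+
+```bash
+# Show the complete specification of a tunnel
+kubelb tunnel get my-app
+
+# List all tunnels for the tenant
+kubelb tunnel list
+
+# Delete a tunnel (--force skips the confirmation prompt)
+kubelb tunnel delete my-app
+```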
+
+For more information, please refer to the [Tunnel API](../../references/api/tunnel/) documentation.
diff --git a/content/kubelb/v1.2/compatibility-matrix/_index.en.md b/content/kubelb/v1.2/compatibility-matrix/_index.en.md
new file mode 100644
index 000000000..11195dcff
--- /dev/null
+++ b/content/kubelb/v1.2/compatibility-matrix/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Compatibility Matrix"
+date = 2024-03-15T00:00:00+01:00
+weight = 30
++++
+
+Currently, we don't have any hard dependencies on specific components or their versions. This matrix is here to reflect the component versions that each KubeLB release is tested against.
+
+We only test our software against specific versions of the components; these versions are not enforced, but they are the ones tested. KubeLB should work with other versions of Kubernetes, Gateway API, and Envoy Gateway as well, but we can't guarantee it.
+
+**KubeLB supports [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for Ingress resources. [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for Gateway API resources. While other products might work for Ingress and Gateway API resources, we do not test them and can't guarantee compatibility.**
+
+| KubeLB | Kubermatic Kubernetes Platform | Gateway API | Envoy Gateway | NGINX Ingress | Kubernetes |
+|--------|-------------------------------|-------------|---------------|-------------------------|------------|
+| v1.2 | v2.27, v2.28 | v1.3.0 | v1.3.0 | v1.10.0+ | v1.27+ |
+| v1.1 | v2.26, v2.27 | v1.1.0 | v1.1.0 | v1.10.0+ | v1.27+ |
+| v1.0 | v2.24, v2.25 | Not Supported | Not Supported | v1.10.0+ | v1.27+ |
+
+## Support Policy
+
+For the support policy, please refer to the [KubeLB Support Policy](../support-policy/).
diff --git a/content/kubelb/v1.2/installation/_index.en.md b/content/kubelb/v1.2/installation/_index.en.md
new file mode 100644
index 000000000..16ff62eab
--- /dev/null
+++ b/content/kubelb/v1.2/installation/_index.en.md
@@ -0,0 +1,16 @@
++++
+title = "Installation"
+date = 2018-04-28T12:07:15+02:00
+weight = 15
++++
+
+This chapter offers guidance on how to install KubeLB and set up the tenant and management clusters.
+
+{{% notice tip %}}
+It is also recommended to first make yourself familiar with our [architecture documentation]({{< ref "../architecture/" >}}).
+{{% /notice %}}
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/installation/management-cluster/_index.en.md b/content/kubelb/v1.2/installation/management-cluster/_index.en.md
new file mode 100644
index 000000000..6a37e7052
--- /dev/null
+++ b/content/kubelb/v1.2/installation/management-cluster/_index.en.md
@@ -0,0 +1,314 @@
++++
+title = "Install KubeLB Manager and setup Management Cluster"
+linkTitle = "Setup Management Cluster"
+date = 2023-10-27T10:07:15+02:00
+weight = 20
++++
+
+## Requirements
+
+* Service type `LoadBalancer` implementation. This can be a cloud solution or a self-managed implementation like [MetalLB](https://metallb.universe.tf).
+* Network access to the tenant cluster nodes on the node port range (default: 30000-32767). This is required for the Envoy proxy to be able to connect to the tenant cluster nodes.
+
+## Installation of the KubeLB manager
+
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. The Gateway API CRDs need to be installed for this; with the flag enabled but the CRDs missing, KubeLB will not be able to start. {{% /notice %}}
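+
+A minimal sketch of the corresponding `values.yaml` change:
+
+```yaml
+kubelb:
+  # Enable the Gateway API and Gateway controllers.
+  # Requires the Gateway API CRDs to be installed in the cluster.
+  enableGatewayAPI: true
+```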
+
+{{< tabs name="KubeLB Manager" >}}
+{{% tab name="Enterprise Edition" %}}
+
+### Prerequisites
+
+* Create a namespace **kubelb** for the KubeLB manager to be deployed in.
+* Create **imagePullSecrets** for the chart to pull the image from the registry in the kubelb namespace.
+
+At this point a minimal `values.yaml` should look like this:
+
+```yaml
+imagePullSecrets:
+ - name:
+```
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager-ee/crds/
+## Create and update values.yaml with the required values.
+helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f kubelb-manager-ee/values.yaml --create-namespace
+```
+
+### KubeLB Manager EE Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| autoscaling.enabled | bool | `false` | |
+| autoscaling.maxReplicas | int | `10` | |
+| autoscaling.minReplicas | int | `1` | |
+| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
+| autoscaling.targetMemoryUtilizationPercentage | int | `80` | |
+| cert-manager.enabled | bool | `false` | Enable cert-manager. |
+| external-dns.enabled | bool | `false` | Enable External-DNS. |
+| fullnameOverride | string | `""` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"quay.io/kubermatic/kubelb-manager-ee"` | |
+| image.tag | string | `"v1.2.0"` | |
+| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | |
+| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. |
+| kubelb.debug | bool | `true` | |
+| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
+| kubelb.enableLeaderElection | bool | `true` | |
+| kubelb.enableTenantMigration | bool | `true` | |
+| kubelb.envoyProxy.affinity | object | `{}` | |
+| kubelb.envoyProxy.nodeSelector | object | `{}` | |
+| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. |
+| kubelb.envoyProxy.resources | object | `{}` | |
+| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. |
+| kubelb.envoyProxy.tolerations | list | `[]` | |
+| kubelb.envoyProxy.topology | string | `"shared"` | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global. |
+| kubelb.envoyProxy.useDaemonset | bool | `false` | Use DaemonSet for Envoy Proxy deployment instead of Deployment. |
+| kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. |
+| kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. |
+| kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. |
+| kubelb.tunnel.connectionManager.affinity | object | `{}` | |
+| kubelb.tunnel.connectionManager.healthCheck.enabled | bool | `true` | |
+| kubelb.tunnel.connectionManager.healthCheck.livenessInitialDelay | int | `30` | |
+| kubelb.tunnel.connectionManager.healthCheck.readinessInitialDelay | int | `10` | |
+| kubelb.tunnel.connectionManager.httpAddr | string | `":8080"` | Server addresses |
+| kubelb.tunnel.connectionManager.httpRoute.annotations | object | `{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}"}` | Annotations for HTTPRoute |
+| kubelb.tunnel.connectionManager.httpRoute.domain | string | `"connection-manager.${DOMAIN}"` | Domain for the HTTPRoute NOTE: Replace ${DOMAIN} with your domain name. |
+| kubelb.tunnel.connectionManager.httpRoute.enabled | bool | `false` | |
+| kubelb.tunnel.connectionManager.httpRoute.gatewayName | string | `"gateway"` | Gateway name to attach to |
+| kubelb.tunnel.connectionManager.httpRoute.gatewayNamespace | string | `""` | Gateway namespace |
+| kubelb.tunnel.connectionManager.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/kubermatic/kubelb-connection-manager-ee","tag":""}` | Connection manager image configuration |
+| kubelb.tunnel.connectionManager.ingress | object | `{"annotations":{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}","nginx.ingress.kubernetes.io/backend-protocol":"HTTP","nginx.ingress.kubernetes.io/proxy-read-timeout":"3600","nginx.ingress.kubernetes.io/proxy-send-timeout":"3600"},"className":"nginx","enabled":false,"hosts":[{"host":"connection-manager.${DOMAIN}","paths":[{"path":"/tunnel","pathType":"Prefix"},{"path":"/health","pathType":"Prefix"}]}],"tls":[{"hosts":["connection-manager.${DOMAIN}"],"secretName":"connection-manager-tls"}]}` | Ingress configuration for external HTTP/2 access |
+| kubelb.tunnel.connectionManager.nodeSelector | object | `{}` | |
+| kubelb.tunnel.connectionManager.podAnnotations | object | `{}` | Pod configuration |
+| kubelb.tunnel.connectionManager.podLabels | object | `{}` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.fsGroup | int | `65534` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.runAsNonRoot | bool | `true` | |
+| kubelb.tunnel.connectionManager.podSecurityContext.runAsUser | int | `65534` | |
+| kubelb.tunnel.connectionManager.replicaCount | int | `1` | Number of connection manager replicas |
+| kubelb.tunnel.connectionManager.requestTimeout | string | `"30s"` | |
+| kubelb.tunnel.connectionManager.resources | object | `{"limits":{"cpu":"500m","memory":"256Mi"},"requests":{"cpu":"250m","memory":"128Mi"}}` | Resource limits |
+| kubelb.tunnel.connectionManager.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":65534}` | Security context |
+| kubelb.tunnel.connectionManager.service | object | `{"httpPort":8080,"type":"ClusterIP"}` | Service configuration |
+| kubelb.tunnel.connectionManager.tolerations | list | `[]` | |
+| kubelb.tunnel.enabled | bool | `false` | Enable tunnel functionality |
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | |
+| podLabels | object | `{}` | |
+| podSecurityContext.runAsNonRoot | bool | `true` | |
+| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| rbac.allowLeaderElectionRole | bool | `true` | |
+| rbac.allowMetricsReaderRole | bool | `true` | |
+| rbac.allowProxyRole | bool | `true` | |
+| rbac.enabled | bool | `true` | |
+| replicaCount | int | `1` | |
+| resources.limits.cpu | string | `"500m"` | |
+| resources.limits.memory | string | `"512Mi"` | |
+| resources.requests.cpu | string | `"100m"` | |
+| resources.requests.memory | string | `"128Mi"` | |
+| securityContext.allowPrivilegeEscalation | bool | `false` | |
+| securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| securityContext.runAsUser | int | `65532` | |
+| service.port | int | `8001` | |
+| service.protocol | string | `"TCP"` | |
+| service.type | string | `"ClusterIP"` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| tolerations | list | `[]` | |
+
+{{% /tab %}}
+{{% tab name="Community Edition" %}}
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager/crds/
+## Create and update values.yaml with the required values.
+helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubelb-manager/values.yaml --create-namespace
+```
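+
+To quickly verify the installation, check that the manager pods become ready. A minimal sanity check, assuming the `kubelb` namespace used above:
+
+```sh
+# All manager pods should reach the Ready state
+kubectl get pods -n kubelb
+kubectl wait --for=condition=Ready pods --all -n kubelb --timeout=120s
+```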
+
+### KubeLB Manager CE Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| autoscaling.enabled | bool | `false` | |
+| autoscaling.maxReplicas | int | `10` | |
+| autoscaling.minReplicas | int | `1` | |
+| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
+| autoscaling.targetMemoryUtilizationPercentage | int | `80` | |
+| fullnameOverride | string | `""` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"quay.io/kubermatic/kubelb-manager"` | |
+| image.tag | string | `"v1.2.0"` | |
+| imagePullSecrets | list | `[]` | |
+| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. |
+| kubelb.debug | bool | `true` | |
+| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
+| kubelb.enableLeaderElection | bool | `true` | |
+| kubelb.enableTenantMigration | bool | `true` | |
+| kubelb.envoyProxy.affinity | object | `{}` | |
+| kubelb.envoyProxy.nodeSelector | object | `{}` | |
+| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. |
+| kubelb.envoyProxy.resources | object | `{}` | |
+| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. |
+| kubelb.envoyProxy.tolerations | list | `[]` | |
+| kubelb.envoyProxy.topology | string | `"shared"` | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global. |
+| kubelb.envoyProxy.useDaemonset | bool | `false` | Use DaemonSet for Envoy Proxy deployment instead of Deployment. |
+| kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. |
+| kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. |
+| kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. |
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | |
+| podLabels | object | `{}` | |
+| podSecurityContext.runAsNonRoot | bool | `true` | |
+| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| rbac.allowLeaderElectionRole | bool | `true` | |
+| rbac.allowMetricsReaderRole | bool | `true` | |
+| rbac.allowProxyRole | bool | `true` | |
+| rbac.enabled | bool | `true` | |
+| replicaCount | int | `1` | |
+| resources.limits.cpu | string | `"500m"` | |
+| resources.limits.memory | string | `"512Mi"` | |
+| resources.requests.cpu | string | `"100m"` | |
+| resources.requests.memory | string | `"128Mi"` | |
+| securityContext.allowPrivilegeEscalation | bool | `false` | |
+| securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| securityContext.runAsUser | int | `65532` | |
+| service.port | int | `8001` | |
+| service.protocol | string | `"TCP"` | |
+| service.type | string | `"ClusterIP"` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| tolerations | list | `[]` | |
+
+{{% /tab %}}
+{{< /tabs >}}
+
+## Setup the management cluster
+
+{{% notice note %}}
+The examples and tools shared below are for demonstration purposes; you can use any other tools or configurations as per your requirements.
+{{% /notice %}}
+
+The management cluster acts as the dataplane and central control plane for all your load balancing configurations. It is where all the components required for Layer 4 and Layer 7 load balancing, AI Gateways, MCP Gateways, Agent2Agent Gateways, API Gateways, etc. are deployed. The management cluster is multi-tenant by design, which makes it a perfect fit for managing a fleet of clusters in a scalable, robust, and secure way.
+
+KubeLB has introduced an addons chart to simplify the installation of the required components for the management cluster. The chart is already part of the KubeLB manager chart and can be installed by setting `kubelb-addons.enabled` to `true` in the `values.yaml`:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ debug: true
+
+## Addon configuration
+kubelb-addons:
+ enabled: true
+
+ gatewayClass:
+ create: true
+
+ # Ingress Nginx
+ ingress-nginx:
+ enabled: false
+ controller:
+ service:
+ externalTrafficPolicy: Local
+
+ # Envoy Gateway
+ envoy-gateway:
+ enabled: true
+
+ # Cert Manager
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: true
+
+ # External DNS
+ external-dns:
+ domainFilters:
+ - example.com
+ extraVolumes:
+ - name: credentials
+ secret:
+ secretName: route53-credentials
+ extraVolumeMounts:
+ - name: credentials
+ mountPath: /.aws
+ readOnly: true
+ env:
+ - name: AWS_SHARED_CREDENTIALS_FILE
+ value: /.aws/credentials
+ txtOwnerId: kubelb-example-aws
+ registry: txt
+ provider: aws
+ policy: sync
+ sources:
+ - service
+ - ingress
+ - gateway-httproute
+ - gateway-grpcroute
+ - gateway-tlsroute
+ - gateway-tcproute
+ - gateway-udproute
+
+ ## AI and Agent2Agent Gateways Integration
+ # KGateway CRDs
+ kgateway-crds:
+ enabled: true
+
+ # KGateway
+ kgateway:
+ enabled: true
+ gateway:
+ aiExtension:
+ enabled: true
+ agentgateway:
+ enabled: true
+
+```
+
+### TCP/UDP Load Balancing (Layer 4)
+
+Refer to [Layer 4 Load Balancing Setup]({{< relref "../../tutorials/loadbalancer#setup" >}}) for more details.
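+
+As a quick illustration, a workload in the tenant cluster is exposed through KubeLB with a regular Service of type `LoadBalancer`. The sketch below is a hypothetical example that assumes a `my-app` Deployment listening on port 8080; the `loadBalancerClass` field is only needed when `kubelb.useLoadBalancerClass` is enabled in the CCM values:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app
+spec:
+  type: LoadBalancer
+  # Only required when kubelb.useLoadBalancerClass is set to true in the CCM.
+  loadBalancerClass: kubelb
+  selector:
+    app: my-app
+  ports:
+    - port: 80
+      targetPort: 8080
+      protocol: TCP
+```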
+
+### Application Layer Load Balancing (Layer 7)
+
+For application layer load balancing, **KubeLB supports both Ingress and Gateway API resources**.
+
+Our default recommendation is to use Gateway API, with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Most of the current and upcoming features that KubeLB focuses on will prioritize Gateway API over Ingress, with Envoy Gateway being the implementation that we actively support, test, and base our features on.
+
+While KubeLB supports integration with any Ingress or Gateway API implementation, the only limitation is that we support only the native Kubernetes APIs, i.e. Ingress and the Gateway API. Provider-specific APIs are not supported by KubeLB and will be completely ignored. Also, since we only test KubeLB with Envoy Gateway and Nginx Ingress, we can't guarantee compatibility with other Gateway API or Ingress implementations.
+
+#### Ingress
+
+Refer to [Ingress Setup]({{< relref "../../tutorials/ingress#setup" >}}) for more details.
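+
+As a minimal sketch, a standard Ingress object in the tenant cluster is picked up by the KubeLB CCM. The hostname and service name below are placeholders; the `kubelb` ingress class is only matched when `kubelb.useIngressClass` is enabled (the default):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-app
+spec:
+  # Matched by the KubeLB CCM when kubelb.useIngressClass is enabled.
+  ingressClassName: kubelb
+  rules:
+    - host: app.example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: my-app
+                port:
+                  number: 80
+```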
+
+#### Gateway API
+
+Refer to [Gateway API Setup]({{< relref "../../tutorials/gatewayapi#setup" >}}) for more details.
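+
+Similarly, here is a hypothetical sketch of a Gateway and HTTPRoute pair in the tenant cluster; the `kubelb` gateway class is only matched when `kubelb.useGatewayClass` is enabled (the default):
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: my-gateway
+spec:
+  # Matched by the KubeLB CCM when kubelb.useGatewayClass is enabled.
+  gatewayClassName: kubelb
+  listeners:
+    - name: http
+      protocol: HTTP
+      port: 80
+---
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: my-app
+spec:
+  parentRefs:
+    - name: my-gateway
+  hostnames:
+    - app.example.com
+  rules:
+    - backendRefs:
+        - name: my-app
+          port: 80
+```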
+
+### Certificate Management (Enterprise Edition)
+
+Refer to [Certificate Management Setup]({{< relref "../../tutorials/security/cert-management#setup" >}}) for more details.
+
+### DNS Management (Enterprise Edition)
+
+Refer to [DNS Management Setup]({{< relref "../../tutorials/security/dns#setup" >}}) for more details.
diff --git a/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md b/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md
new file mode 100644
index 000000000..8b9971cb8
--- /dev/null
+++ b/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md
@@ -0,0 +1,296 @@
++++
+title = "Install KubeLB CCM and setup Tenant Cluster"
+linkTitle = "Setup Tenant Cluster"
+date = 2023-10-27T10:07:15+02:00
+weight = 20
++++
+
+## Requirements
+
+* Access to the Kubernetes API of the KubeLB management cluster.
+* Registered as a tenant in the KubeLB management cluster.
+
+## Pre-requisites
+
+* Create a namespace **kubelb** for the CCM to be deployed in.
+* The agent expects a **Secret** containing a kubeconfig under the key **`kubelb`** to access the management/load balancing cluster.
+  * First, register the tenant in the LB cluster by following the [tenant registration]({{< relref "../../tutorials/tenants">}}) guidelines.
+  * Fetch the generated kubeconfig from the management cluster and create a secret in the tenant cluster by using these commands:
+
+ ```sh
+ # Replace with the tenant cluster kubeconfig path
+ TENANT_KUBECONFIG=~/.kube/
+ # Replace with the tenant name
+ TENANT_NAME=tenant-shroud
+ KUBELB_KUBECONFIG=$(kubectl get secret kubelb-ccm-kubeconfig -n $TENANT_NAME --template={{.data.kubelb}})
+ # At this point we have the kubeconfig in base64 encoded format.
+ # Switch the context to the Tenant cluster
+ export KUBECONFIG=$TENANT_KUBECONFIG
+ kubectl --namespace kubelb create secret generic kubelb-cluster --from-literal=kubelb="$(echo $KUBELB_KUBECONFIG | base64 -d)"
+ ```
+
+* The name of the secret can be overridden using `.Values.kubelb.clusterSecretName`, if required. Otherwise, the secret needs to be named `kubelb-cluster` and look like this:
+
+  ```sh
+ kubectl get secrets -o yaml kubelb-cluster
+ ```
+
+ ```
+ apiVersion: v1
+ data:
+ kubelb: xxx-base64-encoded-xxx
+ kind: Secret
+ metadata:
+ name: kubelb-cluster
+ namespace: kubelb
+ type: Opaque
+ ```
+
+* Update the `tenantName` in the `values.yaml` to a unique identifier for the tenant. This is used to identify the tenant in the manager cluster. Tenants are registered in the management cluster by the platform provider, and the name is prefixed with `tenant-`. So, for example, a tenant named `my-tenant` will be registered as `tenant-my-tenant`. **NOTE: We have automation in place, so tenant names both with and without the `tenant-` prefix are supported.**
+
+At this point a minimal `values.yaml` should look like this:
+
+```yaml
+kubelb:
+ clusterSecretName: kubelb-cluster
+ tenantName:
+```
+
+{{% notice info %}}
+
+**Important configurations for private clusters!**
+If your cluster only uses internal IPs for its nodes (check the following example output), you need to change the value of `kubelb.nodeAddressType` to `InternalIP`:
+
+```bash
+kubectl get nodes -o wide
+```
+
+```
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+node-x Ready control-plane 208d v1.29.9 10.66.99.222 Ubuntu 5.15.0-121-generic containerd://1.6.33
+```
+
+Adjust `values.yaml`:
+
+```yaml
+kubelb:
+ # -- Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP.
+ nodeAddressType: InternalIP
+```
+
+{{% /notice %}}
+
+## Installation for KubeLB CCM
+
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, please set the following fields in the `values.yaml`. This is required because, without the Gateway API CRDs installed, KubeLB will not be able to start.
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+ installGatewayAPICRDs: true
+```
+
+{{% /notice %}}
+
+{{< tabs name="KubeLB CCM" >}}
+{{% tab name="Enterprise Edition" %}}
+
+### Prerequisites
+
+* Create a namespace **kubelb** for the CCM to be deployed in.
+* Create **imagePullSecrets** in the kubelb namespace so that the chart can pull the image from the registry.
+
+At this point, a minimal `values.yaml` should look like this:
+
+```yaml
+imagePullSecrets:
+ - name:
+kubelb:
+ clusterSecretName: kubelb-cluster
+ tenantName:
+```
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm-ee/crds/
+## Create and update values.yaml with the required values.
+helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm-ee/values.yaml --create-namespace
+```
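+
+Once the chart is installed, you can verify that the CCM is up and connected to the management cluster. A minimal check, assuming the `kubelb` namespace used above (the label selector follows common Helm chart conventions and may differ in your setup):
+
+```sh
+kubectl get pods -n kubelb
+# The CCM logs should show a successful connection to the management cluster.
+kubectl logs -n kubelb -l app.kubernetes.io/name=kubelb-ccm-ee --tail=50
+```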
+
+### KubeLB CCM EE Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| autoscaling.enabled | bool | `false` | |
+| autoscaling.maxReplicas | int | `10` | |
+| autoscaling.minReplicas | int | `1` | |
+| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
+| autoscaling.targetMemoryUtilizationPercentage | int | `80` | |
+| extraVolumeMounts | list | `[]` | |
+| extraVolumes | list | `[]` | |
+| fullnameOverride | string | `""` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"quay.io/kubermatic/kubelb-ccm-ee"` | |
+| image.tag | string | `"v1.2.0"` | |
+| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | |
+| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
+| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. |
+| kubelb.disableGatewayController | bool | `false` | disableGatewayController specifies whether to disable the Gateway Controller. |
+| kubelb.disableHTTPRouteController | bool | `false` | disableHTTPRouteController specifies whether to disable the HTTPRoute Controller. |
+| kubelb.disableIngressController | bool | `false` | disableIngressController specifies whether to disable the Ingress Controller. |
+| kubelb.disableTCPRouteController | bool | `false` | disableTCPRouteController specifies whether to disable the TCPRoute Controller. |
+| kubelb.disableTLSRouteController | bool | `false` | disableTLSRouteController specifies whether to disable the TLSRoute Controller. |
+| kubelb.disableUDPRouteController | bool | `false` | disableUDPRouteController specifies whether to disable the UDPRoute Controller. |
+| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
+| kubelb.enableLeaderElection | bool | `true` | Enable the leader election. |
+| kubelb.enableSecretSynchronizer | bool | `false` | Enable to automatically convert Secrets labelled with `kubelb.k8c.io/managed-by: kubelb` to Sync Secrets. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. |
+| kubelb.gatewayAPICRDsChannel | string | `"experimental"` | gatewayAPICRDsChannel specifies the channel for the Gateway API CRDs. Options are `standard` and `experimental`. |
+| kubelb.installGatewayAPICRDs | bool | `false` | installGatewayAPICRDs Installs and manages the Gateway API CRDs using gateway crd controller. |
+| kubelb.nodeAddressType | string | `"ExternalIP"` | Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP. |
+| kubelb.tenantName | string | `nil` | Name of the tenant, must be unique against a load balancer cluster. |
+| kubelb.useGatewayClass | bool | `true` | useGatewayClass specifies whether to target resources with `kubelb` gateway class or all resources. |
+| kubelb.useIngressClass | bool | `true` | useIngressClass specifies whether to target resources with `kubelb` ingress class or all resources. |
+| kubelb.useLoadBalancerClass | bool | `false` | useLoadBalancerClass specifies whether to target services of type LoadBalancer with `kubelb` load balancer class or all services of type LoadBalancer. |
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | |
+| podLabels | object | `{}` | |
+| podSecurityContext.runAsNonRoot | bool | `true` | |
+| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| rbac.allowLeaderElectionRole | bool | `true` | |
+| rbac.allowMetricsReaderRole | bool | `true` | |
+| rbac.allowProxyRole | bool | `true` | |
+| rbac.enabled | bool | `true` | |
+| replicaCount | int | `1` | |
+| resources.limits.cpu | string | `"500m"` | |
+| resources.limits.memory | string | `"512Mi"` | |
+| resources.requests.cpu | string | `"100m"` | |
+| resources.requests.memory | string | `"128Mi"` | |
+| securityContext.allowPrivilegeEscalation | bool | `false` | |
+| securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| securityContext.runAsUser | int | `65532` | |
+| service.port | int | `8443` | |
+| service.protocol | string | `"TCP"` | |
+| service.type | string | `"ClusterIP"` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| tolerations | list | `[]` | |
+
+{{% /tab %}}
+{{% tab name="Community Edition" %}}
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm/crds/
+## Create and update values.yaml with the required values.
+helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/values.yaml --create-namespace
+```
+
+### KubeLB CCM Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| autoscaling.enabled | bool | `false` | |
+| autoscaling.maxReplicas | int | `10` | |
+| autoscaling.minReplicas | int | `1` | |
+| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
+| autoscaling.targetMemoryUtilizationPercentage | int | `80` | |
+| extraVolumeMounts | list | `[]` | |
+| extraVolumes | list | `[]` | |
+| fullnameOverride | string | `""` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"quay.io/kubermatic/kubelb-ccm"` | |
+| image.tag | string | `"v1.2.0"` | |
+| imagePullSecrets | list | `[]` | |
+| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
+| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. |
+| kubelb.disableGatewayController | bool | `false` | disableGatewayController specifies whether to disable the Gateway Controller. |
+| kubelb.disableHTTPRouteController | bool | `false` | disableHTTPRouteController specifies whether to disable the HTTPRoute Controller. |
+| kubelb.disableIngressController | bool | `false` | disableIngressController specifies whether to disable the Ingress Controller. |
+| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
+| kubelb.enableLeaderElection | bool | `true` | Enable the leader election. |
+| kubelb.enableSecretSynchronizer | bool | `false` | Enable to automatically convert Secrets labelled with `kubelb.k8c.io/managed-by: kubelb` to Sync Secrets. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. |
+| kubelb.gatewayAPICRDsChannel | string | `"standard"` | gatewayAPICRDsChannel specifies the channel for the Gateway API CRDs. Options are `standard` and `experimental`. |
+| kubelb.installGatewayAPICRDs | bool | `false` | installGatewayAPICRDs Installs and manages the Gateway API CRDs using gateway crd controller. |
+| kubelb.nodeAddressType | string | `"ExternalIP"` | Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP. |
+| kubelb.tenantName | string | `nil` | Name of the tenant, must be unique against a load balancer cluster. |
+| kubelb.useGatewayClass | bool | `true` | useGatewayClass specifies whether to target resources with `kubelb` gateway class or all resources. |
+| kubelb.useIngressClass | bool | `true` | useIngressClass specifies whether to target resources with `kubelb` ingress class or all resources. |
+| kubelb.useLoadBalancerClass | bool | `false` | useLoadBalancerClass specifies whether to target services of type LoadBalancer with `kubelb` load balancer class or all services of type LoadBalancer. |
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | |
+| podLabels | object | `{}` | |
+| podSecurityContext.runAsNonRoot | bool | `true` | |
+| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| rbac.allowLeaderElectionRole | bool | `true` | |
+| rbac.allowMetricsReaderRole | bool | `true` | |
+| rbac.allowProxyRole | bool | `true` | |
+| rbac.enabled | bool | `true` | |
+| replicaCount | int | `1` | |
+| resources.limits.cpu | string | `"500m"` | |
+| resources.limits.memory | string | `"512Mi"` | |
+| resources.requests.cpu | string | `"100m"` | |
+| resources.requests.memory | string | `"128Mi"` | |
+| securityContext.allowPrivilegeEscalation | bool | `false` | |
+| securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| securityContext.runAsUser | int | `65532` | |
+| service.port | int | `8443` | |
+| service.protocol | string | `"TCP"` | |
+| service.type | string | `"ClusterIP"` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| tolerations | list | `[]` | |
+
+{{% /tab %}}
+{{< /tabs >}}
+
+## Setup the tenant cluster
+
+### Install Gateway API CRDs
+
+Starting from KubeLB v1.2.0, the Gateway API CRDs can be installed using the `installGatewayAPICRDs` flag.
+
+{{< tabs name="Gateway APIs" >}}
+{{% tab name="Enterprise Edition" %}}
+
+```yaml
+imagePullSecrets:
+ - name:
+kubelb:
+ clusterSecretName: kubelb-cluster
+ tenantName:
+ # This will install the experimental channel of the Gateway API CRDs
+ installGatewayAPICRDs: true
+ enableGatewayAPI: true
+```
+
+For more details: [Experimental Install](https://gateway-api.sigs.k8s.io/guides/#install-experimental-channel)
+{{% /tab %}}
+{{% tab name="Community Edition" %}}
+
+```yaml
+kubelb:
+ clusterSecretName: kubelb-cluster
+ tenantName:
+ # This will install the standard channel of the Gateway API CRDs
+ installGatewayAPICRDs: true
+ enableGatewayAPI: true
+```
+
+For more details: [Standard Install](https://gateway-api.sigs.k8s.io/guides/#install-standard-channel)
+
+{{% /tab %}}
+{{< /tabs >}}
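+
+After the CCM rolls out with these values, you can verify that the Gateway API CRDs have been installed by the CRD controller; a minimal check:
+
+```sh
+kubectl get crds | grep gateway.networking.k8s.io
+```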
diff --git a/content/kubelb/v1.2/references/_index.en.md b/content/kubelb/v1.2/references/_index.en.md
new file mode 100644
index 000000000..fa3b4e4f7
--- /dev/null
+++ b/content/kubelb/v1.2/references/_index.en.md
@@ -0,0 +1,12 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference for the Kubermatic KubeLB Custom Resource Definitions.
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/references/ce/_index.en.md b/content/kubelb/v1.2/references/ce/_index.en.md
new file mode 100644
index 000000000..3c2159c27
--- /dev/null
+++ b/content/kubelb/v1.2/references/ce/_index.en.md
@@ -0,0 +1,748 @@
++++
+title = "KubeLB Community Edition CRD References"
+linkTitle = "Community Edition"
+date = 2024-03-06T12:00:00+02:00
+weight = 60
++++
+
+**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ce/kubelb.k8c.io/v1alpha1)**
+
+## Packages
+
+- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1)
+
+## kubelb.k8c.io/v1alpha1
+
+Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 API group
+
+### Resource Types
+
+- [Addresses](#addresses)
+- [AddressesList](#addresseslist)
+- [Config](#config)
+- [ConfigList](#configlist)
+- [LoadBalancer](#loadbalancer)
+- [LoadBalancerList](#loadbalancerlist)
+- [Route](#route)
+- [RouteList](#routelist)
+- [SyncSecret](#syncsecret)
+- [SyncSecretList](#syncsecretlist)
+- [Tenant](#tenant)
+- [TenantList](#tenantlist)
+- [TenantState](#tenantstate)
+- [TenantStateList](#tenantstatelist)
+
+#### Addresses
+
+Addresses is the Schema for the addresses API
+
+_Appears in:_
+
+- [AddressesList](#addresseslist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Addresses` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[AddressesSpec](#addressesspec)_ | | | |
+| `status` _[AddressesStatus](#addressesstatus)_ | | | |
+
+#### AddressesList
+
+AddressesList contains a list of Addresses
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `AddressesList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Addresses](#addresses) array_ | | | |
+
+#### AddressesSpec
+
+AddressesSpec defines the desired state of Addresses
+
+_Appears in:_
+
+- [Addresses](#addresses)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `addresses` _[EndpointAddress](#endpointaddress) array_ | Addresses contains a list of addresses. | | MinItems: 1 |
+
+#### AddressesStatus
+
+AddressesStatus defines the observed state of Addresses
+
+_Appears in:_
+
+- [Addresses](#addresses)
+
+#### AnnotatedResource
+
+_Underlying type:_ _string_
+
+_Validation:_
+
+- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute]
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description |
+| --- | --- |
+| `all` | |
+| `service` | |
+| `ingress` | |
+| `gateway` | |
+| `httproute` | |
+| `grpcroute` | |
+| `tcproute` | |
+| `udproute` | |
+| `tlsroute` | |
+
+#### AnnotationSettings
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored. Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same set of annotations to all resources. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+
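+For illustration, a hypothetical `Config` object using these annotation settings might look as follows; the object name, namespace, and annotation keys are assumptions for the sketch:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  # Only these annotation keys may be propagated; an empty value allows any value.
+  propagatedAnnotations:
+    metallb.universe.tf/allow-shared-ip: ""
+  # Applied to all generated load balancing resources when not already present.
+  defaultAnnotations:
+    all:
+      example.com/owner: platform-team
+```
+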
+#### Annotations
+
+_Underlying type:_ _object_
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+#### CertificatesSettings
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is only used for load balancer hostname. | | |
+
+#### Config
+
+Config is the object that represents the Config for the KubeLB management controller.
+
+_Appears in:_
+
+- [ConfigList](#configlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Config` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ConfigSpec](#configspec)_ | | | |
+
+#### ConfigDNSSettings
+
+ConfigDNSSettings defines the global settings for DNS management and automation.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources. This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+
+#### ConfigList
+
+ConfigList contains a list of Config
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `ConfigList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Config](#config) array_ | | | |
+
+#### ConfigSpec
+
+ConfigSpec defines the desired state of the Config
+
+_Appears in:_
+
+- [Config](#config)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored. Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same set of annotations to all resources. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | |
+| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
+| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
+| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | |
+| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
+
+#### DNSSettings
+
+DNSSettings defines the settings for DNS management and automation.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources. This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources. This is only used when LoadBalancer.Spec.Hostname is set. | | |
+
+#### EndpointAddress
+
+EndpointAddress is a tuple that describes single IP address.
+
+_Appears in:_
+
+- [AddressesSpec](#addressesspec)
+- [LoadBalancerEndpoints](#loadbalancerendpoints)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `ip` _string_ | The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast (224.0.0.0/24). | | MinLength: 7 |
+| `hostname` _string_ | The Hostname of this endpoint | | |
+
+#### EndpointPort
+
+EndpointPort is a tuple that describes a single port.
+
+_Appears in:_
+
+- [LoadBalancerEndpoints](#loadbalancerendpoints)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. | | |
+| `port` _integer_ | The port number of the endpoint. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP] |
+
+#### EnvoyProxy
+
+EnvoyProxy defines the desired state of the EnvoyProxy
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `topology` _[EnvoyProxyTopology](#envoyproxytopology)_ | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global. DEPRECATION NOTICE: The value "dedicated" is deprecated and will be removed in a future release. Dedicated topology will now default to shared topology. | shared | Enum: [shared dedicated global] |
+| `useDaemonset` _boolean_ | UseDaemonset defines whether Envoy Proxy will run as daemonset. By default, Envoy Proxy will run as deployment. If set to true, Replicas will be ignored. | | |
+| `replicas` _integer_ | Replicas defines the number of replicas for Envoy Proxy. This field is ignored if UseDaemonset is set to true. | 3 | Minimum: 1 |
+| `singlePodPerNode` _boolean_ | SinglePodPerNode defines whether Envoy Proxy pods will be spread across nodes. This ensures that multiple replicas are not running on the same node. | | |
+| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is used to select nodes to run Envoy Proxy. If specified, the node must have all the indicated labels. | | |
+| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | Tolerations is used to schedule Envoy Proxy pods on nodes with matching taints. | | |
+| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | Resources defines the resource requirements for Envoy Proxy. | | |
+| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#affinity-v1-core)_ | Affinity is used to schedule Envoy Proxy pods on nodes with matching affinity. | | |
+
+#### EnvoyProxyTopology
+
+_Underlying type:_ _string_
+
+_Appears in:_
+
+- [EnvoyProxy](#envoyproxy)
+
+| Field | Description |
+| --- | --- |
+| `shared` | |
+| `dedicated` | |
+| `global` | |
+
+#### GatewayAPISettings
+
+GatewayAPISettings defines the settings for the gateway API.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation. This has higher precedence than the value specified in the Config. | | |
+| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | |
+
+#### HostnameStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | |
+| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | |
+| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | |
+
+#### IngressSettings
+
+IngressSettings defines the settings for the ingress.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the ingress to use. This has higher precedence than the value specified in the Config. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable Ingress for a tenant. | | |
+
+#### KubernetesSource
+
+_Appears in:_
+
+- [RouteSource](#routesource)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `resource` _[Unstructured](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#unstructured-unstructured-v1)_ | | | EmbeddedResource: \{\} |
+| `services` _[UpstreamService](#upstreamservice) array_ | Services contains the list of services that are used as the source for the Route. | | |
+
+#### LoadBalancer
+
+LoadBalancer is the Schema for the loadbalancers API
+
+_Appears in:_
+
+- [LoadBalancerList](#loadbalancerlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `LoadBalancer` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[LoadBalancerSpec](#loadbalancerspec)_ | | | |
+| `status` _[LoadBalancerStatus](#loadbalancerstatus)_ | | | |
+
+#### LoadBalancerEndpoints
+
+LoadBalancerEndpoints is a group of addresses with a common set of ports. The
+expanded set of endpoints is the Cartesian product of Addresses x Ports.
+For example, given:
+
+ {
+ Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+ Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+ }
+
+The resulting set of endpoints can be viewed as:
+
+ a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+ b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+
+_Appears in:_
+
+- [LoadBalancerSpec](#loadbalancerspec)
+- [RouteSpec](#routespec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | Name is the name of the endpoints. | | |
+| `addresses` _[EndpointAddress](#endpointaddress) array_ | IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize. | | MinItems: 1 |
+| `addressesReference` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | AddressesReference is a reference to the Addresses object that contains the IP addresses. If this field is set, the Addresses field will be ignored. | | |
+| `ports` _[EndpointPort](#endpointport) array_ | Port numbers available on the related IP addresses. This field is ignored for routes that are using kubernetes resources as the source. | | MinItems: 1 |
+
+#### LoadBalancerList
+
+LoadBalancerList contains a list of LoadBalancer
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `LoadBalancerList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[LoadBalancer](#loadbalancer) array_ | | | |
+
+#### LoadBalancerPort
+
+LoadBalancerPort contains information on service's port.
+
+_Appears in:_
+
+- [LoadBalancerSpec](#loadbalancerspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL. All ports within a Spec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP] |
+| `port` _integer_ | The port that will be exposed by the LoadBalancer. | | |
+
+#### LoadBalancerSettings
+
+LoadBalancerSettings defines the settings for the load balancers.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the load balancer to use. This has higher precedence than the value specified in the Config. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable L4 load balancing for a tenant. | | |
+
+#### LoadBalancerSpec
+
+LoadBalancerSpec defines the desired state of LoadBalancer
+
+_Appears in:_
+
+- [LoadBalancer](#loadbalancer)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1 |
+| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service. Only needed for layer 4. | | |
+| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible. When hostname is set, KubeLB will create a route(ingress or httproute) for the service, and expose it with TLS on the given hostname. Currently, only HTTP protocol is supported | | |
+| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. "ExternalName" maps to the specified externalName. "ClusterIP" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is "None", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. "NodePort" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. "LoadBalancer" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: | ClusterIP | |
+
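+For illustration, a hypothetical `LoadBalancer` object combining the fields above could look like this. In practice these objects are created and managed by the KubeLB CCM rather than authored by hand; all names and addresses below are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: LoadBalancer
+metadata:
+  name: my-app
+  namespace: tenant-my-tenant
+spec:
+  type: LoadBalancer
+  endpoints:
+    - addresses:
+        - ip: 10.10.1.1
+        - ip: 10.10.2.2
+      ports:
+        - port: 31234
+          protocol: TCP
+  ports:
+    - port: 80
+      protocol: TCP
+```
+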
+#### LoadBalancerState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+
+#### LoadBalancerStatus
+
+LoadBalancerStatus defines the observed state of LoadBalancer
+
+_Appears in:_
+
+- [LoadBalancer](#loadbalancer)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer, if one is present. | | |
+| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | |
+| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | |
+
+#### ResourceState
+
+_Appears in:_
+
+- [RouteResourcesStatus](#routeresourcesstatus)
+- [RouteServiceStatus](#routeservicestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | APIVersion is the API version of the resource. | | |
+| `name` _string_ | Name is the name of the resource. | | |
+| `namespace` _string_ | Namespace is the namespace of the resource. | | |
+| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | |
+| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+
+#### Route
+
+Route is the object that represents a route in the cluster.
+
+_Appears in:_
+
+- [RouteList](#routelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Route` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[RouteSpec](#routespec)_ | | | |
+| `status` _[RouteStatus](#routestatus)_ | | | |
+
+#### RouteList
+
+RouteList contains a list of Routes
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `RouteList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Route](#route) array_ | | | |
+
+#### RouteResourcesStatus
+
+_Appears in:_
+
+- [RouteStatus](#routestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `source` _string_ | | | |
+| `services` _object (keys:string, values:[RouteServiceStatus](#routeservicestatus))_ | | | |
+| `route` _[ResourceState](#resourcestate)_ | | | |
+
+#### RouteServiceStatus
+
+_Appears in:_
+
+- [RouteResourcesStatus](#routeresourcesstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | APIVersion is the API version of the resource. | | |
+| `name` _string_ | Name is the name of the resource. | | |
+| `namespace` _string_ | Namespace is the namespace of the resource. | | |
+| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | |
+| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `ports` _[ServicePort](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#serviceport-v1-core) array_ | | | |
+
+#### RouteSource
+
+_Appears in:_
+
+- [RouteSpec](#routespec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `kubernetes` _[KubernetesSource](#kubernetessource)_ | Kubernetes contains the information about the Kubernetes source. This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. | | |
+
+#### RouteSpec
+
+RouteSpec defines the desired state of the Route.
+
+_Appears in:_
+
+- [Route](#route)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1 |
+| `source` _[RouteSource](#routesource)_ | Source contains the information about the source of the route. This is used when the route is created from external sources. | | |
+
+#### RouteStatus
+
+RouteStatus defines the observed state of the Route.
+
+_Appears in:_
+
+- [Route](#route)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `resources` _[RouteResourcesStatus](#routeresourcesstatus)_ | Resources contains the list of resources that are created/processed as a result of the Route. | | |
+
+#### ServicePort
+
+ServicePort contains information on service's port.
+
+_Appears in:_
+
+- [ServiceStatus](#servicestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP. | | |
+| `appProtocol` _string_ | The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335); Kubernetes-defined prefixed names: 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext, 'kubernetes.io/ws' - WebSocket over cleartext, 'kubernetes.io/wss' - WebSocket over TLS; or other protocols, which should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. | | |
+| `port` _integer_ | The port that will be exposed by this service. | | |
+| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: | | |
+| `nodePort` _integer_ | The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: | | |
+| `upstreamTargetPort` _integer_ | | | |
+
+#### ServiceStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `ports` _[ServicePort](#serviceport) array_ | | | |
+
+#### SyncSecret
+
+SyncSecret is a wrapper over Kubernetes Secret object. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way.
+
+_Appears in:_
+
+- [SyncSecretList](#syncsecretlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `SyncSecret` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `immutable` _boolean_ | | | |
+| `data` _object (keys:string, values:integer array)_ | | | |
+| `stringData` _object (keys:string, values:string)_ | | | |
+| `type` _[SecretType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secrettype-v1-core)_ | | | |
+
+#### SyncSecretList
+
+SyncSecretList contains a list of SyncSecrets
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `SyncSecretList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[SyncSecret](#syncsecret) array_ | | | |
+
+#### Tenant
+
+Tenant is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantList](#tenantlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Tenant` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantSpec](#tenantspec)_ | | | |
+| `status` _[TenantStatus](#tenantstatus)_ | | | |
+
+#### TenantList
+
+TenantList contains a list of Tenant
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Tenant](#tenant) array_ | | | |
+
+#### TenantSpec
+
+TenantSpec defines the desired state of Tenant
+
+_Appears in:_
+
+- [Tenant](#tenant)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored. Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same set of annotations to all resources. Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
+| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
+| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[DNSSettings](#dnssettings)_ | | | |
+| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
+
+#### TenantState
+
+TenantState is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantStateList](#tenantstatelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantState` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | |
+| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | |
+
+#### TenantStateList
+
+TenantStateList contains a list of TenantState
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantStateList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[TenantState](#tenantstate) array_ | | | |
+
+#### TenantStateSpec
+
+TenantStateSpec defines the desired state of TenantState.
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+#### TenantStateStatus
+
+TenantStateStatus defines the observed state of TenantState
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `version` _[Version](#version)_ | | | |
+| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | |
+
+#### TenantStatus
+
+TenantStatus defines the observed state of Tenant
+
+_Appears in:_
+
+- [Tenant](#tenant)
+
+#### UpstreamService
+
+UpstreamService is a wrapper over the corev1.Service object.
+This is required because the kubebuilder:validation:EmbeddedResource marker adds x-kubernetes-embedded-resource to the array instead of
+the elements within it, which results in a broken CRD (validation error). Without this marker, the embedded resource is not properly
+serialized to the CRD.
+
+_Appears in:_
+
+- [KubernetesSource](#kubernetessource)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service. | | |
+| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only. | | |
+
+#### Version
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `gitVersion` _string_ | | | |
+| `gitCommit` _string_ | | | |
+| `buildDate` _string_ | | | |
+| `edition` _string_ | | | |
diff --git a/content/kubelb/v1.2/references/ee/_index.en.md b/content/kubelb/v1.2/references/ee/_index.en.md
new file mode 100644
index 000000000..1a71a06e4
--- /dev/null
+++ b/content/kubelb/v1.2/references/ee/_index.en.md
@@ -0,0 +1,934 @@
++++
+title = "KubeLB Enterprise Edition CRD References"
+linkTitle = "Enterprise Edition"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
+enterprise = true
++++
+
+**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ee/kubelb.k8c.io/v1alpha1)**
+
+## Packages
+
+- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1)
+
+## kubelb.k8c.io/v1alpha1
+
+Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 API group
+
+### Resource Types
+
+- [Addresses](#addresses)
+- [AddressesList](#addresseslist)
+- [Config](#config)
+- [ConfigList](#configlist)
+- [LoadBalancer](#loadbalancer)
+- [LoadBalancerList](#loadbalancerlist)
+- [Route](#route)
+- [RouteList](#routelist)
+- [SyncSecret](#syncsecret)
+- [SyncSecretList](#syncsecretlist)
+- [Tenant](#tenant)
+- [TenantList](#tenantlist)
+- [TenantState](#tenantstate)
+- [TenantStateList](#tenantstatelist)
+- [Tunnel](#tunnel)
+- [TunnelList](#tunnellist)
+
+#### Addresses
+
+Addresses is the Schema for the addresses API
+
+_Appears in:_
+
+- [AddressesList](#addresseslist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Addresses` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[AddressesSpec](#addressesspec)_ | | | |
+| `status` _[AddressesStatus](#addressesstatus)_ | | | |
+
+#### AddressesList
+
+AddressesList contains a list of Addresses
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `AddressesList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Addresses](#addresses) array_ | | | |
+
+#### AddressesSpec
+
+AddressesSpec defines the desired state of Addresses
+
+_Appears in:_
+
+- [Addresses](#addresses)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `addresses` _[EndpointAddress](#endpointaddress) array_ | Addresses contains a list of addresses. | | MinItems: 1
|
+
+#### AddressesStatus
+
+AddressesStatus defines the observed state of Addresses
+
+_Appears in:_
+
+- [Addresses](#addresses)
+
+#### AnnotatedResource
+
+_Underlying type:_ _string_
+
+_Validation:_
+
+- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute]
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description |
+| --- | --- |
+| `all` | |
+| `service` | |
+| `ingress` | |
+| `gateway` | |
+| `httproute` | |
+| `grpcroute` | |
+| `tcproute` | |
+| `udproute` | |
+| `tlsroute` | |
+
+#### AnnotationSettings
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations (key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations (key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+
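+As a sketch of how these fields fit together, a Config could combine annotation propagation and defaults as follows; the annotation keys and values are illustrative, not defaults:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default    # illustrative; use the Config your installation expects
+  namespace: kubelb
+spec:
+  # Propagate this annotation from tenant services; an empty value allows any value.
+  propagatedAnnotations:
+    metallb.universe.tf/allow-shared-ip: ""
+  # Set these annotations on all generated load balancing resources, if not already present.
+  defaultAnnotations:
+    all:
+      example.com/managed-by: kubelb   # placeholder annotation
+```
+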
+#### Annotations
+
+_Underlying type:_ _object_
+
+_Appears in:_
+
+- [AnnotationSettings](#annotationsettings)
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+#### CertificatesSettings
+
+CertificatesSettings defines the settings for the certificates.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation for a tenant. | | |
+| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | |
+| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow deeper subdomains like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
+
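+A hypothetical Tenant that restricts certificate automation to a set of domains could look like this; the issuer and domain names are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: tenant-example   # placeholder
+spec:
+  certificates:
+    defaultClusterIssuer: letsencrypt-prod   # assumes such a ClusterIssuer exists
+    allowedDomains:
+      - "*.apps.example.com"   # one subdomain level
+      - "**.dev.example.com"   # any number of subdomain levels
+```
+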
+#### Config
+
+Config is the object that represents the Config for the KubeLB management controller.
+
+_Appears in:_
+
+- [ConfigList](#configlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Config` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ConfigSpec](#configspec)_ | | | |
+
+#### ConfigCertificatesSettings
+
+ConfigCertificatesSettings defines the global settings for the certificates.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation globally for all the tenants. | | |
+| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | |
+
+#### ConfigDNSSettings
+
+ConfigDNSSettings defines the global settings for DNS management and automation.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation globally for all the tenants. | | |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+
+#### ConfigList
+
+ConfigList contains a list of Config
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `ConfigList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Config](#config) array_ | | | |
+
+#### ConfigSpec
+
+ConfigSpec defines the desired state of the Config
+
+_Appears in:_
+
+- [Config](#config)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations (key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations (key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | |
+| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
+| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
+| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | |
+| `certificates` _[ConfigCertificatesSettings](#configcertificatessettings)_ | | | |
+| `tunnel` _[TunnelSettings](#tunnelsettings)_ | | | |
+
+#### DNSSettings
+
+DNSSettings defines the tenant specific settings for DNS management and automation.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation for a tenant. | | |
+| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow deeper subdomains like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | |
+| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | |
+| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | |
+
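+For example, a Tenant could enable DNS automation with a wildcard base domain as in the following sketch; the domain names are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: tenant-example   # placeholder
+spec:
+  dns:
+    wildcardDomain: apps.example.com   # placeholder base domain
+    allowExplicitHostnames: true
+    useDNSAnnotations: true            # e.g. annotations consumed by external-dns
+    allowedDomains:
+      - "**.apps.example.com"
+```
+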
+#### EndpointAddress
+
+EndpointAddress is a tuple that describes a single IP address.
+
+_Appears in:_
+
+- [AddressesSpec](#addressesspec)
+- [LoadBalancerEndpoints](#loadbalancerendpoints)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `ip` _string_ | The IP of this endpoint.
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
or link-local multicast (224.0.0.0/24). | | MinLength: 7
|
+| `hostname` _string_ | The Hostname of this endpoint | | |
+
+#### EndpointPort
+
+EndpointPort is a tuple that describes a single port.
+
+_Appears in:_
+
+- [LoadBalancerEndpoints](#loadbalancerendpoints)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port. This must match the 'name' field in the
corresponding ServicePort.
Must be a DNS_LABEL.
Optional only if one port is defined. | | |
+| `port` _integer_ | The port number of the endpoint. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
|
+
+#### EnvoyProxy
+
+EnvoyProxy defines the desired state of the EnvoyProxy
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `topology` _[EnvoyProxyTopology](#envoyproxytopology)_ | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global.
DEPRECATION NOTICE: The value "dedicated" is deprecated and will be removed in a future release. Dedicated topology will now default to shared topology. | shared | Enum: [shared dedicated global]
|
+| `useDaemonset` _boolean_ | UseDaemonset defines whether Envoy Proxy will run as daemonset. By default, Envoy Proxy will run as deployment.
If set to true, Replicas will be ignored. | | |
+| `replicas` _integer_ | Replicas defines the number of replicas for Envoy Proxy. This field is ignored if UseDaemonset is set to true. | 3 | Minimum: 1
|
+| `singlePodPerNode` _boolean_ | SinglePodPerNode defines whether Envoy Proxy pods will be spread across nodes. This ensures that multiple replicas are not running on the same node. | | |
+| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is used to select nodes to run Envoy Proxy. If specified, the node must have all the indicated labels. | | |
+| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | Tolerations is used to schedule Envoy Proxy pods on nodes with matching taints. | | |
+| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | Resources defines the resource requirements for Envoy Proxy. | | |
+| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#affinity-v1-core)_ | Affinity is used to schedule Envoy Proxy pods on nodes with matching affinity. | | |
+
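+A Config that tunes the Envoy Proxy deployment might look like the following sketch; the resource sizing is illustrative:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default    # illustrative
+  namespace: kubelb
+spec:
+  envoyProxy:
+    topology: shared        # default; "global" is the other non-deprecated value
+    replicas: 3
+    singlePodPerNode: true  # spread replicas across nodes
+    resources:              # illustrative sizing
+      requests:
+        cpu: 100m
+        memory: 128Mi
+```
+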
+#### EnvoyProxyTopology
+
+_Underlying type:_ _string_
+
+_Appears in:_
+
+- [EnvoyProxy](#envoyproxy)
+
+| Field | Description |
+| --- | --- |
+| `shared` | |
+| `dedicated` | |
+| `global` | |
+
+#### GatewayAPISettings
+
+GatewayAPISettings defines the settings for the gateway API.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | |
+| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname and tunneling. | | |
+| `gateway` _[GatewaySettings](#gatewaysettings)_ | | | |
+| `disableHTTPRoute` _boolean_ | | | |
+| `disableGRPCRoute` _boolean_ | | | |
+| `disableTCPRoute` _boolean_ | | | |
+| `disableUDPRoute` _boolean_ | | | |
+| `disableTLSRoute` _boolean_ | | | |
+
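+As an illustrative sketch, a Tenant could restrict Gateway API usage like this; the class name is a placeholder:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: tenant-example   # placeholder
+spec:
+  gatewayAPI:
+    class: envoy-gateway   # placeholder Gateway API implementation/class
+    disableTCPRoute: true  # e.g. allow only HTTP and GRPC routes
+    disableUDPRoute: true
+    gateway:
+      limit: 2             # at most two Gateways for this tenant
+```
+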
+#### GatewayAPIsSettings
+
+_Appears in:_
+
+- [GatewayAPISettings](#gatewayapisettings)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disableHTTPRoute` _boolean_ | | | |
+| `disableGRPCRoute` _boolean_ | | | |
+| `disableTCPRoute` _boolean_ | | | |
+| `disableUDPRoute` _boolean_ | | | |
+| `disableTLSRoute` _boolean_ | | | |
+
+#### GatewaySettings
+
+GatewaySettings defines the settings for the gateway resource.
+
+_Appears in:_
+
+- [GatewayAPISettings](#gatewayapisettings)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `limit` _integer_ | Limit is the maximum number of gateways to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources, since it is not possible for KubeLB to know which resources are safe to remove. | | |
+
+#### HostnameStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | |
+| `tlsEnabled` _boolean_ | TLSEnabled is true if a certificate is created for the hostname. | | |
+| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if a DNS record is created for the hostname. | | |
+
+#### IngressSettings
+
+IngressSettings defines the settings for the ingress.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the ingress to use.
This has higher precedence than the value specified in the Config. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable Ingress for a tenant. | | |
+
+#### KubernetesSource
+
+_Appears in:_
+
+- [RouteSource](#routesource)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `resource` _[Unstructured](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#unstructured-unstructured-v1)_ | | | EmbeddedResource: \{\}
|
+| `services` _[UpstreamService](#upstreamservice) array_ | Services contains the list of services that are used as the source for the Route. | | |
+
+#### LoadBalancer
+
+LoadBalancer is the Schema for the loadbalancers API
+
+_Appears in:_
+
+- [LoadBalancerList](#loadbalancerlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `LoadBalancer` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[LoadBalancerSpec](#loadbalancerspec)_ | | | |
+| `status` _[LoadBalancerStatus](#loadbalancerstatus)_ | | | |
+
+#### LoadBalancerEndpoints
+
+LoadBalancerEndpoints is a group of addresses with a common set of ports. The
+expanded set of endpoints is the Cartesian product of Addresses x Ports.
+For example, given:
+
+ {
+ Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+ Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+ }
+
+The resulting set of endpoints can be viewed as:
+
+ a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+ b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+
+_Appears in:_
+
+- [LoadBalancerSpec](#loadbalancerspec)
+- [RouteSpec](#routespec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | Name is the name of the endpoints. | | |
+| `addresses` _[EndpointAddress](#endpointaddress) array_ | IP addresses which offer the related ports that are marked as ready. These endpoints
should be considered safe for load balancers and clients to utilize. | | MinItems: 1
|
+| `addressesReference` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | AddressesReference is a reference to the Addresses object that contains the IP addresses.
If this field is set, the Addresses field will be ignored. | | |
+| `ports` _[EndpointPort](#endpointport) array_ | Port numbers available on the related IP addresses.
This field is ignored for routes that are using kubernetes resources as the source. | | MinItems: 1
|
+
+#### LoadBalancerList
+
+LoadBalancerList contains a list of LoadBalancer
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `LoadBalancerList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[LoadBalancer](#loadbalancer) array_ | | | |
+
+#### LoadBalancerPort
+
+LoadBalancerPort contains information on service's port.
+
+_Appears in:_
+
+- [LoadBalancerSpec](#loadbalancerspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a Spec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
|
+| `port` _integer_ | The port that will be exposed by the LoadBalancer. | | |
+
+#### LoadBalancerSettings
+
+LoadBalancerSettings defines the settings for the load balancers.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `class` _string_ | Class is the class of the load balancer to use.
This has higher precedence than the value specified in the Config. | | |
+| `limit` _integer_ | Limit is the maximum number of load balancers to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources, since it is not possible for KubeLB to know which resources are safe to remove. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable L4 load balancing for a tenant. | | |
+
+#### LoadBalancerSpec
+
+LoadBalancerSpec defines the desired state of LoadBalancer
+
+_Appears in:_
+
+- [LoadBalancer](#loadbalancer)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
|
+| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
Only needed for layer 4. | | |
+| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route(ingress or httproute) for the service, and expose it with TLS on the given hostname. | | |
+| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP. | ClusterIP | |
+
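+Putting these fields together, a LoadBalancer object (normally generated by the KubeLB CCM rather than written by hand) might look like this sketch; the addresses, ports, and hostname are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: LoadBalancer
+metadata:
+  name: my-app               # placeholder
+  namespace: tenant-example  # placeholder tenant namespace
+spec:
+  type: LoadBalancer
+  hostname: app.example.com  # optional; KubeLB then creates a route and TLS for it
+  endpoints:
+    - addresses:
+        - ip: 10.10.1.1
+        - ip: 10.10.2.2
+      ports:
+        - port: 31180        # placeholder node port on the tenant cluster
+          protocol: TCP
+  ports:
+    - port: 80               # port exposed by the load balancer (layer 4)
+      protocol: TCP
+```
+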
+#### LoadBalancerState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+| `limit` _integer_ | | | |
+
+#### LoadBalancerStatus
+
+LoadBalancerStatus defines the observed state of LoadBalancer
+
+_Appears in:_
+
+- [LoadBalancer](#loadbalancer)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | |
+| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | |
+| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | |
+
+#### ResourceState
+
+_Appears in:_
+
+- [RouteResourcesStatus](#routeresourcesstatus)
+- [RouteServiceStatus](#routeservicestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | APIVersion is the API version of the resource. | | |
+| `name` _string_ | Name is the name of the resource. | | |
+| `namespace` _string_ | Namespace is the namespace of the resource. | | |
+| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | |
+| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+
+#### Route
+
+Route is the object that represents a route in the cluster.
+
+_Appears in:_
+
+- [RouteList](#routelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Route` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[RouteSpec](#routespec)_ | | | |
+| `status` _[RouteStatus](#routestatus)_ | | | |
+
+#### RouteList
+
+RouteList contains a list of Routes
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `RouteList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Route](#route) array_ | | | |
+
+#### RouteResourcesStatus
+
+_Appears in:_
+
+- [RouteStatus](#routestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `source` _string_ | | | |
+| `services` _object (keys:string, values:[RouteServiceStatus](#routeservicestatus))_ | | | |
+| `route` _[ResourceState](#resourcestate)_ | | | |
+
+#### RouteServiceStatus
+
+_Appears in:_
+
+- [RouteResourcesStatus](#routeresourcesstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | APIVersion is the API version of the resource. | | |
+| `name` _string_ | Name is the name of the resource. | | |
+| `namespace` _string_ | Namespace is the namespace of the resource. | | |
+| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | |
+| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `ports` _[ServicePort](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#serviceport-v1-core) array_ | | | |
+
+#### RouteSource
+
+_Appears in:_
+
+- [RouteSpec](#routespec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `kubernetes` _[KubernetesSource](#kubernetessource)_ | Kubernetes contains the information about the Kubernetes source.
This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. | | |
+
+#### RouteSpec
+
+RouteSpec defines the desired state of the Route.
+
+_Appears in:_
+
+- [Route](#route)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
|
+| `source` _[RouteSource](#routesource)_ | Source contains the information about the source of the route. This is used when the route is created from external sources. | | |
+
+#### RouteStatus
+
+RouteStatus defines the observed state of the Route.
+
+_Appears in:_
+
+- [Route](#route)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `resources` _[RouteResourcesStatus](#routeresourcesstatus)_ | Resources contains the list of resources that are created/processed as a result of the Route. | | |
+
+#### ServicePort
+
+ServicePort contains information on service's port.
+
+_Appears in:_
+
+- [ServiceStatus](#servicestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | |
+| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | |
+| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
- Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335).
- Kubernetes-defined prefixed names: 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext, 'kubernetes.io/ws' - WebSocket over cleartext, 'kubernetes.io/wss' - WebSocket over TLS.
- Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
+| `port` _integer_ | The port that will be exposed by this service. | | |
+| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field. | | |
+| `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP). | | |
+| `upstreamTargetPort` _integer_ | | | |
+
+#### ServiceStatus
+
+_Appears in:_
+
+- [LoadBalancerStatus](#loadbalancerstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `ports` _[ServicePort](#serviceport) array_ | | | |
+
+#### SyncSecret
+
+SyncSecret is a wrapper over the Kubernetes Secret object. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way.
+
+_Appears in:_
+
+- [SyncSecretList](#syncsecretlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `SyncSecret` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `immutable` _boolean_ | | | |
+| `data` _object (keys:string, values:integer array)_ | | | |
+| `stringData` _object (keys:string, values:string)_ | | | |
+| `type` _[SecretType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secrettype-v1-core)_ | | | |
+
+#### SyncSecretList
+
+SyncSecretList contains a list of SyncSecrets
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `SyncSecretList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[SyncSecret](#syncsecret) array_ | | | |
+
+#### Tenant
+
+Tenant is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantList](#tenantlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Tenant` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantSpec](#tenantspec)_ | | | |
+| `status` _[TenantStatus](#tenantstatus)_ | | | |
+
+#### TenantList
+
+TenantList contains a list of Tenant
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Tenant](#tenant) array_ | | | |
+
+#### TenantSpec
+
+TenantSpec defines the desired state of Tenant
+
+_Appears in:_
+
+- [Tenant](#tenant)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations (key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | |
+| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations (key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | |
+| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | |
+| `ingress` _[IngressSettings](#ingresssettings)_ | | | |
+| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | |
+| `dns` _[DNSSettings](#dnssettings)_ | | | |
+| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | |
+| `tunnel` _[TenantTunnelSettings](#tenanttunnelsettings)_ | | | |
+| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, applies to all components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow deeper subdomains like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | |
+
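+A complete Tenant sketch combining several of the settings above could look like this; all names, limits, and domains are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: tenant-example   # placeholder
+spec:
+  allowedDomains:
+    - "**.tenant-example.example.com"   # placeholder domain
+  loadBalancer:
+    limit: 5     # placeholder quota
+  tunnel:
+    limit: 10    # placeholder quota
+  ingress:
+    class: nginx # assumes an ingress-nginx installation
+```
+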
+#### TenantState
+
+TenantState is the Schema for the tenants API
+
+_Appears in:_
+
+- [TenantStateList](#tenantstatelist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantState` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | |
+| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | |
+
+#### TenantStateList
+
+TenantStateList contains a list of TenantState
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TenantStateList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[TenantState](#tenantstate) array_ | | | |
+
+#### TenantStateSpec
+
+TenantStateSpec defines the desired state of TenantState.
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+#### TenantStateStatus
+
+TenantStateStatus defines the observed state of TenantState
+
+_Appears in:_
+
+- [TenantState](#tenantstate)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `version` _[Version](#version)_ | | | |
+| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | |
+| `tunnel` _[TunnelState](#tunnelstate)_ | | | |
+| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | |
+| `allowedDomains` _string array_ | | | |
+
+#### TenantStatus
+
+TenantStatus defines the observed state of Tenant
+
+_Appears in:_
+
+- [Tenant](#tenant)
+
+#### TenantTunnelSettings
+
+TenantTunnelSettings defines the settings for the tunnel.
+
+_Appears in:_
+
+- [TenantSpec](#tenantspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources, since it is not possible for KubeLB to know which resources are safe to remove. | | |
+| `disable` _boolean_ | Disable is a flag that can be used to disable tunneling for a tenant. | | |
+
+#### Tunnel
+
+Tunnel is the Schema for the tunnels API
+
+_Appears in:_
+
+- [TunnelList](#tunnellist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Tunnel` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[TunnelSpec](#tunnelspec)_ | | | |
+| `status` _[TunnelStatus](#tunnelstatus)_ | | | |
+
+#### TunnelList
+
+TunnelList contains a list of Tunnel
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `TunnelList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Tunnel](#tunnel) array_ | | | |
+
+#### TunnelPhase
+
+_Underlying type:_ _string_
+
+TunnelPhase represents the phase of tunnel
+
+_Appears in:_
+
+- [TunnelStatus](#tunnelstatus)
+
+| Field | Description |
+| --- | --- |
+| `Pending` | TunnelPhasePending means the tunnel is being provisioned |
+| `Ready` | TunnelPhaseReady means the tunnel is ready to accept connections |
+| `Failed` | TunnelPhaseFailed means the tunnel provisioning failed |
+| `Terminating` | TunnelPhaseTerminating means the tunnel is being terminated |
+
+#### TunnelResources
+
+TunnelResources contains references to resources created for the tunnel
+
+_Appears in:_
+
+- [TunnelStatus](#tunnelstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `serviceName` _string_ | ServiceName is the name of the service created for this tunnel | | |
+| `routeRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | RouteRef is a reference to the route (HTTPRoute or Ingress) created for this tunnel | | |
+
+#### TunnelSettings
+
+TunnelSettings defines the global settings for Tunnel resources.
+
+_Appears in:_
+
+- [ConfigSpec](#configspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources, since it is not possible for KubeLB to know which resources are safe to remove. | | |
+| `connectionManagerURL` _string_ | ConnectionManagerURL is the URL of the connection manager service that handles tunnel connections.
This is required if tunneling is enabled. | | |
+| `disable` _boolean_ | Disable indicates whether the tunneling feature should be disabled. | | |
+
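+As a sketch, global tunneling could be configured in the Config object as follows; the URL and limit are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default    # illustrative
+  namespace: kubelb
+spec:
+  tunnel:
+    connectionManagerURL: https://tunnel.example.com   # placeholder endpoint
+    limit: 100                                         # placeholder global cap
+```
+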
+#### TunnelSpec
+
+TunnelSpec defines the desired state of Tunnel
+
+_Appears in:_
+
+- [Tunnel](#tunnel)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname is the hostname of the tunnel. If not specified, the hostname will be generated by KubeLB. | | |
+
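+A minimal Tunnel manifest might look like the following sketch; the name, namespace, and hostname are placeholders:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tunnel
+metadata:
+  name: dev-workstation      # placeholder
+  namespace: tenant-example  # placeholder tenant namespace
+spec:
+  hostname: dev.apps.example.com   # optional; if omitted, KubeLB generates one
+```
+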
+#### TunnelState
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `disable` _boolean_ | | | |
+| `limit` _integer_ | | | |
+| `connectionManagerURL` _string_ | | | |
+
+#### TunnelStatus
+
+TunnelStatus defines the observed state of Tunnel
+
+_Appears in:_
+
+- [Tunnel](#tunnel)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `hostname` _string_ | Hostname contains the actual hostname assigned to the tunnel | | |
+| `url` _string_ | URL contains the full URL to access the tunnel | | |
+| `connectionManagerURL` _string_ | ConnectionManagerURL contains the URL that clients should use to establish tunnel connections | | |
+| `phase` _[TunnelPhase](#tunnelphase)_ | Phase represents the current phase of the tunnel | | |
+| `resources` _[TunnelResources](#tunnelresources)_ | Resources contains references to the resources created for this tunnel | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | Conditions represents the current conditions of the tunnel | | |
+
+#### UpstreamService
+
+UpstreamService is a wrapper over the corev1.Service object.
+This is required because the kubebuilder:validation:EmbeddedResource marker adds x-kubernetes-embedded-resource to the array instead of
+the elements within it, which results in a broken CRD (validation error). Without this marker, the embedded resource is not properly
+serialized to the CRD.
+
+_Appears in:_
+
+- [KubernetesSource](#kubernetessource)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service. | | |
+| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only. | | |
+
+#### Version
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `gitVersion` _string_ | | | |
+| `gitCommit` _string_ | | | |
+| `buildDate` _string_ | | | |
+| `edition` _string_ | | | |
diff --git a/content/kubelb/v1.2/release-notes/_index.en.md b/content/kubelb/v1.2/release-notes/_index.en.md
new file mode 100644
index 000000000..f91bf57e0
--- /dev/null
+++ b/content/kubelb/v1.2/release-notes/_index.en.md
@@ -0,0 +1,94 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 60
++++
+
+## Kubermatic KubeLB v1.2
+
+- [v1.2.0](#v120)
+ - [Community Edition](#community-edition)
+ - [Enterprise Edition](#enterprise-edition)
+
+## v1.2.0
+
+**GitHub release: [v1.2.0](https://github.com/kubermatic/kubelb/releases/tag/v1.2.0)**
+
+### Highlights
+
+#### Community Edition (CE)
+
+- Support for Load Balancer Hostname has been introduced. This allows users to specify a hostname for the load balancer.
+- Default Annotations can now be configured for services, Ingress, and Gateway API resources in the management cluster.
+- KubeLB Addons chart has been introduced to simplify the installation of the required components for the management cluster.
+  - Tools such as ingress-nginx, external-dns, and cert-manager can now be installed through a single chart.
+  - The KubeLB Addons chart ships versions of these components that we actively test and support.
+- TenantState API has been introduced to share tenant status with KubeLB consumers, e.g. through the CCM or CLI. This simplifies sharing details such as the load balancer limit, allowed domains, and wildcard domain with consumers.
+- KubeLB CCM can now install Gateway API CRDs by itself, removing the need to install them manually.
+- KubeLB now maintains the required RBAC attached to the kubeconfig for the KKP integration. `kkpintegration.rbac: true` can be used to manage the RBAC using the KubeLB helm chart.
+
+#### Enterprise Edition (EE)
+
+- Tunneling support has been introduced in the Management Cluster. The server side and control plane components for tunneling are shipped with the Enterprise Edition of KubeLB.
+- AI and MCP Gateway Integration has been introduced. Since running AI, MCP, and Agent2Agent tooling alongside your data plane is a common use case, we are now leveraging [kgateway](https://kgateway.dev/) to solidify the integration with these tools.
+
+### Community Edition
+
+#### API Changes
+
+- Enterprise Edition APIs for KubeLB are now available at k8c.io/kubelb/api/ee/kubelb.k8c.io/v1alpha1 ([#101](https://github.com/kubermatic/kubelb/pull/101))
+
+#### Features
+
+- Support for adding default annotations to the load balancing resources ([#78](https://github.com/kubermatic/kubelb/pull/78))
+- KubeLB now maintains the required RBAC attached to the kubeconfig for the KKP integration. `kkpintegration.rbac: true` can be used to manage the RBAC using the KubeLB helm chart ([#79](https://github.com/kubermatic/kubelb/pull/79))
+- Envoy: no_traffic_interval for upstream endpoints health check has been reduced to 5s from the default of 60s. Envoy will start sending health checks to a new cluster after 5s now ([#106](https://github.com/kubermatic/kubelb/pull/106))
+- KubeLB CCM will now automatically install Kubernetes Gateway API CRDs using the following flags (see the sketch after this list):
+  - `--install-gateway-api-crds`: installs and manages the Gateway API CRDs using the gateway CRD controller.
+  - `--gateway-api-crds-channel`: specifies the channel for Gateway API CRDs, with possible values of `standard` or `experimental`. ([#110](https://github.com/kubermatic/kubelb/pull/110))
+- Improve validations for cluster-name in CCM ([#111](https://github.com/kubermatic/kubelb/pull/111))
+- Gracefully handle nodes that don't have an IP address assigned while computing Addresses ([#111](https://github.com/kubermatic/kubelb/pull/111))
+- LoadBalancer resources can now be directly assigned a hostname/URL ([#113](https://github.com/kubermatic/kubelb/pull/113))
+- TenantState API has been introduced to share tenant status with KubeLB consumers, e.g. through the CCM or CLI ([#117](https://github.com/kubermatic/kubelb/pull/117))
+- Dedicated addons chart has been introduced for KubeLB at `oci://quay.io/kubermatic/helm-charts/kubelb-addons`. ([#122](https://github.com/kubermatic/kubelb/pull/122))
+- KubeLB is now built using Go 1.25 ([#126](https://github.com/kubermatic/kubelb/pull/126))
+- Update kube-rbac-proxy to v0.19.1 ([#128](https://github.com/kubermatic/kubelb/pull/128))
+- Add metallb to kubelb-addons ([#130](https://github.com/kubermatic/kubelb/pull/130))
+
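+As a sketch, the two new flags can be passed to the CCM container like any other controller flags; the container name and image tag below are illustrative:
+
+```yaml
+# Fragment of a KubeLB CCM Deployment; container name and image tag are illustrative.
+containers:
+  - name: kubelb-ccm
+    image: quay.io/kubermatic/kubelb-ccm:v1.2.0
+    args:
+      - --install-gateway-api-crds           # install and manage Gateway API CRDs
+      - --gateway-api-crds-channel=standard  # or "experimental"
+```
+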
+#### Design
+
+- Restructure repository and make Enterprise Edition APIs available at k8c.io/kubelb/api/ee/kubelb.k8c.io/v1alpha1 ([#101](https://github.com/kubermatic/kubelb/pull/101))
+
+#### Bug or Regression
+
+- Fix annotation handling for services ([#82](https://github.com/kubermatic/kubelb/pull/82))
+- Don't modify IngressClassName if it's not set in the configuration ([#88](https://github.com/kubermatic/kubelb/pull/88))
+- Fix an issue with KubeLB not respecting the already allocated NodePort in the management cluster for load balancers with a large number of open NodePorts ([#91](https://github.com/kubermatic/kubelb/pull/91))
+- Before removing RBAC for a tenant, ensure that all routes, load balancers, and syncsecrets are cleaned up ([#92](https://github.com/kubermatic/kubelb/pull/92))
+- Update health checks for Envoy upstream endpoints:
+  - UDP health checking has been removed due to limited support from Envoy
+  - TCP health checking has been updated to perform a connect-only health check ([#103](https://github.com/kubermatic/kubelb/pull/103))
+- Use arbitrary ports as target port for load balancer services ([#119](https://github.com/kubermatic/kubelb/pull/119))
+
+#### Other (Cleanup, Flake, or Chore)
+
+- Upgrade to Go 1.24.1 ([#87](https://github.com/kubermatic/kubelb/pull/87))
+- Upgrade to EnvoyProxy v1.33.1 ([#87](https://github.com/kubermatic/kubelb/pull/87))
+- Sort IPs in `addresses` Endpoint to reduce updates ([#93](https://github.com/kubermatic/kubelb/pull/93))
+- KubeLB is now built using Go 1.24.6 ([#118](https://github.com/kubermatic/kubelb/pull/118))
+- Add additional columns for TenantState and Tunnel CRDs ([#124](https://github.com/kubermatic/kubelb/pull/124))
+
+**Full Changelog**:
+
+### Enterprise Edition
+
+**Enterprise Edition includes everything from the Community Edition and more. The release notes below cover changes specific to the Enterprise Edition.**
+
+#### EE Features
+
+- Default annotations support for Alpha/Beta Gateway API resources like TLSRoute, TCPRoute, and UDPRoute.
+- More fine-grained load balancer hostname support.
+- Tunneling support has been introduced in the Management Cluster. With the newly introduced KubeLB CLI, users can now expose workloads/applications running on their local workstations, or on VMs in closed networks, to the outside world. Since all the traffic is routed through the KubeLB management cluster, security, observability, and other features are available and applied by default, based on your configuration.
+- AI and MCP Gateway Integration has been introduced. Since running AI, MCP, and Agent2Agent tooling alongside your data plane is a common use case, we are now leveraging [kgateway](https://kgateway.dev/) to solidify the integration with AI, MCP, and Agent2Agent tooling.
diff --git a/content/kubelb/v1.2/support-policy/_index.en.md b/content/kubelb/v1.2/support-policy/_index.en.md
new file mode 100644
index 000000000..d2604c8ed
--- /dev/null
+++ b/content/kubelb/v1.2/support-policy/_index.en.md
@@ -0,0 +1,27 @@
++++
+title = "Support Policy"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+KubeLB has an open-source community edition and an enterprise edition. The community edition is free to use and is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).
+
+The enterprise edition, backed by an active subscription for [KubeLB](https://www.kubermatic.com/products/kubelb/), provides comprehensive enterprise-grade support, guaranteed SLAs, priority issue resolution, and direct access to our expert engineering team. Our enterprise customers benefit from personalized technical guidance, architectural consulting, and our commitment to their production success.
+
+## Enterprise Edition Support
+
+By default, our support covers the following:
+
+- Debugging for issues related to KubeLB
+- Enhancing documentation
+- Fixing bugs that block the usage of the platform
+
+What is not covered:
+
+- Issues related to the underlying Kubernetes cluster and infrastructure.
+- Custom configurations for the underlying product suite including ingress-nginx, Envoy Gateway, External DNS, and Cert Manager. KubeLB only provides you with sane default configurations and an integration for those products.
+- Issues related to Ingress or Gateway API resources misconfigured by KubeLB users (tenant clusters), for example misconfigured TLS certificates or missing hostnames in the Ingress or HTTPRoute resources.
+
+{{% notice info %}}
+**Discover our enterprise-grade support offerings and customized solutions for your organization's needs. [Contact our solutions team](mailto:sales@kubermatic.com) to explore how we can help ensure your success.**
+{{% /notice %}}
diff --git a/content/kubelb/v1.2/tutorials/_index.en.md b/content/kubelb/v1.2/tutorials/_index.en.md
new file mode 100644
index 000000000..d59eef078
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/_index.en.md
@@ -0,0 +1,18 @@
++++
+title = "Guides"
+linkTitle = "Tutorials"
+date = 2023-10-27T10:07:15+02:00
+description = "Get familiar with KubeLB and read step-by-step instructions to handle important scenarios"
+weight = 20
+chapter = true
++++
+
+
+# Guides
+
+Get familiar with KubeLB and read step-by-step instructions to handle important scenarios.
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/tutorials/aigateway/_index.en.md b/content/kubelb/v1.2/tutorials/aigateway/_index.en.md
new file mode 100644
index 000000000..148cb8985
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/aigateway/_index.en.md
@@ -0,0 +1,239 @@
++++
+title = "AI & MCP Gateway"
+linkTitle = "AI & MCP Gateway"
+date = 2023-10-27T10:07:15+02:00
+weight = 7
++++
+
+This tutorial will guide you through setting up an AI and MCP Gateway using KubeLB with KGateway to securely manage Large Language Model (LLM) requests and MCP tool servers.
+
+## Overview
+
+KubeLB leverages [KGateway](https://kgateway.dev/), a CNCF Sandbox project (accepted March 2025), to provide advanced AI Gateway capabilities. KGateway is built on Envoy and implements the Kubernetes Gateway API specification, offering:
+
+- **AI Workload Protection**: Secure applications, models, and data from inappropriate access
+- **LLM Traffic Management**: Intelligent routing to LLM providers with load balancing based on model metrics
+- **Prompt Engineering**: System-level prompt enrichment and guards
+- **Multi-Provider Support**: Works with OpenAI, Anthropic, Google Gemini, Mistral, and local models like Ollama
+- **Model Context Protocol (MCP) Gateway**: Federates MCP tool servers into a single, secure endpoint
+- **Advanced Security**: Authentication, authorization, rate limiting tailored for AI workloads
+
+### Key Features
+
+#### AI-Specific Capabilities
+
+- **Prompt Guards**: Protect against prompt injection and data leakage
+- **Model Failover**: Automatic failover between LLM providers
+- **Function Calling**: Support for LLM function/tool calling
+- **AI Observability**: Detailed metrics and tracing for AI requests
+- **Semantic Caching**: Cache responses based on semantic similarity
+- **Token-Based Rate Limiting**: Control costs with token consumption limits
+
+#### Gateway API Inference Extension
+
+KGateway supports the Gateway API Inference Extension, which introduces the following (sketched after this list):
+
+- `InferenceModel` CRD: Define LLM models and their endpoints
+- `InferencePool` CRD: Group models for load balancing and failover
+- Intelligent endpoint picking based on model performance metrics
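+
+For illustration, a rough sketch of how these resources fit together. This assumes the upstream Inference Extension's `inference.networking.x-k8s.io/v1alpha2` API; the names (`llama-pool`, the endpoint-picker reference) are placeholders, and field names may differ between extension releases, so treat this as a sketch rather than a definitive manifest:
+
+```yaml
+apiVersion: inference.networking.x-k8s.io/v1alpha2
+kind: InferencePool
+metadata:
+  name: llama-pool
+spec:
+  # Pods serving the model are selected by label.
+  selector:
+    app: llama-server
+  targetPortNumber: 8000
+  # Endpoint picker extension that scores endpoints on model metrics.
+  extensionRef:
+    name: llama-endpoint-picker
+---
+apiVersion: inference.networking.x-k8s.io/v1alpha2
+kind: InferenceModel
+metadata:
+  name: llama
+spec:
+  modelName: llama-3-8b
+  criticality: Critical
+  poolRef:
+    name: llama-pool
+```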
+
+## Setup
+
+### Step 1: Enable KGateway AI Extension
+
+Update values.yaml for KubeLB manager chart to enable KGateway with AI capabilities:
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+
+kubelb-addons:
+ enabled: true
+
+ kgateway:
+ enabled: true
+ gateway:
+ aiExtension:
+ enabled: true
+```
+
+### Step 2: Create Gateway Specific Resources
+
+1. Deploy a Gateway resource to handle AI traffic:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: ai-gateway
+ namespace: kubelb
+ labels:
+ app: ai-gateway
+spec:
+ gatewayClassName: kgateway
+ infrastructure:
+ parametersRef:
+ name: ai-gateway
+ group: gateway.kgateway.dev
+ kind: GatewayParameters
+ listeners:
+ - protocol: HTTP
+ port: 8080
+ name: http
+ allowedRoutes:
+ namespaces:
+ from: All
+```
+
+2. Deploy a GatewayParameters resource to enable the AI extension:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: GatewayParameters
+metadata:
+ name: ai-gateway
+ namespace: kubelb
+ labels:
+ app: ai-gateway
+spec:
+ kube:
+ aiExtension:
+ enabled: true
+ ports:
+ - name: ai-monitoring
+ containerPort: 9092
+ image:
+ registry: cr.kgateway.dev/kgateway-dev
+ repository: kgateway-ai-extension
+ tag: v2.1.0-main
+ service:
+ type: LoadBalancer
+```
+
+## OpenAI Integration Example
+
+This example shows how to set up secure access to OpenAI through the AI Gateway.
+
+### Step 1: Store OpenAI API Key
+
+Create a Kubernetes secret with your OpenAI API key:
+
+```bash
+export OPENAI_API_KEY="sk-..."
+
+kubectl create secret generic openai-secret \
+ --from-literal=Authorization="Bearer ${OPENAI_API_KEY}" \
+ --namespace kubelb
+```
+
+### Step 2: Create Backend Configuration
+
+Define an AI Backend that uses the secret for authentication:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: Backend
+metadata:
+ name: openai
+ namespace: kubelb
+spec:
+ type: AI
+ ai:
+ llm:
+ provider:
+ openai:
+ authToken:
+ kind: SecretRef
+ secretRef:
+ name: openai-secret
+ namespace: kubelb
+ model: "gpt-3.5-turbo"
+```
+
+### Step 3: Create HTTPRoute
+
+Route traffic to the OpenAI backend:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: openai-route
+ namespace: kubelb
+spec:
+ parentRefs:
+ - name: ai-gateway
+ namespace: kubelb
+ rules:
+ - matches:
+ - path:
+ type: PathPrefix
+ value: /openai
+ filters:
+ - type: URLRewrite
+ urlRewrite:
+ path:
+ type: ReplaceFullPath
+ replaceFullPath: /v1/chat/completions
+ backendRefs:
+ - name: openai
+ namespace: kubelb
+ group: gateway.kgateway.dev
+ kind: Backend
+```
+
+### Step 4: Test the Configuration
+
+Get the Gateway's external IP:
+
+```bash
+kubectl get gateway ai-gateway -n kubelb
+export GATEWAY_IP=$(kubectl get svc -n kubelb ai-gateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+```
+
+Send a test request:
+
+```bash
+curl -X POST "http://${gateway_ip}/openai" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "messages": [
+ {"role": "user", "content": "Hello, how are you?"}
+ ]
+ }'
+```
+
+## Rate Limiting (Optional)
+
+Add rate limiting to control costs and prevent abuse:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: RateLimitPolicy
+metadata:
+ name: openai-ratelimit
+ namespace: kubelb
+spec:
+ targetRef:
+ kind: HTTPRoute
+ name: openai-route
+ namespace: kubelb
+ limits:
+ - requests: 100
+ unit: hour
+```
+
+## MCP Gateway
+
+Similar to the AI Gateway, you can also use agentgateway to connect to one or more MCP servers in any environment.
+
+Please follow this guide to set up the MCP Gateway: [MCP Gateway](https://kgateway.dev/docs/agentgateway/mcp/)
+
+## Further Reading
+
+For advanced configurations and features:
+
+- [KGateway AI Setup Documentation](https://kgateway.dev/docs/ai/setup/)
+- [KGateway Authentication Guide](https://kgateway.dev/docs/ai/auth/)
+- [Prompt Guards and Security](https://kgateway.dev/docs/ai/prompt-guards/)
+- [Multiple LLM Providers](https://kgateway.dev/docs/ai/cloud-providers/)
diff --git a/content/kubelb/v1.2/tutorials/bgp/_index.en.md b/content/kubelb/v1.2/tutorials/bgp/_index.en.md
new file mode 100644
index 000000000..0852763cc
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/bgp/_index.en.md
@@ -0,0 +1,53 @@
++++
+title = "Layer 4 Load balancing with BGP"
+linkTitle = "BGP Support"
+date = 2025-08-27T10:07:15+02:00
+weight = 6
++++
+
+In the management cluster, KubeLB offloads the provisioning of the actual load balancers to the load balancing appliance that is being used. This can be the CCM in case of a cloud provider, or a self-managed solution like [MetalLB](https://metallb.universe.tf), [Cilium Load Balancer](https://cilium.io/use-cases/load-balancer/), or any other solution.
+
+Due to this generic nature, KubeLB can be used with any load balancing appliance, and the underlying route advertisement protocols such as BGP, OSPF, and L2 are all supported. This tutorial will focus on [BGP](https://networklessons.com/bgp/introduction-to-bgp), but it assumes that the underlying infrastructure of your Kubernetes cluster is already configured to support BGP.
+
+## Setup
+
+We'll use [MetalLB](https://metallb.universe.tf) with BGP for this tutorial. Update the values.yaml file for the KubeLB manager to enable MetalLB:
+
+```yaml
+kubelb-addons:
+ metallb:
+ enabled: true
+```
+
+A minimal configuration for MetalLB for demonstration purposes is as follows:
+
+```yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ addresses:
+ - 10.10.255.200-10.10.255.250
+ autoAssign: true
+ avoidBuggyIPs: true
+---
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ ipAddressPools:
+ - extern
+```
+
+This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type.
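+
+BGP additionally needs at least one peer so MetalLB knows which router to advertise routes to. A minimal sketch, assuming a router reachable at 10.10.0.1 with ASN 64501 and 64500 as your own ASN (all three values are placeholders for your infrastructure):
+
+```yaml
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  name: upstream-router
+  namespace: metallb-system
+spec:
+  # ASN the MetalLB speakers present themselves with.
+  myASN: 64500
+  # ASN and address of the BGP peer (your router).
+  peerASN: 64501
+  peerAddress: 10.10.0.1
+```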
+
+Afterwards, you can follow the [Layer 4 Load balancing](../loadbalancer#usage-with-kubelb) tutorial to create a `LoadBalancer` service in the tenant cluster.
+
+### Further reading
+
+- [MetalLB BGP Configuration](https://metallb.universe.tf/configuration/_advanced_bgp_configuration/)
+- [MetalLB BGP Usage](https://metallb.universe.tf/usage/#bgp)
diff --git a/content/kubelb/v1.2/tutorials/config/_index.en.md b/content/kubelb/v1.2/tutorials/config/_index.en.md
new file mode 100644
index 000000000..9ee2a661f
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/config/_index.en.md
@@ -0,0 +1,238 @@
++++
+title = "KubeLB Management Cluster Configuration"
+linkTitle = "Management Configuration"
+date = 2023-10-27T10:07:15+02:00
+weight = 1
++++
+
+We have a dedicated CRD, `Config`, that can be used to manage the configuration for the KubeLB manager in the management cluster. The following is an example of a `Config` resource:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ envoyProxy:
+ replicas: 3
+ topology: shared
+```
+
+Users can skip creation of the **Config** object via Helm by applying the following modification to the **values.yaml** file for the Helm chart:
+
+```yaml
+kubelb:
+ skipConfigGeneration: true
+```
+
+This decouples the `Config` resource from the Helm chart so that users can manage it separately. This is recommended, since coupling the `Config` CRD to the Helm chart means the admin would need to upgrade the chart just to update the `Config` CRD.
+
+**NOTE: The Config CR named `default` is mandatory for KubeLB manager to work.**
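+
+If you manage the `Config` yourself, you can verify that the mandatory object exists. A small sketch, assuming the CRD's plural name is `configs`:
+
+```bash
+kubectl get configs.kubelb.k8c.io default -n kubelb
+```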
+
+## Configuration Options
+
+{{% notice note %}}
+Tenant configuration has a higher precedence than the global configuration and overrides the global configuration values for the tenant if the fields are available in both the tenant and global configuration.
+{{% /notice %}}
+
+### Essential configurations
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ ingress:
+ class: "nginx"
+ gatewayAPI:
+ class: "eg"
+ # Enterprise Edition only
+ certificates:
+ defaultClusterIssuer: "letsencrypt-prod"
+```
+
+These configurations are available at the global level and also at the tenant level; tenant-level values override the global ones for that tenant (a tenant-level sketch follows the list below). It's important to configure these options at one of those levels since they perform essential functions for KubeLB.
+
+1. **Ingress.Class**: The class to use for Ingress resources for tenants in management cluster.
+2. **GatewayAPI.Class**: The class to use for Gateway API resources for tenants in management cluster.
+3. **Certificates.DefaultClusterIssuer(EE)**: The default cluster issuer to use for certificate management.
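+
+For reference, a tenant-level counterpart setting the same essential fields (the `Tenant` resource is covered in detail in the Tenants guide):
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: shroud
+spec:
+  ingress:
+    class: "nginx"
+  gatewayAPI:
+    class: "eg"
+```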
+
+### Annotation Settings
+
+KubeLB can propagate annotations from services, ingresses, gateway API objects etc. in the tenant cluster to the corresponding LoadBalancer or Route resources in the management cluster. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.
+
+Annotations are not propagated by default since tenants can make unwanted changes to the LoadBalancer configuration. Since each tenant is treated as a separate entity, the KubeLB manager cluster needs to be configured to allow the propagation of specific annotations.
+
+The annotation configuration set on the tenant level will override the global annotation configuration for that tenant.
+
+#### 1. Propagate all annotations
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ propagateAllAnnotations: true
+```
+
+#### 2. Propagate specific annotations
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ propagatedAnnotations:
+ # If the key is empty, any value can be configured for propagation.
+    metallb.universe.tf/allow-shared-ip: ""
+ # Since the value is explicitly provided, only this value will be allowed for propagation.
+ metallb.universe.tf/loadBalancerIPs: "8.8.8.8"
+```
+
+#### 3. Default annotations
+
+Default annotations for resources that KubeLB generates in the management cluster can also be configured. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ defaultAnnotations:
+ service:
+      service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ ingress:
+ kubernetes.io/ingress.class: "nginx"
+ gatewayapi:
+ kubernetes.io/ingress.class: "eg"
+ # Will be applied to all resources such as Ingress, Gateway API resources, services, etc.
+ all:
+      internal: "true"
+```
+
+### Configure Envoy Proxy
+
+Sample configuration, populated with example values for demonstration purposes only. All of the values are optional and have sane defaults. For more details, check the [CRD References]({{< relref "../../references">}})
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ envoyProxy:
+ replicas: 3
+ # Immutable, cannot be changed after configuration.
+ topology: shared
+ useDaemonset: false
+ singlePodPerNode: false
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ # Can be used to configure requests/limits for envoy proxy
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ # Configure affinity for envoy proxy
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+```
+
+### Configure LoadBalancer Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ loadBalancer:
+ # The class to use for LB service in the management cluster
+ class: "metallb.universe.tf/metallb"
+ disable: false
+ # Enterprise Edition Only
+ limit: 5
+```
+
+### Configure Ingress Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ ingress:
+ # The class to use for Ingress resources in the management cluster
+ class: "nginx"
+ disable: false
+```
+
+### Configure Gateway API Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ gatewayAPI:
+ class: "eg"
+ disable: false
+ defaultGateway:
+ name: "default"
+ namespace: "envoy-gateway"
+ # Enterprise Edition Only (all the below options are only available in Enterprise Edition)
+ gateway:
+ limits: 10
+ disableHTTPRoute: false
+ disableGRPCRoute: false
+ disableTCPRoute: false
+ disableUDPRoute: false
+ disableTLSRoute: false
+```
+
+### Configure DNS Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ dns:
+ # The wildcard domain to use for auto-generated hostnames for Load balancers
+    # In the Enterprise Edition, this is also used to generate dynamic hostnames for tunnels.
+ wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for load balancers and tunnels (Enterprise Edition)
+ allowExplicitHostnames: false
+```
+
+**For more details and options, please go through [CRD References]({{< relref "../../references">}})**
diff --git a/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md b/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md
new file mode 100644
index 000000000..9bd0d2c4b
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md
@@ -0,0 +1,195 @@
++++
+title = "Gateway API"
+linkTitle = "Gateway API"
+date = 2023-10-27T10:07:15+02:00
+weight = 4
++++
+
+This tutorial will guide you through the process of setting up Layer 7 load balancing with Gateway API.
+
+Gateway API targets three personas:
+
+1. Platform Provider: The Platform Provider is responsible for the overall environment that the cluster runs in, i.e. the cloud provider. The Platform Provider will interact with GatewayClass resources.
+2. Platform Operator: The Platform Operator is responsible for overall cluster administration. They manage policies, network access, application permissions and will interact with Gateway resources.
+3. Service Operator: The Service Operator is responsible for defining application configuration and service composition. They will interact with HTTPRoute and TLSRoute resources and other typical Kubernetes resources.
+
+In KubeLB, we treat the admins of the management cluster as the Platform Provider. Hence, they are responsible for creating the `GatewayClass` resource. Tenants are the Service Operators. The Platform Operator role can vary based on your configuration of the management cluster: in the Enterprise Edition, users can set the limit of Gateways to 0 to shift the role of "Platform Operator" to the "Platform Provider" (see the sketch below); otherwise, by default, the Platform Operator role is assigned to the tenants.
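+
+A hedged sketch of shifting that role in the Enterprise Edition, assuming the `Tenant` resource mirrors the `gatewayAPI.gateway.limits` field shown in the management configuration guide:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: shroud
+spec:
+  gatewayAPI:
+    gateway:
+      # 0 Gateways for this tenant: only the Platform Provider manages Gateways.
+      limits: 0
+```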
+
+### Setup
+
+Kubermatic's default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Install Envoy Gateway by following this [guide](https://gateway.envoyproxy.io/docs/install/install-helm/), or use any other Gateway API implementation of your choice.
+
+Update values.yaml for KubeLB manager chart to enable the Gateway API addon.
+
+```yaml
+kubelb:
+ enableGatewayAPI: true
+
+## Addon configuration
+kubelb-addons:
+ enabled: true
+ # Create the GatewayClass resource in the management cluster.
+ gatewayClass:
+ create: true
+
+ envoy-gateway:
+ enabled: true
+```
+
+#### KubeLB Manager Configuration
+
+Update the KubeLB manager configuration to use the Gateway Class name `eg`, either at the global or the tenant level:
+
+#### Global
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+ name: default
+ namespace: kubelb
+spec:
+ gatewayAPI:
+ # Name of the Gateway Class.
+ class: "eg"
+```
+
+#### Tenant
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+ name: shroud
+spec:
+ gatewayAPI:
+ # Name of the Gateway Class.
+ class: "eg"
+```
+
+**Leave it empty if you named your Gateway Class `kubelb`.**
+
+### Usage with KubeLB
+
+#### Gateway resource
+
+Once you have created the GatewayClass, the next required resource is the Gateway. In the Community Edition, the Gateway needs to be created in the tenant cluster. In the Enterprise Edition, the Gateway can exist in either the management cluster or the tenant cluster: users can set the limit of Gateways to 0 to shift the role of "Platform Operator" to the "Platform Provider"; otherwise, by default, the Platform Operator role is assigned to the tenants.
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: kubelb
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ protocol: HTTP
+ port: 80
+```
+
+It is recommended to create the Gateway in the tenant cluster directly, since the Gateway object needs to be modified regularly to attach new routes. In cases where the Gateway exists in the management cluster, set the `use-gateway-class` argument for the CCM to false (see the sketch below).
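+
+A sketch of the corresponding CCM chart override; the values key is an assumption, modeled on the `useIngressClass` option in the Ingress guide, and maps to the `use-gateway-class` flag:
+
+```yaml
+kubelb:
+  # Hypothetical key; disables GatewayClass-based filtering in the CCM.
+  useGatewayClass: false
+```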
+
+{{% notice warning %}}
+In the Community Edition, only one Gateway is allowed per tenant, and it has to be named `kubelb`.
+{{% /notice %}}
+
+#### HTTPRoute resource
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: backend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: backend
+ labels:
+ app: backend
+ service: backend
+spec:
+ ports:
+ - name: http
+ port: 3000
+ targetPort: 3000
+ selector:
+ app: backend
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: backend
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: backend
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: backend
+ version: v1
+ spec:
+ serviceAccountName: backend
+ containers:
+ - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e
+ imagePullPolicy: IfNotPresent
+ name: backend
+ ports:
+ - containerPort: 3000
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+---
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+ name: backend
+spec:
+ parentRefs:
+ - name: kubelb
+ hostnames:
+ - "www.example.com"
+ rules:
+ - backendRefs:
+ - group: ""
+ kind: Service
+ name: backend
+ port: 3000
+ weight: 1
+ matches:
+ - path:
+ type: PathPrefix
+ value: /
+```
+
+### Support
+
+The following resources are supported in the CE and EE versions:
+
+- Community Edition:
+ - HTTPRoute
+ - GRPCRoute
+- Enterprise Edition:
+ - HTTPRoute
+ - GRPCRoute
+ - TCPRoute
+ - UDPRoute
+ - TLSRoute
+
+**For more details and examples on how to use them, please refer to the [Envoy Gateway Documentation](https://gateway.envoyproxy.io/docs/tasks/).**
+
+### Limitations
+
+- ReferenceGrant and BackendTLSPolicy are not supported in KubeLB yet.
diff --git a/content/kubelb/v1.2/tutorials/ingress/_index.en.md b/content/kubelb/v1.2/tutorials/ingress/_index.en.md
new file mode 100644
index 000000000..51a5d84b4
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/ingress/_index.en.md
@@ -0,0 +1,157 @@
++++
+title = "Ingress"
+linkTitle = "Ingress"
+date = 2023-10-27T10:07:15+02:00
+weight = 5
++++
+
+This tutorial will guide you through the process of setting up Layer 7 load balancing with Ingress.
+
+Kubermatic's default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. The Gateway API-specific features built and consumed in KubeLB will be based on Envoy Gateway. This is not a strict binding, however, and our consumers are free to use any Ingress or Gateway API implementation. The only limitation is that we only support native Kubernetes APIs, i.e. Ingress and the Gateway APIs. Provider-specific APIs are not supported by KubeLB and will be completely ignored.
+
+Although KubeLB supports Ingress, we strongly encourage you to use Gateway API instead, as Ingress has been [feature frozen](https://kubernetes.io/docs/concepts/services-networking/ingress/#:~:text=Note%3A-,Ingress%20is%20frozen,-.%20New%20features%20are) in Kubernetes and all new development is happening in the Gateway API space. The biggest advantage of Gateway API is that it is more flexible, has extensible APIs, and is **multi-tenant compliant** by default. Ingress doesn't support multi-tenancy.
+
+### Setup
+
+There are two modes in which Ingress can be setup in the management cluster:
+
+#### Per tenant (Recommended)
+
+Install your controller in the following way and scope it down to a specific namespace. This is the recommended approach, as it allows you to have a single controller per tenant, and the IP for the ingress controller is not shared across tenants.
+
+Install the **Ingress Controller** in the tenant namespace. Replace **TENANT_NAME** with the name of the tenant. This has to be unique to ensure that any cluster-level resource that is installed doesn't conflict with existing resources. The following example is for a tenant named `shroud`:
+
+```sh
+TENANT_NAME=shroud
+TENANT_NAMESPACE=tenant-$TENANT_NAME
+
+helm upgrade --install ingress-nginx-${TENANT_NAME} ingress-nginx \
+ --repo https://kubernetes.github.io/ingress-nginx \
+ --namespace ${TENANT_NAMESPACE} \
+ --create-namespace \
+ --set controller.scope.enabled=true \
+ --set controller.scope.namespace=${TENANT_NAMESPACE} \
+ --set controller.ingressClassResource.name=nginx-${TENANT_NAME}
+```
+
+The next step would be to configure the tenant to use the new ingress controller:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+ name: ${TENANT_NAME}
+spec:
+ ingress:
+ class: "nginx-${TENANT_NAME}"
+```
+
+#### Shared
+
+Update values.yaml for KubeLB manager chart to enable the ingress-nginx addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+ ingress-nginx:
+ enabled: true
+ controller:
+ service:
+ externalTrafficPolicy: Local
+```
+
+### Usage with KubeLB
+
+In the tenant cluster, create the following resources:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: backend
+spec:
+ ingressClassName: kubelb
+ rules:
+ # Replace with your domain
+ - host: "demo.example.com"
+ http:
+ paths:
+ - path: /backend
+ pathType: Exact
+ backend:
+ service:
+ name: backend
+ port:
+ number: 3000
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: backend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: backend
+ labels:
+ app: backend
+ service: backend
+spec:
+ ports:
+ - name: http
+ port: 3000
+ targetPort: 3000
+ selector:
+ app: backend
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: backend
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: backend
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: backend
+ version: v1
+ spec:
+ serviceAccountName: backend
+ containers:
+ - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e
+ imagePullPolicy: IfNotPresent
+ name: backend
+ ports:
+ - containerPort: 3000
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+```
+
+This will create an Ingress resource, a service, and a deployment. KubeLB CCM will create a service of type `NodePort` for your service to ensure connectivity from the management cluster. Note that the class for the Ingress is `kubelb`; this is required for KubeLB to manage the Ingress resources. This behavior can, however, be changed by following the [Ingress configuration](#configurations).
+
+### Configurations
+
+KubeLB CCM helm chart can be used to further configure the CCM. Some essential options are:
+
+```yaml
+kubelb:
+ # Set to false to watch all resources irrespective of the Ingress class.
+ useIngressClass: true
+```
diff --git a/content/kubelb/v1.2/tutorials/kkp/_index.en.md b/content/kubelb/v1.2/tutorials/kkp/_index.en.md
new file mode 100644
index 000000000..a2c4c4e04
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/kkp/_index.en.md
@@ -0,0 +1,48 @@
++++
+title = "Kubermatic Kubernetes Platform Integration"
+date = 2023-10-27T10:07:15+02:00
+weight = 9
+enterprise = true
++++
+
+## Kubermatic Kubernetes Platform (Enterprise Edition Only)
+
+Starting with KKP v2.24, KubeLB Enterprise Edition is integrated into the Kubermatic Kubernetes Platform (KKP). This means that you can use KubeLB to provision load balancers for your KKP clusters. KKP will take care of configurations and deployments for you in the user cluster. Admins mainly need to create the KubeLB manager cluster and configure KKP to use it.
+
+## Prerequisites
+
+To configure KubeLB for KKP, you first need a KubeLB management cluster and its Kubeconfig. KubeLB requires access to certain resources like Tenants, LoadBalancers, Routes, etc. for the KKP integration to work. Instead of using the admin Kubeconfig, we can use a Kubeconfig with the necessary RBAC permissions to access the required resources.
+
+1. Create a KubeLB management cluster with the following settings in the `values.yaml` file for the `kubelb-management` chart:
+
+```yaml
+kkpintegration.rbac: true
+```
+
+2. Install the [kubectl-view-serviceaccount-kubeconfig](https://github.com/superbrothers/kubectl-view-serviceaccount-kubeconfig-plugin?tab=readme-ov-file#install-the-plugin) plugin.
+3. Use the following command to generate a Kubeconfig for the service account `kubelb-kkp` in the `kubelb` namespace:
+
+```bash
+kubectl view-serviceaccount-kubeconfig kubelb-kkp -n kubelb --admin
+```
+
+4. Use the output of the previous command to create a file `kubelb-secret.yaml` with the required secret:
+
+```bash
+kubectl create secret generic kubelb-management-cluster \
+ --namespace=kubermatic \
+ --from-literal=kubeconfig="$(kubectl view-serviceaccount-kubeconfig kubelb-kkp -n kubelb --admin)" \
+ --dry-run=client -o yaml > kubelb-secret.yaml
+```
+
+5. Apply the file `kubelb-secret.yaml` to the `kubermatic` namespace in your KKP cluster.
+
+```bash
+kubectl apply -f kubelb-secret.yaml
+```
+
+For further configuration, please refer to the [official KKP documentation](https://docs.kubermatic.com/kubermatic/latest/tutorials-howtos/kubelb).
+
+{{% notice note %}}
+To use the KubeLB enterprise offering, you need to have a valid license. Please [contact sales](mailto:sales@kubermatic.com) for more information.
+{{% /notice %}}
diff --git a/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md b/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md
new file mode 100644
index 000000000..7a2d58459
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md
@@ -0,0 +1,145 @@
++++
+title = "Layer 4 Load balancing"
+linkTitle = "Layer 4 Load balancing"
+date = 2023-10-27T10:07:15+02:00
+weight = 3
++++
+
+This tutorial will guide you through the process of setting up a Layer 4 LoadBalancer using KubeLB.
+
+### Setup
+
+For Layer 4 load balancing, the Kubernetes cluster should either run on a cloud whose CCM supports the `LoadBalancer` service type, or a self-managed solution like [MetalLB](https://metallb.universe.tf) should be installed. [This guide](https://metallb.universe.tf/installation/#installation-with-helm) can be followed to install and configure MetalLB on the management cluster.
+
+A minimal configuration for MetalLB for demonstration purposes is as follows:
+
+```yaml
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ ipAddressPools:
+ - extern
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: extern
+ namespace: metallb-system
+spec:
+ addresses:
+ - 10.10.255.200-10.10.255.250
+```
+
+This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type.
+
+### Usage with KubeLB
+
+In the tenant cluster, create a service of type `LoadBalancer` and a deployment:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: backend
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: backend
+ labels:
+ app: backend
+ service: backend
+spec:
+ ports:
+ - name: http
+ port: 3000
+ targetPort: 3000
+ selector:
+ app: backend
+ type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: backend
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: backend
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: backend
+ version: v1
+ spec:
+ serviceAccountName: backend
+ containers:
+ - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e
+ imagePullPolicy: IfNotPresent
+ name: backend
+ ports:
+ - containerPort: 3000
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+```
+
+This will create a service of type `LoadBalancer` and a deployment. KubeLB CCM will then propagate the request to the management cluster, create a LoadBalancer CR there, and retrieve the IP address allocated in the management cluster. Eventually, the IP address will be assigned to the service in the tenant cluster.
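+
+You can watch the tenant-cluster service until the allocated address shows up under `EXTERNAL-IP`:
+
+```bash
+kubectl get service backend --watch
+```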
+
+### Load Balancer Hostname Support
+
+KubeLB now supports assigning a hostname directly to the LoadBalancer resource. This is helpful for simpler configurations where no special routing rules are required for your Ingress or HTTPRoute resources.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: LoadBalancer
+metadata:
+ name: test-lb-hostname
+ namespace: tenant-dkrqjswsgk
+ annotations:
+ kubelb.k8c.io/request-wildcard-domain: "true"
+spec:
+ # hostname: test.example.com
+ endpoints:
+ - addresses:
+ - ip: 91.99.112.254
+ ports:
+ - name: 8080-tcp
+ port: 31632
+ protocol: TCP
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ type: ClusterIP
+```
+
+This will create a LoadBalancer resource with the hostname `test.example.com` that can forward traffic to the IP address `91.99.112.254` on port `31632`. The `kubelb.k8c.io/request-wildcard-domain: "true"` annotation is used to request a wildcard domain for the hostname. Otherwise `spec.hostname` can also be used to explicitly set the hostname.
+
+Please take a look at [DNS Automation](../security/dns/#enable-dns-automation) for more details on how to configure DNS for the hostname.
+
+### Configurations
+
+KubeLB CCM helm chart can be used to further configure the CCM. Some essential options are:
+
+```yaml
+kubelb:
+ # Use ExternalIP or InternalIP in the management cluster to route traffic back to the node ports of the tenant cluster.
+ nodeAddressType: ExternalIP
+ # This can be enabled to use KubeLB in a cluster where another load balancer provider is already running. When enabled, kubeLB will only manage
+ # services of type LoadBalancer that are using the `kubelb` LoadBalancerClass.
+ useLoadBalancerClass: false
+```
diff --git a/content/kubelb/v1.2/tutorials/observability/_index.en.md b/content/kubelb/v1.2/tutorials/observability/_index.en.md
new file mode 100644
index 000000000..743871860
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/observability/_index.en.md
@@ -0,0 +1,26 @@
++++
+title = "Observability"
+linkTitle = "Observability"
+date = 2023-10-27T10:07:15+02:00
+weight = 8
++++
+
+KubeLB is a mission-critical component in the Kubernetes ecosystem, and its observability is crucial for ensuring the stability and reliability of the platform. This guide will walk you through the steps to enable and configure observability for KubeLB.
+
+KubeLB itself doesn't restrict platform providers to specific observability tools, since we are well aware that different customers have different monitoring, logging, alerting, and tracing stacks deployed, based on their own requirements. It does, however, offer Grafana dashboards that can be plugged into your existing monitoring stack.
+
+## Grafana Dashboard [WIP]
+
+This is work in progress and can be tracked against [Monitoring and Alerting](https://github.com/kubermatic/kubelb/issues/56)
+
+## Alerting and Recording rules [WIP]
+
+This is work in progress and can be tracked against [Monitoring and Alerting](https://github.com/kubermatic/kubelb/issues/56)
+
+## Recommended Tools
+
+Our suggested tools for observability are:
+
+1. [Gateway Observability](https://gateway.envoyproxy.io/docs/tasks/observability/gateway-observability/): This is the default MLA stack provided by Envoy Gateway. Since it's designed specifically for Envoy Gateway and Gateway APIs, it offers a comprehensive set of observability features tailored to the needs of Envoy Gateway users.
+2. [Hubble UI](https://docs.cilium.io/en/stable/gettingstarted/hubble_setup/): When using Cilium as the CNI, Hubble UI provides a user-friendly interface for visualizing and analyzing network traffic in your Kubernetes cluster.
+3. [Kiali](https://kiali.io/docs/installation/installation-guide/): When using Istio as the service mesh, Kiali is a powerful tool for visualizing and analyzing the traffic flow within your Istio-based applications.
diff --git a/content/kubelb/v1.2/tutorials/security/_index.en.md b/content/kubelb/v1.2/tutorials/security/_index.en.md
new file mode 100644
index 000000000..310780651
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/security/_index.en.md
@@ -0,0 +1,13 @@
++++
+title = "Security"
+linkTitle = "Security"
+date = 2023-10-27T10:07:15+02:00
+weight = 7
++++
+
+This guide covers managing DNS, TLS, and other security-related configurations in KubeLB.
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md b/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md
new file mode 100644
index 000000000..9d28a7b16
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md
@@ -0,0 +1,261 @@
++++
+title = "Certificate Management"
+linkTitle = "Certificate Management"
+date = 2023-10-27T10:07:15+02:00
+weight = 1
+enterprise = true
++++
+
+## Setup
+
+### Install Cert-Manager
+
+Install [cert-manager](https://cert-manager.io) to manage certificates for your tenants.
+
+These are minimal examples to get you started quickly. Please refer to the documentation of [cert-manager](https://cert-manager.io/docs/installation/helm/) for further details and configurations.
+
+{{< tabs name="cert-manager" >}}
+{{% tab name="Gateway API" %}}
+
+Update values.yaml for KubeLB manager chart to enable the cert-manager addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: true
+```
+
+{{% /tab %}}
+{{% tab name="Ingress" %}}
+
+Update values.yaml for KubeLB manager chart to enable the cert-manager addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+ cert-manager:
+ enabled: true
+ crds:
+ enabled: true
+ config:
+ apiVersion: controller.config.cert-manager.io/v1alpha1
+ kind: ControllerConfiguration
+ enableGatewayAPI: false
+```
+
+{{% /tab %}}
+{{< /tabs >}}
+
+### Configure Tenant
+
+Certificate management can be enabled/disabled at the global or tenant level. For automation purposes, you can configure allowed domains and a default issuer for the certificates at the tenant level.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+ name: shroud
+spec:
+ # These domains are allowed to be used for Ingress, Gateway API, DNS, and certs.
+ allowedDomains:
+ - "kube.example.com"
+ - "*.kube.example.com"
+ - "*.shroud.example.com"
+ certificates:
+ # can also be configured in the `Config` resource at a global level.
+ # Default issuer to use if `kubelb.k8c.io/manage-certificates` annotation is added to the cluster.
+ defaultClusterIssuer: "letsencrypt-staging"
+ # If not empty, only the domains specified here will have automation for Certificates. Everything else will be ignored.
+ allowedDomains:
+ - "*.shroud.example.com"
+```
+
+Users can then either use [cert-manager annotations](https://cert-manager.io/docs/usage/ingress/) or the annotation `kubelb.k8c.io/manage-certificates: true` on their resources to automate certificate management.
+
+### Cluster Issuer example
+
+{{% notice info %}}
+Due to multi-tenancy, it's recommended to use the DNS challenge for certificate management. Gateway API has a limitation and doesn't support wildcard domains with the HTTP01 challenge. Similarly, for Ingress, unless you are using a single ingress installation for all tenants, you will need to create a separate ClusterIssuer for each tenant. The same is the case for Gateway API, since it needs the Gateway name to resolve the certificate challenges.
+{{% /notice %}}
+
+#### Example for DNS challenge with AWS Route53
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production-dns
+spec:
+ acme:
+ email: user@example.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: letsencrypt-production-dns
+ solvers:
+ - dns01:
+ route53:
+ region: eu-central-1
+ accessKeyIDSecretRef:
+ name: route53-credentials
+ key: access-key-id
+ secretAccessKeySecretRef:
+ name: route53-credentials
+ key: secret-access-key
+```
+
+#### Example for HTTP01 challenge
+
+{{< tabs name="cert-manager" >}}
+{{% tab name="Gateway API" %}}
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production
+spec:
+ acme:
+ email: user@example.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ name: example-issuer-account-key
+ solvers:
+ - http01:
+ gatewayHTTPRoute:
+ parentRefs:
+ - kind: Gateway
+ name: default
+ namespace: tenant-default
+ sectionName: http
+```
+
+{{% /tab %}}
+{{% tab name="Ingress" %}}
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production
+spec:
+ acme:
+ # You must replace this email address with your own.
+ # Let's Encrypt will use this to contact you about expiring
+ # certificates, and issues related to your account.
+ email: user@example.com
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ # Secret resource that will be used to store the account's private key.
+ name: example-issuer-account-key
+ # Add a single challenge solver, HTTP01 using nginx
+ solvers:
+ - http01:
+ ingress:
+ ingressClassName: nginx
+```
+
+{{% /tab %}}
+{{< /tabs >}}
+
+The additional validation at the tenant level allows us to use a single instance of cert-manager for multiple tenants. Multiple cert-manager installations are not recommended; it's better to have a single instance of cert-manager for all tenants and, if required, different ClusterIssuers/Issuers for different tenants.
+
+## Usage
+
+In the tenant cluster, create the following resources based on your requirements:
+
+1. Use cert-manager with known issuer:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: example
+ annotations:
+ cert-manager.io/issuer: foo
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ hostname: example.com
+ port: 443
+ protocol: HTTPS
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - name: example-com-tls
+```
+
+2. Leave the issuer up to the management cluster:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: example
+ annotations:
+    kubelb.k8c.io/manage-certificates: "true"
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ hostname: example.com
+ port: 443
+ protocol: HTTPS
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - name: example-com-tls
+```
+
+3. Use custom certificates:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: example
+ namespace: default
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ hostname: example.com
+ port: 443
+ protocol: HTTPS
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - name: custom-certificate
+---
+kind: SyncSecret
+apiVersion: kubelb.k8c.io/v1alpha1
+data:
+ tls.crt: ZnJhbmtsYW1wYXJkCg==
+ tls.key: ZnJhbmtsYW1wYXJkCg==
+metadata:
+ name: custom-certificate
+ namespace: default
+type: kubernetes.io/tls
+```
+
+This will then sync the secret to the management cluster in a secure way. Refer to [Bring your own Secrets]({{< relref "../secrets" >}}) for more details.
+
+**For more use cases, view [cert-manager documentation](https://cert-manager.io/docs/usage/gateway/)**
diff --git a/content/kubelb/v1.2/tutorials/security/dns/_index.en.md b/content/kubelb/v1.2/tutorials/security/dns/_index.en.md
new file mode 100644
index 000000000..952a5002a
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/security/dns/_index.en.md
@@ -0,0 +1,216 @@
++++
+title = "DNS Management"
+linkTitle = "DNS Management"
+date = 2023-10-27T10:07:15+02:00
+weight = 1
+enterprise = true
++++
+
+## Setup
+
+### Install External-dns
+
+We leverage [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters.
+
+**This is just an example to give you a head start. For more details on setting up external-dns for different providers, visit the [Official Documentation](https://kubernetes-sigs.github.io/external-dns).**
+
+Update the values.yaml for KubeLB manager chart to enable the external-dns addon.
+
+```yaml
+kubelb-addons:
+ enabled: true
+
+ external-dns:
+ enabled: true
+ domainFilters:
+ - example.com
+ extraVolumes:
+ - name: credentials
+ secret:
+ secretName: route53-credentials
+ extraVolumeMounts:
+ - name: credentials
+ mountPath: /.aws
+ readOnly: true
+ env:
+ - name: AWS_SHARED_CREDENTIALS_FILE
+ value: /.aws/credentials
+ txtOwnerId: kubelb-example-aws
+ registry: txt
+ provider: aws
+ policy: sync
+ sources:
+ - service
+ - ingress
+ # Comment out the below resources if you are not using Gateway API.
+ - gateway-httproute
+ - gateway-grpcroute
+ - gateway-tlsroute
+ - gateway-tcproute
+ - gateway-udproute
+```
+
+#### Credentials secret
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: external-dns
+---
+apiVersion: v1
+data:
+ credentials: W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gTk9UVEhBVERVTUIKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gTUFZQkVJVFNBU0VDUkVU
+kind: Secret
+metadata:
+ name: route53-credentials
+ namespace: external-dns
+type: Opaque
+```
+
+### Enable DNS automation
+
+DNS can be enabled/disabled at global or tenant level. For automation purposes, you can configure allowed domains for DNS per tenant.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+ name: shroud
+spec:
+ # These domains are allowed to be used for Ingress, Gateway API, DNS, and certs.
+ allowedDomains:
+ - "kube.example.com"
+ - "*.kube.example.com"
+ - "*.shroud.example.com"
+ dns:
+ # If not empty, only the domains specified here will have automation for DNS. Everything else will be ignored.
+ allowedDomains:
+ - "*.shroud.example.com"
+ # The wildcard domain to use for auto-generated hostnames for Load balancers
+    # In the Enterprise Edition, this is also used to generate dynamic hostnames for tunnels.
+ wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for load balancers and tunnels (Enterprise Edition)
+ allowExplicitHostnames: false
+ gatewayAPI:
+ class: "eg"
+ defaultGateway:
+ name: "default"
+ namespace: "kubelb"
+```
+
+Users can then either use [external-dns annotations](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/annotations/annotations.md) or the annotation `kubelb.k8c.io/manage-dns: true` on their resources to automate DNS management.
+
+The additional validation at the tenant level allows us to use a single instance of external-dns for multiple tenants. If required, though, external-dns can also be installed per tenant, as sketched below.
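+
+A sketch of a per-tenant installation, scoped to one tenant's domain; the chart values shown are illustrative and may differ between external-dns chart versions, so consult the chart's values for your release:
+
+```bash
+TENANT_NAME=shroud
+
+helm upgrade --install external-dns-${TENANT_NAME} external-dns \
+  --repo https://kubernetes-sigs.github.io/external-dns \
+  --namespace tenant-${TENANT_NAME} \
+  --set provider=aws \
+  --set policy=sync \
+  --set txtOwnerId=kubelb-${TENANT_NAME} \
+  --set "domainFilters={shroud.example.com}"
+```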
+
+#### Configure Gateway
+
+Gateway resource needs to be configured for this automation to work. For example, if you are using Gateway API, you can configure the Gateway resource to manage DNS as follows:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: default
+ namespace: kubelb
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-production
+spec:
+ gatewayClassName: eg
+ listeners:
+  ## HTTP listener to solve the HTTP-01 challenge for cert-manager
+ - name: http
+ protocol: HTTP
+ port: 80
+ allowedRoutes:
+ namespaces:
+ from: All
+ - protocol: HTTPS
+ port: 443
+ name: https
+ hostname: "*.apps.example.com"
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - kind: Secret
+ name: eg-https
+ # Required in EE for tunneling
+ - protocol: HTTPS
+ port: 443
+ name: https-connection-manager
+ hostname: "connection-manager.example.com"
+ allowedRoutes:
+ namespaces:
+ from: All
+ tls:
+ mode: Terminate
+ certificateRefs:
+ - kind: Secret
+ name: eg-https-connection-manager
+```
+
+## Usage
+
+1. Using external-dns annotations:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: example
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: example.com
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ hostname: example.com
+ port: 443
+ protocol: HTTPS
+ allowedRoutes:
+ namespaces:
+ from: All
+```
+
+2. Delegate DNS management to KubeLB:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+ name: example
+ annotations:
+    kubelb.k8c.io/manage-dns: "true"
+spec:
+ gatewayClassName: kubelb
+ listeners:
+ - name: http
+ hostname: example.com
+ port: 443
+ protocol: HTTPS
+ allowedRoutes:
+ namespaces:
+ from: All
+```
+
+3. Services can also be annotated to manage DNS:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: backend
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: backend.example.com
+spec:
+ ports:
+ - name: http
+ port: 3000
+ targetPort: 3000
+ selector:
+ app: backend
+ type: LoadBalancer
+```
diff --git a/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md b/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md
new file mode 100644
index 000000000..45cc905ff
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md
@@ -0,0 +1,47 @@
++++
+title = "Bring your own Secrets"
+linkTitle = "Bring your own Secrets"
+date = 2023-10-27T10:07:15+02:00
+weight = 6
++++
+
+To propagate secrets from the tenant to the management cluster, KubeLB has introduced a custom resource, `SyncSecret`, which is merely a wrapper over the native Kubernetes Secret. The custom resource helps us ensure that we are not exposing any secrets from the LB cluster to the tenants.
+
+## SyncSecret Example
+
+### Native Kubernetes Secret
+
+```yaml
+kind: Secret
+apiVersion: v1
+metadata:
+ name: mongodb-credentials
+stringData:
+ mongodb-password: "123456"
+ mongodb-root-password: "123456"
+type: Opaque
+```
+
+### Converted to a Sync Secret
+
+```yaml
+kind: SyncSecret
+apiVersion: kubelb.k8c.io/v1alpha1
+metadata:
+ name: mongodb-credentials
+stringData:
+ mongodb-password: "123456"
+ mongodb-root-password: "123456"
+type: Opaque
+```
+
+### Automation
+
+To automate the process of creating SyncSecrets from Kubernetes Secrets, re-deploy the KubeLB CCM with the following modifications:
+
+```yaml
+kubelb:
+ enableSecretSynchronizer: true
+```
+
+This would assign CRUD access for Secrets to the KubeLB controller and enable a syncer that converts Secrets labelled with `kubelb.k8c.io/managed-by: kubelb` into SyncSecrets, as shown below.
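+
+For example, to opt an existing Secret into synchronization:
+
+```bash
+kubectl label secret mongodb-credentials kubelb.k8c.io/managed-by=kubelb
+```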
diff --git a/content/kubelb/v1.2/tutorials/tenants/_index.en.md b/content/kubelb/v1.2/tutorials/tenants/_index.en.md
new file mode 100644
index 000000000..af6a2445e
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/tenants/_index.en.md
@@ -0,0 +1,73 @@
++++
+title = "Tenants"
+linkTitle = "Tenants"
+date = 2023-10-27T10:07:15+02:00
+weight = 2
++++
+
+Tenants represent the consumers of the load balancer services in the management cluster. They can be individual users, teams, or applications that have their workloads, access control, and quotas isolated by using the tenant concept in the management cluster. Tenants are represented by the Tenant CRD and have a dedicated namespace `tenant-<name>` in the management cluster. Each Kubernetes cluster where the KubeLB CCM is running is considered a unique tenant. This demarcation is based on the fact that the endpoints, simply the node IPs and node ports, are unique for each Kubernetes cluster.
+
+{{% notice note %}}
+Tenant configuration has a higher precedence than the global configuration and overrides the global configuration values for the tenant if the fields are available in both the tenant and global configuration.
+{{% /notice %}}
+
+## Kubermatic Kubernetes Platform (Enterprise Edition Only)
+
+For details, go through [KKP integration details]({{< relref "../../tutorials/kkp">}})
+
+## Usage
+
+For usage outside of KKP, please follow along with this guide. It assumes that the KubeLB manager cluster has been configured by following the [installation guide](../../installation/).
+
+### KubeLB Tenant
+
+With KubeLB v1.1, the process to register a new tenant has been simplified. Instead of running scripts, the user can now simply create a `Tenant` resource.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+ name: shroud
+spec:
+ propagatedAnnotations: null
+ # Propagate all annotations to the resources.
+ propagateAllAnnotations: true
+ loadBalancer:
+ class: "metallb.universe.tf/metallb"
+ # Enterprise Edition Only
+ limit: 10
+ ingress:
+ class: "nginx"
+ gatewayAPI:
+ class: "eg"
+ # All of the below configurations are Enterprise Edition Only
+ dns:
+ allowedDomains:
+ - "*.example.com"
+ certificates:
+ defaultClusterIssuer: "letsencrypt-prod"
+ allowedDomains:
+ - "*.example.com"
+ allowedDomains:
+ # All subdomains of example.com are allowed but at a single lower level. For example, kube.example.com, test.example.com, etc.
+ - "*.example.com"
+ # All subdomains of kube.com are allowed but at any lower level. For example, example.kube.com, test.tenant1.prod.kube.com etc.
+ - "**.kube.com"
+```
+
+With this CR, we are creating a tenant named `shroud` with the following configuration:
+
+* **propagateAllAnnotations: true** - Propagate all annotations to the resources.
+* **loadBalancer.class: metallb.universe.tf/metallb** - The class to use for LoadBalancer resources for tenants in the management cluster.
+* **loadBalancer.limit: 10** - The limit of LoadBalancer resources that can be created by the tenant.
+* **ingress.class: nginx** - The class to use for Ingress resources for tenants in the management cluster.
+* **gatewayAPI.class: eg** - The class to use for Gateway API resources for tenants in the management cluster.
+* For DNS configuration, we have allowed domains `*.example.com`.
+* For Certificates configuration, we have the default cluster issuer `letsencrypt-prod` and allowed domains `*.example.com`.
+* For Ingress and Gateway API, we have allowed domains `*.example.com` and `**.kube.com`.
+
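+Assuming the manifest above is saved as `tenant.yaml`, you can apply it against the management cluster and verify that the tenant namespace was created:
+
+```bash
+# Create the tenant in the management cluster.
+kubectl apply -f tenant.yaml
+
+# A dedicated namespace is created for the tenant (see the note below).
+kubectl get namespace tenant-shroud
+```
+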
+{{% notice info %}}
+The tenant name provided to the consumers is the name of the namespace that is created in the management cluster for the Tenant CR. So the tenant **shroud** is represented by the namespace **tenant-shroud** in the management cluster. For the CCM, a tenant name of **tenant-shroud** needs to be used.
+{{% /notice %}}
+
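+As a minimal sketch, assuming the CCM Helm chart exposes the tenant name via a `kubelb.tenantName` value, the corresponding CCM configuration would look like this:
+
+```yaml
+kubelb:
+  # Assumed Helm value; use the tenant namespace name, not the Tenant CR name.
+  tenantName: tenant-shroud
+```
+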
+**For more details and options, please go through [CRD References]({{< relref "../../references">}})**
diff --git a/content/kubeone/main/architecture/_index.en.md b/content/kubeone/main/architecture/_index.en.md
index 2bdbfd79e..af010052f 100644
--- a/content/kubeone/main/architecture/_index.en.md
+++ b/content/kubeone/main/architecture/_index.en.md
@@ -2,7 +2,7 @@
title = "Architecture"
date = 2021-02-10T09:00:00+02:00
description = "Learn about the architecture of Kubermatic KubeOne and how you can automate cluster operations on all environments"
-weight = 2
+weight = 3
+++
Kubermatic KubeOne automates cluster operations on all your cloud, on-prem,
diff --git a/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md b/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md
index 35fc05729..b7b9aa29f 100644
--- a/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md
+++ b/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md
@@ -14,17 +14,17 @@ support policy in the [Version Skew Policy document][upstream-supported-versions
In the following table you can find the supported Kubernetes versions for the
current KubeOne version.
-| KubeOne \ Kubernetes | 1.33 | 1.32 | 1.31 | 1.30 | 1.29[^1] |
+| KubeOne \ Kubernetes | 1.34 | 1.33 | 1.32 | 1.31 | 1.30[^1] |
| -------------------- | ---- | ---- | ---- | -----| -------- |
-| v1.11 | ✓ | ✓ | ✓ | - | - |
-| v1.10 | - | ✓ | ✓ | ✓ | - |
-| v1.9 | - | - | ✓ | ✓ | ✓ |
+| v1.12 | ✓ | ✓ | ✓ | - | - |
+| v1.11 | - | ✓ | ✓ | ✓ | - |
+| v1.10 | - | - | ✓ | ✓ | ✓ |
-[^1]: Kubernetes 1.29 has reached End-of-Life (EOL) and is not supported any longer.
+[^1]: Kubernetes 1.30 has reached End-of-Life (EOL) and is not supported any longer.
We strongly recommend upgrading to a newer supported Kubernetes release as soon as possible.
We recommend using a Kubernetes release that's not older than one minor release
-than the latest Kubernetes release. For example, with 1.32 being the latest
-release, we recommend running at least Kubernetes 1.31.
+behind the latest Kubernetes release. For example, with 1.34 being the latest
+release, we recommend running at least Kubernetes 1.33.
[upstream-supported-versions]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
diff --git a/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md
index abeacc163..2e5cdcc95 100644
--- a/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md
+++ b/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md
@@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
addons:
enable: true
operatingSystemManager:
@@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: "1.29.4"
+ kubernetes: "1.34.1"
cloudProvider:
aws: {}
addons:
diff --git a/content/kubeone/main/cheat-sheets/_index.en.md b/content/kubeone/main/cheat-sheets/_index.en.md
index 9fae4b91a..da8ea9121 100644
--- a/content/kubeone/main/cheat-sheets/_index.en.md
+++ b/content/kubeone/main/cheat-sheets/_index.en.md
@@ -2,7 +2,7 @@
title = "Cheat Sheets"
date = 2021-02-10T09:00:00+02:00
description = "Keep the most important concepts of Kubermatic KubeOne handy for quick reference"
-weight = 7
+weight = 8
chapter = true
+++
diff --git a/content/kubeone/main/creating-clusters/_index.en.md b/content/kubeone/main/creating-clusters/_index.en.md
new file mode 100644
index 000000000..bc38556bd
--- /dev/null
+++ b/content/kubeone/main/creating-clusters/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Creating a Kubernetes Cluster"
+url = "/kubeone/main/tutorials/creating-clusters/"
+weight = 2
++++
\ No newline at end of file
diff --git a/content/kubeone/main/examples/_index.en.md b/content/kubeone/main/examples/_index.en.md
index 756b51b92..0ea68a23d 100644
--- a/content/kubeone/main/examples/_index.en.md
+++ b/content/kubeone/main/examples/_index.en.md
@@ -2,7 +2,7 @@
title = "Examples"
date = 2021-02-10T09:00:00+02:00
description = "A collection of select concepts and scenarios to help you master Kubermatic KubeOne"
-weight = 6
+weight = 7
chapter = true
+++
diff --git a/content/kubeone/main/guides/_index.en.md b/content/kubeone/main/guides/_index.en.md
index 8f1262064..fdf8ce6a4 100644
--- a/content/kubeone/main/guides/_index.en.md
+++ b/content/kubeone/main/guides/_index.en.md
@@ -2,7 +2,7 @@
title = "Guides"
date = 2021-02-10T09:00:00+02:00
description = "Get familiar with Kubermatic KubeOne and read step-by-step instructions to handle important scenarios"
-weight = 4
+weight = 5
chapter = true
+++
diff --git a/content/kubeone/main/guides/addons/_index.en.md b/content/kubeone/main/guides/addons/_index.en.md
index 725b59df0..ed7443363 100644
--- a/content/kubeone/main/guides/addons/_index.en.md
+++ b/content/kubeone/main/guides/addons/_index.en.md
@@ -64,7 +64,7 @@ the `addons` config:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
cloudProvider:
aws: {}
# Addons are Kubernetes manifests to be deployed after provisioning the cluster
@@ -113,7 +113,7 @@ Example:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
addons:
enable: true
@@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
addons:
enable: true
@@ -180,7 +180,7 @@ you can use it to override globally defined parameters.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
addons:
enable: true
diff --git a/content/kubeone/main/guides/autoscaler-addon/_index.en.md b/content/kubeone/main/guides/autoscaler-addon/_index.en.md
index cc76f595c..ec64c242f 100644
--- a/content/kubeone/main/guides/autoscaler-addon/_index.en.md
+++ b/content/kubeone/main/guides/autoscaler-addon/_index.en.md
@@ -33,7 +33,7 @@ kubeone.yaml
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.34.1' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.34.1' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments:
```bash
$ kubectl get machinedeployments -n kube-system
NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE
-kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h
+kb-cluster-eu-west-3a 1 1 aws ubuntu 1.34.1 10h
+kb-cluster-eu-west-3b 1 1 aws ubuntu 1.34.1 10h
+kb-cluster-eu-west-3c 1 1 aws ubuntu 1.34.1 10h
```
### Step 2: Annotate Machinedeployments
@@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne
[step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}}
[embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}}
[ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md
-[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
\ No newline at end of file
+[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
diff --git a/content/kubeone/main/guides/cis-benchmarking/_index.en.md b/content/kubeone/main/guides/cis-benchmarking/_index.en.md
deleted file mode 100644
index 141d454ce..000000000
--- a/content/kubeone/main/guides/cis-benchmarking/_index.en.md
+++ /dev/null
@@ -1,42 +0,0 @@
-+++
-title = "CIS Benchmarking"
-date = 2024-03-06T12:00:00+02:00
-+++
-
-[CIS Benchmark for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes) is a guide that consists of secure configuration guidelines and best practices developed for Kubernetes.
-
-In this document, information how it can be run on a Kubernetes cluster created using KubeOne and what to expect as the result is described.
-
-## Tooling
-
-[kube-bench](https://github.com/aquasecurity/kube-bench) is used to create the assessment.
-
-### Installation
-{{% notice note %}}
-There are [multiple ways](https://github.com/aquasecurity/kube-bench/blob/main/docs/running.md) to run `kube-bench`. Below method describes how it's running via logging to a master and worker node to run it.
-{{% /notice %}}
-
-```bash
-# make sure you run those commands as root user:
-KUBE_BENCH_VERSION="0.7.2"
-KUBE_BENCH_URL="https://github.com/aquasecurity/kube-bench/releases/download/v$%7BKUBE_BENCH_VERSION%7D/kube-bench_$%7BKUBE_BENCH_VERSION%7D_linux_amd64.tar.gz"
-
-mkdir /root/kube-bench
-cd /root/kube-bench
-curl -L ${KUBE_BENCH_URL} -o kube-bench_${KUBE_BENCH_VERSION}_linux_amd64.tar.gz
-tar xvf kube-bench_${KUBE_BENCH_VERSION}_linux_amd64.tar.gz
-```
-
-### Run on controlplane node
-
-```bash
-cd /root/kube-bench
-./kube-bench -D ./cfg/ run --targets=controlplane,master,etcd,node --benchmark=cis-1.8
-```
-
-### Run on a worker node
-
-```bash
-cd /root/kube-bench
-./kube-bench -D ./cfg/ run --targets=node --benchmark=cis-1.8
-```
diff --git a/content/kubeone/main/guides/encryption-providers/_index.en.md b/content/kubeone/main/guides/encryption-providers/_index.en.md
index 09c42aed9..54c78d6ee 100644
--- a/content/kubeone/main/guides/encryption-providers/_index.en.md
+++ b/content/kubeone/main/guides/encryption-providers/_index.en.md
@@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
features:
# enable encryption providers
encryptionProviders:
@@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
features:
# enable encryption providers
encryptionProviders:
@@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
features:
encryptionProviders:
enable: true
@@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: kms-test
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
cloudProvider:
aws: {}
features:
diff --git a/content/kubeone/main/guides/mirror-registries/_index.en.md b/content/kubeone/main/guides/mirror-registries/_index.en.md
index 49f7a580f..4ff32c862 100644
--- a/content/kubeone/main/guides/mirror-registries/_index.en.md
+++ b/content/kubeone/main/guides/mirror-registries/_index.en.md
@@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml
docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds:
-* Buy docker subscribtion.
+* Buy a Docker subscription.
How to use docker.io credentials is covered in the [section above][using-credentials].
* Setup own pull-through caching proxy.
* Use public pull-through caching proxy.
diff --git a/content/kubeone/main/guides/registry-configuration/_index.en.md b/content/kubeone/main/guides/registry-configuration/_index.en.md
index 5ef593084..98d74d7c6 100644
--- a/content/kubeone/main/guides/registry-configuration/_index.en.md
+++ b/content/kubeone/main/guides/registry-configuration/_index.en.md
@@ -37,36 +37,58 @@ This guide assumes that:
If you don't have an image registry, you can check out the
[Docker Registry][docker-reg-guide] as a possible solution.
-## Preloading Images
+## Mirroring Images with `kubeone mirror-images`
-Another prerequisites for this guide to work is that your image registry has
-all images needed for your cluster to work preloaded.
+KubeOne provides a built-in command `kubeone mirror-images` to simplify mirroring all required images (Kubernetes core components, CNI plugins, etc.) to your private registry. This command replaces the older `image-loader.sh` script and supports advanced filtering and multi-version mirroring.
-To make this task easier, we provide the image loader script that:
+### Prerequisites
-* pulls all images used by components deployed by KubeOne (CNI,
- metrics-server...) and Kubeadm (Kubernetes core components and CoreDNS)
-* re-tag those images so the image registry (e.g. `docker.io`) is replaced
- with the image registry provided by the user
-* push re-tagged images to your (mirror) image registry
+1. **Registry Setup**: Ensure your registry is accessible from all cluster nodes and supports TLS if you are using containerd.
+2. **Authentication**: The registry must allow unauthenticated access (support for credentials is planned for future releases).
+3. **KubeOne CLI**: Use KubeOne v1.5.0 or newer.
-The image loader script (`image-loader.sh`) comes in the KubeOne release
-archive, under the `hack` directory. It can also be found on [GitHub in the
-`hack` directory][img-loader]. If you're downloading the script from GitHub,
-it's recommended to switch to the appropriate tag depending on which KubeOne
-version you're using.
+### Usage
-Once you have downloaded the script, you can run it in the following way.
-Make sure to replace `KUBERNETES_VERSION` with the Kubernetes version you plan
-to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with
-the address to your image registry.
+The `kubeone mirror-images` command pulls, re-tags, and pushes images to your registry. Use the following syntax:
+```bash
+kubeone mirror-images \
+ [--filter base,optional,control-plane] \
+ [--kubernetes-versions v1.34.1,v1.33.5] \
+  [--insecure] \
+  REGISTRY_ADDRESS
```
-KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
+
+#### Key Flags
+- `--filter`: Select image groups (comma-separated):
+ - `base`: Core images (OSM, DNS Cache, Calico, Machine-Controller).
+ - `optional`: Add-ons like CCMs and CSI Drivers.
+ - `control-plane`: Only Kubernetes core components (kube-apiserver, etcd, etc.).
+- `--kubernetes-versions`: Specify versions (comma-separated). If omitted, **all KubeOne-supported versions are mirrored**.
+- `--insecure`: Skip TLS verification for registries using HTTP (useful for local/insecure setups).
+
+### Examples
+
+#### 1. Mirror All Base Images for Specific Versions
+```bash
+kubeone mirror-images \
+ --filter base \
+ --kubernetes-versions v1.34.1,v1.33.5 \
+ registry.example.com:5000
+```
+
+#### 2. Mirror Only Control-Plane Images for All Supported Versions
+```bash
+kubeone mirror-images \
+ --filter control-plane \
+ registry.example.com:5000
```
-The preloading process can take a several minutes, depending on your
-connection speed.
+### Benefits of `kubeone mirror-images`
+- **Simpler Workflow**: No need to manually download or manage scripts.
+- **Multi-Version Support**: Mirror images for multiple Kubernetes versions in one command.
+- **Granular Control**: Use filters to mirror only the images you need.
+- **Automated Retagging**: Handles registry prefixes (e.g., `docker.io` → `registry.example.com`).
## Overriding Image Registries
@@ -77,7 +99,7 @@ stanza to your KubeOne configuration file, such as:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.34.1
cloudProvider:
aws: {}
registryConfiguration:
diff --git a/content/kubeone/main/references/_index.en.md b/content/kubeone/main/references/_index.en.md
index 71dfb7b6e..2187b9ca8 100644
--- a/content/kubeone/main/references/_index.en.md
+++ b/content/kubeone/main/references/_index.en.md
@@ -1,7 +1,7 @@
+++
title = "References"
date = 2021-02-10T09:00:00+02:00
-weight = 5
+weight = 6
chapter = true
+++
diff --git a/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md b/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md
index bd2e3b309..1f375ded2 100644
--- a/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md
+++ b/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md
@@ -1,6 +1,6 @@
+++
title = "v1beta2 API Reference"
-date = 2025-06-12T15:55:08+03:00
+date = 2025-10-14T16:55:58+03:00
weight = 11
+++
## v1beta2
@@ -159,6 +159,8 @@ CanalSpec defines the Canal CNI plugin
| ----- | ----------- | ------ | -------- |
| bundle | Bundle inline PEM encoded global CA | string | false |
| file | File is a path to the CA bundle file, used as a replacement for Bundle | string | false |
+| certificateValidityPeriod | CertificateValidityPeriod specifies the validity period for a non-CA certificate generated by kubeadm. Default value: 8760h (365 days * 24 hours = 1 year) | *metav1.Duration | false |
+| caCertificateValidityPeriod | CACertificateValidityPeriod specifies the validity period for a CA certificate generated by kubeadm. Default value: 87600h (365 days * 24 hours * 10 = 10 years) | *metav1.Duration | false |
[Back to Group](#v1beta2)
@@ -260,6 +262,7 @@ ContainerdRegistry defines endpoints and security for given container registry
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| mirrors | List of registry mirrors to use | []string | false |
+| overridePath | Configure override_path | bool | false |
| tlsConfig | TLSConfig for the registry | *[ContainerdTLSConfig](#containerdtlsconfig) | false |
| auth | Registry authentication | *[ContainerdRegistryAuthConfig](#containerdregistryauthconfig) | false |
@@ -489,6 +492,7 @@ HostConfig describes a single control plane or worker node.
| bastionPort | BastionPort is SSH port to use when connecting to the bastion if it's configured in .Bastion. Default value is 22. | int | false |
| bastionUser | BastionUser is system login name to use when connecting to bastion host. Default value is \"root\". | string | false |
| bastionHostPublicKey | BastionHostPublicKey if not empty, will be used to verify bastion SSH public key | []byte | false |
+| bastionPrivateKeyFile | BastionPrivateKeyFile is a path to the file with a PRIVATE AND CLEARTEXT SSH key. Default value is \"\". | string | false |
| hostname | Hostname is the hostname(1) of the host. Default value is populated at the runtime via running `hostname -f` command over ssh. | string | false |
| isLeader | IsLeader indicates this host as a session leader. Default value is populated at the runtime. | bool | false |
| taints | Taints are taints applied to nodes. Those taints are only applied when the node is being provisioned. If not provided (i.e. nil) for control plane nodes, it defaults to TaintEffectNoSchedule with key\n node-role.kubernetes.io/control-plane\nExplicitly empty (i.e. []corev1.Taint{}) means no taints will be applied (this is default for worker nodes). | [][corev1.Taint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#taint-v1-core) | false |
@@ -531,6 +535,7 @@ KubeOneCluster is KubeOne Cluster API Schema
| ----- | ----------- | ------ | -------- |
| name | Name is the name of the cluster. | string | true |
| controlPlane | ControlPlane describes the control plane nodes and how to access them. | [ControlPlaneConfig](#controlplaneconfig) | true |
+| kubeletConfig | KubeletConfig used to generate cluster's KubeletConfiguration that will be used along with kubeadm | [KubeletConfig](#kubeletconfig) | false |
| apiEndpoint | APIEndpoint are pairs of address and port used to communicate with the Kubernetes API. | [APIEndpoint](#apiendpoint) | true |
| cloudProvider | CloudProvider configures the cloud provider specific features. | [CloudProviderSpec](#cloudproviderspec) | true |
| versions | Versions defines which Kubernetes version will be installed. | [VersionConfig](#versionconfig) | true |
@@ -576,6 +581,10 @@ KubeletConfig provides some kubelet configuration options
| kubeReserved | KubeReserved configure --kube-reserved command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false |
| evictionHard | EvictionHard configure --eviction-hard command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false |
| maxPods | MaxPods configures maximum number of pods per node. If not provided, default value provided by kubelet will be used (max. 110 pods per node) | *int32 | false |
+| imageGCHighThresholdPercent | ImageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated by dividing this field value by 100, so this field must be between 0 and 100, inclusive. When specified, the value must be greater than imageGCLowThresholdPercent. Default: 85 | *int32 | false |
+| imageGCLowThresholdPercent | ImageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive. When specified, the value must be less than imageGCHighThresholdPercent. Default: 80 | *int32 | false |
+| imageMinimumGCAge | ImageMinimumGCAge is the minimum age for an unused image before it is garbage collected. Default: \"2m\" | metav1.Duration | false |
+| imageMaximumGCAge | ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. The default of this field is \"0s\", which disables this field--meaning images won't be garbage collected based on being unused for too long. Default: \"0s\" (disabled) | metav1.Duration | false |
[Back to Group](#v1beta2)
@@ -695,6 +704,7 @@ OperatingSystemManagerConfig configures kubermatic operating-system-manager depl
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| deploy | Deploy | bool | false |
+| enableNonRootDeviceOwnership | EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. | bool | false |
[Back to Group](#v1beta2)
diff --git a/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md b/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md
index 1eaf83c80..8ad3c73e8 100644
--- a/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md
+++ b/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md
@@ -1,6 +1,6 @@
+++
title = "v1beta3 API Reference"
-date = 2025-06-12T15:55:08+03:00
+date = 2025-10-14T16:55:58+03:00
weight = 11
+++
## v1beta3
@@ -172,6 +172,8 @@ CanalSpec defines the Canal CNI plugin
| ----- | ----------- | ------ | -------- |
| bundle | Bundle inline PEM encoded global CA | string | false |
| file | File is a path to the CA bundle file, used as a replacement for Bundle | string | false |
+| certificateValidityPeriod | CertificateValidityPeriod specifies the validity period for a non-CA certificate generated by kubeadm. Default value: 8760h (365 days * 24 hours = 1 year) | *metav1.Duration | false |
+| caCertificateValidityPeriod | CACertificateValidityPeriod specifies the validity period for a CA certificate generated by kubeadm. Default value: 87600h (365 days * 24 hours * 10 = 10 years) | *metav1.Duration | false |
[Back to Group](#v1beta3)
@@ -262,6 +264,7 @@ ContainerdRegistry defines endpoints and security for given container registry
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| mirrors | List of registry mirrors to use | []string | false |
+| overridePath | Configure override_path | bool | false |
| tlsConfig | TLSConfig for the registry | *[ContainerdTLSConfig](#containerdtlsconfig) | false |
| auth | Registry authentication | *[ContainerdRegistryAuthConfig](#containerdregistryauthconfig) | false |
@@ -491,6 +494,7 @@ HostConfig describes a single control plane or worker node.
| bastionPort | BastionPort is SSH port to use when connecting to the bastion if it's configured in .Bastion. Default value is 22. | int | false |
| bastionUser | BastionUser is system login name to use when connecting to bastion host. Default value is \"root\". | string | false |
| bastionHostPublicKey | BastionHostPublicKey if not empty, will be used to verify bastion SSH public key | []byte | false |
+| bastionPrivateKeyFile | BastionPrivateKeyFile is a path to the file with a PRIVATE AND CLEARTEXT SSH key. Default value is \"\". | string | false |
| hostname | Hostname is the hostname(1) of the host. Default value is populated at the runtime via running `hostname -f` command over ssh. | string | false |
| isLeader | IsLeader indicates this host as a session leader. Default value is populated at the runtime. | bool | false |
| taints | Taints are taints applied to nodes. Those taints are only applied when the node is being provisioned. If not provided (i.e. nil) for control plane nodes, it defaults to TaintEffectNoSchedule with key\n node-role.kubernetes.io/control-plane\nExplicitly empty (i.e. []corev1.Taint{}) means no taints will be applied (this is default for worker nodes). | [][corev1.Taint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#taint-v1-core) | false |
@@ -535,6 +539,7 @@ KubeOneCluster is KubeOne Cluster API Schema
| controlPlane | ControlPlane describes the control plane nodes and how to access them. | [ControlPlaneConfig](#controlplaneconfig) | true |
| apiEndpoint | APIEndpoint are pairs of address and port used to communicate with the Kubernetes API. | [APIEndpoint](#apiendpoint) | true |
| cloudProvider | CloudProvider configures the cloud provider specific features. | [CloudProviderSpec](#cloudproviderspec) | true |
+| kubeletConfig | KubeletConfig used to generate cluster's KubeletConfiguration that will be used along with kubeadm | [KubeletConfig](#kubeletconfig) | false |
| versions | Versions defines which Kubernetes version will be installed. | [VersionConfig](#versionconfig) | true |
| containerRuntime | ContainerRuntime defines which container runtime will be installed | [ContainerRuntimeConfig](#containerruntimeconfig) | false |
| clusterNetwork | ClusterNetwork configures the in-cluster networking. | [ClusterNetworkConfig](#clusternetworkconfig) | false |
@@ -578,6 +583,10 @@ KubeletConfig provides some kubelet configuration options
| evictionHard | EvictionHard configure --eviction-hard command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false |
| maxPods | MaxPods configures maximum number of pods per node. If not provided, default value provided by kubelet will be used (max. 110 pods per node) | *int32 | false |
| podPidsLimit | PodPidsLimit configures the maximum number of processes running in a Pod If not provided, default value provided by kubelet will be used -1 See more about pid-limiting at: https://kubernetes.io/docs/concepts/policy/pid-limiting/ | *int64 | false |
+| imageGCHighThresholdPercent | ImageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated by dividing this field value by 100, so this field must be between 0 and 100, inclusive. When specified, the value must be greater than imageGCLowThresholdPercent. Default: 85 | *int32 | false |
+| imageGCLowThresholdPercent | ImageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive. When specified, the value must be less than imageGCHighThresholdPercent. Default: 80 | *int32 | false |
+| imageMinimumGCAge | ImageMinimumGCAge is the minimum age for an unused image before it is garbage collected. Default: \"2m\" | metav1.Duration | false |
+| imageMaximumGCAge | ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. The default of this field is \"0s\", which disables this field--meaning images won't be garbage collected based on being unused for too long. Default: \"0s\" (disabled) | metav1.Duration | false |
[Back to Group](#v1beta3)
@@ -697,6 +706,7 @@ OperatingSystemManagerConfig configures kubermatic operating-system-manager depl
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| deploy | Deploy | bool | false |
+| enableNonRootDeviceOwnership | EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. | bool | false |
[Back to Group](#v1beta3)
diff --git a/content/kubeone/main/security/_index.en.md b/content/kubeone/main/security/_index.en.md
new file mode 100644
index 000000000..8eca98cb6
--- /dev/null
+++ b/content/kubeone/main/security/_index.en.md
@@ -0,0 +1,13 @@
++++
+title = "Security"
+date = 2025-09-19T09:00:00+02:00
+weight = 6
+chapter = true
++++
+
+# Security
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubeone/main/security/cis-benchmarking/_index.en.md b/content/kubeone/main/security/cis-benchmarking/_index.en.md
new file mode 100644
index 000000000..d1d54adcf
--- /dev/null
+++ b/content/kubeone/main/security/cis-benchmarking/_index.en.md
@@ -0,0 +1,28 @@
++++
+title = "CIS Benchmarking"
+date = 2024-03-06T12:00:00+02:00
+weight = 10
++++
+
+[CIS Benchmark for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes) is a guide that consists of secure configuration guidelines and best practices developed for Kubernetes.
+
+This document describes how the benchmark can be run on a Kubernetes cluster created using KubeOne and what to expect as the result.
+
+## Tooling
+
+[Trivy](https://github.com/aquasecurity/trivy) is the tool used to run the benchmark.
+
+### Installation
+
+To install Trivy, follow the instructions [here](https://trivy.dev/latest/getting-started/installation/).
+
+### Running the Benchmark
+
+```bash
+trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule
+```
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/_index.en.md b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/_index.en.md
new file mode 100644
index 000000000..c5a99d2c7
--- /dev/null
+++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/_index.en.md
@@ -0,0 +1,1067 @@
++++
+title = "Benchmark on Kubernetes 1.33 with KubeOne 1.11.2"
+date = 2025-09-19T16:39:34+02:00
++++
+
+This guide helps you evaluate the security of a Kubernetes cluster created using KubeOne against each control in the CIS Kubernetes Benchmark.
+
+This guide corresponds to the following versions of KubeOne, CIS Benchmarks, and Kubernetes:
+
+| KubeOne Version | Kubernetes Version | CIS Benchmark Version |
+| ---------------- | ------------------ | --------------------- |
+| 1.11.2 | 1.33.4 | CIS-1.23 |
+
+## Testing Methodology
+
+### Running the Benchmark
+
+[Trivy](https://github.com/aquasecurity/trivy) was used to run the benchmark.
+
+```bash
+trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule
+```
+
+### Results
+
+The summary below reports compliance with the CIS Kubernetes Benchmarks v1.23.
+
+Each control in the CIS Kubernetes Benchmark was evaluated. These are the possible results for each control:
+
+🟢 **Pass:** The cluster passes the audit/control outlined in the benchmark.
+
+🔵 **Pass (Additional Configuration Required):** The cluster passes the audit/control outlined in the benchmark with some extra configuration. The documentation is provided.
+
+🔴 **Fail:** The cluster fails the audit/control outlined in the benchmark. A fix is planned for a future KubeOne release.
+
+## Control Type: Control Plane Components
+
+### 1.1. Control Plane Node Configuration Files
+
+#### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.1.13: Ensure that the admin.conf file permissions are set to 600
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.14: Ensure that the admin.conf file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+### 1.2. API Server
+
+#### 1.2.1: Ensure that the --anonymous-auth argument is set to false
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.2: Ensure that the --token-auth-file parameter is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.3: Ensure that the --DenyServiceExternalIPs is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.4: Ensure that the --kubelet-https argument is set to true
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.5: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.6: Ensure that the --kubelet-certificate-authority argument is set as appropriate
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.7: Ensure that the --authorization-mode argument is not set to AlwaysAllow
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.8: Ensure that the --authorization-mode argument includes Node
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.9: Ensure that the --authorization-mode argument includes RBAC
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.10: Ensure that the admission control plugin EventRateLimit is set
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.11: Ensure that the admission control plugin AlwaysAdmit is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.12: Ensure that the admission control plugin AlwaysPullImages is set
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.13: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.14: Ensure that the admission control plugin ServiceAccount is set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.15: Ensure that the admission control plugin NamespaceLifecycle is set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.16: Ensure that the admission control plugin NodeRestriction is set
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.17: Ensure that the --secure-port argument is not set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.18: Ensure that the --profiling argument is set to false
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.19: Ensure that the --audit-log-path argument is set
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.20: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.21: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.22: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.2.24: Ensure that the --service-account-lookup argument is set to true
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.25: Ensure that the --service-account-key-file argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.26: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.27: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.28: Ensure that the --client-ca-file argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.29: Ensure that the --etcd-cafile argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.30: Ensure that the --encryption-provider-config argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+### 1.3. Controller Manager
+
+#### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+### 1.4. Scheduler
+
+#### 1.4.1: Ensure that the --profiling argument is set to false
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Etcd
+
+#### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.2: Ensure that the --client-cert-auth argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.3: Ensure that the --auto-tls argument is not set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.5: Ensure that the --peer-client-cert-auth argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.6: Ensure that the --peer-auto-tls argument is not set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Control Plane Configuration
+
+### 3.1. Authentication and Authorization
+
+#### 3.1.1: Client certificate authentication should not be used for users (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+### 3.2. Logging
+
+#### 3.2.1: Ensure that a minimal audit policy is created (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+#### 3.2.2: Ensure that the audit policy covers key security concerns (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+## Control Type: Worker Nodes
+
+### 4.1. Worker Node Configuration Files
+
+#### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 4.1.2: Ensure that the kubelet service file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+### 4.2. Kubelet
+
+#### 4.2.1: Ensure that the --anonymous-auth argument is set to false
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.4: Verify that the --read-only-port argument is set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.6: Ensure that the --protect-kernel-defaults argument is set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.7: Ensure that the --make-iptables-util-chains argument is set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.8: Ensure that the --hostname-override argument is not set
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 4.2.9: Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.10: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 4.2.11: Ensure that the --rotate-certificates argument is not set to false
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.12: Verify that the RotateKubeletServerCertificate argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.13: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Policies
+
+### 5.1. RBAC and Service Accounts
+
+#### 5.1.1: Ensure that the cluster-admin role is only used where required
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.1.2: Minimize access to secrets
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.1.3: Minimize wildcard use in Roles and ClusterRoles
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.1.6: Ensure that Service Account Tokens are only mounted where necessary
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.1.8: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.2. Pod Security Standards
+
+#### 5.2.2: Minimize the admission of privileged containers
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.3: Minimize the admission of containers wishing to share the host process ID namespace
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.4: Minimize the admission of containers wishing to share the host IPC namespace
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.2.5: Minimize the admission of containers wishing to share the host network namespace
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.6: Minimize the admission of containers with allowPrivilegeEscalation
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.7: Minimize the admission of root containers
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.8: Minimize the admission of containers with the NET_RAW capability
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.9: Minimize the admission of containers with added capabilities
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.10: Minimize the admission of containers with capabilities assigned
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.11: Minimize the admission of containers with capabilities assigned
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.2.12: Minimize the admission of HostPath volumes
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.2.13: Minimize the admission of containers which use HostPorts
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.3. Network Policies and CNI
+
+#### 5.3.1: Ensure that the CNI in use supports Network Policies (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.3.2: Ensure that all Namespaces have Network Policies defined
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.4. Secrets Management
+
+#### 5.4.1: Prefer using secrets as files over secrets as environment variables (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.4.2: Consider external secret storage (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+### 5.5. Extensible Admission Control
+
+#### 5.5.1: Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+### 5.7. General Policies
+
+#### 5.7.1: Create administrative boundaries between resources using namespaces (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.7.2: Ensure that the seccomp profile is set to docker/default in your pod definitions
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.7.3: Apply Security Context to Your Pods and Containers
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 5.7.4: The default namespace should not be used
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
diff --git a/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json
new file mode 100644
index 000000000..608377bc1
--- /dev/null
+++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json
@@ -0,0 +1,694 @@
+{
+ "ID": "k8s-cis-1.23",
+ "Title": "CIS Kubernetes Benchmarks v1.23",
+ "SummaryControls": [
+ {
+ "ID": "1.1.1",
+ "Name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.2",
+ "Name": "Ensure that the API server pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.3",
+ "Name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.4",
+ "Name": "Ensure that the controller manager pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.5",
+ "Name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.6",
+ "Name": "Ensure that the scheduler pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.7",
+ "Name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.8",
+ "Name": "Ensure that the etcd pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.9",
+ "Name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.10",
+ "Name": "Ensure that the Container Network Interface file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.11",
+ "Name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.12",
+ "Name": "Ensure that the etcd data directory ownership is set to etcd:etcd",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.1.13",
+ "Name": "Ensure that the admin.conf file permissions are set to 600",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.14",
+ "Name": "Ensure that the admin.conf file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.15",
+ "Name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.16",
+ "Name": "Ensure that the scheduler.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.17",
+ "Name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.18",
+ "Name": "Ensure that the controller-manager.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.19",
+ "Name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.20",
+ "Name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.21",
+ "Name": "Ensure that the Kubernetes PKI key file permissions are set to 600",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.1",
+ "Name": "Ensure that the --anonymous-auth argument is set to false",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.2",
+ "Name": "Ensure that the --token-auth-file parameter is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.3",
+ "Name": "Ensure that the --DenyServiceExternalIPs is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.4",
+ "Name": "Ensure that the --kubelet-https argument is set to true",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.5",
+ "Name": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.6",
+ "Name": "Ensure that the --kubelet-certificate-authority argument is set as appropriate",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.7",
+ "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.8",
+ "Name": "Ensure that the --authorization-mode argument includes Node",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.9",
+ "Name": "Ensure that the --authorization-mode argument includes RBAC",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.10",
+ "Name": "Ensure that the admission control plugin EventRateLimit is set",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.11",
+ "Name": "Ensure that the admission control plugin AlwaysAdmit is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.12",
+ "Name": "Ensure that the admission control plugin AlwaysPullImages is set",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.13",
+ "Name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.14",
+ "Name": "Ensure that the admission control plugin ServiceAccount is set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.15",
+ "Name": "Ensure that the admission control plugin NamespaceLifecycle is set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.16",
+ "Name": "Ensure that the admission control plugin NodeRestriction is set",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.17",
+ "Name": "Ensure that the --secure-port argument is not set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.18",
+ "Name": "Ensure that the --profiling argument is set to false",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.19",
+ "Name": "Ensure that the --audit-log-path argument is set",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.20",
+ "Name": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.21",
+ "Name": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.22",
+ "Name": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.24",
+ "Name": "Ensure that the --service-account-lookup argument is set to true",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.25",
+ "Name": "Ensure that the --service-account-key-file argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.26",
+ "Name": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.27",
+ "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.28",
+ "Name": "Ensure that the --client-ca-file argument is set appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.29",
+ "Name": "Ensure that the --etcd-cafile argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.30",
+ "Name": "Ensure that the --encryption-provider-config argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.3.1",
+ "Name": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.3",
+ "Name": "Ensure that the --use-service-account-credentials argument is set to true",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.4",
+ "Name": "Ensure that the --service-account-private-key-file argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.5",
+ "Name": "Ensure that the --root-ca-file argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.6",
+ "Name": "Ensure that the RotateKubeletServerCertificate argument is set to true",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.3.7",
+ "Name": "Ensure that the --bind-address argument is set to 127.0.0.1",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.4.1",
+ "Name": "Ensure that the --profiling argument is set to false",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.4.2",
+ "Name": "Ensure that the --bind-address argument is set to 127.0.0.1",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.1",
+ "Name": "Ensure that the --cert-file and --key-file arguments are set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.2",
+ "Name": "Ensure that the --client-cert-auth argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.3",
+ "Name": "Ensure that the --auto-tls argument is not set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.4",
+ "Name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.5",
+ "Name": "Ensure that the --peer-client-cert-auth argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.6",
+ "Name": "Ensure that the --peer-auto-tls argument is not set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "3.1.1",
+ "Name": "Client certificate authentication should not be used for users (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "3.2.1",
+ "Name": "Ensure that a minimal audit policy is created (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "3.2.2",
+ "Name": "Ensure that the audit policy covers key security concerns (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "4.1.1",
+ "Name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "4.1.2",
+ "Name": "Ensure that the kubelet service file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.3",
+ "Name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.4",
+ "Name": "If proxy kubeconfig file exists ensure ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.5",
+ "Name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "4.1.6",
+ "Name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.7",
+ "Name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.8",
+ "Name": "Ensure that the client certificate authorities file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.9",
+ "Name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.10",
+ "Name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.1",
+ "Name": "Ensure that the --anonymous-auth argument is set to false",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.2",
+ "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.3",
+ "Name": "Ensure that the --client-ca-file argument is set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.4",
+ "Name": "Verify that the --read-only-port argument is set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.5",
+ "Name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.6",
+ "Name": "Ensure that the --protect-kernel-defaults argument is set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.7",
+ "Name": "Ensure that the --make-iptables-util-chains argument is set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.8",
+ "Name": "Ensure that the --hostname-override argument is not set",
+ "Severity": "HIGH",
+ "TotalFail": 6
+ },
+ {
+ "ID": "4.2.9",
+ "Name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.10",
+ "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 1
+ },
+ {
+ "ID": "4.2.11",
+ "Name": "Ensure that the --rotate-certificates argument is not set to false",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.12",
+ "Name": "Verify that the RotateKubeletServerCertificate argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.13",
+ "Name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.1.1",
+ "Name": "Ensure that the cluster-admin role is only used where required",
+ "Severity": "HIGH",
+ "TotalFail": 2
+ },
+ {
+ "ID": "5.1.2",
+ "Name": "Minimize access to secrets",
+ "Severity": "HIGH",
+ "TotalFail": 15
+ },
+ {
+ "ID": "5.1.3",
+ "Name": "Minimize wildcard use in Roles and ClusterRoles",
+ "Severity": "HIGH",
+ "TotalFail": 8
+ },
+ {
+ "ID": "5.1.6",
+ "Name": "Ensure that Service Account Tokens are only mounted where necessary",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.1.8",
+ "Name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.2",
+ "Name": "Minimize the admission of privileged containers",
+ "Severity": "HIGH",
+ "TotalFail": 8
+ },
+ {
+ "ID": "5.2.3",
+ "Name": "Minimize the admission of containers wishing to share the host process ID namespace",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "5.2.4",
+ "Name": "Minimize the admission of containers wishing to share the host IPC namespace",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.5",
+ "Name": "Minimize the admission of containers wishing to share the host network namespace",
+ "Severity": "HIGH",
+ "TotalFail": 15
+ },
+ {
+ "ID": "5.2.6",
+ "Name": "Minimize the admission of containers with allowPrivilegeEscalation",
+ "Severity": "HIGH",
+ "TotalFail": 31
+ },
+ {
+ "ID": "5.2.7",
+ "Name": "Minimize the admission of root containers",
+ "Severity": "MEDIUM",
+ "TotalFail": 35
+ },
+ {
+ "ID": "5.2.8",
+ "Name": "Minimize the admission of containers with the NET_RAW capability",
+ "Severity": "MEDIUM",
+ "TotalFail": 2
+ },
+ {
+ "ID": "5.2.9",
+ "Name": "Minimize the admission of containers with added capabilities",
+ "Severity": "LOW",
+ "TotalFail": 39
+ },
+ {
+ "ID": "5.2.10",
+ "Name": "Minimize the admission of containers with capabilities assigned",
+ "Severity": "LOW",
+ "TotalFail": 39
+ },
+ {
+ "ID": "5.2.11",
+ "Name": "Minimize the admission of containers with capabilities assigned",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.12",
+ "Name": "Minimize the admission of HostPath volumes",
+ "Severity": "MEDIUM",
+ "TotalFail": 18
+ },
+ {
+ "ID": "5.2.13",
+ "Name": "Minimize the admission of containers which use HostPorts",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.3.1",
+ "Name": "Ensure that the CNI in use supports Network Policies (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.3.2",
+ "Name": "Ensure that all Namespaces have Network Policies defined",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.4.1",
+ "Name": "Prefer using secrets as files over secrets as environment variables (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.4.2",
+ "Name": "Consider external secret storage (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.5.1",
+ "Name": "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.7.1",
+ "Name": "Create administrative boundaries between resources using namespaces (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.7.2",
+ "Name": "Ensure that the seccomp profile is set to docker/default in your pod definitions",
+ "Severity": "MEDIUM",
+ "TotalFail": 19
+ },
+ {
+ "ID": "5.7.3",
+ "Name": "Apply Security Context to Your Pods and Containers",
+ "Severity": "HIGH",
+ "TotalFail": 124
+ },
+ {
+ "ID": "5.7.4",
+ "Name": "The default namespace should not be used",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ }
+ ]
+}
diff --git a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.7-k8s1.27/_index.en.md b/content/kubeone/main/security/cis-benchmarking/kubeone1.7-k8s1.27/_index.en.md
similarity index 100%
rename from content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.7-k8s1.27/_index.en.md
rename to content/kubeone/main/security/cis-benchmarking/kubeone1.7-k8s1.27/_index.en.md
diff --git a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md b/content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md
similarity index 97%
rename from content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md
rename to content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md
index 47dbb97d9..3123adc14 100644
--- a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md
+++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md
@@ -22,47 +22,57 @@ Each control in the CIS Kubernetes Benchmark was evaluated. These are the possib
🔴 **Fail:** The audit/control will be fixed in a future KubeOne release.
## Control Type: master
+
### 1.1. Control Plane Node Configuration Files
+
#### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)
**Result:** 🔴 Fail
@@ -70,16 +80,19 @@ Each control in the CIS Kubernetes Benchmark was evaluated. These are the possib
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root (Manual)
**Result:** 🟢 Pass
---
+
#### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)
**Result:** 🟢 Pass
@@ -87,52 +100,63 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne runs the etcd cluster as containers and there is no `etcd` user or group on the system
---
+
#### 1.1.13: Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.14: Ensure that the admin.conf file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)
**Result:** 🟢 Pass
---
+
#### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)
**Result:** 🟢 Pass
---
+
### 1.2. API Server
+
#### 1.2.1: Ensure that the --anonymous-auth argument is set to false (Manual)
**Result:** 🔴 Fail
@@ -140,11 +164,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 1.2.2: Ensure that the --token-auth-file parameter is not set (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.3: Ensure that the --DenyServiceExternalIPs is set (Manual)
**Result:** 🔴 Fail
@@ -152,31 +178,37 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 1.2.4: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.5: Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.6: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.7: Ensure that the --authorization-mode argument includes Node (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.8: Ensure that the --authorization-mode argument includes RBAC (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.9: Ensure that the admission control plugin EventRateLimit is set (Manual)
**Result:** 🔴 Fail
@@ -184,11 +216,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** The EventRateLimit admission control plugin is in Alpha state, please see [here][eventratelimit]. Supporting Alpha features is under consideration.
---
+
#### 1.2.10: Ensure that the admission control plugin AlwaysAdmit is not set (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.11: Ensure that the admission control plugin AlwaysPullImages is set (Manual)
**Result:** 🔴 Fail
@@ -196,6 +230,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 1.2.12: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)
**Result:** 🔴 Fail
@@ -203,16 +238,19 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** SecurityContextDeny admission control plugin is deprecated as of [Kubernetes 1.27][securitycontextdeny], hence it is not enabled.
---
+
#### 1.2.13: Ensure that the admission control plugin ServiceAccount is set (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.14: Ensure that the admission control plugin NamespaceLifecycle is set (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.15: Ensure that the admission control plugin NodeRestriction is set (Automated)
**Result:** 🔴 Fail
@@ -220,11 +258,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 1.2.16: Ensure that the --profiling argument is set to false (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.17: Ensure that the --audit-log-path argument is set (Automated)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -232,6 +272,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
#### 1.2.18: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -239,6 +280,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
#### 1.2.19: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -246,6 +288,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
#### 1.2.20: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -253,6 +296,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
#### 1.2.21: Ensure that the --request-timeout argument is set as appropriate (Manual)
**Result:** 🟢 Pass
@@ -260,36 +304,43 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** The timeout is set to 60 seconds by default. Setting this timeout too high can exhaust API server resources, making the server prone to Denial-of-Service attacks.
---
+
#### 1.2.22: Ensure that the --service-account-lookup argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.23: Ensure that the --service-account-key-file argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.24: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.25: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.26: Ensure that the --client-ca-file argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.27: Ensure that the --etcd-cafile argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.2.28: Ensure that the --encryption-provider-config argument is set as appropriate (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -297,6 +348,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Encryption configuration can be enabled as described [here][encryption-providers]
---
+
#### 1.2.29: Ensure that encryption providers are appropriately configured (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -304,97 +356,121 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Encryption configuration can be enabled as described [here][encryption-providers]
---
+
#### 1.2.30: Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)
**Result:** 🟢 Pass
---
+
### 1.3. Controller Manager
+
#### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)
**Result:** 🟢 Pass
---
+
#### 1.3.2: Ensure that the --profiling argument is set to false (Automated)
**Result:** 🟢 Pass
---
+
#### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
**Result:** 🟢 Pass
---
+
### 1.4. Scheduler
+
#### 1.4.1: Ensure that the --profiling argument is set to false (Automated)
**Result:** 🟢 Pass
---
+
#### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
**Result:** 🟢 Pass
---
+
## Control Type: etcd
+
### 2. Etcd Node Configuration
+
#### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 2.2: Ensure that the --client-cert-auth argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 2.3: Ensure that the --auto-tls argument is not set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 2.5: Ensure that the --peer-client-cert-auth argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 2.6: Ensure that the --peer-auto-tls argument is not set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 2.7: Ensure that a unique Certificate Authority is used for etcd (Manual)
**Result:** 🟢 Pass
---
+
## Control Type: controlplane
+
### 3.1. Authentication and Authorization
+
#### 3.1.1: Client certificate authentication should not be used for users (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -402,6 +478,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne can be configured with OIDC authentication as described [here][oidc]
---
+
#### 3.1.2: Service account token authentication should not be used for users (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -409,6 +486,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne can be configured with OIDC authentication as described [here][oidc]
---
+
#### 3.1.3: Bootstrap token authentication should not be used for users (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -416,7 +494,9 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne can be configured with OIDC authentication as described [here][oidc]
---
+
### 3.2. Logging
+
#### 3.2.1: Ensure that a minimal audit policy is created (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -424,6 +504,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
#### 3.2.2: Ensure that the audit policy covers key security concerns (Manual)
**Result:** 🔵 Pass (Additional Configuration Required)
@@ -431,18 +512,23 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** Audit logging is not enabled by default; it can be configured as described [here][audit-logging]
---
+
## Control Type: node
+
### 4.1. Worker Node Configuration Files
+
#### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 4.1.2: Ensure that the kubelet service file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)
**Result:** 🟢 Pass
@@ -450,6 +536,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne does not create a `/etc/kubernetes/proxy.conf` file
---
+
#### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)
**Result:** 🟢 Pass
@@ -457,67 +544,81 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** KubeOne does not create a `/etc/kubernetes/proxy.conf` file
---
+
#### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
#### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)
**Result:** 🟢 Pass
---
+
#### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root (Manual)
**Result:** 🟢 Pass
---
+
#### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)
**Result:** 🟢 Pass
---
+
#### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)
**Result:** 🟢 Pass
---
+
### 4.2. Kubelet
+
#### 4.2.1: Ensure that the --anonymous-auth argument is set to false (Automated)
**Result:** 🟢 Pass
---
+
#### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)
**Result:** 🟢 Pass
---
+
#### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate (Automated)
**Result:** 🟢 Pass
---
+
#### 4.2.4: Verify that the --read-only-port argument is set to 0 (Manual)
**Result:** 🟢 Pass
---
+
#### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)
**Result:** 🟢 Pass
---
+
#### 4.2.6: Ensure that the --make-iptables-util-chains argument is set to true (Automated)
**Result:** 🟢 Pass
---
+
#### 4.2.7: Ensure that the --hostname-override argument is not set (Manual)
**Result:** 🔴 Fail
@@ -525,11 +626,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 4.2.8: Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)
**Result:** 🟢 Pass
---
+
#### 4.2.9: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)
**Result:** 🟢 Pass
@@ -537,16 +640,19 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
**Details:** `--tls-cert-file` and `--tls-private-key-file` options are provided to Kubelet
---
+
#### 4.2.10: Ensure that the --rotate-certificates argument is not set to false (Automated)
**Result:** 🟢 Pass
---
+
#### 4.2.11: Verify that the RotateKubeletServerCertificate argument is set to true (Manual)
**Result:** 🟢 Pass
---
+
#### 4.2.12: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)
**Result:** 🔴 Fail
@@ -554,6 +660,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
_The issue is under investigation to provide a fix in a future KubeOne release_
---
+
#### 4.2.13: Ensure that a limit is set on pod PIDs (Manual)
**Result:** 🔴 Fail
@@ -565,6 +672,6 @@ _The issue is under investigation to provide a fix in a future KubeOne release_
[audit-logging]: {{< ref "../../../tutorials/creating-clusters-oidc/#audit-logging" >}}
[encryption-providers]: {{< ref "../../../guides/encryption-providers/" >}}
[oidc]: {{< ref "../../../tutorials/creating-clusters-oidc/" >}}
-[anon-req]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
-[eventratelimit]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit
-[securitycontextdeny]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#securitycontextdeny
+[anon-req]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
+[eventratelimit]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit
+[securitycontextdeny]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#securitycontextdeny
diff --git a/content/kubeone/main/security/system-logs/_index.en.md b/content/kubeone/main/security/system-logs/_index.en.md
new file mode 100644
index 000000000..b968e9fcb
--- /dev/null
+++ b/content/kubeone/main/security/system-logs/_index.en.md
@@ -0,0 +1,136 @@
++++
+title = "Personally Identifiable Information Analysis: Kubernetes and KubeOne System Logs"
+date = 2024-03-06T12:00:00+02:00
+weight = 10
++++
+
+This document provides a comprehensive analysis of potential Personally Identifiable Information (PII) and personal data (indirect identifiers) that may be present in system logs from Kubernetes clusters deployed using KubeOne.
+
+**Target Audience**: Platform operators, security teams, compliance officers
+
+**Prerequisites**: Basic understanding of Kubernetes and KubeOne
+
+While KubeOne inherently tries to avoid logging any PII, in some cases this is unavoidable and outside the control of the platform operator, for example in a component that KubeOne ships or in the underlying Kubernetes components.
+
+## PII Categories (GDPR-Aligned)
+
+System logs from Kubernetes clusters may contain the following types of PII:
+
+### Direct Identifiers
+
+* **Usernames**: Kubernetes usernames, system usernames, service account names
+* **Email addresses**: From TLS certificate subjects (CN, O, OU), OIDC claims, audit logs, or user labels
+* **IP addresses**: Client IPs
+
+### Indirect Identifiers
+
+* **Resource names**: Pod names, namespace names, deployment names containing user/org identifiers
+ * Example: `webapp-john-deployment`, `john-doe-dev` namespace
+* **Hostnames**: Node hostnames with user or organizational patterns
+ * Example: `worker-john-prod-01.company.com`
+* **Labels and annotations**: Custom metadata that may include user data
+ * Example: `owner=john.doe@company.com`
+* **Volume paths**: Mount paths revealing directory structures with usernames
+ * Example: `/home/john/data:/data`
+
+### Cloud Provider Identifiers
+
+* **Account IDs**: AWS account IDs, Azure subscription IDs, GCP project IDs
+* **Resource IDs**: Instance IDs, VPC IDs, volume IDs, subnet IDs, security group IDs
+* **DNS names**: Load balancer DNS, instance DNS names
+* **Geographic data**: Availability zones, regions
+
+### Operational Data That May Reveal Personal Data
+
+* **DNS queries**: Service/pod names in DNS lookups
+* **HTTP/gRPC metadata**: URLs, headers, cookies (if Layer 7 visibility is enabled in the CNI)
+* **Error messages**: Often contain detailed context with resource IDs and user identifiers
+* **Audit logs**: Comprehensive request/response data including full user context
+
+## Risk Assessment Matrix
+
+| Component | User Identity | IP Addresses | Credentials | Cloud IDs | Risk Level |
+|-----------|---------------|--------------|-------------|-----------|------------|
+| kube-apiserver | ✅ High | ✅ High | ✅ High | ❌ No | 🔴 **HIGH** |
+| kubelet | ⚠️ Medium | ✅ High | ✅ High | ❌ No | 🔴 **HIGH** |
+| etcd | ✅ High | ⚠️ Medium | ✅ High | ❌ No | 🔴 **HIGH** |
+| Cloud Controller Managers | ❌ No | ✅ High | ✅ High | ✅ High | 🔴 **HIGH** |
+| CSI Drivers | ❌ No | ⚠️ Medium | ✅ High | ✅ High | 🔴 **HIGH** |
+| Secrets Store CSI | ❌ No | ❌ No | ✅ High | ⚠️ Low | 🔴 **HIGH** |
+| Cilium | ⚠️ Medium | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM-HIGH** |
+| kube-controller-manager | ⚠️ Low | ⚠️ Medium | ⚠️ Medium | ⚠️ Medium | 🟡 **MEDIUM** |
+| kube-scheduler | ⚠️ Low | ❌ No | ❌ No | ❌ No | 🟡 **MEDIUM** |
+| kube-proxy | ❌ No | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM** |
+| CoreDNS | ⚠️ Low | ⚠️ Medium | ❌ No | ❌ No | 🟡 **MEDIUM** |
+| Canal | ❌ No | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM** |
+| WeaveNet | ❌ No | ✅ High | ⚠️ Low | ❌ No | 🟡 **MEDIUM** |
+| cluster-autoscaler | ⚠️ Low | ⚠️ Low | ⚠️ Low | ✅ High | 🟡 **MEDIUM** |
+| NodeLocalDNS | ⚠️ Low | ⚠️ Medium | ❌ No | ❌ No | 🟡 **MEDIUM** |
+| metrics-server | ⚠️ Low | ❌ No | ❌ No | ❌ No | 🟢 **LOW-MEDIUM** |
+| machine-controller | ⚠️ Low | ❌ No | ⚠️ Low | ✅ High | 🟢 **LOW** |
+| operating-system-manager | ⚠️ Low | ❌ No | ❌ No | ⚠️ Low | 🟢 **LOW** |
+
+**Legend**:
+
+* ✅ High: Frequent and detailed PII exposure
+* ⚠️ Medium: Moderate PII exposure
+* ❌ No: Minimal or no PII exposure
+
+### Understanding Risk Context
+
+While the risk matrix provides a helpful overview of potential PII exposure, the actual risk is not always proportional to a component's rating: even a component rated low risk can expose significant personal data under certain conditions.
+
+A typical example is a component that logs a full Kubernetes resource when validation fails. Even though the log message never references the PII-carrying fields directly, the resource as a whole may contain personal data, which then ends up in the logs. Always review and sanitize logs before sharing them anywhere.
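+
+As a quick sanity check, you can grep a collected log bundle for common PII patterns before sharing it. A minimal sketch, assuming the logs were exported to a local `./log-bundle` directory (a hypothetical path):
+
+```bash
+# Count unique potential email addresses in the bundle.
+grep -rEoh '[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' ./log-bundle | sort -u | wc -l
+
+# Count unique potential IPv4 addresses in the bundle.
+grep -rEoh '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' ./log-bundle | sort -u | wc -l
+```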
+
+## Log Filtering and Sanitization
+
+### Automated PII Filtering
+
+Implement automated filtering in your log aggregation pipeline to remove PII and personal data from the logs.
+
+#### Use External Tools for PII Redaction
+
+* [Presidio](https://microsoft.github.io/presidio/) - A set of tools for data protection and privacy
+* [Azure Purview](https://learn.microsoft.com/en-us/purview/information-protection) - A cloud-based data governance service that helps you manage and protect your sensitive data
+
+### Manual PII Filtering: Common Patterns to Filter
+
+```regex
+# Email addresses
+[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}
+
+# IPv4 addresses
+\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b
+
+# Basic Auth in URLs
+https?://[^:]+:[^@]+@
+```
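+
+The same patterns can be wired into a small redaction script, whether run by hand or as a stage in a log aggregation pipeline. A minimal sketch, assuming GNU sed; the script name `redact-logs.sh` and the placeholder tokens are illustrative, not a Kubermatic tool:
+
+```bash
+#!/usr/bin/env bash
+# Usage: ./redact-logs.sh < input.log > redacted.log
+# Replaces common PII patterns with fixed placeholders.
+set -euo pipefail
+
+sed -E \
+  -e 's/[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/[REDACTED-EMAIL]/g' \
+  -e 's/\b([0-9]{1,3}\.){3}[0-9]{1,3}\b/[REDACTED-IP]/g' \
+  -e 's#(https?://)[^:/@[:space:]]+:[^@[:space:]]+@#\1[REDACTED-CREDENTIALS]@#g'
+```
+
+Treat the output as a best-effort pass: hostnames, resource names, and cloud provider IDs usually need additional, environment-specific patterns.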
+
+## Best Practices
+
+### Before sharing logs with Kubermatic Support
+
+1. Identify the time range needed (minimize data exposure)
+2. Export only the relevant namespaces/components
+3. Run a PII redaction tool or script (see the sketch after this list)
+4. Manually review the first 100 lines to verify redaction
+5. Obtain approval from your data protection officer (if required)
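+
+A sketch of steps 1-4, assuming `kubectl` access to the affected cluster; the namespace, workload, time range, and the `redact-logs.sh` script from the previous section are placeholders:
+
+```bash
+# Steps 1-2: export only the relevant time range and workload.
+kubectl logs --since=2h -n my-namespace deploy/my-deployment > raw.log
+
+# Step 3: run the redaction script.
+./redact-logs.sh < raw.log > redacted.log
+
+# Step 4: manually review the first 100 lines before sharing.
+head -n 100 redacted.log
+```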
+
+## Conclusion
+
+### Key Points
+
+1. Kubernetes logs contain significant PII, especially from kube-apiserver, kubelet, etcd, and all cloud provider components
+2. Higher log verbosity (v=4-5) dramatically increases PII exposure
+3. Cloud provider account identifiers are prevalent in Cloud Controller Managers (CCMs) and CSI drivers
+4. Automated filtering tools are essential for safe log sharing at scale
+5. Manual review is still necessary to catch context-specific PII
+
+## Additional Resources
+
+### GDPR and Privacy
+
+* [GDPR Official Text](https://gdpr-info.eu/)
+* [Article 29 Working Party Opinion on Personal Data](https://ec.europa.eu/justice/article-29/documentation/opinion-recommendation/index_en.htm)
diff --git a/content/kubeone/main/tutorials/_index.en.md b/content/kubeone/main/tutorials/_index.en.md
index 6dca2a05e..203c45d2a 100644
--- a/content/kubeone/main/tutorials/_index.en.md
+++ b/content/kubeone/main/tutorials/_index.en.md
@@ -2,7 +2,7 @@
title = "Tutorials & How-tos"
date = 2021-02-10T09:00:00+02:00
description = "Read and learn the functions and tasks you can perform in Kubermatic KubeOne"
-weight = 3
+weight = 4
chapter = true
+++
diff --git a/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md
index 6ed6e8c72..0b12a00b2 100644
--- a/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md
+++ b/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md
@@ -135,8 +135,8 @@ The following infrastructure requirements **must** be satisfied to successfully
provision a Kubernetes cluster using KubeOne:
* You need the appropriate number of instances dedicated for the control plane
- * You need **even** number of instances with a minimum of **three** instances
- for the Highly-Available control plane
+ * You need an **odd** number of instances with a minimum of **three** instances
+ for the highly-available control plane
* If you decide to use a single-node control plane instead, one instance is
enough; however, a highly-available control plane is strongly advised,
especially in production environments
@@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: bm-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
cloudProvider:
none: {}
@@ -233,7 +233,7 @@ controlPlane:
sshUsername: root
sshPrivateKeyFile: '/home/me/.ssh/id_rsa'
taints:
- - key: "node-role.kubernetes.io/master"
+ - key: "node-role.kubernetes.io/control-plane"
effect: "NoSchedule"
staticWorkers:
@@ -298,11 +298,11 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
- + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4
- + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.34.1
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.34.1
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.34.1
+ + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.34.1
+ + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.34.1
Do you want to proceed (yes/no):
```
@@ -356,11 +356,11 @@ You should see output such as the following one.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
-ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.34.1
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.34.1
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.34.1
+ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.34.1
+ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.34.1
```
## Conclusion
diff --git a/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md
index f23d89388..0444e5958 100644
--- a/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md
+++ b/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md
@@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
cloudProvider:
hetzner: {}
@@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
cloudProvider:
hetzner: {}
@@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.34.1'
cloudProvider:
hetzner: {}
diff --git a/content/kubeone/main/tutorials/creating-clusters/_index.en.md b/content/kubeone/main/tutorials/creating-clusters/_index.en.md
index 2361fe2f1..699c92446 100644
--- a/content/kubeone/main/tutorials/creating-clusters/_index.en.md
+++ b/content/kubeone/main/tutorials/creating-clusters/_index.en.md
@@ -585,7 +585,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
aws: {}
@@ -613,7 +613,7 @@ with your cluster name in the cloud-config example below.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
azure: {}
external: true
@@ -648,7 +648,7 @@ and fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
digitalocean: {}
external: true
@@ -666,7 +666,7 @@ configs.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
gce: {}
external: true
@@ -697,7 +697,7 @@ The Hetzner CCM fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
hetzner: {}
external: true
@@ -715,7 +715,7 @@ replace the placeholder values.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
nutanix: {}
addons:
@@ -745,7 +745,7 @@ cloud-config section.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
openstack: {}
external: true
@@ -767,7 +767,7 @@ cloudProvider:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
openstack: {}
external: true
@@ -791,7 +791,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
vmwareCloudDirector: {}
@@ -810,7 +810,7 @@ automatically by KubeOne.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
vsphere: {}
external: true
@@ -858,18 +858,18 @@ In the following table, you can find a list of supported Kubernetes version
for latest KubeOne versions (you can run `kubeone version` to find the version
that you're running).
-| KubeOne \ Kubernetes | 1.33 | 1.32 | 1.31 | 1.30 | 1.29[^1] |
+| KubeOne \ Kubernetes | 1.34 | 1.33 | 1.32 | 1.31 | 1.30[^1] |
| -------------------- | ---- | ---- | ---- | -----| -------- |
-| v1.11 | ✓ | ✓ | ✓ | - | - |
-| v1.10 | - | ✓ | ✓ | ✓ | - |
-| v1.9 | - | - | ✓ | ✓ | ✓ |
+| v1.12 | ✓ | ✓ | ✓ | - | - |
+| v1.11 | - | ✓ | ✓ | ✓ | - |
+| v1.10 | - | - | ✓ | ✓ | ✓ |
-[^1]: Kubernetes 1.29 has reached End-of-Life (EOL) and is not supported any longer.
+[^1]: Kubernetes 1.30 has reached End-of-Life (EOL) and is not supported any longer.
We strongly recommend upgrading to a newer supported Kubernetes release as soon as possible.
We recommend using a Kubernetes release that's at most one minor release older
-than the latest Kubernetes release. For example, with 1.33 being the latest
-release, we recommend running at least Kubernetes 1.32.
+than the latest Kubernetes release. For example, with 1.34 being the latest
+release, we recommend running at least Kubernetes 1.33.
Now, we're ready to provision the cluster! This is done by running the
`kubeone apply` command and providing it the configuration manifest and the
@@ -897,9 +897,9 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.34.1
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.34.1
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.34.1
+ ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists
@@ -977,12 +977,12 @@ cluster.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
+ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.34.1
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.34.1
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.34.1
+ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.34.1
+ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.34.1
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.34.1
```
## Conclusion
diff --git a/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md
index abeacc163..23b99a368 100644
--- a/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md
+++ b/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md
@@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
addons:
enable: true
operatingSystemManager:
@@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: "1.29.4"
+ kubernetes: "1.32.9"
cloudProvider:
aws: {}
addons:
diff --git a/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md b/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md
index cdaf9d331..320ba12a3 100644
--- a/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md
+++ b/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md
@@ -13,7 +13,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
cloudProvider:
aws: {}
diff --git a/content/kubeone/v1.10/guides/addons/_index.en.md b/content/kubeone/v1.10/guides/addons/_index.en.md
index b6e654c0e..8d1e79afa 100644
--- a/content/kubeone/v1.10/guides/addons/_index.en.md
+++ b/content/kubeone/v1.10/guides/addons/_index.en.md
@@ -64,7 +64,7 @@ the `addons` config:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
cloudProvider:
aws: {}
# Addons are Kubernetes manifests to be deployed after provisioning the cluster
@@ -113,7 +113,7 @@ Example:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
addons:
enable: true
@@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
addons:
enable: true
@@ -180,7 +180,7 @@ you can use it to override globally defined parameters.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
addons:
enable: true
diff --git a/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md
index cc76f595c..6c9fb9f28 100644
--- a/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md
+++ b/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md
@@ -33,7 +33,7 @@ kubeone.yaml
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.32.9' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.32.9' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments:
```bash
$ kubectl get machinedeployments -n kube-system
NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE
-kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h
+kb-cluster-eu-west-3a 1 1 aws ubuntu 1.32.9 10h
+kb-cluster-eu-west-3b 1 1 aws ubuntu 1.32.9 10h
+kb-cluster-eu-west-3c 1 1 aws ubuntu 1.32.9 10h
```
### Step 2: Annotate Machinedeployments
@@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne
[step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}}
[embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}}
[ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md
-[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
\ No newline at end of file
+[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
diff --git a/content/kubeone/v1.10/guides/encryption-providers/_index.en.md b/content/kubeone/v1.10/guides/encryption-providers/_index.en.md
index 09c42aed9..cb6519b7e 100644
--- a/content/kubeone/v1.10/guides/encryption-providers/_index.en.md
+++ b/content/kubeone/v1.10/guides/encryption-providers/_index.en.md
@@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
features:
# enable encryption providers
encryptionProviders:
@@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
features:
# enable encryption providers
encryptionProviders:
@@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
features:
encryptionProviders:
enable: true
@@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: kms-test
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
aws: {}
features:
diff --git a/content/kubeone/v1.10/guides/mirror-registries/_index.en.md b/content/kubeone/v1.10/guides/mirror-registries/_index.en.md
index 49f7a580f..4ff32c862 100644
--- a/content/kubeone/v1.10/guides/mirror-registries/_index.en.md
+++ b/content/kubeone/v1.10/guides/mirror-registries/_index.en.md
@@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml
The docker.io registry introduced pretty low rate limits for unauthenticated requests. There are a few workarounds:
-* Buy docker subscribtion.
+* Buy a Docker subscription.
How to use docker.io credentials is covered in the [section above][using-credentials].
-* Setup own pull-through caching proxy.
-* Use public pull-through caching proxy.
+* Set up your own pull-through caching proxy.
+* Use a public pull-through caching proxy.
diff --git a/content/kubeone/v1.10/guides/registry-configuration/_index.en.md b/content/kubeone/v1.10/guides/registry-configuration/_index.en.md
index 2d93ff12a..90b35f607 100644
--- a/content/kubeone/v1.10/guides/registry-configuration/_index.en.md
+++ b/content/kubeone/v1.10/guides/registry-configuration/_index.en.md
@@ -62,7 +62,7 @@ to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with
the address to your image registry.
```
-KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
+KUBERNETES_VERSION=1.32.9 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
```
-The preloading process can take a several minutes, depending on your
+The preloading process can take several minutes, depending on your
@@ -77,7 +77,7 @@ stanza to your KubeOne configuration file, such as:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.32.9
cloudProvider:
aws: {}
registryConfiguration:
diff --git a/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md
index 35a887a5b..3a9b1497e 100644
--- a/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md
+++ b/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md
@@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: bm-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
none: {}
@@ -297,11 +297,11 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
- + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4
- + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.32.9
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.32.9
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.32.9
+ + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.32.9
+ + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.32.9
Do you want to proceed (yes/no):
```
@@ -355,11 +355,11 @@ You should see output such as the following one.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
-ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.32.9
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.32.9
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.32.9
+ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.32.9
+ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.32.9
```
## Conclusion
diff --git a/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md
index f23d89388..97b45a83e 100644
--- a/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md
+++ b/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md
@@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
hetzner: {}
@@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
hetzner: {}
@@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
hetzner: {}
diff --git a/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md
index 02a7602eb..93ceb3cb4 100644
--- a/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md
+++ b/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md
@@ -615,7 +615,7 @@ supported provider.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
aws: {}
external: true
@@ -642,7 +642,7 @@ with your cluster name in the cloud-config example below.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
azure: {}
external: true
@@ -677,7 +677,7 @@ and fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
digitalocean: {}
external: true
@@ -695,7 +695,7 @@ configs.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
gce: {}
external: true
@@ -726,7 +726,7 @@ The Hetzner CCM fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
hetzner: {}
external: true
@@ -744,7 +744,7 @@ replace the placeholder values.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
nutanix: {}
addons:
@@ -774,7 +774,7 @@ cloud-config section.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
openstack: {}
external: true
@@ -796,7 +796,7 @@ cloudProvider:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
openstack: {}
external: true
@@ -824,7 +824,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
equinixmetal: {}
@@ -845,7 +845,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
vmwareCloudDirector: {}
@@ -864,7 +864,7 @@ automatically by KubeOne.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.32.9'
cloudProvider:
vsphere: {}
external: true
@@ -950,9 +950,9 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.32.9
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.32.9
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.32.9
+ ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists
@@ -1030,12 +1030,12 @@ cluster.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
+ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.32.9
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.32.9
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.32.9
+ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.32.9
+ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.32.9
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.32.9
```
## Conclusion
diff --git a/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md
index abeacc163..627110b1a 100644
--- a/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md
+++ b/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md
@@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
addons:
enable: true
operatingSystemManager:
@@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: "1.29.4"
+ kubernetes: "1.33.5"
cloudProvider:
aws: {}
addons:
diff --git a/content/kubeone/v1.11/guides/addons/_index.en.md b/content/kubeone/v1.11/guides/addons/_index.en.md
index 725b59df0..5f08591f1 100644
--- a/content/kubeone/v1.11/guides/addons/_index.en.md
+++ b/content/kubeone/v1.11/guides/addons/_index.en.md
@@ -64,7 +64,7 @@ the `addons` config:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
cloudProvider:
aws: {}
# Addons are Kubernetes manifests to be deployed after provisioning the cluster
@@ -113,7 +113,7 @@ Example:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
addons:
enable: true
@@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
addons:
enable: true
@@ -180,7 +180,7 @@ you can use it to override globally defined parameters.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
addons:
enable: true
diff --git a/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md
index cc76f595c..7521eabcd 100644
--- a/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md
+++ b/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md
@@ -33,7 +33,7 @@ kubeone.yaml
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.33.5' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.33.5' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments:
```bash
$ kubectl get machinedeployments -n kube-system
NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE
-kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h
+kb-cluster-eu-west-3a 1 1 aws ubuntu 1.33.5 10h
+kb-cluster-eu-west-3b 1 1 aws ubuntu 1.33.5 10h
+kb-cluster-eu-west-3c 1 1 aws ubuntu 1.33.5 10h
```
### Step 2: Annotate Machinedeployments
@@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne
[step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}}
-[embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}}
+[embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-addons" >}}
[ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md
-[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
\ No newline at end of file
+[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
diff --git a/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md
new file mode 100644
index 000000000..c5a99d2c7
--- /dev/null
+++ b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md
@@ -0,0 +1,1067 @@
++++
+title = "Benchmark on Kubernetes 1.33 with KubeOne 1.11.2"
+date = 2025-09-19T16:39:34+02:00
++++
+
+This guide helps you evaluate the security of a Kubernetes cluster created using KubeOne against each control in the CIS Kubernetes Benchmark.
+
+This guide corresponds to the following versions of KubeOne, Kubernetes, and the CIS Benchmark:
+
+| KubeOne Version | Kubernetes Version | CIS Benchmark Version |
+| ---------------- | ------------------ | --------------------- |
+| 1.11.2 | 1.33.4 | CIS-1.23 |
+
+## Testing Methodology
+
+### Running the Benchmark
+
+[Trivy](https://github.com/aquasecurity/trivy) was used to run the benchmark.
+
+```bash
+trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule
+```
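+
+If you also want the full per-control results as JSON (the format of the `result.json` file that accompanies this guide), you can ask Trivy for an `all` report; a minimal sketch, assuming Trivy's standard `--format` and `--output` reporting flags:
+
+```bash
+# Hedged example: write the complete per-control results to result.json.
+trivy k8s --compliance=k8s-cis-1.23 --report all --format json \
+  --output result.json --timeout=1h \
+  --tolerations node-role.kubernetes.io/control-plane="":NoSchedule
+```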
+
+### Results
+
+Running the command above produces a summary report for compliance with CIS Kubernetes Benchmarks v1.23.
+
+Each control in the CIS Kubernetes Benchmark was evaluated. These are the possible results for each control:
+
+🟢 **Pass:** The cluster passes the audit/control outlined in the benchmark.
+
+🔵 **Pass (Additional Configuration Required):** The cluster passes the audit/control outlined in the benchmark once some extra configuration is applied. Links to the relevant documentation are provided.
+
+🔴 **Fail:** The cluster does not currently pass the audit/control; a fix is planned for a future KubeOne release.
+
+## Control Type: Control Plane Components
+
+### 1.1. Control Plane Node Configuration Files
+
+#### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
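+
+If you need to satisfy this control before a fix ships, the benchmark's own remediation is to change ownership of the etcd data directory on each control plane node; a hedged sketch, assuming the default kubeadm data path:
+
+```bash
+# Run on every control plane node; the etcd user and group must exist
+# (create them first if they do not).
+sudo chown -R etcd:etcd /var/lib/etcd
+```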
+
+---
+
+#### 1.1.13: Ensure that the admin.conf file permissions are set to 600
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.14: Ensure that the admin.conf file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+### 1.2. API Server
+
+#### 1.2.1: Ensure that the --anonymous-auth argument is set to false
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.2: Ensure that the --token-auth-file parameter is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.3: Ensure that the --DenyServiceExternalIPs is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.4: Ensure that the --kubelet-https argument is set to true
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.5: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.6: Ensure that the --kubelet-certificate-authority argument is set as appropriate
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.7: Ensure that the --authorization-mode argument is not set to AlwaysAllow
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.8: Ensure that the --authorization-mode argument includes Node
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.9: Ensure that the --authorization-mode argument includes RBAC
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.10: Ensure that the admission control plugin EventRateLimit is set
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.11: Ensure that the admission control plugin AlwaysAdmit is not set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.12: Ensure that the admission control plugin AlwaysPullImages is set
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.13: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.14: Ensure that the admission control plugin ServiceAccount is set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.15: Ensure that the admission control plugin NamespaceLifecycle is set
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.16: Ensure that the admission control plugin NodeRestriction is set
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.17: Ensure that the --secure-port argument is not set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.18: Ensure that the --profiling argument is set to false
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.19: Ensure that the --audit-log-path argument is set
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release. In the meantime, API server audit logging can be configured manually; see the [audit logging tutorial][audit-logging]._
+
+---
+
+#### 1.2.20: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.21: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.22: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.2.24: Ensure that the --service-account-lookup argument is set to true
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.25: Ensure that the --service-account-key-file argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.26: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.27: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.28: Ensure that the --client-ca-file argument is set appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.29: Ensure that the --etcd-cafile argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.2.30: Ensure that the --encryption-provider-config argument is set as appropriate
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
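+
+In the meantime, this control can be addressed by enabling KubeOne's encryption providers feature (see the [encryption providers guide][encryption-providers]); a minimal sketch of the relevant `KubeOneCluster` stanza:
+
+```yaml
+apiVersion: kubeone.k8c.io/v1beta2
+kind: KubeOneCluster
+versions:
+  kubernetes: '1.33.5'
+features:
+  # Enables encryption at rest, which configures --encryption-provider-config.
+  encryptionProviders:
+    enable: true
+```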
+
+---
+
+### 1.3. Controller Manager
+
+#### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1
+
+**Severity:** LOW
+
+**Result:** 🟢 Pass
+
+---
+
+### 1.4. Scheduler
+
+#### 1.4.1: Ensure that the --profiling argument is set to false
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Etcd
+
+#### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.2: Ensure that the --client-cert-auth argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.3: Ensure that the --auto-tls argument is not set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.5: Ensure that the --peer-client-cert-auth argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 2.6: Ensure that the --peer-auto-tls argument is not set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Control Plane Configuration
+
+### 3.1. Authentication and Authorization
+
+#### 3.1.1: Client certificate authentication should not be used for users (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+### 3.2. Logging
+
+#### 3.2.1: Ensure that a minimal audit policy is created (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+#### 3.2.2: Ensure that the audit policy covers key security concerns (Manual)
+
+**Severity:** HIGH
+
+**Result:** Manual check required
+
+---
+
+## Control Type: Worker Nodes
+
+### 4.1. Worker Node Configuration Files
+
+#### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 4.1.2: Ensure that the kubelet service file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+### 4.2. Kubelet
+
+#### 4.2.1: Ensure that the --anonymous-auth argument is set to false
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.4: Verify that the --read-only-port argument is set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.6: Ensure that the --protect-kernel-defaults argument is set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.7: Ensure that the --make-iptables-util-chains argument is set to true
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.8: Ensure that the --hostname-override argument is not set
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 4.2.9: Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.10: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate
+
+**Severity:** CRITICAL
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 4.2.11: Ensure that the --rotate-certificates argument is not set to false
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.12: Verify that the RotateKubeletServerCertificate argument is set to true
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 4.2.13: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+## Control Type: Policies
+
+### 5.1. RBAC and Service Accounts
+
+#### 5.1.1: Ensure that the cluster-admin role is only used where required
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.1.2: Minimize access to secrets
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.1.3: Minimize wildcard use in Roles and ClusterRoles
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.1.6: Ensure that Service Account Tokens are only mounted where necessary
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.1.8: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.2. Pod Security Standards
+
+#### 5.2.2: Minimize the admission of privileged containers
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
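+
+A common way to address the 5.2.x admission controls is to enforce Pod Security Standards on your namespaces; a hedged sketch using the upstream Pod Security Admission labels (the namespace name is hypothetical):
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+  labels:
+    # Reject pods that violate the "restricted" Pod Security Standard.
+    pod-security.kubernetes.io/enforce: restricted
+```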
+
+---
+
+#### 5.2.3: Minimize the admission of containers wishing to share the host process ID namespace
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.4: Minimize the admission of containers wishing to share the host IPC namespace
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.2.5: Minimize the admission of containers wishing to share the host network namespace
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.6: Minimize the admission of containers with allowPrivilegeEscalation
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.7: Minimize the admission of root containers
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.8: Minimize the admission of containers with the NET_RAW capability
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.9: Minimize the admission of containers with added capabilities
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.10: Minimize the admission of containers with capabilities assigned
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.11: Minimize the admission of containers with capabilities assigned
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+#### 5.2.12: Minimize the admission of HostPath volumes
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.2.13: Minimize the admission of containers which use HostPorts
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.3. Network Policies and CNI
+
+#### 5.3.1: Ensure that the CNI in use supports Network Policies (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.3.2: Ensure that all Namespaces have Network Policies defined
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+### 5.4. Secrets Management
+
+#### 5.4.1: Prefer using secrets as files over secrets as environment variables (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.4.2: Consider external secret storage (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+### 5.5. Extensible Admission Control
+
+#### 5.5.1: Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+### 5.7. General Policies
+
+#### 5.7.1: Create administrative boundaries between resources using namespaces (Manual)
+
+**Severity:** MEDIUM
+
+**Result:** Manual check required
+
+---
+
+#### 5.7.2: Ensure that the seccomp profile is set to docker/default in your pod definitions
+
+**Severity:** MEDIUM
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
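+
+For your own workloads, this can be remediated per pod through the standard `securityContext` field; a minimal sketch (pod and image names are hypothetical):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: seccomp-example
+spec:
+  securityContext:
+    # RuntimeDefault applies the container runtime's default seccomp profile.
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+    - name: app
+      image: nginx:1.27
+```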
+
+---
+
+#### 5.7.3: Apply Security Context to Your Pods and Containers
+
+**Severity:** HIGH
+
+**Result:** 🔴 Fail
+
+_This issue is under investigation; a fix is planned for a future KubeOne release._
+
+---
+
+#### 5.7.4: The default namespace should not be used
+
+**Severity:** MEDIUM
+
+**Result:** 🟢 Pass
+
+---
+
+## References
+
+[audit-logging]: {{< ref "../../../tutorials/creating-clusters-oidc/#audit-logging" >}}
+[encryption-providers]: {{< ref "../../../guides/encryption-providers/" >}}
+[oidc]: {{< ref "../../../tutorials/creating-clusters-oidc/" >}}
diff --git a/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json
new file mode 100644
index 000000000..608377bc1
--- /dev/null
+++ b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json
@@ -0,0 +1,694 @@
+{
+ "ID": "k8s-cis-1.23",
+ "Title": "CIS Kubernetes Benchmarks v1.23",
+ "SummaryControls": [
+ {
+ "ID": "1.1.1",
+ "Name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.2",
+ "Name": "Ensure that the API server pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.3",
+ "Name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.4",
+ "Name": "Ensure that the controller manager pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.5",
+ "Name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.6",
+ "Name": "Ensure that the scheduler pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.7",
+ "Name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.8",
+ "Name": "Ensure that the etcd pod specification file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.9",
+ "Name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.10",
+ "Name": "Ensure that the Container Network Interface file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.11",
+ "Name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.12",
+ "Name": "Ensure that the etcd data directory ownership is set to etcd:etcd",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.1.13",
+ "Name": "Ensure that the admin.conf file permissions are set to 600",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.14",
+ "Name": "Ensure that the admin.conf file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.15",
+ "Name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.16",
+ "Name": "Ensure that the scheduler.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.17",
+ "Name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.18",
+ "Name": "Ensure that the controller-manager.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.19",
+ "Name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.20",
+ "Name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.1.21",
+ "Name": "Ensure that the Kubernetes PKI key file permissions are set to 600",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.1",
+ "Name": "Ensure that the --anonymous-auth argument is set to false",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.2",
+ "Name": "Ensure that the --token-auth-file parameter is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.3",
+ "Name": "Ensure that the --DenyServiceExternalIPs is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.4",
+ "Name": "Ensure that the --kubelet-https argument is set to true",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.5",
+ "Name": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.6",
+ "Name": "Ensure that the --kubelet-certificate-authority argument is set as appropriate",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.7",
+ "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.8",
+ "Name": "Ensure that the --authorization-mode argument includes Node",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.9",
+ "Name": "Ensure that the --authorization-mode argument includes RBAC",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.10",
+ "Name": "Ensure that the admission control plugin EventRateLimit is set",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.11",
+ "Name": "Ensure that the admission control plugin AlwaysAdmit is not set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.12",
+ "Name": "Ensure that the admission control plugin AlwaysPullImages is set",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.13",
+ "Name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.14",
+ "Name": "Ensure that the admission control plugin ServiceAccount is set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.15",
+ "Name": "Ensure that the admission control plugin NamespaceLifecycle is set",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.16",
+ "Name": "Ensure that the admission control plugin NodeRestriction is set",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.17",
+ "Name": "Ensure that the --secure-port argument is not set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.18",
+ "Name": "Ensure that the --profiling argument is set to false",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.19",
+ "Name": "Ensure that the --audit-log-path argument is set",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.20",
+ "Name": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.21",
+ "Name": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.22",
+ "Name": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.2.24",
+ "Name": "Ensure that the --service-account-lookup argument is set to true",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.25",
+ "Name": "Ensure that the --service-account-key-file argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.26",
+ "Name": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.27",
+ "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.28",
+ "Name": "Ensure that the --client-ca-file argument is set appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.29",
+ "Name": "Ensure that the --etcd-cafile argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.2.30",
+ "Name": "Ensure that the --encryption-provider-config argument is set as appropriate",
+ "Severity": "LOW",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.3.1",
+ "Name": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.3",
+ "Name": "Ensure that the --use-service-account-credentials argument is set to true",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.4",
+ "Name": "Ensure that the --service-account-private-key-file argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.5",
+ "Name": "Ensure that the --root-ca-file argument is set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.3.6",
+ "Name": "Ensure that the RotateKubeletServerCertificate argument is set to true",
+ "Severity": "MEDIUM",
+ "TotalFail": 3
+ },
+ {
+ "ID": "1.3.7",
+ "Name": "Ensure that the --bind-address argument is set to 127.0.0.1",
+ "Severity": "LOW",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.4.1",
+ "Name": "Ensure that the --profiling argument is set to false",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "1.4.2",
+ "Name": "Ensure that the --bind-address argument is set to 127.0.0.1",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.1",
+ "Name": "Ensure that the --cert-file and --key-file arguments are set as appropriate",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.2",
+ "Name": "Ensure that the --client-cert-auth argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.3",
+ "Name": "Ensure that the --auto-tls argument is not set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.4",
+ "Name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.5",
+ "Name": "Ensure that the --peer-client-cert-auth argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "2.6",
+ "Name": "Ensure that the --peer-auto-tls argument is not set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "3.1.1",
+ "Name": "Client certificate authentication should not be used for users (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "3.2.1",
+ "Name": "Ensure that a minimal audit policy is created (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "3.2.2",
+ "Name": "Ensure that the audit policy covers key security concerns (Manual)",
+ "Severity": "HIGH"
+ },
+ {
+ "ID": "4.1.1",
+ "Name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "4.1.2",
+ "Name": "Ensure that the kubelet service file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.3",
+ "Name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.4",
+ "Name": "If proxy kubeconfig file exists ensure ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.5",
+ "Name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "4.1.6",
+ "Name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.7",
+ "Name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.8",
+ "Name": "Ensure that the client certificate authorities file ownership is set to root:root",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.9",
+ "Name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.1.10",
+ "Name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.1",
+ "Name": "Ensure that the --anonymous-auth argument is set to false",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.2",
+ "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.3",
+ "Name": "Ensure that the --client-ca-file argument is set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.4",
+ "Name": "Verify that the --read-only-port argument is set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.5",
+ "Name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.6",
+ "Name": "Ensure that the --protect-kernel-defaults argument is set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.7",
+ "Name": "Ensure that the --make-iptables-util-chains argument is set to true",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.8",
+ "Name": "Ensure that the --hostname-override argument is not set",
+ "Severity": "HIGH",
+ "TotalFail": 6
+ },
+ {
+ "ID": "4.2.9",
+ "Name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.10",
+ "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate",
+ "Severity": "CRITICAL",
+ "TotalFail": 1
+ },
+ {
+ "ID": "4.2.11",
+ "Name": "Ensure that the --rotate-certificates argument is not set to false",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.12",
+ "Name": "Verify that the RotateKubeletServerCertificate argument is set to true",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "4.2.13",
+ "Name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers",
+ "Severity": "CRITICAL",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.1.1",
+ "Name": "Ensure that the cluster-admin role is only used where required",
+ "Severity": "HIGH",
+ "TotalFail": 2
+ },
+ {
+ "ID": "5.1.2",
+ "Name": "Minimize access to secrets",
+ "Severity": "HIGH",
+ "TotalFail": 15
+ },
+ {
+ "ID": "5.1.3",
+ "Name": "Minimize wildcard use in Roles and ClusterRoles",
+ "Severity": "HIGH",
+ "TotalFail": 8
+ },
+ {
+ "ID": "5.1.6",
+ "Name": "Ensure that Service Account Tokens are only mounted where necessary",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.1.8",
+ "Name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.2",
+ "Name": "Minimize the admission of privileged containers",
+ "Severity": "HIGH",
+ "TotalFail": 8
+ },
+ {
+ "ID": "5.2.3",
+ "Name": "Minimize the admission of containers wishing to share the host process ID namespace",
+ "Severity": "HIGH",
+ "TotalFail": 3
+ },
+ {
+ "ID": "5.2.4",
+ "Name": "Minimize the admission of containers wishing to share the host IPC namespace",
+ "Severity": "HIGH",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.5",
+ "Name": "Minimize the admission of containers wishing to share the host network namespace",
+ "Severity": "HIGH",
+ "TotalFail": 15
+ },
+ {
+ "ID": "5.2.6",
+ "Name": "Minimize the admission of containers with allowPrivilegeEscalation",
+ "Severity": "HIGH",
+ "TotalFail": 31
+ },
+ {
+ "ID": "5.2.7",
+ "Name": "Minimize the admission of root containers",
+ "Severity": "MEDIUM",
+ "TotalFail": 35
+ },
+ {
+ "ID": "5.2.8",
+ "Name": "Minimize the admission of containers with the NET_RAW capability",
+ "Severity": "MEDIUM",
+ "TotalFail": 2
+ },
+ {
+ "ID": "5.2.9",
+ "Name": "Minimize the admission of containers with added capabilities",
+ "Severity": "LOW",
+ "TotalFail": 39
+ },
+ {
+ "ID": "5.2.10",
+ "Name": "Minimize the admission of containers with capabilities assigned",
+ "Severity": "LOW",
+ "TotalFail": 39
+ },
+ {
+ "ID": "5.2.11",
+ "Name": "Minimize the admission of containers with capabilities assigned",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.2.12",
+ "Name": "Minimize the admission of HostPath volumes",
+ "Severity": "MEDIUM",
+ "TotalFail": 18
+ },
+ {
+ "ID": "5.2.13",
+ "Name": "Minimize the admission of containers which use HostPorts",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.3.1",
+ "Name": "Ensure that the CNI in use supports Network Policies (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.3.2",
+ "Name": "Ensure that all Namespaces have Network Policies defined",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ },
+ {
+ "ID": "5.4.1",
+ "Name": "Prefer using secrets as files over secrets as environment variables (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.4.2",
+ "Name": "Consider external secret storage (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.5.1",
+ "Name": "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.7.1",
+ "Name": "Create administrative boundaries between resources using namespaces (Manual)",
+ "Severity": "MEDIUM"
+ },
+ {
+ "ID": "5.7.2",
+ "Name": "Ensure that the seccomp profile is set to docker/default in your pod definitions",
+ "Severity": "MEDIUM",
+ "TotalFail": 19
+ },
+ {
+ "ID": "5.7.3",
+ "Name": "Apply Security Context to Your Pods and Containers",
+ "Severity": "HIGH",
+ "TotalFail": 124
+ },
+ {
+ "ID": "5.7.4",
+ "Name": "The default namespace should not be used",
+ "Severity": "MEDIUM",
+ "TotalFail": 0
+ }
+ ]
+}
diff --git a/content/kubeone/v1.11/guides/encryption-providers/_index.en.md b/content/kubeone/v1.11/guides/encryption-providers/_index.en.md
index 09c42aed9..b4bdb07f7 100644
--- a/content/kubeone/v1.11/guides/encryption-providers/_index.en.md
+++ b/content/kubeone/v1.11/guides/encryption-providers/_index.en.md
@@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
features:
# enable encryption providers
encryptionProviders:
@@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
features:
# enable encryption providers
encryptionProviders:
@@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
features:
encryptionProviders:
enable: true
@@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: kms-test
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
cloudProvider:
aws: {}
features:
diff --git a/content/kubeone/v1.11/guides/mirror-registries/_index.en.md b/content/kubeone/v1.11/guides/mirror-registries/_index.en.md
index 49f7a580f..4ff32c862 100644
--- a/content/kubeone/v1.11/guides/mirror-registries/_index.en.md
+++ b/content/kubeone/v1.11/guides/mirror-registries/_index.en.md
@@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml
-docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds:
+The docker.io registry introduced fairly low rate limits for unauthenticated requests. There are a few workarounds:
-* Buy docker subscribtion.
+* Buy a Docker subscription.
How to use docker.io credentials is covered in the [section above][using-credentials].
-* Setup own pull-through caching proxy.
-* Use public pull-through caching proxy.
+* Set up your own pull-through caching proxy.
+* Use a public pull-through caching proxy.
diff --git a/content/kubeone/v1.11/guides/registry-configuration/_index.en.md b/content/kubeone/v1.11/guides/registry-configuration/_index.en.md
index 5ef593084..5e74b245e 100644
--- a/content/kubeone/v1.11/guides/registry-configuration/_index.en.md
+++ b/content/kubeone/v1.11/guides/registry-configuration/_index.en.md
@@ -37,36 +37,58 @@ This guide assumes that:
If you don't have an image registry, you can check out the
[Docker Registry][docker-reg-guide] as a possible solution.
-## Preloading Images
+## Mirroring Images with `kubeone mirror-images`
-Another prerequisites for this guide to work is that your image registry has
-all images needed for your cluster to work preloaded.
+KubeOne provides a built-in command `kubeone mirror-images` to simplify mirroring all required images (Kubernetes core components, CNI plugins, etc.) to your private registry. This command replaces the older `image-loader.sh` script and supports advanced filtering and multi-version mirroring.
-To make this task easier, we provide the image loader script that:
+### Prerequisites
-* pulls all images used by components deployed by KubeOne (CNI,
- metrics-server...) and Kubeadm (Kubernetes core components and CoreDNS)
-* re-tag those images so the image registry (e.g. `docker.io`) is replaced
- with the image registry provided by the user
-* push re-tagged images to your (mirror) image registry
+1. **Registry Setup**: Ensure your registry is reachable from all cluster nodes and serves TLS; containerd expects HTTPS unless the registry is explicitly configured as insecure.
+2. **Authentication**: The registry must allow unauthenticated access (support for credentials is planned for future releases).
+3. **KubeOne CLI**: Use KubeOne v1.5.0 or newer.
-The image loader script (`image-loader.sh`) comes in the KubeOne release
-archive, under the `hack` directory. It can also be found on [GitHub in the
-`hack` directory][img-loader]. If you're downloading the script from GitHub,
-it's recommended to switch to the appropriate tag depending on which KubeOne
-version you're using.
+### Usage
-Once you have downloaded the script, you can run it in the following way.
-Make sure to replace `KUBERNETES_VERSION` with the Kubernetes version you plan
-to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with
-the address to your image registry.
+The `kubeone mirror-images` command pulls, re-tags, and pushes images to your registry. Optional flags are shown in brackets; the target registry is the final positional argument:
+```bash
+kubeone mirror-images \
+ [--filter base,optional,control-plane] \
+ [--kubernetes-versions v1.33.5,v1.32.9] \
+  [--insecure] \
+  TARGET_REGISTRY
```
-KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
+
+#### Key Flags
+- `--filter`: Select image groups (comma-separated):
+ - `base`: Core images (OSM, DNS Cache, Calico, Machine-Controller).
+ - `optional`: Add-ons like CCMs and CSI Drivers.
+ - `control-plane`: Only Kubernetes core components (kube-apiserver, etcd, etc.).
+- `--kubernetes-versions`: Specify versions (comma-separated). If omitted, **all KubeOne-supported versions are mirrored**.
+- `--insecure`: Allow pushing to insecure registries, such as ones served over plain HTTP (useful for local test setups).
+
+### Examples
+
+#### 1. Mirror All Base Images for Specific Versions
+```bash
+kubeone mirror-images \
+ --filter base \
+ --kubernetes-versions v1.33.5,v1.32.9 \
+ registry.example.com:5000
+```
+
+#### 2. Mirror Only Control-Plane Images for All Supported Versions
+```bash
+kubeone mirror-images \
+ --filter control-plane \
+ registry.example.com:5000
```
-The preloading process can take a several minutes, depending on your
-connection speed.
+### Benefits of `kubeone mirror-images`
+- **Simpler Workflow**: No need to manually download or manage scripts.
+- **Multi-Version Support**: Mirror images for multiple Kubernetes versions in one command.
+- **Granular Control**: Use filters to mirror only the images you need.
+- **Automated Retagging**: Handles registry prefixes (e.g., `docker.io` → `registry.example.com`).
## Overriding Image Registries
@@ -77,7 +99,7 @@ stanza to your KubeOne configuration file, such as:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.33.5
cloudProvider:
aws: {}
registryConfiguration:
diff --git a/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md
index 6ed6e8c72..9d92bca63 100644
--- a/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md
+++ b/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md
@@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: bm-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
cloudProvider:
none: {}
@@ -298,11 +298,11 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
- + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4
- + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.33.5
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.33.5
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.33.5
+ + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.33.5
+ + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.33.5
Do you want to proceed (yes/no):
```
@@ -356,11 +356,11 @@ You should see output such as the following one.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
-ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.33.5
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.33.5
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.33.5
+ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.33.5
+ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.33.5
```
## Conclusion
diff --git a/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md
index f23d89388..402b2b8a9 100644
--- a/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md
+++ b/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md
@@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
cloudProvider:
hetzner: {}
@@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
cloudProvider:
hetzner: {}
@@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.33.5'
cloudProvider:
hetzner: {}
diff --git a/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md
index 2361fe2f1..6bad11cad 100644
--- a/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md
+++ b/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md
@@ -585,7 +585,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
aws: {}
@@ -613,7 +613,7 @@ with your cluster name in the cloud-config example below.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
azure: {}
external: true
@@ -648,7 +648,7 @@ and fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
digitalocean: {}
external: true
@@ -666,7 +666,7 @@ configs.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
gce: {}
external: true
@@ -697,7 +697,7 @@ The Hetzner CCM fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
hetzner: {}
external: true
@@ -715,7 +715,7 @@ replace the placeholder values.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
nutanix: {}
addons:
@@ -745,7 +745,7 @@ cloud-config section.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
openstack: {}
external: true
@@ -767,7 +767,7 @@ cloudProvider:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
openstack: {}
external: true
@@ -791,7 +791,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.33.5'
cloudProvider:
vmwareCloudDirector: {}
@@ -810,7 +810,7 @@ automatically by KubeOne.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.33.2'
+ kubernetes: '1.34.1'
cloudProvider:
vsphere: {}
external: true
@@ -897,9 +897,9 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.33.5
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.33.5
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.33.5
+ ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists
@@ -977,12 +977,12 @@ cluster.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
+ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.33.5
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.33.5
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.33.5
+ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.33.5
+ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.33.5
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.33.5
```
## Conclusion
diff --git a/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md
index abeacc163..d165b23fe 100644
--- a/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md
+++ b/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md
@@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
addons:
enable: true
operatingSystemManager:
@@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: "1.29.4"
+ kubernetes: "1.31.13"
cloudProvider:
aws: {}
addons:
diff --git a/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md b/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md
index cdaf9d331..3e4136212 100644
--- a/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md
+++ b/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md
@@ -13,7 +13,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
cloudProvider:
aws: {}
diff --git a/content/kubeone/v1.9/guides/addons/_index.en.md b/content/kubeone/v1.9/guides/addons/_index.en.md
index 4f439c6a7..d0fcf2e7f 100644
--- a/content/kubeone/v1.9/guides/addons/_index.en.md
+++ b/content/kubeone/v1.9/guides/addons/_index.en.md
@@ -64,7 +64,7 @@ the `addons` config:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
cloudProvider:
aws: {}
# Addons are Kubernetes manifests to be deployed after provisioning the cluster
@@ -113,7 +113,7 @@ Example:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
addons:
enable: true
@@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
addons:
enable: true
@@ -180,7 +180,7 @@ you can use it to override globally defined parameters.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
addons:
enable: true
diff --git a/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md
index cc76f595c..f778ead63 100644
--- a/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md
+++ b/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md
@@ -33,7 +33,7 @@ kubeone.yaml
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.31.13' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4' ## kubernetes version
+ kubernetes: '1.31.13' ## kubernetes version
cloudProvider: ## This field is sourced automatically if terraform is used for the cluster
aws: {}
addons:
@@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments:
```bash
$ kubectl get machinedeployments -n kube-system
NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE
-kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h
-kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h
+kb-cluster-eu-west-3a 1 1 aws ubuntu 1.31.13 10h
+kb-cluster-eu-west-3b 1 1 aws ubuntu 1.31.13 10h
+kb-cluster-eu-west-3c 1 1 aws ubuntu 1.31.13 10h
```
### Step 2: Annotate Machinedeployments
@@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne
[step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}}
[embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}}
[ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md
-[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
\ No newline at end of file
+[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler
diff --git a/content/kubeone/v1.9/guides/encryption-providers/_index.en.md b/content/kubeone/v1.9/guides/encryption-providers/_index.en.md
index 09c42aed9..2c47248cc 100644
--- a/content/kubeone/v1.9/guides/encryption-providers/_index.en.md
+++ b/content/kubeone/v1.9/guides/encryption-providers/_index.en.md
@@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
features:
# enable encryption providers
encryptionProviders:
@@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
features:
# enable encryption providers
encryptionProviders:
@@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: k1-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
features:
encryptionProviders:
enable: true
@@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: kms-test
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
aws: {}
features:
diff --git a/content/kubeone/v1.9/guides/mirror-registries/_index.en.md b/content/kubeone/v1.9/guides/mirror-registries/_index.en.md
index 49f7a580f..4ff32c862 100644
--- a/content/kubeone/v1.9/guides/mirror-registries/_index.en.md
+++ b/content/kubeone/v1.9/guides/mirror-registries/_index.en.md
@@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml
docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds:
-* Buy docker subscribtion.
+* Buy docker subscription.
How to use docker.io credentials is covered in the [section above][using-credentials].
* Setup own pull-through caching proxy.
* Use public pull-through caching proxy.
diff --git a/content/kubeone/v1.9/guides/registry-configuration/_index.en.md b/content/kubeone/v1.9/guides/registry-configuration/_index.en.md
index b7b39eed4..4e534997e 100644
--- a/content/kubeone/v1.9/guides/registry-configuration/_index.en.md
+++ b/content/kubeone/v1.9/guides/registry-configuration/_index.en.md
@@ -62,7 +62,7 @@ to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with
the address to your image registry.
```
-KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
+KUBERNETES_VERSION=1.31.13 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
```
The preloading process can take a several minutes, depending on your
@@ -77,7 +77,7 @@ stanza to your KubeOne configuration file, such as:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: 1.29.4
+ kubernetes: 1.31.13
cloudProvider:
aws: {}
registryConfiguration:
diff --git a/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md
index 6ffdf326a..47f42cb76 100644
--- a/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md
+++ b/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md
@@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
name: bm-cluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
none: {}
@@ -301,11 +301,11 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
- + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4
- + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.31.13
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.31.13
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.31.13
+ + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.31.13
+ + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.31.13
Do you want to proceed (yes/no):
```
@@ -359,11 +359,11 @@ You should see output such as the following one.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
-ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.31.13
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.31.13
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.31.13
+ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.31.13
+ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.31.13
```
## Conclusion
diff --git a/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md
index f23d89388..f59694024 100644
--- a/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md
+++ b/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md
@@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
hetzner: {}
@@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
hetzner: {}
@@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
hetzner: {}
diff --git a/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md
index 6cdb8a3ed..711ebc4ae 100644
--- a/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md
+++ b/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md
@@ -615,7 +615,7 @@ supported provider.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
aws: {}
external: true
@@ -642,7 +642,7 @@ with your cluster name in the cloud-config example below.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
azure: {}
external: true
@@ -677,7 +677,7 @@ and fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
digitalocean: {}
external: true
@@ -695,7 +695,7 @@ configs.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
gce: {}
external: true
@@ -726,7 +726,7 @@ The Hetzner CCM fetches information about nodes from the API.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
hetzner: {}
external: true
@@ -744,7 +744,7 @@ replace the placeholder values.
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
nutanix: {}
addons:
@@ -774,7 +774,7 @@ cloud-config section.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
openstack: {}
external: true
@@ -796,7 +796,7 @@ cloudProvider:
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
openstack: {}
external: true
@@ -824,7 +824,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
equinixmetal: {}
@@ -845,7 +845,7 @@ apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
vmwareCloudDirector: {}
@@ -864,7 +864,7 @@ automatically by KubeOne.**
apiVersion: kubeone.k8c.io/v1beta2
kind: KubeOneCluster
versions:
- kubernetes: '1.29.4'
+ kubernetes: '1.31.13'
cloudProvider:
vsphere: {}
external: true
@@ -954,9 +954,9 @@ INFO[11:37:28 CEST] Determine operating system…
INFO[11:37:30 CEST] Running host probes…
The following actions will be taken:
Run with --verbose flag for more information.
- + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4
- + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4
- + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4
+ + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.31.13
+ + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.31.13
+ + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.31.13
+ ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists
+ ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists
@@ -1034,12 +1034,12 @@ cluster.
```
NAME STATUS ROLES AGE VERSION
-ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4
-ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4
+ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.31.13
+ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.31.13
+ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.31.13
+ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.31.13
+ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.31.13
+ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.31.13
```
## Conclusion
diff --git a/content/kubermatic-virtualization/_index.md b/content/kubermatic-virtualization/_index.md
new file mode 100644
index 000000000..b9b17e94d
--- /dev/null
+++ b/content/kubermatic-virtualization/_index.md
@@ -0,0 +1,7 @@
++++
+title = "Kubermatic Virtualization Docs"
+description = "Seamlessly modernize your infrastructure by building your private cloud entirely with Kubernetes"
+sitemapexclude = true
++++
+
+Seamlessly modernize your infrastructure by building your private cloud entirely with Kubernetes.
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/_index.en.md b/content/kubermatic-virtualization/main/_index.en.md
new file mode 100644
index 000000000..81ee5fd83
--- /dev/null
+++ b/content/kubermatic-virtualization/main/_index.en.md
@@ -0,0 +1,32 @@
++++
+title = ""
+date = 2025-07-18T16:06:34+02:00
++++
+
+## What is Kubermatic Virtualization (Kube-V)?
+Kubermatic Virtualization (Kube-V) provides a unified platform that enables organizations to seamlessly orchestrate and manage both traditional virtual machines (VMs) and modern containerized applications.
+
+It extends the powerful automation and operational benefits of Kubernetes to your VM-based workloads, allowing for a more consistent and efficient approach to infrastructure management.
+
+Kubermatic Virtualization unifies VM and container orchestration by integrating virtual machines (VMs) directly into Kubernetes as native, first-class objects. You manage, scale, and deploy VMs using the same familiar Kubernetes tools, APIs, and workflows you already use for your containerized applications.
+
+## Features
+Kubermatic Virtualization offers a comprehensive set of features designed to modernize infrastructure and streamline operations by converging virtual machine and container management.
+
+### Streamlined Transition and Unified Control
+
+* Effortless Migration: Tools are provided to simplify the migration of existing VMs from diverse environments to the unified platform, making infrastructure modernization more accessible.
+* Centralized Operations: Gain single-pane-of-glass management for the entire lifecycle of both VMs and containers. This includes everything from creation, networking, and storage to scaling and monitoring, all accessible from a centralized interface or command-line tools.
+
+### Infrastructure Modernization and Efficiency
+
+* Gradual Modernization Path: Integrate VMs into a cloud-native environment, offering a practical pathway to modernize legacy applications without the immediate need for extensive refactoring into containers. You can run new containerized applications alongside existing virtualized ones.
+* Optimized Resource Use: By running VMs and containers on the same underlying physical infrastructure, organizations can achieve better hardware resource utilization and significantly reduce operational overhead.
+
+### Enhanced Development and Reliability
+
+* Improved Developer Experience: Developers can leverage familiar, native Kubernetes tools and workflows for managing both VMs and containers, which minimizes learning curves and speeds up development cycles.
+* Automated Workflows (CI/CD): Integrate VMs seamlessly into Kubernetes-native CI/CD pipelines, enabling automated testing and deployment processes.
+* Built-in Resilience: Benefit from the platform's inherent high availability and fault tolerance features, including automated restarts and live migration of VMs between nodes, ensuring continuous application uptime.
+* Integrated Networking and Storage: VMs natively use the platform's software-defined networking (SDN) and storage capabilities, providing consistent network policies, enhanced security, and streamlined storage management.
+
+See [kubermatic.com](https://www.kubermatic.com/).
diff --git a/content/kubermatic-virtualization/main/architecture/_index.en.md b/content/kubermatic-virtualization/main/architecture/_index.en.md
new file mode 100644
index 000000000..7f70fdd10
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/_index.en.md
@@ -0,0 +1,38 @@
++++
+title = "Architecture"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
+
++++
+
+## Architecture Overview
+Kubermatic-Virtualization (Kube-V) is an advanced platform engineered to construct private cloud infrastructures founded
+entirely on Kubernetes. Its core design principle is the seamless integration of Kubernetes-native workloads (containers)
+and traditional virtualized workloads (Virtual Machines - VMs) under a unified management umbrella. Kube-V achieves this
+by building upon Kubernetes as its foundational layer and incorporating KubeVirt to orchestrate and manage VMs alongside
+containerized applications.
+
+Here's a breakdown of the architecture and how these components interact:
+### Host Nodes
+Host nodes can run any popular Linux-based operating system, such as Ubuntu or Rocky Linux, where nested virtualization
+is enabled to run KVM-based virtual machines.
+
+### Kubernetes
+The foundation, providing the orchestration, scheduling, and management plane for all workloads. It also introduces the
+declarative API and custom resources (CRDs) that the rest of the stack builds on.
+
+### KubeVirt
+An extension to Kubernetes that enables running and managing VMs as native Kubernetes objects. It utilizes Kubernetes pods
+as the execution unit: each running VM is encapsulated within a standard Kubernetes pod, specifically a virt-launcher pod.
+
+### OVN (Open Virtual Network)
+The network fabric, providing advanced SDN (Software-Defined Networking) capabilities for VMs and Pods, replacing or
+augmenting the default CNI (Container Network Interface). The network fabric introduces VPCs (Virtual Private Clouds) as
+isolated operational environments, structured through subnets and network policies.
+
+### CSI Drivers
+A standardized interface that allows Kubernetes to connect to various storage systems, providing persistent storage for
+VMs and containers. Kube-V is agnostic about the underlying storage infrastructure: any CSI driver can be used, enabling
+dynamic provisioning, attachment, and management of persistent volumes for VMs and Pods.
+
+
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md
new file mode 100644
index 000000000..451b0ea62
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Compatibility"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
++++
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md
new file mode 100644
index 000000000..3fc97b05b
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md
@@ -0,0 +1,23 @@
++++
+title = "Kubermatic Virtualization Components"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
++++
+
+The following list is only applicable to the Kube-V version that is currently available. Kubermatic places a strong emphasis
+on the security and reliability of the provided software and therefore releases regular updates that also include component updates.
+
+
+| Kube-V Component | Version |
+|:---------------------------------:|:-------:|
+| Kubernetes | v1.33.0 |
+| KubeVirt | v1.5.2 |
+| Containerized Data Importer (CDI) | v1.62.0 |
+| KubeOVN | v1.13.2 |
+| KubeOne | v1.11.1 |
+| Kyverno | v1.14.4 |
+| Cert Manager | v1.18.2 |
+| MetalLB | v0.15.2 |
+| Multus CNI | v4.2.2 |
+| Longhorn | v1.9.1 |
+
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md
new file mode 100644
index 000000000..d4ea5d2a6
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Operating Systems"
+date = 2025-07-18T16:06:34+02:00
+weight = 3
++++
+
+## Supported Operating Systems
+
+The following operating systems are supported:
+
+* Ubuntu 20.04 (Focal)
+* Ubuntu 22.04 (Jammy Jellyfish)
+* Ubuntu 24.04 (Noble Numbat)
+* Rocky Linux 8
+* RHEL 8.0, 8.1, 8.2, 8.3, 8.4
+* Flatcar
+
+{{% notice warning %}}
+The minimum kernel version for Kubernetes 1.32 clusters is 4.19. Some operating system versions, such as RHEL 8,
+do not meet this requirement and therefore do not support Kubernetes 1.32 or newer.
+{{% /notice %}}
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md
new file mode 100644
index 000000000..1ea21ecb8
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md
@@ -0,0 +1,7 @@
++++
+title = "Concepts"
+date = 2025-07-18T16:06:34+02:00
+weight = 1
++++
+
+Get to know the concepts behind Kubermatic Virtualization (Kube-V).
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md
new file mode 100644
index 000000000..989f60e28
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Compute"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md
new file mode 100644
index 000000000..a912c3f48
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md
@@ -0,0 +1,241 @@
++++
+title = "VirtualMachines Resources"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+
+## VirtualMachines
+As the name suggests, a VirtualMachine (VM) represents a long-running, stateful virtual machine. It's similar to a
+Kubernetes Deployment for Pods, meaning you define the desired state (e.g., "this VM should be running," "it should
+have 2 CPUs and 4GB RAM") and Kubermatic-Virtualization ensures that state is maintained. It allows you to start, stop, and configure VMs.
+
+Here is an example of how users can create a VM:
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ name: my-vm-with-http-data-volume
+spec:
+ runStrategy: RerunOnFailure
+ template:
+ metadata:
+ labels:
+ app: my-vm-with-http-data-volume
+ annotations:
+ kubevirt.io/allow-pod-bridge-network-live-migration: "true"
+ spec:
+ domain:
+ cpu:
+ cores: 1
+ memory:
+ guest: 2Gi
+ devices:
+ disks:
+ - name: rootdisk
+ disk:
+ bus: virtio
+ interfaces:
+ - name: default
+ masquerade: {}
+ volumes:
+ - name: rootdisk
+ dataVolume:
+ name: my-http-data-volume
+ networks:
+ - name: default
+ pod: {}
+ dataVolumeTemplates:
+ - metadata:
+ name: my-http-data-volume
+ spec:
+ sourceRef:
+ kind: DataSource
+ name: my-http-datasource
+ apiGroup: cdi.kubevirt.io
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi # <--- IMPORTANT: Adjust to your desired disk size
+ # storageClassName: my-storage-class # <--- OPTIONAL: Uncomment and replace with your StorageClass name if needed
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataSource
+metadata:
+ name: my-http-datasource
+spec:
+ source:
+ http:
+ url: "http://example.com/path/to/your/image.qcow2" # <--- IMPORTANT: Replace with the actual URL of your disk image
+ # certConfig: # <--- OPTIONAL: Uncomment and configure if your HTTP server uses a custom CA
+ # caBundle: "base64encodedCABundle"
+ # secretRef:
+ # name: "my-http-cert-secret"
+ # cert:
+ # secretRef:
+ # name: "my-http-cert-secret"
+ # key:
+ # secretRef:
+ # name: "my-http-key-secret"
+```
+### 1. `VirtualMachine` (apiVersion: `kubevirt.io/v1`)
+
+This is the main KubeVirt resource that defines your virtual machine.
+
+- **`spec.template.spec.domain.devices.disks`**:
+ Defines the disk attached to the VM. We reference `rootdisk` here, which is backed by our DataVolume.
+
+- **`spec.template.spec.volumes`**:
+ Links the `rootdisk` to a `dataVolume` named `my-http-data-volume`.
+
+- **`spec.dataVolumeTemplates`**:
+ This is the crucial part. It defines a template for a DataVolume that will be created automatically when the VM is started.
+
+---
+
+### 2. `DataVolumeTemplate` (within `VirtualMachine.spec.dataVolumeTemplates`)
+
+- **`metadata.name`**:
+ The name of the DataVolume that will be created (referenced in `spec.template.spec.volumes`).
+
+- **`spec.sourceRef`**:
+ Points to a `DataSource` resource that defines the actual source of the disk image. A `DataSource` is used here to encapsulate HTTP details.
+
+- **`spec.pvc`**:
+ Defines the characteristics of the PersistentVolumeClaim (PVC) that will be created for this DataVolume:
+
+ - **`accessModes`**: Typically `ReadWriteOnce` for VM disks.
+ - **`resources.requests.storage`**:
+ ⚠️ **Crucially, set this to the desired size of your VM's disk.** It should be at least as large as your source image.
+ - **`storageClassName`**: *(Optional)* Specify a StorageClass if needed; otherwise, the default will be used.
+
+---
+
+### 3. `DataSource` (apiVersion: `cdi.kubevirt.io/v1beta1`)
+
+This is a CDI (Containerized Data Importer) resource that encapsulates the details of where your disk image comes from.
+
+- **`metadata.name`**:
+ The name of the `DataSource` (referenced in `dataVolumeTemplate.spec.sourceRef`).
+
+- **`spec.source.http.url`**:
+ 🔗 This is where you put the direct URL to your disk image (e.g., a `.qcow2`, `.raw`, etc. file).
+
+- **`spec.source.http.certConfig`**: *(Optional)*
+ If your HTTP server uses a custom CA or requires client certificates, configure them here.
+
+---
+
+### VirtualMachinePools
+KubeVirt's VirtualMachinePool is a powerful resource that allows you to manage a group of identical Virtual Machines (VMs)
+as a single unit, similar to how a Kubernetes Deployment manages a set of Pods. It's designed for scenarios where you need
+multiple, consistent, and often ephemeral VMs that can scale up or down based on demand.
+
+Here's an example manifest, followed by a breakdown of the key aspects of KubeVirt VirtualMachinePools:
+
+```yaml
+apiVersion: kubevirt.io/v1alpha1
+kind: VirtualMachinePool
+metadata:
+ name: my-vm-http-pool
+spec:
+ replicas: 3 # <--- IMPORTANT: Number of VMs in the pool
+ selector:
+ matchLabels:
+ app: my-vm-http-pool-member
+ virtualMachineTemplate:
+ metadata:
+ labels:
+ app: my-vm-http-pool-member
+ annotations:
+ kubevirt.io/allow-pod-bridge-network-live-migration: "true"
+ spec:
+ runStrategy: RerunOnFailure # Or Always, Halted, Manual
+ domain:
+ cpu:
+ cores: 1
+ memory:
+ guest: 2Gi
+ devices:
+ disks:
+ - name: rootdisk
+ disk:
+ bus: virtio
+ interfaces:
+ - name: default
+ masquerade: {}
+ volumes:
+ - name: rootdisk
+ dataVolume:
+ name: my-pool-vm-data-volume # This name will have a unique suffix appended by KubeVirt
+ networks:
+ - name: default
+ pod: {}
+ dataVolumeTemplates:
+ - metadata:
+ name: my-pool-vm-data-volume # This name will be the base for the unique DataVolume names
+ spec:
+ sourceRef:
+ kind: DataSource
+ name: my-http-datasource
+ apiGroup: cdi.kubevirt.io
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi # <--- IMPORTANT: Adjust to your desired disk size for each VM
+ # storageClassName: my-storage-class # <--- OPTIONAL: Uncomment and replace with your StorageClass name if needed
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataSource
+metadata:
+ name: my-http-datasource
+spec:
+ source:
+ http:
+ url: "http://example.com/path/to/your/image.qcow2" # <--- IMPORTANT: Replace with the actual URL of your disk image
+ # certConfig: # <--- OPTIONAL: Uncomment and configure if your HTTP server uses a custom CA
+ # caBundle: "base64encodedCABundle"
+ # secretRef:
+ # name: "my-http-cert-secret"
+ # cert:
+ # secretRef:
+ # name: "my-http-cert-secret"
+ # key:
+ # secretRef:
+ # name: "my-http-key-secret"
+
+```
+### VirtualMachinePool (apiVersion: `kubevirt.io/v1alpha1`)
+
+1. **API Version**
+ - Use `apiVersion: kubevirt.io/v1alpha1` for `VirtualMachinePool`.
+ - This is a slightly different API version than `VirtualMachine`.
+
+2. **`spec.replicas`**
+ - Specifies how many `VirtualMachine` instances the pool should maintain.
+
+3. **`spec.selector`**
+ - Essential for the `VirtualMachinePool` controller to manage its VMs.
+ - `matchLabels` must correspond to the `metadata.labels` within `virtualMachineTemplate`.
+
+4. **`spec.virtualMachineTemplate`**
+ - This section contains the full `VirtualMachine` spec that serves as the template for each VM in the pool.
+
+5. **`dataVolumeTemplates` Naming in a Pool**
+ - `VirtualMachinePool` creates `DataVolumes` from `dataVolumeTemplates`.
+ - A unique suffix is appended to the `metadata.name` of each `DataVolume` (e.g., `my-pool-vm-data-volume-abcde`), ensuring each VM gets a distinct PVC.
+
+---
+
+### How It Works (Similar to Deployment for Pods)
+
+1. Apply the manifest above, which creates both the `my-http-datasource` `DataSource` and the `VirtualMachinePool`.
+2. The `VirtualMachinePool` controller creates the defined number of `VirtualMachine` replicas.
+3. Each `VirtualMachine` triggers the creation of a `DataVolume` using the specified `dataVolumeTemplate` and `my-http-datasource`.
+4. CDI (Containerized Data Importer) downloads the image into a new unique `PersistentVolumeClaim` (PVC) for each VM.
+5. Each `VirtualMachine` then starts using its dedicated PVC.
+
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md
new file mode 100644
index 000000000..662b0d8ee
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Networking"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md
new file mode 100644
index 000000000..99285fd7d
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md
@@ -0,0 +1,159 @@
++++
+title = "VMs Network Assignment"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+
+Assigning a Virtual Machine (VM) to a VPC and Subnet typically involves integrating the VM's network interface using
+Multus CNI with a Kube-OVN network attachment definition (NAD). The key steps are:
+
+### 1. Define or use an existing VPC:
+
+If you require isolated network spaces for different tenants or environments, you'll first define a Vpc resource.
+This acts as a logical router for your Subnets.
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Vpc
+metadata:
+ name: my-vpc # Name of your VPC
+spec:
+ # Optional: You can specify which namespaces are allowed to use this VPC.
+ # If left empty, all namespaces can use it.
+ # namespaces:
+ # - my-namespace
+ # - my-namespace-1
+```
+---
+
+### 2. Define or use an existing Subnet:
+
+Next, you create a Subnet resource, associating it with your Vpc (or the default ovn-cluster VPC if you're not using a
+custom VPC). You also define the CIDR range and, crucially, the Namespaces that will use this Subnet.
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+ name: my-vm-subnet # Name of your Subnet
+spec:
+ # Associate this subnet with your VPC. If omitted, it defaults to 'ovn-cluster'.
+ vpc: my-vpc
+ cidrBlock: 10.10.0.0/24 # The IP range for this subnet
+ gateway: 10.10.0.1 # The gateway IP for this subnet (Kube-OVN often sets this automatically)
+ namespaces:
+ - vm-namespace # The Namespace where your VMs will reside
+```
+
+---
+### 3. Create a Kubernetes Namespace (if it doesn't exist):
+
+Ensure the Namespace you defined in your Subnet exists.
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: vm-namespace
+```
+
+---
+
+### 4. Define a NetworkAttachmentDefinition:
+
+While Kube-OVN can work directly by binding a Namespace to a Subnet, using a NetworkAttachmentDefinition (NAD) with
+Multus provides more explicit control, especially if your VM needs multiple network interfaces or a specific CNI configuration.
+
+```yaml
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ name: vm-network # Name of the NAD
+ namespace: vm-namespace # Must be in the same namespace as the VMs using it
+spec:
+  config: |
+    {
+      "cniVersion": "0.3.1",
+      "name": "vm-network",
+      "type": "kube-ovn",
+      "server_socket": "/run/openvswitch/kube-ovn-daemon.sock",
+      "netAttachDefName": "vm-namespace/vm-network"
+    }
+```
+{{% notice note %}}
+For a VM to automatically pick up the correct Subnet via the Namespace binding, you often don't strictly
+need a `NetworkAttachmentDefinition` for the primary interface if the Namespace is directly linked to the Subnet. However,
+it's crucial for secondary interfaces or explicit network definitions.
+{{% /notice %}}
+
+---
+
+### 5. Assign the KubeVirt Virtual Machine to the Subnet/VPC:
+
+When defining your `VirtualMachine` (or `VirtualMachinePool`), you ensure it's created in the `vm-namespace` that is
+bound to your `my-vm-subnet`.
+
+#### Option 1: Relying on Namespace-Subnet Binding (Simplest)
+
+If your `vm-namespace` is explicitly listed in the `spec.namespaces` of `my-vm-subnet`, any `VM` (or `Pod`) created in
+`vm-namespace` will automatically get an IP from `my-vm-subnet`.
+
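+As an illustration, here is a minimal sketch under the assumptions above (namespace `vm-namespace` bound to `my-vm-subnet`, and the demo container disk image used elsewhere in this guide). No network annotations are needed; Kube-OVN allocates the VM's IP from `my-vm-subnet` automatically:
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: my-kubeovn-vm-default
+  namespace: vm-namespace  # Namespace is bound to my-vm-subnet, so no extra annotations are required
+spec:
+  runStrategy: Always
+  template:
+    spec:
+      domain:
+        devices:
+          disks:
+            - name: containerdisk
+              disk:
+                bus: virtio
+          interfaces:
+            - name: default
+              masquerade: {}  # Default pod network; the IP comes from my-vm-subnet
+        resources:
+          requests:
+            memory: 2Gi
+      networks:
+        - name: default
+          pod: {}
+      volumes:
+        - name: containerdisk
+          containerDisk:
+            image: kubevirt/fedora-cloud-container-disk-demo  # demo image, replace as needed
+```
+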
+#### Option 2: Explicitly Specifying the Subnet/NAD via Annotations (For Multiple NICs or Specificity)
+
+If you're using a `NetworkAttachmentDefinition` (`NAD`) or need to explicitly control which subnet is used, especially
+for secondary interfaces, you'd use Multus annotations on your `VM` definition.
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ name: my-kubeovn-vm-multus
+ namespace: vm-namespace
+ annotations:
+ # Reference the NetworkAttachmentDefinition for the primary interface
+    # The format is <namespace>/<network-name>; a NAD in the same namespace can be referenced by name alone
+ k8s.v1.cni.cncf.io/networks: vm-network
+ # Optional: For static IP assignment from the subnet
+ # ovn.kubernetes.io/ip_address: 10.10.0.10
+spec:
+ runStrategy: Always
+ template:
+ spec:
+ domain:
+ devices:
+ disks:
+ - name: containerdisk
+ disk:
+ bus: virtio
+ - name: cloudinitdisk
+ disk:
+ bus: virtio
+ interfaces:
+ - name: primary-nic
+ # This interface will use the network defined by the NAD
+ bridge: {} # Or masquerade: {}
+ # Example for a secondary NIC on a different Kube-OVN Subnet/NAD
+ # - name: secondary-nic
+ # bridge: {}
+ resources:
+ requests:
+ memory: 2Gi
+ volumes:
+ - name: containerdisk
+ containerDisk:
+ image: kubevirt/fedora-cloud-container-disk-demo
+ - name: cloudinitdisk
+ cloudInitNoCloud:
+ userData: |
+ #cloud-config
+
+```
+Important Kube-OVN Annotations for VMs/Pods:
+
+- `ovn.kubernetes.io/logical_switch`: Explicitly assigns the workload to a specific Kube-OVN logical switch (which
+corresponds to a Subnet). This overrides the Namespace's default subnet.
+
+- `ovn.kubernetes.io/ip_address`: Assigns a specific static IP address from the subnet. Make sure this IP is excluded from
+the subnet's dynamic IP range (excludeIps in the Subnet definition) to avoid conflicts.
+
+- `ovn.kubernetes.io/network_attachment`: When using Multus, this annotation on the `NetworkAttachmentDefinition`'s config
+can specify the Kube-OVN provider or other details if you have multiple Kube-OVN deployments or specific requirements.
+
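+To illustrate how these fit together, here is a minimal sketch using a plain Pod (the same annotations apply to VM templates). The names and the `10.10.0.10` address are assumptions based on the `my-vm-subnet` example above, and the address must be listed in the subnet's `excludeIps`:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: static-ip-demo
+  namespace: vm-namespace
+  annotations:
+    ovn.kubernetes.io/logical_switch: my-vm-subnet  # overrides the namespace's default subnet
+    ovn.kubernetes.io/ip_address: 10.10.0.10        # static IP; keep it out of dynamic allocation via excludeIps
+spec:
+  containers:
+    - name: demo
+      image: nginx:alpine  # placeholder workload
+```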
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md
new file mode 100644
index 000000000..9707271e9
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md
@@ -0,0 +1,107 @@
++++
+title = "VPCs and Subnets"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+Kubermatic-Virtualization uses Kube-OVN as a software-defined network (SDN), which supercharges Kubernetes networking by
+integrating it with Open Virtual Network (OVN) and Open vSwitch (OVS). These aren't new players; OVN and OVS are long-standing,
+industry-standard technologies in the Software-Defined Networking (SDN) space, predating Kubernetes itself. By leveraging
+their robust, mature capabilities, Kube-OVN significantly expands what Kubernetes can do with its network.
+
+## VPC
+A VPC (Virtual Private Cloud) in Kube-OVN represents an isolated layer-3 network domain that contains one or more subnets.
+Each VPC provides its own routing table and default gateway, allowing you to logically separate network traffic between
+tenants or workloads.
+
+Kubermatic Virtualization simplifies network setup by providing a default Virtual Private Cloud (VPC) and a default Subnet
+right out of the box. These are pre-configured to connect directly to the underlying node network, offering a seamless link
+to your existing infrastructure. This means you don't need to attach external networks to get started.
+
+This design is a huge win for new users. It allows customers to dive into Kubermatic Virtualization and quickly establish
+network connectivity between their workloads and the hypervisor without wrestling with complex network configurations,
+external appliances, or advanced networking concepts. It's all about making the initial experience as straightforward
+and efficient as possible, letting you focus on your applications rather than network plumbing.
+
+
+Here is an example of a VPC definition:
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Vpc
+metadata:
+ name: custom-vpc
+spec:
+ cidr: 10.200.0.0/16
+ enableNAT: false
+ defaultGateway: ""
+ staticRoutes:
+ - cidr: 0.0.0.0/0
+ nextHopIP: 10.200.0.1
+```
+
+| Field | Description |
+| ---------------- | --------------------------------------------------------------------------------------- |
+| `metadata.name` | Name of the VPC. Must be unique within the cluster. |
+| `spec.cidr` | The overall IP range for the VPC. Subnets under this VPC should fall within this range. |
+| `enableNAT` | Whether to enable NAT for outbound traffic. Useful for internet access. |
+| `defaultGateway` | IP address used as the default gateway for this VPC. Usually left blank for automatic. |
+| `staticRoutes` | List of manually defined routes for the VPC. |
+
+## Subnet
+
+Subnets are the fundamental building blocks for network and IP management. They serve as the primary organizational unit
+for configuring network settings and IP addresses.
+
+- Namespace-Centric: Each Kubernetes Namespace can be assigned to a specific Subnet.
+- Automatic IP Allocation: Pods deployed within a Namespace automatically receive their IP addresses from the Subnet that
+Namespace is associated with.
+- Shared Network Configuration: All Pods within a Namespace inherit the network configuration defined by their Subnet. This includes:
+ - CIDR (Classless Inter-Domain Routing): The IP address range for the Subnet.
+ - Gateway Type: How traffic leaves the Subnet.
+ - Access Control: Network policies and security rules.
+ - NAT Control: Network Address Translation settings.
+
+Here is an example of a Subnet definition:
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+ name: my-custom-subnet
+ namespace: kube-system
+spec:
+ cidrBlock: 10.16.0.0/16
+ gateway: 10.16.0.1
+ gatewayType: distributed
+ excludeIps:
+ - 10.16.0.1
+ - 10.16.0.2..10.16.0.10
+ protocol: IPv4
+ natOutgoing: true
+ private: false
+ vpc: custom-vpc
+ enableDHCP: true
+ allowSubnets: []
+ vlan: ""
+ namespaces:
+ - default
+ - dev
+ subnetType: overlay
+```
+| Field | Description |
+|----------------------|---------------------------------------------------------------------------------------|
+| `apiVersion` | Must be `kubeovn.io/v1`. |
+| `kind` | Always set to `Subnet`. |
+| `metadata.name` | Unique name for the subnet resource. |
+| `metadata.namespace` | Namespace where the subnet object resides. Usually `kube-system`. |
+| `spec.cidrBlock` | The IP range (CIDR notation) assigned to this subnet. |
+| `spec.gateway` | IP address used as the gateway for this subnet. |
+| `spec.gatewayType` | `centralized` or `distributed`. `distributed` allows egress from local node gateways. |
+| `spec.excludeIps` | IPs or IP ranges excluded from dynamic allocation. |
+| `spec.protocol` | Can be `IPv4`, `IPv6`, or `Dual`. |
+| `spec.natOutgoing` | If true, pods using this subnet will have outbound NAT enabled. |
+| `spec.private` | If true, pod traffic is restricted to this subnet only. |
+| `spec.vpc` | Is the name of the VPC that the subnet belongs to. |
+| `spec.enableDHCP` | Enables DHCP services in the subnet. |
+| `spec.allowSubnets` | List of subnets allowed to communicate with this one (used with private=true). |
+| `spec.vlan` | Optional VLAN name (empty string means no VLAN). |
+| `spec.namespaces` | Namespaces whose pods will be assigned IPs from this subnet. |
+| `spec.subnetType` | Can be `overlay`, `underlay`, `VLAN`, or `external`. |
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md
new file mode 100644
index 000000000..760102919
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md
@@ -0,0 +1,133 @@
++++
+title = "Storage"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+
+At its heart, Kubermatic Virtualization uses KubeVirt, a Kubernetes add-on. KubeVirt allows you to run virtual machines
+(VMs) right alongside your containers, and it's built to heavily use Kubernetes' existing storage model. The Container
+Storage Interface (CSI) driver is a crucial component in this setup because it allows KubeVirt to leverage the vast and
+diverse storage ecosystem of Kubernetes for its VMs.
+
+The Container Storage Interface (CSI) is a standard for exposing arbitrary block and file storage systems to containerized
+workloads on Container Orchestration Systems (COs) like Kubernetes. Before CSI, storage integrations were tightly coupled
+with Kubernetes' core code. CSI revolutionized this by providing a pluggable architecture, allowing storage vendors to
+develop drivers that can integrate with Kubernetes without modifying Kubernetes itself.
+
+# KubeVirt + CSI Drivers: How It Works
+
+KubeVirt’s integration with CSI (Container Storage Interface) drivers is fundamental to how it manages VM storage. This document explains how CSI enables dynamic volume provisioning, image importing, and advanced VM disk features in KubeVirt.
+
+---
+
+## 1. Dynamic Volume Provisioning for VM Disks
+
+### PersistentVolumeClaims (PVCs)
+KubeVirt does not directly interact with the underlying storage backend (e.g., SAN, NAS, cloud block storage). Instead, it uses Kubernetes’ PVC abstraction. When a VM is defined, KubeVirt requests a PVC.
+
+### StorageClasses
+PVCs reference a `StorageClass`, which is configured to use a specific CSI driver as its "provisioner".
+
+### Driver’s Role
+The CSI driver associated with the `StorageClass` handles the provisioning of persistent storage by interfacing with external systems (e.g., vCenter, Ceph, cloud providers).
+
+### VM Disk Attachment
+Once the PV is bound, KubeVirt uses the `virt-launcher` pod to attach the volume as a virtual disk to the VM.
+
+---
+
+## 2. Containerized Data Importer (CDI) Integration
+
+### Importing VM Images
+KubeVirt works with the CDI project to import disk images (e.g., `.qcow2`, `.raw`) from HTTP, S3, and other sources into PVCs.
+
+### CDI Uses CSI
+CDI relies on CSI drivers to provision the PVCs that will store the imported images. After import, KubeVirt consumes the PVC as a disk.
+
+### DataVolume Resource
+KubeVirt’s `DataVolume` custom resource simplifies image importing and ties CDI with PVC creation in a declarative way.
+
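+As a sketch, a standalone `DataVolume` looks like the following; the image URL is a placeholder, and the optional `storageClassName` selects a CSI-backed class:
+
+```yaml
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: my-imported-disk
+spec:
+  source:
+    http:
+      url: "http://example.com/my-vm-image.qcow2"  # placeholder image URL
+  pvc:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 20Gi
+    # storageClassName: my-fast-storage  # optional: pick a CSI-backed StorageClass
+```
+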
+---
+
+## 3. Advanced Storage Features (via CSI Capabilities)
+
+CSI drivers allow powerful features previously complex for VM setups:
+
+- **Snapshots**: If supported, KubeVirt can create `VolumeSnapshot` objects for point-in-time backups (see the sketch after this list).
+- **Cloning**: Allows fast provisioning of VM disks from existing PVCs without re-importing.
+- **Volume Expansion**: Resize VM disks dynamically with `allowVolumeExpansion`.
+- **ReadWriteMany (RWX) Mode**: Enables live migration by allowing shared access across nodes.
+- **Block vs. Filesystem Modes**: CSI supports both `Filesystem` and `Block`. Choose based on workload performance needs.
+
+---
+
+## 4. Example Scenario
+
+An admin creates a `StorageClass`:
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: my-fast-storage
+provisioner: csi.my-storage-vendor.com # This points to the specific CSI driver
+parameters:
+ type: "ssd"
+volumeBindingMode: WaitForFirstConsumer # Important for VM scheduling
+allowVolumeExpansion: true
+```
+A user defines a `VirtualMachine` with a `DataVolume`:
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ name: my-vm
+spec:
+  dataVolumeTemplates:
+    - metadata:
+        name: my-vm-disk
+      spec:
+        source:
+          http:
+            url: "http://example.com/my-vm-image.qcow2"
+        pvc:
+          storageClassName: my-fast-storage # References the StorageClass
+          accessModes:
+            - ReadWriteOnce # Or ReadWriteMany for live migration
+          resources:
+            requests:
+              storage: 20Gi
+ template:
+ spec:
+ domain:
+ devices:
+ disks:
+ - name: my-vm-disk
+ disk:
+ bus: virtio
+ # ... other VM specs
+ volumes:
+ - name: my-vm-disk
+ dataVolume:
+ name: my-vm-disk
+```
+In this flow:
+
+- KubeVirt sees the `dataVolumeTemplates` entry and requests a PVC (`my-vm-disk`) using `my-fast-storage`.
+- The `my-fast-storage` StorageClass directs the request to `csi.my-storage-vendor.com` (the CSI driver).
+- The CSI driver provisions a 20Gi volume on the backend storage.
+- CDI then imports `my-vm-image.qcow2` into the newly provisioned PVC.
+- Once the data import is complete, KubeVirt starts the VM, and the PVC is attached as the VM's disk.
+
+---
+
+## Summary
+
+KubeVirt uses CSI to:
+- Abstract storage provisioning and attachment.
+- Enable features like cloning, snapshots, and expansion.
+- Import images using CDI with CSI-provisioned PVCs.
+- Support enterprise-grade live migration and scalability.
+
diff --git a/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png b/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png
new file mode 100644
index 000000000..f20425cf9
Binary files /dev/null and b/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png differ
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md
new file mode 100644
index 000000000..76d2e4549
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md
@@ -0,0 +1,7 @@
++++
+title = "Requirements"
+date = 2025-06-28T12:07:15+02:00
+weight = 3
++++
+
+Find out about the requirements for the operation of Kubermatic Virtualization (Kube-V).
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md
new file mode 100644
index 000000000..4185212fc
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md
@@ -0,0 +1,82 @@
++++
+title = "High-Availability Deployment"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+## High-Availability Deployment
+
+The hardware foundation for Kubermatic Virtualization is multi-faceted, encompassing requirements for the Kubermatic
+Virtualization (Kube-V) management layer, the KubeVirt infrastructure nodes that host virtual machines, and the
+various services that run as part of the ecosystem.
+
+### Control Plane Nodes
+
+* Nodes: Minimum 3 control plane nodes to ensure a quorum for etcd (Kubernetes' key-value store) and prevent a single point of failure.
+These should ideally be distributed across different failure domains (e.g., availability zones, racks).
+* CPU: At least 2 vCPUs per control plane node.
+* RAM: At least 4 GB RAM per control plane node. Recommended: 8-16 GB for robust performance.
+* Storage: Fast, persistent storage for etcd (SSD-backed recommended) with sufficient capacity.
+
+### Worker Nodes
+
+* Minimum 2 worker nodes (for KubeVirt VMs): For HA, you need more than one node to run VMs. This allows for live migration
+and VM rescheduling in case of a node failure.
+* CPU: A minimum of 8 CPU cores per node is suggested for testing environments. For production deployments, 16 CPU cores
+or more per node are recommended to accommodate multiple VMs and their workloads effectively. Each worker node must have
+Intel VT-x or AMD-V hardware virtualization extensions enabled in the BIOS/UEFI.
+This is a fundamental requirement for KubeVirt to leverage KVM (Kernel-based Virtual Machine) for efficient VM execution.
+Without this, KubeVirt can fall back to software emulation, but it's significantly slower and not suitable for production HA.
+* RAM: At least 8 GB RAM per node. Recommended: 16-32 GB, depending on the number and memory requirements of your VMs.
+* Storage: SSDs or NVMe drives are highly recommended for good VM performance in addition to sufficient storage capacity
+based on the disk images of your VMs and any data they store.
+
+### Storage
+
+* CSI Driver Capabilities (Crucial for HA/Live Migration): This is perhaps the most critical component for KubeVirt HA and live migration.
+ You need a shared storage backend that supports ReadWriteMany (RWX) access mode or Block-mode (volumeMode: Block) volumes.
+* Capacity: Sufficient storage capacity based on the disk images of your VMs and any data they store.
+* Performance: SSDs or NVMe drives are highly recommended for good VM performance; for high-throughput services,
+  low-latency, high-IOPS storage (often block storage) is critical.
+* Replication and Redundancy: To achieve HA, data must be replicated across multiple nodes or availability zones.
+ If a node fails, the data should still be accessible from another.
+
+### Networking
+
+A well-planned and correctly configured network infrastructure is fundamental to the stability and performance of
+Kubermatic Virtualization. This includes considerations for IP addressing, DNS, load balancing, and inter-component communication.
+
+* High-bandwidth, low-latency connections: 1 Gbps NICs are a minimum; 10 Gbps or higher is recommended for performance-sensitive
+workloads and efficient live migration.
+* Load Balancing: External/internal load balancers for distributing traffic across control planes and worker nodes.
+* Dedicated network for live migration (recommended): While not strictly required, a dedicated Multus network for live
+migration can significantly reduce network saturation on tenant workloads during migrations.
+* Connectivity: Full and unrestricted network connectivity is paramount between all host nodes. Firewalls and security
+groups must be configured to permit all necessary Kubernetes control plane traffic, KubeVirt communication, and KubeV-specific
+inter-cluster communication.
+* DNS: DNS resolution is crucial for the Kube-V environment, enabling all nodes to find each other and external services.
+A potential conflict can arise if both the KubeVirt infrastructure and guest user clusters
+use NodeLocal DNSCache with the same default IP address, leading to DNS resolution issues for guest VMs. This can be
+mitigated by adjusting the `dnsConfig` and `dnsPolicy` of the guest VMs, as shown in the sketch below.
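+
+A minimal sketch of such an override on a KubeVirt `VirtualMachine` template, assuming the guest should use an explicit nameserver (the IP below is an example and must match your environment):
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+spec:
+  template:
+    spec:
+      dnsPolicy: "None" # do not inherit the cluster DNS defaults
+      dnsConfig:
+        nameservers:
+          - 10.96.0.10 # example resolver that does not clash with NodeLocal DNSCache
+        searches:
+          - cluster.local
+      # ... remaining VM fields omitted
+```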
+
+
+| Component | Port(s) | Protocol | Direction | Purpose |
+|:------------------:| :------------------: | :------: | :----------: | :-----------------------------------------------------: |
+| API Server | 6443 | TCP | Inbound | All API communication with the cluster |
+| etcd | 2379-2380 | TCP | Inbound | etcd database communication |
+| Kubelet | 10250 | TCP | Inbound | Kubelet API for control plane communication |
+| Kube-Scheduler | 10259 | TCP | Inbound | Kube-Scheduler component |
+| Controller-Manager | 10257 | TCP | Inbound | Kube-Controller-Manager component |
+| Kube-Proxy | 10256 | TCP | Inbound | Kube-Proxy health checks and service routing |
+| NodePort Services | 30000-32767 | TCP/UDP | Inbound | Default range for exposing services on node IPs |
+| KubeVirt API | 8443 | TCP | Internal | KubeVirt API communication |
+| Live Migration | 61000-61009 (approx) | TCP | Node-to-Node | For migrating VM state between nodes |
+| OVN NB DB | 6641 | TCP | Internal | OVN Northbound Database |
+| OVN SB DB | 6642 | TCP | Internal | OVN Southbound Database |
+| OVN Northd | 6643 | TCP | Internal | OVN Northd process |
+| OVN Raft | 6644 | TCP | Internal | OVN Raft consensus (for HA OVN DBs) |
+| Geneve Tunnel | 6081 | UDP | Node-to-Node | Default overlay network for pod communication (OVN) |
+| OVN Controller | 10660 | TCP | Internal | Metrics for OVN Controller |
+| OVN Daemon | 10665 | TCP | Internal | Metrics for OVN Daemon (on each node) |
+| OVN Monitor | 10661 | TCP | Internal | Metrics for OVN Monitor |
+
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md
new file mode 100644
index 000000000..b9786ee3a
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md
@@ -0,0 +1,64 @@
++++
+title = "Single Node Deployment"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+## Single Node Deployment
+
+The hardware foundation for Kubermatic Virtualization is multi-faceted, encompassing requirements for the Kubermatic
+Virtualization (Kube-V) management layer, the KubeVirt infrastructure node that hosts virtual machines, and the
+various services that run as part of the ecosystem.
+
+### Host Configuration
+
+* CPU: A minimum of 8 CPU cores is suggested for testing environments. For production deployments, 16 CPU cores
+ or more are recommended to accommodate multiple VMs and their workloads effectively. The host node must have
+ Intel VT-x or AMD-V hardware virtualization extensions enabled in the BIOS/UEFI.
+ This is a fundamental requirement for KubeVirt to leverage KVM (Kernel-based Virtual Machine) for efficient VM execution.
+  Without this, KubeVirt can fall back to software emulation, but it's significantly slower and not suitable for production use.
+* RAM: At least 8 GB RAM per node. Recommended: 16-32 GB, depending on the number and memory requirements of your VMs.
+* Storage: SSDs or NVMe drives are highly recommended for good VM performance in addition to sufficient storage capacity
+ based on the disk images of your VMs and any data they store.
+
+### Storage
+
+* CSI Driver Capabilities (Crucial for HA/Live Migration): This is perhaps the most critical component for KubeVirt.
+ You need a shared storage backend that supports ReadWriteMany (RWX) access mode or Block-mode (volumeMode: Block) volumes.
+* Capacity: Sufficient storage capacity based on the disk images of your VMs and any data they store.
+* Performance: SSDs or NVMe drives are highly recommended for good VM performance; for high-throughput services,
+  low-latency, high-IOPS storage (often block storage) is critical.
+
+### Networking
+
+A well-planned and correctly configured network infrastructure is fundamental to the stability and performance of
+Kubermatic Virtualization. This includes considerations for IP addressing, DNS, load balancing, and inter-component communication.
+
+* High-bandwidth, low-latency connections: 1 Gbps NICs are a minimum; 10 Gbps or higher is recommended for performance-sensitive
+ workloads.
+* DNS: DNS resolution is crucial for the Kube-V environment, enabling all nodes to find each other and external services.
+  A potential conflict can arise if both the KubeVirt infrastructure and guest user clusters
+  use NodeLocal DNSCache with the same default IP address, leading to DNS resolution issues for guest VMs. This can be
+  mitigated by adjusting the `dnsConfig` and `dnsPolicy` of the guest VMs.
+
+
+| Component | Port(s) | Protocol | Direction | Purpose |
+|:------------------:| :------------------: | :------: | :----------: | :-----------------------------------------------------: |
+| API Server | 6443 | TCP | Inbound | All API communication with the cluster |
+| etcd | 2379-2380 | TCP | Inbound | etcd database communication |
+| Kubelet | 10250 | TCP | Inbound | Kubelet API for control plane communication |
+| Kube-Scheduler | 10259 | TCP | Inbound | Kube-Scheduler component |
+| Controller-Manager | 10257 | TCP | Inbound | Kube-Controller-Manager component |
+| Kube-Proxy | 10256 | TCP | Inbound | Kube-Proxy health checks and service routing |
+| NodePort Services | 30000-32767 | TCP/UDP | Inbound | Default range for exposing services on node IPs |
+| KubeVirt API | 8443 | TCP | Internal | KubeVirt API communication |
+| Live Migration | 61000-61009 (approx) | TCP | Node-to-Node | For migrating VM state between nodes |
+| OVN NB DB | 6641 | TCP | Internal | OVN Northbound Database |
+| OVN SB DB | 6642 | TCP | Internal | OVN Southbound Database |
+| OVN Northd | 6643 | TCP | Internal | OVN Northd process |
+| OVN Raft | 6644 | TCP | Internal | OVN Raft consensus (for HA OVN DBs) |
+| Geneve Tunnel | 6081 | UDP | Node-to-Node | Default overlay network for pod communication (OVN) |
+| OVN Controller | 10660 | TCP | Internal | Metrics for OVN Controller |
+| OVN Daemon | 10665 | TCP | Internal | Metrics for OVN Daemon (on each node) |
+| OVN Monitor | 10661 | TCP | Internal | Metrics for OVN Monitor |
+
diff --git a/content/kubermatic-virtualization/main/installation/_index.en.md b/content/kubermatic-virtualization/main/installation/_index.en.md
new file mode 100644
index 000000000..87bf41899
--- /dev/null
+++ b/content/kubermatic-virtualization/main/installation/_index.en.md
@@ -0,0 +1,161 @@
++++
+title = "Installation"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+This chapter offers guidance on how to install Kubermatic Virtualization.
+
+## Installing Kubermatic Virtualization with CLI
+
+Kubermatic Virtualization comes with an interactive installer, a CLI tool that helps administrators and users provision the entire platform easily. With just a few inputs, you can deploy and configure the stack in no time.
+
+{{% notice note %}}
+To get started with the CLI, you will first need a Kubermatic Virtualization license. This license grants you access to the necessary resources, including the CLI. Please [contact sales](mailto:sales@kubermatic.com) to obtain your license.
+{{% /notice %}}
+
+### **1. Navigating the Interactive CLI Installer**
+
+The Kubermatic Virtualization installer is an interactive CLI that guides you through the installation process with clear instructions and prompts for user input. Each page contains important information and features a help bar at the bottom to assist with navigation.
+
+
+
+---
+
+### **2. Configuring the Network Stack**
+
+One of the foundational steps in setting up Kubermatic Virtualization is defining the network configuration. This step ensures that your virtual machines and containerized workloads have a dedicated IP range to operate within, similar to a default VPC. Proper network configuration is crucial for seamless communication and resource management.
+
+
+#### **Key Components**
+- **Network (CIDR)**: Specify the IP range where your virtual machines and containerized workloads will reside. This defines the subnet they will use by default.
+- **DNS Server**: Provide the DNS server address to ensure proper name resolution for your workloads.
+- **Gateway IP**: Define the gateway IP to facilitate network routing and connectivity.
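+
+For orientation, the values entered at this step might look like the following sketch (all values are illustrative, and the field names are ours rather than the installer's):
+
+```yaml
+network_cidr: "192.168.100.0/24" # subnet used by VMs and containerized workloads
+dns_server: "1.1.1.1"            # must be reachable from all nodes
+gateway_ip: "192.168.100.1"      # default gateway for the subnet
+```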
+
+{{% notice warning %}}
+
+When setting up your network configuration, it is crucial to provide a working DNS server address to ensure proper
+name resolution for your virtual machines and containerized workloads. Failure to do so can result in issues such as
+the inability to resolve domain names, failed connections to external services, or degraded functionality due to
+unresolved hostnames. If you do not have an internal DNS server configured, it is recommended to use a public and
+trusted DNS server such as Google Public DNS (`8.8.8.8` or `8.8.4.4`) or Cloudflare DNS (`1.1.1.1` or `1.0.0.1`).
+
+{{% /notice %}}
+
+
+
+
+---
+
+### **3. Configuring the Load Balancer Service**
+
+In this step, you can enable Kubermatic Virtualization’s default Load Balancer service, **MetalLB**, to simplify the creation of load balancers for your workloads. MetalLB is well suited for evaluation and non-production environments; for production scenarios, a proper enterprise-grade load balancing solution should be used instead.
+
+#### **Key Steps**
+- **Enable MetalLB**: Toggle the checkbox to enable the Load Balancer service.
+- **Define IP Range**: If MetalLB is enabled, specify the IP range that will be used by the Load Balancer.
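+
+Under the hood, MetalLB announces load balancer IPs from an address pool. The sketch below shows an equivalent MetalLB resource (the installer manages this for you; the pool name and address range are examples):
+
+```yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: default-pool
+  namespace: metallb-system
+spec:
+  addresses:
+    - 192.168.100.200-192.168.100.220 # must be unused IPs in your network
+```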
+
+{{% notice warning %}}
+
+When defining the IP range for MetalLB, ensure that the range is valid and exists within your network infrastructure. This range will be reserved for load balancer IP addresses, and any misconfiguration or overlap with existing IPs can lead to network conflicts, service disruptions, or inaccessible workloads. Always verify the availability and uniqueness of the IP range before proceeding.
+
+{{% /notice %}}
+
+
+
+By configuring these settings, you ensure that your workloads have access to a simple yet effective Load Balancer solution for testing and development purposes. Let’s move on to the next configuration step!
+
+---
+
+### **4. Configuring Nodes**
+
+In this section, you will define the number of nodes in your Kubermatic Virtualization cluster and provide detailed information about each node, including their IP addresses, usernames, and SSH key paths. Accurate configuration is crucial for ensuring smooth communication and management of your cluster.
+
+#### **Step 1: Specify the Number of Nodes**
+
+The first step is to determine how many nodes you want in your cluster. This number will dictate the scale of your infrastructure and influence the subsequent configuration steps.
+
+- **Input**: Enter the total number of nodes you plan to use.
+
+
+
+
+#### **Step 2: Configure Each Node**
+
+After specifying the number of nodes, you will be prompted to configure each node individually. For each node, you need to provide the following details:
+
+1. **Node Address**: The IP address of the node.
+2. **Username**: The username used to access the node via SSH.
+3. **SSH Key Path**: The path to the SSH private key file used to authenticate with the node.
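+
+As an illustration, the details collected for a single node might look like this (the address, username, and key path are examples; the field names are ours rather than the installer's):
+
+```yaml
+node_address: 192.168.100.11    # must be reachable from the machine running the installer
+username: ubuntu                # user with SSH access on the node
+ssh_key_path: ~/.ssh/id_ed25519 # private key used for SSH authentication
+```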
+
+
+
+Repeat this process for each node until all nodes are configured.
+
+#### **Why Accurate Configuration Matters**
+- **Node Addresses**: Ensure that the IP addresses are correct and reachable within your network.
+- **Usernames and SSH Keys**: Provide secure access to the nodes, enabling proper communication and management.
+
+By carefully configuring the number of nodes and providing accurate details for each node, you lay the foundation for a robust and manageable Kubermatic Virtualization environment. Let’s proceed to the next configuration step!
+
+---
+### **5. Configuring the Storage CSI Driver**
+
+In this step, you will decide whether to use the default Container Storage Interface (CSI) driver provided by Kubermatic Virtualization. The default CSI driver is designed for evaluation and staging environments and is not recommended for production use.
+
+
+
+#### **Key Information**
+- **Default CSI Driver**: The default CSI driver (e.g., Longhorn) is included for testing purposes only.
+- **Purpose**: It provides baseline storage functionality during evaluation and staging phases.
+- **Recommendation**: For production environments, it is strongly advised to use a fully supported and robust storage solution.
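+
+After the installation completes, you can check the available storage with standard kubectl commands, for example (the Longhorn namespace below assumes the default driver was enabled):
+
+```bash
+kubectl get storageclass
+kubectl -n longhorn-system get pods
+```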
+
+#### **Disclaimer**
+Please be advised:
+- The default CSI driver is **not intended or supported** for production environments.
+- Its use in production is at your own risk and is not recommended.
+- Kubermatic does not guarantee ongoing maintenance, reliability, or performance of the default CSI driver.
+
+
+
+By making an informed decision about the CSI driver, you ensure that your environment is configured appropriately for its intended purpose—whether it’s for testing or production. Let’s proceed to the next configuration step!
+
+---
+
+### **6. Reviewing the Configuration**
+
+Before proceeding with the installation, it is crucial to review the full configuration to ensure all settings are correct. This step allows you to verify details such as Control Plane nodes, Worker nodes, network configurations, and other critical parameters. Once confirmed, the installation process will begin, and you will not be able to go back.
+
+#### **Key Information**
+- **Purpose**: Confirm that all configurations are accurate before applying them.
+- **Irreversible Step**: After confirming this page, the installation process will start, and changes cannot be made without restarting the entire setup.
+
+#### **What to Review**
+- **Cluster Nodes**:
+ - Addresses
+ - Usernames
+ - SSH key file paths
+
+- **Other Configurations**:
+ - Network settings (CIDR, DNS server, Gateway IP)
+ - Load Balancer configuration (if enabled)
+ - Storage CSI driver selection
+
+
+
+
+{{% notice warning %}}
+**No Going Back**: Once you confirm this page, the installation process will begin, and you cannot modify the configuration without starting over.
+{{% /notice %}}
+
+By carefully reviewing the configuration, you ensure that your Kubermatic Virtualization environment is set up correctly from the start. Proceed with confidence when you’re ready!
+
+---
+
+### **7. Finalizing the Installation**
+
+Once you confirm the configuration, the installation process will begin, and you’ll be able to monitor its progress in real time through detailed logs displayed on the screen. These logs provide transparency into each step of the deployment, ensuring you stay informed throughout the process.
+
+#### **What Happens During Installation**
+- **Progress Monitoring**: Watch as the installer provisions the Control Plane, Worker Nodes, and other components.
+- **Health Checks**: After deployment, the installer verifies that all parts of the stack are healthy and running as expected.
+- **Completion**: Upon successful installation, the installer will generate and display the **kubeconfig** file for your Kubermatic Virtualization cluster.
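+
+A typical first interaction with the new cluster might look like the following sketch (the kubeconfig path is an assumption; use the location reported by the installer):
+
+```bash
+export KUBECONFIG=$PWD/kubeconfig
+kubectl get nodes -o wide    # all nodes should report Ready
+kubectl -n kubevirt get pods # verify that the KubeVirt components are running
+```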
+
+### **Congratulations!**
+Your Kubermatic Virtualization environment is now up and running. With the kubeconfig file in hand, you’re ready to start managing your cluster and deploying workloads. Enjoy the power of seamless virtualization on Kubernetes! 🚀
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png b/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png
new file mode 100644
index 000000000..4617462b6
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/1-network-page.png b/content/kubermatic-virtualization/main/installation/assets/1-network-page.png
new file mode 100644
index 000000000..4cc7ee25b
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/1-network-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png b/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png
new file mode 100644
index 000000000..9d0e0798b
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/3-node-count.png b/content/kubermatic-virtualization/main/installation/assets/3-node-count.png
new file mode 100644
index 000000000..aecb6f70f
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/3-node-count.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png b/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png
new file mode 100644
index 000000000..a25486df4
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png b/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png
new file mode 100644
index 000000000..23e25fa54
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/6-review-page.png b/content/kubermatic-virtualization/main/installation/assets/6-review-page.png
new file mode 100644
index 000000000..2163ceb22
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/6-review-page.png differ
diff --git a/content/kubermatic/main/_index.en.md b/content/kubermatic/main/_index.en.md
index 0ead10bba..b3b1dd9e3 100644
--- a/content/kubermatic/main/_index.en.md
+++ b/content/kubermatic/main/_index.en.md
@@ -9,7 +9,7 @@ date = 2019-04-27T16:06:34+02:00
Kubermatic Kubernetes Platform (KKP) is a Kubernetes management platform that helps address the operational and security challenges of enterprise customers seeking to run Kubernetes at scale. KKP automates deployment and operations of hundreds or thousands of Kubernetes clusters across hybrid-cloud, multi-cloud and edge environments while enabling DevOps teams with a self-service developer and operations portal.
-KKP is directly integrated with leading cloud providers including Amazon Web Services (AWS), Google Cloud, Azure, Openstack, VMware vSphere, Open Telekom Cloud, Digital Ocean, Hetzner, Alibaba Cloud, Equinix Metal and Nutanix. For selected providers, ARM is supported as CPU architecture.
+KKP is directly integrated with leading cloud providers including Amazon Web Services (AWS), Google Cloud, Azure, OpenStack, VMware vSphere, Open Telekom Cloud, DigitalOcean, Hetzner, Alibaba Cloud and Nutanix. For selected providers, ARM is supported as CPU architecture.
In addition to the long list of supported cloud providers, KKP allows building your own infrastructure and joining Kubernetes nodes via the popular `kubeadm` tool.
@@ -17,10 +17,12 @@ KKP is the easiest and most effective software for managing cloud native IT infr
## Features
-#### Powerful & Intuitive Dashboard to Visualize your Kubernetes Deployment
+### Powerful & Intuitive Dashboard to Visualize your Kubernetes Deployment
+
Manage your [projects and clusters with the KKP dashboard]({{< ref "./tutorials-howtos/project-and-cluster-management/" >}}). Scale your cluster by adding and removing nodes in just a few clicks. As an admin, the dashboard also allows you to [customize the theme]({{< ref "./tutorials-howtos/dashboard-customization/" >}}) and disable theming options for other users.
-#### Deploy, Scale & Update Multiple Kubernetes Clusters
+### Deploy, Scale & Update Multiple Kubernetes Clusters
+
Kubernetes environments must be highly distributed to meet the performance demands of modern cloud native applications. Organizations can ensure consistent operations across all environments with effective cluster management. KKP empowers you to take advantage of all the advanced features that Kubernetes has to offer and increases the speed, flexibility and scalability of your cloud deployment workflow.
At Kubermatic, we have chosen to do multi-cluster management with Kubernetes Operators. Operators (a method of packaging, deploying and managing a Kubernetes application) allow KKP to automate creation as well as the full lifecycle management of clusters. With KKP you can create a cluster for each need, fine-tune it, reuse it and continue this process hassle-free. This results in:
@@ -29,15 +31,17 @@ At Kubermatic, we have chosen to do multi-cluster management with Kubernetes Ope
- Smaller individual clusters being more adaptable than one big cluster.
- Faster development thanks to less complex environments.
-#### Kubernetes Autoscaler Integration
+### Kubernetes Autoscaler Integration
+
Autoscaling in Kubernetes refers to the ability to increase or decrease the number of nodes as the demand for service response changes. Without autoscaling, teams would manually first provision and then scale up or down resources every time conditions change. This means, either services fail at peak demand due to the unavailability of enough resources or you pay at peak capacity to ensure availability.
[The Kubernetes Autoscaler in a cluster created by KKP]({{< ref "./tutorials-howtos/kkp-autoscaler/cluster-autoscaler/" >}}) can automatically scale up/down when one of the following conditions is satisfied:
1. Some pods fail to run in the cluster due to insufficient resources.
-2. There are nodes in the cluster that have been underutilized for an extended period (10 minutes by default) and pods running on those nodes can be rescheduled to other existing nodes.
+1. There are nodes in the cluster that have been underutilized for an extended period (10 minutes by default) and pods running on those nodes can be rescheduled to other existing nodes.
+
+### Manage all KKP Users Directly from a Single Panel
-#### Manage all KKP Users Directly from a Single Panel
The admin panel allows KKP administrators to manage the global settings that impact all KKP users directly. As an administrator, you can do the following:
- Customize the way custom links (example: Twitter, Github, Slack) are displayed in the Kubermatic dashboard.
@@ -46,32 +50,39 @@ The admin panel allows KKP administrators to manage the global settings that imp
- Define Preset types in a Kubernetes Custom Resource Definition (CRD) allowing the assignment of new credential types to supported providers.
- Enable and configure etcd backups for your clusters through Backup Buckets.
-#### Manage Worker Nodes via the UI or the CLI
+### Manage Worker Nodes via the UI or the CLI
+
Worker nodes can be managed [via the KKP web dashboard]({{< ref "./tutorials-howtos/manage-workers-node/via-ui/" >}}). Once you have installed kubectl, you can also manage them [via CLI]({{< ref "./tutorials-howtos/manage-workers-node/via-command-line" >}}) to automate the creation, deletion, and upgrade of nodes.
-#### Monitoring, Logging & Alerting
+### Monitoring, Logging & Alerting
+
When it comes to monitoring, no approach fits all use cases. KKP allows you to adjust things to your needs by enabling certain customizations to enable easy and tactical monitoring.
KKP provides two different levels of Monitoring, Logging, and Alerting.
1. The first targets only the management components (master, seed, CRDs) and is independent. This is the Master/Seed Cluster MLA Stack and only the KKP Admins can access this monitoring data.
-2. The other component is the User Cluster MLA Stack which is a true multi-tenancy solution for all your end-users as well as a comprehensive overview for the KKP Admin. It helps to speed up individual progress but lets the Admin keep an overview of the big picture. It can be configured per seed to match the requirements of the organizational structure. All users can access monitoring data of the user clusters under the projects that they are members of.
+1. The other component is the User Cluster MLA Stack which is a true multi-tenancy solution for all your end-users as well as a comprehensive overview for the KKP Admin. It helps to speed up individual progress but lets the Admin keep an overview of the big picture. It can be configured per seed to match the requirements of the organizational structure. All users can access monitoring data of the user clusters under the projects that they are members of.
Integrated Monitoring, Logging and Alerting functionality for applications and services in KKP user clusters are built using Prometheus, Loki, Cortex and Grafana. Furthermore, this can be enabled with a single click on the KKP UI.
-#### OIDC Provider Configuration
+### OIDC Provider Configuration
+
Since Kubernetes does not provide an OpenID Connect (OIDC) Identity Provider, KKP allows the user to configure a custom OIDC. This way you can grant access and information to the right stakeholders and fulfill security requirements by managing user access in a central identity provider across your whole infrastructure.
-#### Easily Upgrading Control Plane and Nodes
+### Easily Upgrading Control Plane and Nodes
+
A specific version of Kubernetes’ control plane typically supports a specific range of kubelet versions connected to it. KKP enforces the rule “kubelet must not be newer than kube-apiserver, and maybe up to two minor versions older” on its own. KKP ensures this rule is followed by checking during each upgrade of the clusters’ control plane or node’s kubelet. Additionally, only compatible versions are listed in the UI as available for upgrades.
-#### Open Policy Agent (OPA)
+### Open Policy Agent (OPA)
+
To enforce policies and improve governance in Kubernetes, Open Policy Agent (OPA) can be used. KKP integrates it using OPA Gatekeeper as a kubernetes-native policy engine supporting OPA policies. As an admin you can enable and enforce OPA integration during cluster creation by default via the UI.
-#### Cluster Templates
+### Cluster Templates
+
Clusters can be created in a few clicks with the UI. To take the user experience one step further and make repetitive tasks redundant, cluster templates allow you to save data entered into a wizard to create multiple clusters from a single template at once. Templates can be saved to be used subsequently for new cluster creation.
-#### Use Default Addons to Extend the Functionality of Kubernetes
+### Use Default Addons to Extend the Functionality of Kubernetes
+
[Addons]({{< ref "./architecture/concept/kkp-concepts/addons/" >}}) are specific services and tools extending the functionality of Kubernetes. Default addons are installed in each user cluster in KKP. The KKP Operator comes with a tool to output full default KKP configuration, serving as a starting point for adjustments. Accessible addons can be installed in each user cluster in KKP on user demand.
{{% notice tip %}}
diff --git a/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md b/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md
index f3483089c..aa9a64b81 100644
--- a/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md
+++ b/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md
@@ -14,7 +14,7 @@ of provided software and therefore releases updates regularly that also include
| ------------------------------ | ------------------------------ |
| backup/velero | 1.14.0 |
| cert-manager | 1.17.4 |
-| dex | 2.42.0 |
+| dex | 2.44.0 |
| gitops/kkp-argocd-apps | 1.16.1 |
| iap | 7.8.2 |
| kubermatic-operator | 9.9.9-dev |
@@ -38,6 +38,6 @@ of provided software and therefore releases updates regularly that also include
| monitoring/kube-state-metrics | 2.15.0 |
| monitoring/node-exporter | 1.9.0 |
| monitoring/prometheus | 2.51.1 |
-| nginx-ingress-controller | 1.12.1 |
+| nginx-ingress-controller | 1.13.2 |
| s3-exporter | 0.7.1 |
| telemetry | 0.5.2 |
diff --git a/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md b/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md
index 1eab1534c..e4c3b01d9 100644
--- a/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md
+++ b/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md
@@ -11,11 +11,12 @@ KKP supports a multitude of operating systems. One of the unique features of KKP
The following operating systems are currently supported by Kubermatic:
-* Ubuntu 20.04, 22.04 and 24.04
-* RHEL beginning with 8.0 (support is cloud provider-specific)
-* Flatcar (Stable channel)
-* Rocky Linux beginning with 8.0
-* Amazon Linux 2
+- Ubuntu 20.04, 22.04 and 24.04
+- RHEL beginning with 8.0 (support is cloud provider-specific)
+- Flatcar (Stable channel)
+- Rocky Linux beginning with 8.0
+- Amazon Linux 2
+
**Note:** CentOS was removed as a supported OS in KKP 2.26.3.
This table shows the combinations of operating systems and cloud providers that KKP supports:
@@ -26,7 +27,6 @@ This table shows the combinations of operating systems and cloud providers that
| Azure | ✓ | ✓ | ✓ | x | ✓ |
| Digitalocean | ✓ | x | x | x | ✓ |
| Edge | ✓ | x | x | x | x |
-| Equinix Metal | ✓ | ✓ | x | x | ✓ |
| Google Cloud Platform | ✓ | ✓ | x | x | x |
| Hetzner | ✓ | x | x | x | ✓ |
| KubeVirt | ✓ | ✓ | ✓ | x | ✓ |
diff --git a/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md b/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md
new file mode 100644
index 000000000..82b7053b7
--- /dev/null
+++ b/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md
@@ -0,0 +1,132 @@
++++
+title = "KKP Requirements for Ubuntu"
+date = 2025-08-21T20:07:15+02:00
+weight = 15
+
++++
+
+## KKP Package and Configurations for Ubuntu
+
+This document provides an overview of the system packages and Kubernetes-related binaries installed, along with their respective sources.
+
+{{% notice note %}}
+This document serves as a guideline for users who want to harden their Ubuntu hosts, providing instructions for installing
+and configuring the required packages and settings. By default, OSM handles these installations and configurations through
+an Operating System Profile. However, users who prefer to manage them manually can follow the steps outlined below.
+{{% /notice %}}
+
+---
+
+## System Packages (via `apt`)
+
+The following packages are installed using the **APT package manager**:
+
+| Package | Source |
+|-----------------------------|--------|
+| curl | apt |
+| jq | apt |
+| ca-certificates | apt |
+| ceph-common | apt |
+| cifs-utils | apt |
+| conntrack | apt |
+| e2fsprogs | apt |
+| ebtables | apt |
+| ethtool | apt |
+| glusterfs-client | apt |
+| iptables | apt |
+| kmod | apt |
+| openssh-client | apt |
+| nfs-common | apt |
+| socat | apt |
+| util-linux | apt |
+| ipvsadm | apt |
+| apt-transport-https | apt |
+| software-properties-common | apt |
+| lsb-release | apt |
+| containerd.io | apt |
+
+---
+
+## Kubernetes Dependencies (Manual Download)
+
+The following components are **manually downloaded** (usually from the official [Kubernetes GitHub releases](https://github.com/kubernetes/kubernetes/releases)):
+
+| Package | Source |
+|----------------|--------------------------|
+| CNI plugins | Manual Download (GitHub) |
+| CRI-tools | Manual Download (GitHub) |
+| kubelet | Manual Download (GitHub) |
+| kubeadm | Manual Download (GitHub) |
+| kubectl | Manual Download (GitHub) |
+
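+A sketch of this download step is shown below (the versions, architecture, and install paths are assumptions; pin them to match your cluster):
+
+```bash
+KUBE_VERSION="v1.33.4"   # assumed; use the version matching your cluster
+CNI_VERSION="v1.5.1"     # assumed
+CRICTL_VERSION="v1.33.0" # assumed
+ARCH="amd64"
+
+# CNI plugins
+mkdir -p /opt/cni/bin
+curl -L "https://github.com/containernetworking/plugins/releases/download/$%7BCNI_VERSION%7D/cni-plugins-linux-$%7BARCH%7D-$%7BCNI_VERSION%7D.tgz" \
+  | tar -C /opt/cni/bin -xz
+
+# crictl (CRI-tools)
+mkdir -p /opt/bin
+curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/$%7BCRICTL_VERSION%7D/crictl-$%7BCRICTL_VERSION%7D-linux-$%7BARCH%7D.tar.gz" \
+  | tar -C /opt/bin -xz
+
+# kubelet, kubeadm, kubectl
+for BIN in kubelet kubeadm kubectl; do
+  curl -L -o "/opt/bin/${BIN}" "https://dl.k8s.io/release/$%7BKUBE_VERSION%7D/bin/linux/$%7BARCH%7D/$%7BBIN%7D"
+  chmod +x "/opt/bin/${BIN}"
+done
+```
+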
+---
+
+
+## Notes
+- **APT packages**: Installed via the system’s package manager for base functionality (networking, file systems, utilities, etc.).
+- **Manual downloads**: Required for Kubernetes setup and cluster management, ensuring version consistency across nodes.
+- **containerd.io**: Installed via apt as the container runtime.
+
+## Kubernetes Node Bootstrap Configuration
+
+This section describes the scripts and systemd unit files that configure a Linux host to function as a Kubernetes node. These scripts do not install Kubernetes packages directly but apply system, kernel, and service configurations required for proper operation.
+
+---
+
+## 🔧 Configurations Applied
+
+### 1. Environment Variables
+- Adds `NO_PROXY` and `no_proxy` to `/etc/environment` to bypass proxying for:
+ - `.svc`
+ - `.cluster.local`
+ - `localhost`
+ - `127.0.0.1`
+
+- Creates an empty APT proxy configuration file, `/etc/apt/apt.conf.d/proxy.conf` (a placeholder for proxy settings, not configured by default).
+
+---
+
+### 2. Kernel Modules
+The script loads and enables essential kernel modules for networking and container orchestration:
+- `ip_vs` – IP Virtual Server (transport-layer load balancing).
+- `ip_vs_rr` – Round-robin scheduling algorithm.
+- `ip_vs_wrr` – Weighted round-robin scheduling algorithm.
+- `ip_vs_sh` – Source-hash scheduling algorithm.
+- `nf_conntrack_ipv4` or `nf_conntrack` – Connection tracking support.
+- `br_netfilter` – Enables netfilter for bridged network traffic (required by Kubernetes).
+
+---
+
+### 3. Kernel Parameters (`sysctl`)
+The following runtime kernel parameters are configured:
+
+- `net.bridge.bridge-nf-call-ip6tables = 1`
+- `net.bridge.bridge-nf-call-iptables = 1`
+- `kernel.panic_on_oops = 1`
+- `kernel.panic = 10`
+- `net.ipv4.ip_forward = 1`
+- `vm.overcommit_memory = 1`
+- `fs.inotify.max_user_watches = 1048576`
+- `fs.inotify.max_user_instances = 8192`
+
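+A minimal sketch of applying the module and sysctl settings from sections 2 and 3 by hand (the file names are assumptions; OSM normally does this for you):
+
+```bash
+cat <<EOF >/etc/modules-load.d/k8s.conf
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+nf_conntrack
+br_netfilter
+EOF
+systemctl restart systemd-modules-load.service
+
+cat <<EOF >/etc/sysctl.d/90-k8s.conf
+net.bridge.bridge-nf-call-ip6tables = 1
+net.bridge.bridge-nf-call-iptables = 1
+kernel.panic_on_oops = 1
+kernel.panic = 10
+net.ipv4.ip_forward = 1
+vm.overcommit_memory = 1
+fs.inotify.max_user_watches = 1048576
+fs.inotify.max_user_instances = 8192
+EOF
+sysctl --system
+```
+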
+---
+
+### 4. System Services & Management
+- **Firewall**: Disables and masks UFW to avoid interfering with Kubernetes networking.
+- **Hostname**: Overrides hostname with `/etc/machine-name` value if available.
+- **APT Repositories**: Adds the official Docker APT repository and imports GPG key.
+- **Symbolic Links**: Makes `kubelet`, `kubeadm`, `kubectl`, and `crictl` binaries available in `$PATH`.
+
+---
+
+### 5. Node IP & Hostname Configuration
+- Discovers IP via: `ip -o route get 1`
+- Discovers hostname via: `hostname -f`
+
+### 6. Swap Disabling
+Kubernetes requires swap to be disabled:
+
+- Removes swap entries from /etc/fstab: `sed -i.orig '/.*swap.*/d' /etc/fstab`
+- Disables active swap immediately: `swapoff -a`
diff --git a/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md b/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
index bfc0b4eb0..190cf8fed 100644
--- a/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
+++ b/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
@@ -28,19 +28,19 @@ these migrations.
In the following table you can find the supported Kubernetes versions for the
current KKP version.
-| KKP version | 1.33 |1.32 | 1.31 | 1.30 | 1.29[^2] | 1.28[^2] |
+| KKP version | 1.34 |1.33 | 1.32 | 1.31 | 1.30[^2] | 1.29[^2] |
| -------------------- | -----|-----|-----| ---- | ---- | ---- |
-| 2.28.x | ✓ | ✓ | ✓ | ✓ | -- | -- |
-| 2.27.x | -- | ✓ | ✓ | ✓ | ✓ | -- |
-| 2.26.x | -- | -- | ✓ | ✓ | ✓ | ✓ |
+| 2.29.x | ✓ | ✓ | ✓ | ✓ | -- | -- |
+| 2.28.x | -- | ✓ | ✓ | ✓ | ✓ | -- |
+| 2.27.x | -- | -- | ✓ | ✓ | ✓ | ✓ |
-[^2]: Kubernetes releases below version 1.27 have reached End-of-Life (EOL). We strongly
+[^2]: Kubernetes releases below version 1.31 have reached End-of-Life (EOL). We strongly
recommend upgrading to a supported Kubernetes release as soon as possible. Refer to the
[Kubernetes website](https://kubernetes.io/releases/) for more information on the supported
releases.
Upgrades from a previous Kubernetes version are generally supported whenever a version is
-marked as supported, for example KKP 2.27 supports updating clusters from Kubernetes 1.30 to 1.31.
+marked as supported, for example KKP 2.28 supports updating clusters from Kubernetes 1.32 to 1.33.
## Provider Incompatibilities
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md
index 9fa9d2ac6..feb4805ea 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md
@@ -24,22 +24,22 @@ In general, we recommend the usage of Applications for workloads running inside
Default addons are installed in each user-cluster in KKP. The default addons are:
-* [Canal](https://github.com/projectcalico/canal): policy based networking for cloud native applications
-* [Dashboard](https://github.com/kubernetes/dashboard): General-purpose web UI for Kubernetes clusters
-* [kube-proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/): Kubernetes network proxy
-* [rbac](https://kubernetes.io/docs/reference/access-authn-authz/rbac/): Kubernetes Role-Based Access Control, needed for
+- [Canal](https://github.com/projectcalico/canal): policy based networking for cloud native applications
+- [Dashboard](https://github.com/kubernetes/dashboard): General-purpose web UI for Kubernetes clusters
+- [kube-proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/): Kubernetes network proxy
+- [rbac](https://kubernetes.io/docs/reference/access-authn-authz/rbac/): Kubernetes Role-Based Access Control, needed for
[TLS node bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)
-* [OpenVPN client](https://openvpn.net/index.php/open-source/overview.html): virtual private network (VPN). Lets the control
+- [OpenVPN client](https://openvpn.net/index.php/open-source/overview.html): virtual private network (VPN). Lets the control
plane access the Pod & Service network. Required for functionality like `kubectl proxy` & `kubectl port-forward`.
-* pod-security-policy: Policies to configure KKP access when PSPs are enabled
-* default-storage-class: A cloud provider specific StorageClass
-* kubeadm-configmap & kubelet-configmap: A set of ConfigMaps used by kubeadm
+- pod-security-policy: Policies to configure KKP access when PSPs are enabled
+- default-storage-class: A cloud provider specific StorageClass
+- kubeadm-configmap & kubelet-configmap: A set of ConfigMaps used by kubeadm
Installation and configuration of these addons is done by 2 controllers which are part of the KKP
seed-controller-manager:
-* `addon-installer-controller`: Ensures a given set of addons will be installed in all clusters
-* `addon-controller`: Templates the addons & applies the manifests in the user clusters
+- `addon-installer-controller`: Ensures a given set of addons will be installed in all clusters
+- `addon-controller`: Templates the addons & applies the manifests in the user clusters
The KKP binaries come with a `kubermatic-installer` tool, which can output a full default
`KubermaticConfiguration` (`kubermatic-installer print`). This will also include the default configuration for addons and can serve as
@@ -86,7 +86,7 @@ regular addons, which are always installed and cannot be removed by the user). I
and accessible, then it will be installed in the user-cluster, but also be visible to the user, who can manage
it from the KKP dashboard like the other accessible addons. The accessible addons are:
-* [node-exporter](https://github.com/prometheus/node_exporter): Exports metrics from the node
+- [node-exporter](https://github.com/prometheus/node_exporter): Exports metrics from the node
Accessible addons can be managed in the UI from the cluster details view:
@@ -256,6 +256,7 @@ spec:
```
There is a short explanation of the single `formSpec` fields:
+
- `displayName` is the name that is displayed in the UI as the control label.
- `internalName` is the name used internally. It can be referenced with template variables (see the description below).
- `required` indicates if the control should be required in the UI.
@@ -317,7 +318,7 @@ the exact templating syntax.
KKP injects an instance of the `TemplateData` struct into each template. The following
Go snippet shows the available information:
-```
+```plaintext
{{< readfile "kubermatic/main/data/addondata.go" >}}
```
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md
index c4191f0a8..e3e440b9d 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md
@@ -32,6 +32,7 @@ AWS node termination handler is deployed with any aws user cluster created by KK
cluster once the spot instance is interrupted.
## AWS Spot Instances Creation
+
To create a user cluster which runs some spot instance machines, the user can specify the machine type whether it's a spot
instance or not at the step number four (Initial Nodes). A checkbox that has the label "Spot Instance" should be checked.
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md
index da43a0e4f..d50274b7f 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md
@@ -28,12 +28,12 @@ Before this addon can be deployed in a KKP user cluster, the KKP installation ha
as an [accessible addon](../#accessible-addons). This needs to be done by the KKP installation administrator,
once per KKP installation.
-* Request the KKP addon Docker image with Kubeflow Addon matching your KKP version from Kubermatic
+- Request the KKP addon Docker image with Kubeflow Addon matching your KKP version from Kubermatic
(or [build it yourself](../#creating-a-docker-image) from the [Flowmatic repository](https://github.com/kubermatic/flowmatic)).
-* Configure KKP - edit `KubermaticConfiguration` as follows:
- * modify `spec.userClusters.addons.kubernetes.dockerRepository` to point to the provided addon Docker image repository,
- * add `kubeflow` into `spec.api.accessibleAddons`.
-* Apply the [AddonConfig from the Flowmatic repository](https://raw.githubusercontent.com/kubermatic/flowmatic/master/addon/addonconfig.yaml) in your KKP installation.
+- Configure KKP - edit `KubermaticConfiguration` as follows:
+ - modify `spec.userClusters.addons.kubernetes.dockerRepository` to point to the provided addon Docker image repository,
+ - add `kubeflow` into `spec.api.accessibleAddons`.
+- Apply the [AddonConfig from the Flowmatic repository](https://raw.githubusercontent.com/kubermatic/flowmatic/master/addon/addonconfig.yaml) in your KKP installation.
### Kubeflow prerequisites
@@ -66,7 +66,8 @@ For a LoadBalancer service, an external IP address will be assigned by the cloud
This address can be retrieved by reviewing the `istio-ingressgateway` Service in `istio-system` Namespace, e.g.:
```bash
-$ kubectl get service istio-ingressgateway -n istio-system
+kubectl get service istio-ingressgateway -n istio-system
+
NAME TYPE CLUSTER-IP EXTERNAL-IP
istio-ingressgateway LoadBalancer 10.240.28.214 a286f5a47e9564e43ab4165039e58e5e-1598660756.eu-central-1.elb.amazonaws.com
```
@@ -162,33 +163,33 @@ This section contains a list of known issues in different Kubeflow components:
**Kubermatic Kubernetes Platform**
-* Not all GPU instances of various providers can be started from the KKP UI:
+- Not all GPU instances of various providers can be started from the KKP UI:
**Istio RBAC in Kubeflow:**
-* If enabled, this issue can be hit in the pipelines:
+- If enabled, this issue can be hit in the pipelines:
**Kubeflow UI issues:**
-* Error by adding notebook server: 500 Internal Server Error:
+- Error by adding notebook server: 500 Internal Server Error:
-* Experiment run status shows as unknown:
+- Experiment run status shows as unknown:
**Kale Pipeline:**
-* "Namespace is empty" exception:
+- "Namespace is empty" exception:
**NVIDIA GPU Operator**
-* Please see the official NVIDIA GPU documentation for known limitations:
+- Please see the official NVIDIA GPU documentation for known limitations:
**AMD GPU Support**
+- The latest AMD GPU-enabled instances in AWS ([EC2 G4ad](https://aws.amazon.com/blogs/aws/new-amazon-ec2-g4ad-instances-featuring-amd-gpus-for-graphics-workloads/))
+- The latest AMD GPU -enabled instances in AWS ([EC2 G4ad](https://aws.amazon.com/blogs/aws/new-amazon-ec2-g4ad-instances-featuring-amd-gpus-for-graphics-workloads/))
featuring Radeon Pro V520 GPUs do not seem to be working with Kubeflow (yet). The GPUs are successfully attached
to the pods but the notebook runtime does not seem to recognize them.
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md
index af587dace..c41fc5d04 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md
@@ -15,6 +15,7 @@ Currently, helm is exclusively supported as a templating method, but integration
Helm Applications can both be installed from helm registries directly or from a git repository.
## Concepts
+
KKP manages Applications using two key mechanisms: [ApplicationDefinitions]({{< ref "./application-definition" >}}) and [ApplicationInstallations]({{< ref "./application-installation" >}}).
`ApplicationDefinitions` are managed by KKP Admins and contain all the necessary information for an application's installation.
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md
index d8455e6e4..24af0167a 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md
@@ -8,8 +8,9 @@ weight = 1
An `ApplicationDefinition` represents a single Application and contains all its versions. It holds the necessary information to install an application.
Two types of information are required to install an application:
-* How to download the application's source (i.e Kubernetes manifest, helm chart...). We refer to this as `source`.
-* How to render (i.e. templating) the application's source to install it into user-cluster. We refer to this as`templating method`.
+
+- How to download the application's source (i.e. Kubernetes manifest, helm chart...). We refer to this as `source`.
+- How to render (i.e. template) the application's source to install it into the user-cluster. We refer to this as `templating method`.
Each version can have a different `source` (`.spec.version[].template.source`) but share the same `templating method` (`.spec.method`).
Here is the minimal example of `ApplicationDefinition`. More advanced configurations are described in subsequent paragraphs.
@@ -43,13 +44,17 @@ spec:
In this example, the `ApplicationDefinition` allows the installation of two versions of apache using the [helm method](#helm-method). Notice that one source originates from a [Helm repository](#helm-source) and the other from a [git repository](#git-source)
## Templating Method
+
Templating Method describes how the Kubernetes manifests are being packaged and rendered.
### Helm Method
+
+This method uses [Helm](https://helm.sh/docs/) to install, upgrade and uninstall the application in the user-cluster.
## Templating Source
+
### Helm Source
+
The Helm Source allows downloading the application's source from a Helm [HTTP repository](https://helm.sh/docs/topics/chart_repository/) or an [OCI repository](https://helm.sh/blog/storing-charts-in-oci/#helm).
The following parameters are required:
@@ -57,8 +62,8 @@ The following parameters are required:
- `chartName` -> Name of the chart within the repository
- `chartVersion` -> Version of the chart; corresponds to the chartVersion field
-
**Example of Helm source with HTTP repository:**
+
```yaml
- template:
source:
@@ -69,6 +74,7 @@ The following parameters are required:
```
**Example of Helm source with OCI repository:**
+
```yaml
- template:
source:
@@ -77,11 +83,12 @@ The following parameters are required:
chartVersion: 1.13.0-rc5
url: oci://quay.io/kubermatic/helm-charts
```
+
For private git repositories, please check the [working with private registries](#working-with-private-registries) section.
Currently, the best way to obtain `chartName` and `chartVersion` for an HTTP repository is to make use of `helm search`:
-```sh
+```bash
# initial preparation
helm repo add
helm repo update
@@ -99,9 +106,11 @@ helm search repo prometheus-community/prometheus --versions --version ">=15"
For OCI repositories, there is currently [no native helm search](https://github.com/helm/helm/issues/9983). Instead, you have to rely on the capabilities of your OCI registry. For example, harbor supports searching for helm-charts directly [in their UI](https://goharbor.io/docs/2.4.0/working-with-projects/working-with-images/managing-helm-charts/#list-charts).
### Git Source
+
The Git source allows you to download the application's source from a Git repository.
**Example of Git Source:**
+
```yaml
- template:
source:
@@ -121,7 +130,6 @@ The Git source allows you to download the application's source from a Git reposi
For private Git repositories, please check the [working with private registries](#working-with-private-registries) section.
-
## Working With Private Registries
For private registries, the Applications Feature supports storing credentials in Kubernetes secrets on the KKP master and referencing those secrets in your ApplicationDefinitions.
@@ -134,67 +142,68 @@ In order for the controller to sync your secrets, they must be annotated with `a
### Git Repositories
KKP supports three types of authentication for Git repositories:
-* `password`: authenticate with a username and password.
-* `Token`: authenticate with a Bearer token
-* `SSH-Key`: authenticate with an ssh private key.
+
+- `password`: authenticate with a username and password.
+- `Token`: authenticate with a Bearer token.
+- `SSH-Key`: authenticate with an SSH private key.
The setup for all three is similar:
1. Create a secret containing your credentials
+   ```bash
+   # inside KKP master
+
+   # user-pass
+   kubectl create secret -n <namespace> generic <secret name> --from-literal=pass=<password> --from-literal=user=<username>
+
+   # token
+   kubectl create secret -n <namespace> generic <secret name> --from-literal=token=<token>
+
+   # ssh-key
+   kubectl create secret -n <namespace> generic <secret name> --from-literal=sshKey=<private key>
+
+   # after creation, annotate the secret so the controller syncs it
+   kubectl annotate secret <secret name> apps.kubermatic.k8c.io/secret-type="git"
+   ```
+
+1. Reference the secret in the ApplicationDefinition
+   ```yaml
+   spec:
+     versions:
+       - template:
+           source:
+             git:
+               path: <path>
+               ref:
+                 branch: <branch>
+               remote: <repository url> # for ssh-key, an ssh url must be chosen (e.g. git@example.com/repo.git)
+               credentials:
+                 method: <password | token | ssh-key>
+                 # user-pass
+                 username:
+                   key: user
+                   name: <secret name>
+                 password:
+                   key: pass
+                   name: <secret name>
+                 # token
+                 token:
+                   key: token
+                   name: <secret name>
+                 # ssh-key
+                 sshKey:
+                   key: sshKey
+                   name: <secret name>
+   ```
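+
+If the controller does not pick up a secret, a missing annotation is the usual culprit. A quick check (a sketch; use the namespace the secret was created in):
+
+```bash
+# prints the secret-type annotation; the output should be "git"
+kubectl get secret <secret name> -n <namespace> \
+  -o jsonpath='{.metadata.annotations.apps\.kubermatic\.k8c\.io/secret-type}'
+```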
-```sh
-# inside KKP master
-
-# user-pass
-kubectl create secret -n generic --from-literal=pass= --from-literal=user=
-
-# token
-kubectl create secret -n generic --from-literal=token=
-
-# ssh-key
-kubectl create secret -n generic --from-literal=sshKey=
-
-# after creation, annotate
-kubectl annotate secret apps.kubermatic.k8c.io/secret-type="git"
-```
-
-2. Reference the secret in the ApplicationDefinition
-
-```yaml
-spec:
- versions:
- - template:
- source:
- git:
- path:
- ref:
- branch:
- remote: # for ssh-key, an ssh url must be chosen (e.g. git@example.com/repo.git)
- credentials:
- method:
- # user-pass
- username:
- key: user
- name:
- password:
- key: pass
- name:
- # token
- token:
- key: token
- name:
- # ssh-key
- sshKey:
- key: sshKey
- name:
-```
#### Compatibility Warning
Be aware that not all authentication methods may be available on your Git server. More and more servers disable authentication with username and password.
Moreover, on some providers like GitHub, you must use the `password` method instead of `token` to authenticate with an access token.
Example of a secret that authenticates with a [GitHub access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#using-a-token-on-the-command-line):
-```sh
+
+```bash
kubectl create secret -n <namespace> generic <secret name> --from-literal=pass=<github access token> --from-literal=user=<github username>
```
@@ -205,73 +214,71 @@ For other providers, please refer to their official documentation.
[Helm OCI registries](https://helm.sh/docs/topics/registries/#enabling-oci-support) are accessed using a JSON configuration similar to `~/.docker/config.json` on the local machine. Note that all OCI server URLs must be prefixed with `oci://`.
1. Create a secret containing your credentials
-
-```sh
-# inside KKP master
-kubectl create secret -n docker-registry --docker-server= --docker-username= --docker-password=
-kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm"
-
-# example
-kubectl create secret -n kubermatic docker-registry --docker-server=harbor.example.com/my-project --docker-username=someuser --docker-password=somepaswword oci-cred
-kubectl annotate secret oci-cred apps.kubermatic.k8c.io/secret-type="helm"
-```
-
-2. Reference the secret in the ApplicationDefinition
-
-```yaml
-spec:
- versions:
- - template:
- source:
- helm:
- chartName: examplechart
- chartVersion: 0.1.0
- credentials:
- registryConfigFile:
- key: .dockerconfigjson # `kubectl create secret docker-registry` stores by default the creds under this key
- name:
- url:
-```
+   ```bash
+   # inside KKP master
+   kubectl create secret -n <namespace> docker-registry <secret name> --docker-server=<server> --docker-username=<user> --docker-password=<password>
+   kubectl annotate secret <secret name> apps.kubermatic.k8c.io/secret-type="helm"
+
+   # example
+   kubectl create secret -n kubermatic docker-registry oci-cred --docker-server=harbor.example.com/my-project --docker-username=someuser --docker-password=somepassword
+   kubectl annotate secret oci-cred apps.kubermatic.k8c.io/secret-type="helm"
+   ```
+
+1. Reference the secret in the ApplicationDefinition
+   ```yaml
+   spec:
+     versions:
+       - template:
+           source:
+             helm:
+               chartName: examplechart
+               chartVersion: 0.1.0
+               credentials:
+                 registryConfigFile:
+                   key: .dockerconfigjson # `kubectl create secret docker-registry` stores the credentials under this key by default
+                   name: <secret name>
+               url: <oci registry url>
+   ```
### Helm Userpass Registries
To use KKP Applications with a Helm [userpass auth](https://helm.sh/docs/topics/registries/#auth) registry, configure the following:
1. Create a secret containing your credentials
-
-```sh
-# inside KKP master
-kubectl create secret -n generic --from-literal=pass= --from-literal=user=
-kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm"
-```
-
-2. Reference the secret in the ApplicationDefinition
-
-```yaml
-spec:
- versions:
- - template:
- source:
- helm:
- chartName: examplechart
- chartVersion: 0.1.0
- credentials:
- password:
- key: pass
- name:
- username:
- key: user
- name:
- url: