diff --git a/.gitignore b/.gitignore index 3010bccd5..40e8fa2cd 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ # IDE specific files .idea/ +.history/ # Hugo .hugo_build.lock diff --git a/.prow.yaml b/.prow.yaml index 7ac9b0d80..d44f6dc0d 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -5,7 +5,7 @@ presubmits: clone_uri: "ssh://git@github.com/kubermatic/docs.git" spec: containers: - - image: quay.io/kubermatic/build:go-1.22-node-18-kind-0.21-2 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4 command: - make args: @@ -21,7 +21,7 @@ presubmits: clone_uri: "ssh://git@github.com/kubermatic/docs.git" spec: containers: - - image: quay.io/kubermatic/build:go-1.22-node-18-kind-0.21-2 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4 command: - "./hack/verify-filenames.sh" resources: @@ -35,7 +35,7 @@ presubmits: clone_uri: "ssh://git@github.com/kubermatic/docs.git" spec: containers: - - image: quay.io/kubermatic/hugo:0.119.0-0 + - image: quay.io/kubermatic/hugo:0.150.0-0 command: - "./hack/ci/verify-hugo.sh" resources: @@ -50,12 +50,12 @@ presubmits: clone_uri: "ssh://git@github.com/kubermatic/docs.git" spec: containers: - - image: quay.io/kubermatic/remark-lint:1.0.0 + - image: quay.io/kubermatic/remark-lint:2.0.0 command: - "./hack/ci/lint-markdown.sh" resources: requests: cpu: 200m - memory: 128Mi + memory: 512Mi limits: - memory: 1Gi + memory: 2Gi diff --git a/Makefile b/Makefile index c3fd54ed5..edf975bdc 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -CODESPELL_IMAGE ?= quay.io/kubermatic/build:go-1.24-node-20-kind-0.27-1 +CODESPELL_IMAGE ?= quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-4 CODESPELL_BIN := $(shell which codespell) DOCKER_BIN := $(shell which docker) @@ -8,7 +8,7 @@ preview: --name kubermatic-docs \ -p 1313:1313 \ -w /docs \ - -v `pwd`:/docs quay.io/kubermatic/hugo:0.119.0-0 \ + -v `pwd`:/docs quay.io/kubermatic/hugo:0.150.0-0 \ hugo server -D -F --bind 0.0.0.0 .PHONY: runbook diff --git a/OWNERS b/OWNERS index 45207248d..0f3eb8bd0 100644 --- a/OWNERS +++ b/OWNERS @@ -8,6 +8,7 @@ approvers: - toschneck - themue - scheeles + - csengerszabo reviewers: - sig-api diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 653d04ddb..7340aecdf 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -21,14 +21,18 @@ aliases: sig-app-management: - ahmedwaleedmalik - archups + - buraksekili - cnvergence - dharapvj - julioc-p + - mohamed-rafraf - simontheleg - soer3n - xrstf sig-cluster-management: + - adoi - ahmedwaleedmalik + - buraksekili - cnvergence - embik - julioc-p @@ -41,6 +45,7 @@ aliases: - xmudrii - xrstf sig-networking: + - adoi - buraksekili - cnvergence - moadqassem diff --git a/config.toml b/config.toml index b35a32d8d..6388bc385 100644 --- a/config.toml +++ b/config.toml @@ -54,7 +54,7 @@ themeVariant = "kubermatic" # Force to have /en/my-page and /fr/my-page routes, even for default language. 
# defaultContentLanguageInSubdir = true
baseWebsiteUrl = "https://www.kubermatic.com/"
-cookiefirstBanner = "e938558d-78b5-4f1b-b574-cd5dfde73684"
+cookiefirstScript = "https://consent.cookiefirst.com/sites/docs.kubermatic.com-e938558d-78b5-4f1b-b574-cd5dfde73684/consent.js"
titleSuffix = "Kubermatic Documentation"
slackJoinLink = "https://join.slack.com/t/kubermatic-community/shared_invite/zt-1jtex2o9f-fpaDZ2ytX7FmDaNOHqljIg"
enableTOC = true
diff --git a/content/developer-platform/platform-users/consuming-services/_index.en.md b/content/developer-platform/platform-users/consuming-services/_index.en.md
index cfa251cab..cb289ae77 100644
--- a/content/developer-platform/platform-users/consuming-services/_index.en.md
+++ b/content/developer-platform/platform-users/consuming-services/_index.en.md
@@ -29,10 +29,10 @@ offered to add it to the organization.

Note that every Service shows:

-* its main title (the human-readable name of a Service, like "Certificate Management")
-* its internal name (ultimately the name of the Kubernetes `Service` object you would need to
+- its main title (the human-readable name of a Service, like "Certificate Management")
+- its internal name (ultimately the name of the Kubernetes `Service` object you would need to
   manually enable the service using `kubectl`)
-* a short description
+- a short description

Check out [Your First Service]({{< relref "../../tutorials/your-first-service/" >}}) if you want
to publish services by yourself.
@@ -102,5 +102,5 @@ spec:

Rejecting a claim will severely impact a Service, if not even break it. Consult with the Service's
documentation or the service owner if rejecting a claim is supported.

-When you _change into_ (`kubctl ws …`) a different workspace, kubectl will inform you if there are
+When you *change into* (`kubectl ws …`) a different workspace, kubectl will inform you if there are
outstanding permission claims that you need to accept or reject.
diff --git a/content/developer-platform/service-providers/api-syncagent/_index.en.md b/content/developer-platform/service-providers/api-syncagent/_index.en.md
index 8d0f84edf..4e5a5c7fc 100644
--- a/content/developer-platform/service-providers/api-syncagent/_index.en.md
+++ b/content/developer-platform/service-providers/api-syncagent/_index.en.md
@@ -15,28 +15,28 @@ The intended usecase follows roughly these steps:
   workspace. This service (not to be confused with Kubernetes services) reserves an API group in
   the organization for itself, like `databases.example.corp` (two `Services` must not register the
   same API group).
-2. After the `Service` is created, KDP will reconcile it, create an `APIExport` object and provide
+1. After the `Service` is created, KDP will reconcile it, create an `APIExport` object and provide
   appropriate credentials for the api-syncagent (e.g. by creating a Kubernetes Secret with a
   preconfigured kubeconfig in it).
-3. A service owner will now take these credentials and the configured API group and use them
+1. A service owner will now take these credentials and the configured API group and use them
   to setup the api-syncagent. It is assumed that the service owner (i.e. the cluster-admin in a
   service cluster) wants to make some resources (usually CRDs) available to use inside of KDP.
-4. The service owner uses the api-syncagent Helm chart (or similar deployment technique) to install
+1. The service owner uses the api-syncagent Helm chart (or similar deployment technique) to install
   the agent in their cluster.
-5. 
To actually make resources available in the platform, the service owner now has to create a +1. To actually make resources available in the platform, the service owner now has to create a set of `PublishedResource` objects. The configuration happens from their point of view, meaning they define how to publish a CRD in the platform, defining renaming rules and other projection settings. -6. Once a `PublishedResource` is created in the service cluster, the agent will pick it up, +1. Once a `PublishedResource` is created in the service cluster, the agent will pick it up, find the referenced CRD, convert/project this CRD into an `APIResourceSchema` (ARS) for kcp and then create the ARS in org workspace. -7. Finally the api-syncagent will take all `PublishedResources` and bundle them into the pre-existing +1. Finally the api-syncagent will take all `PublishedResources` and bundle them into the pre-existing `APIExport` in the org workspace. This APIExport can then be bound in the org workspace itself (or later any sub workspaces (depending on permissions)) and be used there. The `APIExport` has the same name as the KDP `Service` the agent is working with. -8. kcp automatically provides a virtual workspace for the `APIExport` and this is what the agent +1. kcp automatically provides a virtual workspace for the `APIExport` and this is what the agent then uses to watch all objects for the relevant resources in the platform (i.e. in all workspaces). -9. The api-syncagent will now begin to synchronize objects back and forth between the service cluster +1. The api-syncagent will now begin to synchronize objects back and forth between the service cluster and KDP. ## Details @@ -49,7 +49,7 @@ with making their CRDs available in KDP (i.e. "publish" them). However the actual data flow later will work in the opposite direction: users creating objects inside their kcp workspaces serve as the source of truth. From there they are synced down to the service -cluster, which is doing the projection of the `PublishedResource` _in reverse_. +cluster, which is doing the projection of the `PublishedResource` *in reverse*. Of course additional, auxiliary (related) objects could originate on the service cluster. For example if you create a Certificate object in a kcp workspace and it's synced down, cert-manager will then diff --git a/content/developer-platform/service-providers/crossplane/_index.en.md b/content/developer-platform/service-providers/crossplane/_index.en.md index 4a51d4f42..19f1b4174 100644 --- a/content/developer-platform/service-providers/crossplane/_index.en.md +++ b/content/developer-platform/service-providers/crossplane/_index.en.md @@ -4,419 +4,6 @@ linkTitle = "Using Crossplane" weight = 2 +++ -The guide describes the process of making a resource (usually defined by a CustomResourceDefinition) -of one Kubernetes cluster (the "service cluster" or "local cluster") available for use in the KDP -platform (the "platform cluster" or "KDP workspaces"). This involves setting up a KDP Service and -then installing the kcp api-syncagent and defining `PublishedResources` in the local cluster. - -All of the documentation and API types are worded and named from the perspective of a service owner, -the person(s) who own a service and want to make it available to consumers in the KDP platform. - -## High-level Overview - -A "service" in KDP comprises a set of resources within a single Kubernetes API group. 
It doesn't -need to be _all_ of the resources in that group, service owners are free and encouraged to only make -a subset of resources (i.e. a subset of CRDs) available for use in the platform. - -For each of the CRDs on the service cluster that should be published, the service owner creates a -`PublishedResource` object, which will contain both which CRD to publish, as well as numerous other -important settings that influence the behaviour around handling the CRD. - -When publishing a resource (CRD), exactly one version is published. All others are ignored from the -standpoint of the resource synchronization logic. - -All published resources together form the KDP Service. When a service is enabled in a workspace -(i.e. it is bound to it), users can manage objects for the projected resources described by the -published resources. These objects will be synced from the workspace onto the service cluster, -where they are meant to be processed in whatever way the service owners desire. Any possible -status information (in the `status` subresource) will in turn be synced back up into the workspace -where the user can inspect it. - -Additionally, a published resource can describe additional so-called "related resources". These -usually originate on the service cluster and could be for example connection detail secrets created -by Crossplane, but could also originate in the user workspace and just be additional, auxiliary -resources that need to be synced down to the service cluster. - -### `PublishedResource` - -In its simplest form (which is rarely practical) a `PublishedResource` looks like this: - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs # name can be freely chosen -spec: - resource: - kind: Certificate - apiGroup: cert-manager.io - version: v1 -``` - -However, you will most likely apply more configuration and use features described below. - -### Filtering - -The api-syncagent can be instructed to only work on a subset of resources in the KDP platform. This -can be restricted by namespace and/or label selector. - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs # name can be freely chosen -spec: - resource: ... - filter: - namespace: my-app - resource: - matchLabels: - foo: bar -``` - -### Schema - -**Warning:** The actual CRD schema is always copied verbatim. All projections -etc. have to take into account that the resource contents must be expressible without changes to the -schema. - -### Projection - -For stronger separation of concerns and to enable whitelabelling of services, the type meta for -can be projected, i.e. changed between the local service cluster and the KDP platform. You could -for example rename `Certificate` from cert-manager to `Zertifikat` inside the platform. - -Note that the API group of all published resources is always changed to the one defined in the -KDP `Service` object (meaning 1 api-syncagent serves all the published resources under the same API -group). That is why changing the API group cannot be configured in the projection. - -Besides renaming the Kind and Version, dependent fields like Plural, ShortNames and Categories -can be adjusted to fit the desired naming scheme in the platform. The Plural name is computed -automatically, but can be overridden. ShortNames and Categories are copied unless overwritten in the -`PublishedResource`. - -It is also possible to change the scope of resources, i.e. 
turning a namespaced resource into a -cluster-wide. This should be used carefully and might require extensive mutations. - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs # name can be freely chosen -spec: - resource: ... - projection: - version: v1beta1 - kind: Zertifikat - plural: Zertifikate - shortNames: [zerts] - # categories: [management] - # scope: Namespaced # change only when you know what you're doing -``` - -Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK -projection applies only to the synced object itself and has no effect on the contents of these -objects. To change the contents, use external solutions like Crossplane to transform objects. - - -### Naming - -Since the api-syncagent ingests resources from many different Kubernetes clusters (workspaces) and -combines them onto a single cluster, resources have to be renamed to prevent collisions and also -follow the conventions of whatever tooling ultimately processes the resources locally. - -The renaming is configured in `spec.naming`. In there, renaming patterns are configured, where -pre-defined placeholders can be used, for example `foo-$placeholder`. The following placeholders -are available: - -* `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2") -* `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace -* `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace` -* `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct - local namespace names) -* `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName` - -If nothing is configured, the default ensures that no collisions will happen: Each workspace in -the platform will create a namespace on the local cluster, with a combination of namespace and -name hashes used for the actual resource names. - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs # name can be freely chosen -spec: - resource: ... - naming: - namespace: "$remoteClusterName" - name: "cert-$remoteNamespaceHash-$remoteNameHash" -``` - -### Related Resources - -The processing of resources on the service cluster often leads to additional resources being -created, like a `Secret` for each cert-manager `Certificate` or a connection detail secret created -by Crossplane. These need to be made available to the user in their workspaces. - -Likewise it's possible for auxiliary resources having to be created by the user, for example when -the user has to provide credentials. - -To handle these cases, a `PublishedResource` can define multiple "related resources". Each related -resource currently represents exactly one object to synchronize between user workspace and service -cluster (i.e. you cannot express "sync all Secrets"). While the main published resource sync is -always workspace->service cluster, related resources can originate on either side and so either can -work as the source of truth. - -At the moment, only `ConfigMaps` and `Secrets` are allowed related resource kinds. - -For each related resource, the api-syncagent needs to be told the name/namespace. This is done by -selecting a field in the main resource (for a `Certificate` this would mean `spec.secretName`). 
Both -name and namespace need to be part of the main object (or be fixed values, like a hardcoded -`kube-system` namespace). - -The path expressions for name and namespace are evaluated against the main object on either side -to determine their values. So if you had a `Certificate` in your workspace with -`spec.secretName = "my-cert"` and after syncing it down, the copy on the service cluster has a -rewritten/mutated `spec.secretName = "jk23h4wz47329rz2r72r92-cert"` (e.g. to prevent naming -collisions), the expression `spec.secretName` would yield `"my-cert"` for the name in the workspace -and `"jk...."` as the name on the service cluster. Once the object exists with that name on the -originating side, the api-syncagent will begin to sync it to the other side. - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs -spec: - resource: - kind: Certificate - apiGroup: cert-manager.io - version: v1 - - naming: - # this is where our CA and Issuer live in this example - namespace: kube-system - # need to adjust it to prevent collions (normally clustername is the namespace) - name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash" - - related: - - origin: service # service or platform - kind: Secret # for now, only "Secret" and "ConfigMap" are supported; - # there is no GVK projection for related resources - - # configure where in the parent object we can find - # the name/namespace of the related resource (the child) - reference: - name: - # This path is evaluated in both the local and remote objects, to figure out - # the local and remote names for the related object. This saves us from having - # to remember mutated fields before their mutation (similar to the last-known - # annotation). - path: spec.secretName - - # namespace part is optional; if not configured, - # api-syncagent assumes the same namespace as the owning resource - # - # namespace: - # path: spec.secretName - # regex: - # pattern: '...' - # replacement: '...' - # - # to inject static values, select a meaningless string value - # and leave the pattern empty - # - # namespace: - # path: metadata.uid - # regex: - # replacement: kube-system -``` - -## Examples - -### Provide Certificates - -This combination of `Service` and `PublishedResource` make cert-manager certificates available in -kcp. The `Service` needs to be created in a workspace, most likely in an organization workspace. -The `PublishedResource` is created wherever the api-syncagent and cert-manager are running. - -```yaml -apiVersion: core.kdp.k8c.io/v1alpha1 -kind: Service -metadata: - name: certificate-management -spec: - apiGroup: certificates.example.corp - catalogMetadata: - title: Certificate Management - description: Acquire certificates signed by Example Corp's internal CA. 
-``` - -```yaml -apiVersion: services.kdp.k8c.io/v1alpha1 -kind: PublishedResource -metadata: - name: publish-certmanager-certs -spec: - resource: - kind: Certificate - apiGroup: cert-manager.io - version: v1 - - naming: - # this is where our CA and Issuer live in this example - namespace: kube-system - # need to adjust it to prevent collions (normally clustername is the namespace) - name: "$remoteClusterName-$remoteNamespaceHash-$remoteNameHash" - - related: - - origin: service # service or platform - kind: Secret # for now, only "Secret" and "ConfigMap" are supported; - # there is no GVK projection for related resources - - # configure where in the parent object we can find - # the name/namespace of the related resource (the child) - reference: - name: - # This path is evaluated in both the local and remote objects, to figure out - # the local and remote names for the related object. This saves us from having - # to remember mutated fields before their mutation (similar to the last-known - # annotation). - path: spec.secretName - # namespace part is optional; if not configured, - # api-syncagent assumes the same namespace as the owning resource - # namespace: - # path: spec.secretName - # regex: - # pattern: '...' - # replacement: '...' -``` - -## Technical Details - -The following sections go into more details of the behind the scenes magic. - -### Synchronization - -Even though the whole configuration is written from the standpoint of the service owner, the actual -synchronization logic considers the platform side as the canonical source of truth. The api-syncagent -continuously tries to make the local objects look like the ones in the platform, while pushing -status updates back into the platform (if the given `PublishedResource` (i.e. CRD) has a `status` -subresource enabled). - -### Local <-> Remote Connection - -The api-syncagent tries to keep KDP-related metadata on the service cluster, away from the consumers. -This is both to prevent vandalism and to hide implementation details. - -To ensure stability against future changes, once KDP has determined how a local object should be -named, it will remember this decision in its metadata. This is so that on future reconciliations, -the (potentially costly, but probably not) renaming logic does not need to be applied again. This -allows the api-syncagent to change defaults and also allows the service owner to make changes to the -naming rules without breaking existing objects. - -Since we do not want to store metadata on the platform side, we instead rely on label selectors on -the local objects. Each local object has a label for the remote cluster name, namespace and object -name, and when trying to find the matching local object, the api-syncagent simply does a label-based -search. - -There is currently no sync-related metadata available on source objects, as this would either be -annotations (untyped strings...) or require schema changes to allow additional fields in basically -random CRDs. - -Note that fields like `generation` or `resourceVersion` are not relevant for any of the sync logic. - -### Reconcile Loop - -The sync loop can be divided into 5 parts: - -1. find the local object -2. handle deletion -3. ensure the destination object exists -4. ensure the destination object's content matches the source object -5. 
synchronize related resources the same way (repeat 1-4 for each related resource) - -#### Phase 1: Find the Local Object - -For this, as mentioned in the connection chapter above, the api-syncagent tries to follow label -selectors on the local cluster. This helps prevent cluttering with consumer workspaces with KDP -metadata. If no object is found to match the labels, that's fine and the loop will continue with -phase 2, in which a possible Conflict error (if labels broke) is handled gracefully. - -The remote object in the workspace becomes the `source object` and its local equivalent is called -the `destination object`. - -#### Phase 2: Handle Deletion - -A finalizer is used in the platform workspaces to prevent orphans in the service cluster side. This -is the only real evidence in the platform side that the api-syncagent is even doing things. When a -remote (source) object is deleted, the corresponding local object is deleted as well. Once the local -object is gone, the finalizer is removed from the source object. - -#### Phase 3: Ensure Object Existence - -We have a source object and now need to create the destination. This chart shows what's happening. - -```mermaid -graph TB - A(source object):::state --> B([cleanup if in deletion]):::step - B --> C([ensure finalizer on source object]):::step - C --> D{exists local object?} - - D -- yes --> I("continue with next phase…"):::state - D -- no --> E([apply projection]):::step - - subgraph "ensure dest object exists" - E --> G([ensure resulting namespace exists]):::step - G --> H([create local object]):::step - H --> H_err{Errors?} - H_err -- Conflict --> J([attempt to adopt existing object]):::step - end - - H_err -- success --> I - J --> I - - classDef step color:#77F - classDef state color:#F77 -``` - -After we followed through with these steps, both the source and destination objects exists and we -can continue with phase 4. - -Resource adoption happens when creation of the initial local object fails. This can happen when labels -get mangled. If such a conflict happens, the api-syncagent will "adopt" the existing local object by -adding / fixing the labels on it, so that for the next reconciliation it will be found and updated. - -#### Phase 4: Content Synchronization - -Content synchronization is rather simple, really. - -First the source "spec" is used to patch the local object. Note that this step is called "spec", but -should actually be called "all top-level elements besides `apiVersion`, `kind`, `status` and -`metadata`, but still including some labels and annotations"; so if you were to publish RBAC objects, -the syncer would include `roleRef` field, for example). - -To allow proper patch generation, the last known state of an object is stored in a dedicated Secret. -This functions just like the one kubectl uses and is required for the api-syncagent to properly detect -changes made by mutation webhooks, but uses a Secret instead of annotations because state needs to -be kept for more objects (like related resources) and not always on the destination objects. - -If the published resource (CRD) has a `status` subresource enabled (not just a `status` field in its -scheme, it must be a real subresource), then the api-syncagent will copy the status from the local -object back up to the remote (source) object. - -#### Phase 5: Sync Related Resources - -The same logic for synchronizing the main published resource applies to their related resources as -well. 
The only difference is that the source side can be either remote (workspace) or local -(service cluster). - -This currently also means that sync-related metadata, which is always kept on the object's copy, -will end up in the user workspace when a related object originates on the service cluster (the -most common usecase). In a future version it could be nice to keep the sync state only on the -service cluster side, away from the users. -# Publishing resources with Crossplane - This guide describes the process of leveraging Crossplane as a service provider to make Crossplane claims available as `PublishedResources` for use in KDP. This involves installing Crossplane - including all required Crossplane [providers][crossplane/docs/providers] and @@ -435,11 +22,11 @@ platform users. > While this guide is not intended to be a comprehensive Crossplane guide, it is useful to be aware > of the most common terms: > -> * **Providers** are pluggable building blocks to provision and manage resources via a third-party API (e.g. AWS provider) -> * **Managed resources** (MRs) are representations of actual, provider-specific resources (e.g. EC2 instance) -> * **Composite resource definitions** (XRDs) are Crossplane-specific definitions of API resources (similar to CRDs) -> * **Composite resources** (XRs) and **Claims** are Crossplane-specific custom resources created from XRD objects (similar to CRs) -> * **Compositions** are Crossplane-specific templates for transforming a XR object into one or more MR object(s) +> - **Providers** are pluggable building blocks to provision and manage resources via a third-party API (e.g. AWS provider) +> - **Managed resources** (MRs) are representations of actual, provider-specific resources (e.g. EC2 instance) +> - **Composite resource definitions** (XRDs) are Crossplane-specific definitions of API resources (similar to CRDs) +> - **Composite resources** (XRs) and **Claims** are Crossplane-specific custom resources created from XRD objects (similar to CRs) +> - **Compositions** are Crossplane-specific templates for transforming a XR object into one or more MR object(s) This guide will show you how to install Crossplane and all required providers on a service cluster and provide a stripped-down `Certificate` resource in KDP. While we ultimately use cert-manager to @@ -472,7 +59,7 @@ helm upgrade crossplane crossplane \ Once the installation is done, verify the status with the following command: ```bash -$ kubectl get pods --namespace=crossplane-system +kubectl get pods --namespace=crossplane-system NAME READY STATUS RESTARTS AGE crossplane-6494656b8b-bflcf 1/1 Running 0 45s crossplane-rbac-manager-8458557cdd-sls58 1/1 Running 0 45s @@ -516,7 +103,7 @@ EOF Once the provider is installed, verify the provider status with the following command: ```bash -$ kubectl get providers crossplane-provider-kubernetes +kubectl get providers crossplane-provider-kubernetes NAME INSTALLED HEALTHY PACKAGE AGE crossplane-provider-kubernetes True True xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.11.1 104s ``` @@ -577,9 +164,9 @@ Crossplane specific `Certificate` object. Create and apply the following three manifests to your service cluster (you can safely ignore the misleading warnings from Crossplane regarding the validation of the composition). 
This will -* bootstrap a cert-manager `ClusterIssuer` named "default-ca", -* create a Crossplane `CompositeResourceDefinition` that defines our `Certificate` resource (which exposes only the requested common name), -* create a Crossplane `Composition` that uses cert-manager and the created "default-ca" to issue the requested certificate +- bootstrap a cert-manager `ClusterIssuer` named "default-ca", +- create a Crossplane `CompositeResourceDefinition` that defines our `Certificate` resource (which exposes only the requested common name), +- create a Crossplane `Composition` that uses cert-manager and the created "default-ca" to issue the requested certificate ```bash kubectl apply --filename=cluster-issuer.yaml @@ -625,6 +212,7 @@ spec: ca: secretName: default-ca ``` +
@@ -670,6 +258,7 @@ spec: type: string minLength: 1 ``` +
@@ -778,13 +367,14 @@ spec: fromConnectionSecretKey: tls.key writeConnectionSecretsToNamespace: crossplane-system ``` +
Afterwards verify the status of the composite resource definition and the composition with the following command: ```bash -$ kubectl get compositeresourcedefinitions,compositions +kubectl get compositeresourcedefinitions,compositions NAME ESTABLISHED OFFERED AGE xcertificates.pki.xaas.k8c.io True True 10s @@ -859,7 +449,7 @@ graph RL If everything worked out, you should get all relevant objects with the following command: ```bash -$ kubectl get claim,composite,managed,certificate +kubectl get claim,composite,managed,certificate NAME SYNCED READY CONNECTION-SECRET AGE certificate.pki.xaas.k8c.io/www-example-com True True www-example-com 21m diff --git a/content/developer-platform/service-providers/publish-resources/_index.en.md b/content/developer-platform/service-providers/publish-resources/_index.en.md index df81e2412..0b94f54d0 100644 --- a/content/developer-platform/service-providers/publish-resources/_index.en.md +++ b/content/developer-platform/service-providers/publish-resources/_index.en.md @@ -14,7 +14,7 @@ the person(s) who own a service and want to make it available to consumers in th ## High-level Overview A "service" in KDP comprises a set of resources within a single Kubernetes API group. It doesn't -need to be _all_ of the resources in that group, service owners are free and encouraged to only make +need to be *all* of the resources in that group, service owners are free and encouraged to only make a subset of resources (i.e. a subset of CRDs) available for use in the platform. For each of the CRDs on the service cluster that should be published, the service owner creates a @@ -117,6 +117,7 @@ spec: Consumers (end users) in the platform would then ultimately see projected names only. Note that GVK projection applies only to the synced object itself and has no effect on the contents of these objects. To change the contents, use external solutions like Crossplane to transform objects. + ### (Re-)Naming @@ -129,12 +130,12 @@ The renaming is configured in `spec.naming`. In there, renaming patterns are con pre-defined placeholders can be used, for example `foo-$placeholder`. The following placeholders are available: -* `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2") -* `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace -* `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace` -* `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct +- `$remoteClusterName` – the KDP workspace's cluster name (e.g. "1084s8ceexsehjm2") +- `$remoteNamespace` – the original namespace used by the consumer inside the KDP workspace +- `$remoteNamespaceHash` – first 20 hex characters of the SHA-1 hash of `$remoteNamespace` +- `$remoteName` – the original name of the object inside the KDP workspace (rarely used to construct local namespace names) -* `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName` +- `$remoteNameHash` – first 20 hex characters of the SHA-1 hash of `$remoteName` If nothing is configured, the default ensures that no collisions will happen: Each workspace in the platform will create a namespace on the local cluster, with a combination of namespace and @@ -160,10 +161,10 @@ These can be configured in a number of way in the `PublishedResource`. 
Configuration happens `spec.mutation` and there are two fields: -* `spec` contains the mutation rules when syncing the desired state (often in `spec`, but can also +- `spec` contains the mutation rules when syncing the desired state (often in `spec`, but can also be other top-level fields) from the remote side to the local side. Use this to apply defaulting, normalising, and enforcing rules. -* `status` contains the mutation rules when syncing the `status` subresource back from the local +- `status` contains the mutation rules when syncing the `status` subresource back from the local cluster up into the platform. Use this to normalize names and values (e.g. if you rewrote `.spec.secretName` from `"foo"` to `"dfkbssbfh"`, make sure the status does not "leak" this name by accident). @@ -401,10 +402,10 @@ Note that fields like `generation` or `resourceVersion` are not relevant for any The sync loop can be divided into 5 parts: 1. find the local object -2. handle deletion -3. ensure the destination object exists -4. ensure the destination object's content matches the source object -5. synchronize related resources the same way (repeat 1-4 for each related resource) +1. handle deletion +1. ensure the destination object exists +1. ensure the destination object's content matches the source object +1. synchronize related resources the same way (repeat 1-4 for each related resource) #### Phase 1: Find the Local Object @@ -481,6 +482,6 @@ well. The only difference is that the source side can be either remote (workspac (service cluster). Since the Sync Agent tries its best to keep sync-related data out of kcp workspaces, the last known -state for related resources is _not_ kept together with the destination object in the kcp workspaces. +state for related resources is *not* kept together with the destination object in the kcp workspaces. Instead all known states (from the main object and all related resources) is kept in a single Secret on the service cluster side. diff --git a/content/developer-platform/setup/_index.en.md b/content/developer-platform/setup/_index.en.md new file mode 100644 index 000000000..716d3a8f2 --- /dev/null +++ b/content/developer-platform/setup/_index.en.md @@ -0,0 +1,4 @@ ++++ +title = "Setup" +weight = 3 ++++ diff --git a/content/developer-platform/setup/ai-agent/_index.en.md b/content/developer-platform/setup/ai-agent/_index.en.md new file mode 100644 index 000000000..4447f91d9 --- /dev/null +++ b/content/developer-platform/setup/ai-agent/_index.en.md @@ -0,0 +1,139 @@ ++++ +title = "AI Agent" +weight = 3 ++++ + +## Overview + +The Kubermatic Developer Platform AI Agent is a specialized assistant that helps users generate Kubernetes resource YAML files through natural language within KDP workspaces. It converts requests in natural language into properly formatted Kubernetes manifests, eliminating the need to manually write lengthy YAML files from scratch. + +## Prerequisites + +Before installing the AI Agent, ensure you have: + +- A running KDP installation on your Kubernetes cluster +- OpenAI API key for the language model capabilities +- OIDC provider configured (same one used by KDP) + +## Installation + +The AI Agent is deployed using Helm. 
Follow these steps to install it:

### Prepare the Configuration

Create an `ai-agent.values.yaml` file with your specific configuration:

```yaml
aiAgent:
  imagePullSecret: |
    {
      "auths": {
        "quay.io": {
          "auth": "",
          "email": ""
        }
      }
    }

  config:
    oidc:
      clientID: kdp-kubelogin
      clientSecret: 
      issuerURL: https://login.
    kubernetes_api_url: https://api.
    openai_api_key: "" # OpenAI API key for the language model

  ingress:
    create: true
    host:  # Use same domain as the frontend to avoid CORS errors
    prefix: /ai-agent(/|$)(.*)
    certIssuer:
      kind: ClusterIssuer
      name: letsencrypt-prod
```

Before deploying the AI Agent, you need to replace the following placeholder variables in the `ai-agent.values.yaml` file with your own values:

- ``
- ``
- ``

The `` placeholder **must** be replaced with the value set in Dex and configured in the `dex.values.yaml` file.

### Install with Helm

Now that all placeholders are replaced, deploy the KDP AI Agent Helm chart.
To log into the Helm registry, use your email address as the username and the license key you received as the password.

```bash
helm registry login quay.io
helm upgrade --install kdp-ai-agent \
  oci://quay.io/kubermatic/helm-charts/developer-platform-ai-agent \
  --version=0.9.0 \
  --create-namespace \
  --namespace=kdp-system \
  --values=ai-agent.values.yaml
```

### Configure the Dashboard

To make the AI Agent accessible from the KDP Dashboard, you need to update the `values.yaml` file for your **dashboard deployment**. Assuming you followed the quickstart, this file would be `kdp-dashboard.values.yaml`.

You will need to edit it to activate the AI Agent feature and set the backend URL.

```yaml
dashboard:
  config:
    features:
      aiAgent:
        enabled: true
        generatorURL: /ai-agent/ # same domain as the host
```

You'll need to replace ``.

Then update the release of your KDP dashboard. If you followed the quickstart, it is called `kdp-dashboard` in the `kdp-system` namespace, so you would first log in to the registry and then upgrade:

```bash
helm registry login quay.io
helm upgrade --install kdp-dashboard \
  oci://quay.io/kubermatic/helm-charts/developer-platform-dashboard \
  --version=0.9.0 \
  --create-namespace \
  --namespace=kdp-system \
  --values=kdp-dashboard.values.yaml
```

After this, you will need to delete the dashboard pods manually so they are redeployed and pick up the new values. You can find them by the label `app.kubernetes.io/name: kdp-dashboard` and delete them with:

```bash
kubectl delete pods -l app.kubernetes.io/name=kdp-dashboard -n kdp-system
```

### Verify the Installation

Once the pods start, you can use the AI Agent in the frontend.

A purple button should be visible in the form to create a new service object within a workspace.

![Button for AI Agent](ai-agent-button.png)

Then, once clicked, a text field will be visible where you can describe the resource you want.

Here is an example after writing a prompt and clicking on `Generate`:

![Example prompt](ai-agent-prompt-example.png)

After a few seconds you should get the result:

![AI Agent response](ai-agent-example-response.png)

You can then edit and modify the result if you like, in the form or directly in the YAML.

You also do not have to worry about getting a wrong schema, since it is validated in the backend: there will be no hallucinated fields and no missing required fields.
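If you also want to confirm that the agent pods came up before testing in the frontend, you can look them up by label, just like the dashboard pods above. The label value below is an assumption based on the `app.kubernetes.io/name` convention the dashboard chart uses; check your release's actual labels if nothing matches:

```bash
# Assumed label value -- list the actual labels with:
#   kubectl get pods -n kdp-system --show-labels
kubectl get pods -n kdp-system -l app.kubernetes.io/name=kdp-ai-agent
```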
**Note:** Please be sure to check the values and the YAML in general before submitting. AI can make mistakes.
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-button.png b/content/developer-platform/setup/ai-agent/ai-agent-button.png
new file mode 100644
index 000000000..8e5fe331a
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-button.png differ
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-example-response.png b/content/developer-platform/setup/ai-agent/ai-agent-example-response.png
new file mode 100644
index 000000000..d5bf9d542
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-example-response.png differ
diff --git a/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png b/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png
new file mode 100644
index 000000000..a9e1993cf
Binary files /dev/null and b/content/developer-platform/setup/ai-agent/ai-agent-prompt-example.png differ
diff --git a/content/developer-platform/setup/quickstart/_index.en.md b/content/developer-platform/setup/quickstart/_index.en.md
new file mode 100644
index 000000000..cdbfbf8cc
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/_index.en.md
@@ -0,0 +1,262 @@
+++
title = "Quickstart"
weight = 1
+++

This quickstart provides the steps to install the Kubermatic Developer Platform (KDP) on an existing Kubernetes cluster.
You'll use Helm to deploy KDP and its core components, including Dex for user authentication and kcp as the central control plane.
You will also set up automated TLS certificate management with cert-manager and Let's Encrypt.
By the end, you will have a fully functional KDP installation, accessible through the KDP dashboard as well as directly with kubectl.

## Prerequisites

{{% notice note %}}
At the moment, you need to be invited to get access to Kubermatic's Docker repository before you can install the Kubermatic Developer Platform.
Please [contact sales](mailto:sales@kubermatic.com) to receive your credentials.
{{% /notice %}}

To follow this guide, you need:

- an existing Kubernetes cluster with at least 3 nodes
- a running CSI driver with a default storage class
- a running [cert-manager][cert-manager/docs/installation] installation
- a running ingress controller (for this guide, the [NGINX ingress controller][ingress-nginx/docs/installation] is required)
- [kubectl][k8s/docs/tools/installation] and [Helm][helm/docs/installation] (version 3) installed locally

## Installation

The installation is divided into five main steps, each deploying a core component of KDP.
You will perform the following tasks:

- **Set up certificates**: First, you will configure a cert-manager issuer to automatically obtain and renew TLS certificates from Let's Encrypt.

- **Deploy an identity provider**: Next, you will deploy Dex to handle user authentication, creating a central login service for both the KDP dashboard and command-line access.

- **Deploy kcp**: You will deploy kcp, the core engine that enables multi-tenancy by providing isolated, secure workspaces for your users.

- **Deploy KDP**: Afterwards, you will install the main KDP controllers that connect to kcp and manage the platform's resources.

- **Launch the KDP dashboard**: Finally, you will deploy the KDP dashboard, the primary graphical interface for developers to interact with the platform and manage their service objects (the commands are sketched right after this list).
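For orientation, the five steps condense to the commands below. Each one is shown in full, together with its values file, in the sections that follow:

```bash
# 1. Certificates: Let's Encrypt ClusterIssuer for cert-manager
kubectl apply -f ./cluster-issuer.yaml

# 2. Identity provider (Dex)
helm upgrade --install dex dex --repo=https://charts.dexidp.io \
  --version=0.23.0 --create-namespace --namespace=kdp-system --values=dex.values.yaml

# 3. Control plane (kcp)
helm upgrade --install kcp kcp --repo=https://kcp-dev.github.io/helm-charts \
  --version=0.11.1 --create-namespace --namespace=kdp-system --values=kcp.values.yaml

# 4. KDP controllers (run "helm registry login quay.io" first)
helm upgrade --install kdp oci://quay.io/kubermatic/helm-charts/developer-platform \
  --version=0.9.0 --create-namespace --namespace=kdp-system --values=kdp.values.yaml

# 5. Dashboard
helm upgrade --install kdp-dashboard oci://quay.io/kubermatic/helm-charts/developer-platform-dashboard \
  --version=0.9.0 --create-namespace --namespace=kdp-system --values=kdp-dashboard.values.yaml
```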
Throughout this guide, you will need to replace several placeholder variables in the Helm values files.
Below is a description of each value you need to provide.

- ``: Your email address, used by Let's Encrypt to send notifications about your TLS certificate status.
- ``: A base64-encoded password or token for the quay.io container registry. This is required for you to get access to the KDP Helm charts and container images.
- ``: The primary public domain name you will use to access your KDP installation (e.g., kdp.my-company.com). You must own this domain and be able to configure its DNS records.
- ``: A generated bcrypt hash of the password you choose for the initial admin user.
- ``: A randomly generated, secure string that acts as a password for the KDP dashboard to authenticate with the Dex identity provider.
- ``: A second, unique random string used by the KDP dashboard itself to encrypt user session cookies, adding another layer of security.

### Create ClusterIssuer

First, you need to create a *ClusterIssuer* named `letsencrypt-prod` for cert-manager.
This automates the process of obtaining and renewing TLS certificates from Let's Encrypt, ensuring all web-facing components like the Dex login page and the KDP dashboard are served securely over HTTPS.

Save the following content to a file named `cluster-issuer.yaml`, and change the value of the `email` field to your email address:

```yaml
{{< readfile "developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml" >}}
```

Create the *ClusterIssuer* by applying the manifest:

```bash
kubectl apply -f ./cluster-issuer.yaml
```

### Deploy Dex

Now, you'll deploy Dex as the platform's central identity provider.
It handles all user logins and authentication.
The provided configuration creates an initial admin user and prepares Dex for the integration with the KDP dashboard and [kubelogin][kubelogin/src/readme] for seamless user authentication.

Save the following content to a file named `dex.values.yaml`:

```yaml
{{< readfile "developer-platform/setup/quickstart/data/dex.values.yaml" >}}
```

Before deploying Dex, you need to replace the following placeholder variables in the `dex.values.yaml` file with your own values:

- ``
- ``
- ``

For the initial admin user, you must provide your own password as a bcrypt hash in ``.
To create this hash, you can use the `htpasswd` utility, which is part of the Apache web server tools and available on most Linux distributions (you may need to install a package like "apache2-utils" or "httpd-tools").

Choose a strong password and run the following command in your terminal, replacing YOUR_PASSWORD with the password you've selected:

```bash
echo 'YOUR_PASSWORD' | htpasswd -inBC 10 admin | cut -d: -f2
```

Copy the entire output string (it will start with `$2a$` or `$2y$`) and paste it as the value for `` in your `dex.values.yaml` file.
Remember to save the plain-text password you chose in a secure location, as you will need it to log in to the KDP dashboard.

The `` placeholder must be replaced with a long, random string that the KDP dashboard and kubelogin use to securely communicate with Dex.
You can generate a secure, random string with the following command:

```bash
cat /dev/urandom | base64 | tr -dc 'A-Za-z0-9' | head -c32
```

This will output a random string that you can copy and paste as the value for ``.
Save the value for later use when you deploy the KDP dashboard.
Once you've replaced all placeholders, deploy the Dex Helm chart:

```bash
helm upgrade --install dex dex \
  --repo=https://charts.dexidp.io \
  --version=0.23.0 \
  --create-namespace \
  --namespace=kdp-system \
  --values=dex.values.yaml
```

### Deploy kcp

Next, you'll install kcp.
It acts as the central control plane for KDP that provides and manages the isolated workspaces for each user or team, ensuring resources are kept separate and secure.
It's configured to use Dex for authenticating user requests.

Save the following content to a file named `kcp.values.yaml`:

```yaml
{{< readfile "developer-platform/setup/quickstart/data/kcp.values.yaml" >}}
```

Before deploying kcp, you need to replace the following placeholder variables in the `kcp.values.yaml` file with your own values:

- ``

After you've replaced all the placeholders, deploy the kcp Helm chart:

```bash
helm upgrade --install kcp kcp \
  --repo=https://kcp-dev.github.io/helm-charts \
  --version=0.11.1 \
  --create-namespace \
  --namespace=kdp-system \
  --values=kcp.values.yaml
```

### Deploy KDP

Afterwards, you'll deploy the main KDP application.
It connects to the kcp control plane and includes a one-time bootstrap job that grants the admin user full administrative rights, allowing them to manage the entire platform.

Save the following content to a file named `kdp.values.yaml`:

```yaml
{{< readfile "developer-platform/setup/quickstart/data/kdp.values.yaml" >}}
```

Before deploying KDP, you need to replace the following placeholder variables in the `kdp.values.yaml` file with your own values:

- ``
- ``

With all placeholders replaced, deploy the KDP Helm chart.
Use your email address as the username and the license key you received as the password to log into the Helm registry.

```bash
helm registry login quay.io
helm upgrade --install kdp \
  oci://quay.io/kubermatic/helm-charts/developer-platform \
  --version=0.9.0 \
  --create-namespace \
  --namespace=kdp-system \
  --values=kdp.values.yaml
```

### Deploy KDP dashboard

Last but not least, you'll deploy KDP's web-based dashboard, which serves as the primary user interface.
It's configured to use Dex for user login and connects to kcp, providing developers with a graphical interface to create and manage their service objects.

Save the following content to a file named `kdp-dashboard.values.yaml`:

```yaml
{{< readfile "developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml" >}}
```

Before deploying the KDP dashboard, you need to replace the following placeholder variables in the `kdp-dashboard.values.yaml` file with your own values:

- ``
- ``
- ``
- ``

The `` placeholder **must** be replaced with the value generated in step "Deploy Dex" and configured in the `dex.values.yaml` file.

The `` placeholder must, like the OIDC client secret, be replaced with a long, random string that the KDP dashboard uses to protect user sessions.
You can use the same command to generate a secure, random string:

```bash
cat /dev/urandom | base64 | tr -dc 'A-Za-z0-9' | head -c32
```

Copy and paste the output as the value for ``.

Now that all placeholders are replaced, deploy the KDP dashboard Helm chart.
To log into the Helm registry, again use your email address as the username and the license key you received as the password.
```bash
helm registry login quay.io
helm upgrade --install kdp-dashboard \
  oci://quay.io/kubermatic/helm-charts/developer-platform-dashboard \
  --version=0.9.0 \
  --create-namespace \
  --namespace=kdp-system \
  --values=kdp-dashboard.values.yaml
```

### Configure DNS records

In order to finalize the installation and make your KDP instance accessible, you must create four records in your DNS provider.
These records point the hostnames you configured earlier to the correct load balancers of your Kubernetes cluster.

First, create three DNS records that direct traffic for the Dex login page (`login.`), the public API endpoint (`api.`), and the KDP dashboard (`dashboard.`) to your cluster's NGINX ingress controller.

Assuming you installed the NGINX ingress controller into the `ingress-nginx` namespace, use the following command to retrieve the external IP address or DNS name of the load balancer (in column "EXTERNAL-IP"):

```bash
kubectl --namespace=ingress-nginx get service ingress-nginx-controller
NAME                       TYPE           CLUSTER-IP      EXTERNAL-IP                                                    PORT(S)                      AGE
ingress-nginx-controller   LoadBalancer   10.47.248.232   4cdd93dfab834ed9a78858c7f2633380.eu-west-1.elb.amazonaws.com   80:30807/TCP,443:30184/TCP   449d
```

Second, create a DNS record specifically for kcp (`internal.`) that points to the external IP address or DNS name of the dedicated load balancer for the kcp *Service*.
Use the following command to retrieve the external IP address or DNS name of kcp's load balancer:

```bash
kubectl --namespace=kdp-system get service kcp-front-proxy
NAME              TYPE           CLUSTER-IP     EXTERNAL-IP                                                    PORT(S)          AGE
kcp-front-proxy   LoadBalancer   10.240.20.65   99f1093e45d6482d95a0c22c4a2bd056.eu-west-1.elb.amazonaws.com   8443:30295/TCP   381d
```

### Access the dashboard

Congratulations, your KDP installation is now complete! Once your DNS records have propagated, you can access the dashboard by navigating your web browser to the URL you configured (`https://dashboard.`).

You will be redirected to the Dex login page, where you can use the administrative credentials that were created during the setup:

- **Username**: `admin`
- **Password**: The password you chose in step [Deploy Dex](#deploy-dex)

After logging in, you will be taken to the KDP dashboard, where you can begin exploring your platform. Welcome to KDP!

[cert-manager/docs/installation]: https://cert-manager.io/docs/installation/helm/
[helm/docs/installation]: https://helm.sh/docs/intro/install/
[ingress-nginx/docs/installation]: https://kubernetes.github.io/ingress-nginx/deploy/
[k8s/docs/tools/installation]: https://kubernetes.io/docs/tasks/tools/#kubectl
[kcp/chart/readme]: https://github.com/kcp-dev/helm-charts/tree/main/charts/kcp
[kubelogin/src/readme]: https://github.com/int128/kubelogin

### Extensions

If you want to install the KDP AI Agent, which helps you generate YAML files for resources from descriptions in natural language, follow [these instructions](../ai-agent/_index.en.md).
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/dex.values.yaml b/content/developer-platform/setup/quickstart/data/dex.values.yaml
new file mode 100644
index 000000000..c2788eed4
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/dex.values.yaml
@@ -0,0 +1,36 @@
# dex.values.yaml
config:
  issuer: https://login.
+ storage: + type: kubernetes + config: + inCluster: true + staticClients: + - id: kdp-kubelogin + name: kdp-kubelogin + secret: + RedirectURIs: + - http://localhost:8000 + - http://localhost:18000 + - https://dashboard./api/auth/callback/oidc + enablePasswordDB: true + staticPasswords: + - email: admin + hash: "" + username: admin + userID: 08a8684b-db88-4b73-90a9-3cd1661f5466 + +ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + className: nginx + hosts: + - host: login. + paths: + - path: / + pathType: ImplementationSpecific + tls: + - secretName: dex-tls + hosts: + - login. \ No newline at end of file diff --git a/content/developer-platform/setup/quickstart/data/kcp.values.yaml b/content/developer-platform/setup/quickstart/data/kcp.values.yaml new file mode 100644 index 000000000..ea4b480a7 --- /dev/null +++ b/content/developer-platform/setup/quickstart/data/kcp.values.yaml @@ -0,0 +1,30 @@ +# kcp.values.yaml +externalHostname: "internal." +externalPort: "8443" + +kcpFrontProxy: + service: + type: LoadBalancer + additionalPathMappings: + - path: /services/organization/ + backend: https://kdp-virtual-workspaces:6444 + backend_server_ca: /etc/kcp/tls/ca/tls.crt + proxy_client_cert: /etc/kcp-front-proxy/requestheader-client/tls.crt + proxy_client_key: /etc/kcp-front-proxy/requestheader-client/tls.key + - path: /services/service/ + backend: https://kdp-virtual-workspaces:6444 + backend_server_ca: /etc/kcp/tls/ca/tls.crt + proxy_client_cert: /etc/kcp-front-proxy/requestheader-client/tls.crt + proxy_client_key: /etc/kcp-front-proxy/requestheader-client/tls.key + extraFlags: + - '--cors-allowed-origins=localhost,dashboard.$' + - '--authentication-drop-groups=system:kcp:logical-cluster-admin' + +oidc: + enabled: true + issuerUrl: https://login. + clientId: kdp-kubelogin + groupClaim: groups + usernameClaim: email + usernamePrefix: 'oidc:' + groupsPrefix: 'oidc:' \ No newline at end of file diff --git a/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml b/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml new file mode 100644 index 000000000..83112c2ce --- /dev/null +++ b/content/developer-platform/setup/quickstart/data/kdp-dashboard.values.yaml @@ -0,0 +1,45 @@ +# kdp-dashboard.values.yaml +dashboard: + imagePullSecret: |- + { + "auths": { + "quay.io": { + "auth": "", + "email": "" + } + } + } + + config: + app: + baseURL: https://dashboard. + authentication: + encryptionKey: + oidc: + clientID: kdp-kubelogin + clientSecret: + issuerURL: https://login. + backend: + frontProxyURL: https://api. + features: + aiAgent: + enabled: false + kubeconfigDownload: + enabled: true + serverCA: /app/_config/user-kubeconfig/ca.crt + serverURL: https://internal.:8443 + + ingress: + create: true + host: dashboard. + certIssuer: + kind: ClusterIssuer + name: letsencrypt-prod + + extraVolumeMounts: + - name: user-kubeconfig-ca + mountPath: /app/_config/user-kubeconfig + secretName: kcp-ca + items: + - key: tls.crt + path: ca.crt \ No newline at end of file diff --git a/content/developer-platform/setup/quickstart/data/kdp.values.yaml b/content/developer-platform/setup/quickstart/data/kdp.values.yaml new file mode 100644 index 000000000..04e537801 --- /dev/null +++ b/content/developer-platform/setup/quickstart/data/kdp.values.yaml @@ -0,0 +1,36 @@ +# kdp.values.yaml +kdp: + imagePullSecret: |- + { + "auths": { + "quay.io": { + "auth": "", + "email": "" + } + } + } + + frontProxy: + internalDomain: internal. 
+    publicDomain: api.
+    url: https://internal.:8443
+
+  virtualWorkspaces:
+    shardExternalURL: https://internal.:8443
+
+  hooks:
+    bootstrap:
+      enabled: true
+      extraManifests:
+        rbac.yaml: |
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: ClusterRoleBinding
+          metadata:
+            name: admin:cluster-admin
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: ClusterRole
+            name: cluster-admin
+          subjects:
+            - kind: User
+              name: oidc:admin
\ No newline at end of file
diff --git a/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml b/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml
new file mode 100644
index 000000000..e5fb3a241
--- /dev/null
+++ b/content/developer-platform/setup/quickstart/data/letsencrypt.cluster-issuer.yaml
@@ -0,0 +1,15 @@
+# cluster-issuer.yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    email:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      name: letsencrypt-prod-credentials
+    solvers:
+      - http01:
+          ingress:
+            class: nginx
\ No newline at end of file
diff --git a/content/developer-platform/tutorials/agent-without-kdp/_index.en.md b/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
index bbddd8303..e3704e595 100644
--- a/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
+++ b/content/developer-platform/tutorials/agent-without-kdp/_index.en.md
@@ -31,15 +31,15 @@ Create a file with a similar content (you most likely want to change the name, a
 group under which your published resources will be made available) and create it in a kcp
 workspace of your choice:
 
-```sh
+```bash
 # use the kcp kubeconfig
-$ export KUBECONFIG=/path/to/kcp.kubeconfig
+export KUBECONFIG=/path/to/kcp.kubeconfig
 
 # navigate to the workspace where the APIExport should exist
-$ kubectl ws :workspace:you:want:to:create:it
+kubectl ws :workspace:you:want:to:create:it
 
 # create it
-$ kubectl create --filename apiexport.yaml
+kubectl create --filename apiexport.yaml
 apiexport/test.example.com created
 ```
 
@@ -57,8 +57,8 @@ Make sure that the kubeconfig points to the right workspace (not necessarily the
 
 This can be done via a command like this:
 
-```sh
-$ kubectl create secret generic kcp-kubeconfig \
+```bash
+kubectl create secret generic kcp-kubeconfig \
   --namespace kcp-system \
   --from-file "kubeconfig=admin.kubeconfig"
 ```
 
@@ -88,7 +88,7 @@ kcpKubeconfig: kcp-kubeconfig
 
 Once this `values.yaml` file is prepared, install a recent development build of the Sync Agent:
 
-```sh
+```bash
 helm repo add kcp https://kcp-dev.github.io/helm-charts
 helm repo update
 
@@ -156,11 +156,11 @@ the RBAC rules that grant the Agent access.
 
 The Sync Agent needs to
 
-* manage its `APIExport`,
-* manage `APIResourceSchemas` and
-* access the virtual workspace for its `APIExport`.
+- manage its `APIExport`,
+- manage `APIResourceSchemas` and
+- access the virtual workspace for its `APIExport`.
-This can be achieved by applying RBAC like this _in the workspace where the `APIExport` resides_: +This can be achieved by applying RBAC like this *in the workspace where the `APIExport` resides*: ```yaml apiVersion: rbac.authorization.k8s.io/v1 diff --git a/content/developer-platform/tutorials/kcp-command-line/_index.en.md b/content/developer-platform/tutorials/kcp-command-line/_index.en.md index 90e449af0..1d46feb83 100644 --- a/content/developer-platform/tutorials/kcp-command-line/_index.en.md +++ b/content/developer-platform/tutorials/kcp-command-line/_index.en.md @@ -57,7 +57,7 @@ kubectl ws my-subworkspace ## API Management -A KDP Service is reconciled into an `APIExport`. To use this API, you have to _bind to_ it. Binding +A KDP Service is reconciled into an `APIExport`. To use this API, you have to *bind to* it. Binding involves creating a matching (= same name) `APIBinding` in the workspace where the API should be made available. diff --git a/content/developer-platform/tutorials/your-first-service/_index.en.md b/content/developer-platform/tutorials/your-first-service/_index.en.md index d65236ca5..58cbe3f51 100644 --- a/content/developer-platform/tutorials/your-first-service/_index.en.md +++ b/content/developer-platform/tutorials/your-first-service/_index.en.md @@ -64,15 +64,15 @@ spec: This can be applied with `kubectl` in your organization workspace. -```sh -$ kubectl ws :root:my-org # switch to your workspace -$ kubectl apply -f service.yaml +```bash +kubectl ws :root:my-org # switch to your workspace +kubectl apply -f service.yaml ``` Use the following command to explore the full schema for `Service` objects: -```sh -$ kubectl explain --api-version=core.kdp.k8c.io/v1alpha1 service +```bash +kubectl explain --api-version=core.kdp.k8c.io/v1alpha1 service ``` This concludes all required steps to define the new service. Click on the confirm button to create @@ -96,7 +96,7 @@ In `spec.kubeconfig` you will find the name of the kubeconfig Secret that you ca api-syncagent. Now switch your focus to your own cluster, where your business logic happens (for example where -Crossplane runs). For your Service you need to provide exactly _one_ api-syncagent in _one_ Kubernetes +Crossplane runs). For your Service you need to provide exactly *one* api-syncagent in *one* Kubernetes cluster. This agent can have multiple replicas as it uses leader election, but you must not have two or more independent agents processing the same Service. There is currently no mechanism to spread load between multiple Service clusters and two or more agents will most likely conflict with each @@ -107,7 +107,6 @@ for more information. You basically need to provide the kubeconfig generated by "kcp kubeconfig", the service's name (not its API Group) and a unique name for the agent itself. Put all the information in a `values.yaml` and run `helm install` to deploy your agent. - {{% notice warning %}} Currently only api-syncagent version 0.2.x is supported. Make sure you pass `--version 0.2.0` when installing the api-syncagent chart. 
 {{% /notice %}}
diff --git a/content/kubelb/main/_index.en.md b/content/kubelb/main/_index.en.md
index f4f4b0276..9f15e962f 100644
--- a/content/kubelb/main/_index.en.md
+++ b/content/kubelb/main/_index.en.md
@@ -35,7 +35,7 @@ KubeLB solves this problem by providing a centralized management solution that c
 
 - [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
 - [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
-- [KubeLB CE](https://github.com/kubermatic/kubelb)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
 
 Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
 
diff --git a/content/kubelb/main/ce-ee-matrix/_index.en.md b/content/kubelb/main/ce-ee-matrix/_index.en.md
index f6917b9ac..9d14bcee8 100644
--- a/content/kubelb/main/ce-ee-matrix/_index.en.md
+++ b/content/kubelb/main/ce-ee-matrix/_index.en.md
@@ -20,6 +20,7 @@ KubeLB is available in two versions: Community and Enterprise.
 | Ingress | ✔️ | ✔️ |
 | Gateway API v1 | ✔️ | ✔️ |
 | Bring your own secrets(certificates) | ✔️ | ✔️ |
+| Tunneling support through CLI | ✔️ | ❌ |
 | Gateway API beta/alpha(TLS/TCP/UDP routes) | ✔️ | ❌ |
 | Multiple Gateways | ✔️ | ❌ |
 | DNS automation | ✔️ | ❌ |
@@ -27,7 +28,12 @@ KubeLB is available in two versions: Community and Enterprise.
 | Limits for LoadBalancers, Gateways | ✔️ | ❌ |
 
 {{% notice note %}}
-KubeLB support [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources. [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for **Gateway API** resources. While other products might work for Ingress and Gateway API resources, we are not testing them and can't guarantee the compatibility.
+KubeLB supports the following products for Ingress and Gateway API resources:
+
+- [Ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources.
+- [Envoy Gateway](https://gateway.envoyproxy.io/) for **Gateway API** resources.
+
+While other products might work for Ingress and Gateway API resources, we are not testing them and can't guarantee compatibility.
 {{% /notice %}}
 
 ## Support Policy
diff --git a/content/kubelb/main/cli/_index.en.md b/content/kubelb/main/cli/_index.en.md
new file mode 100644
index 000000000..1058a4084
--- /dev/null
+++ b/content/kubelb/main/cli/_index.en.md
@@ -0,0 +1,62 @@
++++
+title = "KubeLB CLI"
+date = 2025-08-27T10:07:15+02:00
+weight = 30
+description = "Learn how you can use KubeLB CLI to provision Load Balancers and tunnels to expose local workloads"
++++
+
+![KubeLB CLI](/img/kubelb/common/logo.png?classes=logo-height)
+
+## KubeLB CLI
+
+KubeLB CLI is a command-line tool that complements KubeLB and makes it easier to manage load balancing configurations for multiple tenants, in both Kubernetes and non-Kubernetes environments.
+
+The source code is open source and available at [kubermatic/kubelb-cli](https://github.com/kubermatic/kubelb-cli).
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and not yet ready for production use. We are actively working on the feature set and taking feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+## Installation
+
+### Manual Installation
+
+Users can download the pre-compiled binaries for their system from the [releases page](https://github.com/kubermatic/kubelb-cli/releases) and copy them to the desired location.
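+
+As a minimal sketch, installing a downloaded release on Linux might look like the following (the archive and binary names below are assumptions; check the releases page for the actual asset names):
+
+```bash
+# Hypothetical asset name for v0.1.0 on Linux amd64; verify on the releases page
+curl -LO https://github.com/kubermatic/kubelb-cli/releases/download/v0.1.0/kubelb-cli_linux_amd64.tar.gz
+tar -xzf kubelb-cli_linux_amd64.tar.gz
+
+# Move the extracted binary to a directory on your PATH
+sudo mv kubelb /usr/local/bin/kubelb
+```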
+
+{{% notice note %}}
+KubeLB CLI is currently available for Linux, macOS, and Windows.
+{{% /notice %}}
+
+### Install using `go install`
+
+If you have Go installed, you can also build the binary from source using the following command:
+
+```bash
+go install github.com/kubermatic/kubelb-cli@v0.1.0
+```
+
+### Configuration
+
+KubeLB CLI needs the tenant-scoped kubeconfig and the tenant name to be configured, either via environment variables or via CLI flags. Environment variables are preferred, as you don't have to specify them for each command.
+
+```bash
+export KUBECONFIG=/path/to/kubeconfig
+export TENANT_NAME=my-tenant
+```
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
+
+## Further Information
+
+- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
+- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
+- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)
+
+Visit [kubermatic.com](https://www.kubermatic.com/) for further information.
+
+{{% notice tip %}}
+For the latest updates, follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic)
+{{% /notice %}}
diff --git a/content/kubelb/main/cli/compatibility-matrix/_index.en.md b/content/kubelb/main/cli/compatibility-matrix/_index.en.md
new file mode 100644
index 000000000..a40e2097b
--- /dev/null
+++ b/content/kubelb/main/cli/compatibility-matrix/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Compatibility Matrix"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+KubeLB CLI uses the Kubernetes management cluster that has KubeLB installed as its source of truth for the load balancing configurations.
+
+Since it was introduced alongside KubeLB v1.2, it requires the KubeLB management cluster to be at least v1.2.
+
+{{% notice note %}}
+KubeLB CLI is currently in beta and not yet ready for production use. We are actively working on the feature set and taking feedback from the community and our customers to improve the CLI.
+{{% /notice %}}
+
+| KubeLB CLI | KubeLB Management Cluster |
+|------------|---------------------------|
+| v0.1.0     | v1.2+                     |
+
+## Support Policy
+
+For the support policy, please refer to the [KubeLB Support Policy](../../support-policy/).
diff --git a/content/kubelb/main/cli/loadbalancing/_index.en.md b/content/kubelb/main/cli/loadbalancing/_index.en.md
new file mode 100644
index 000000000..36f51a059
--- /dev/null
+++ b/content/kubelb/main/cli/loadbalancing/_index.en.md
@@ -0,0 +1,36 @@
++++
+title = "Load Balancing"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+KubeLB CLI can be used to quickly provision load balancers, public or private, based on your load balancing configuration and needs. KubeLB then takes care of securing your endpoint with TLS certificates, automatically creating DNS records, and managing the load balancing configuration.
+
+## Prerequisites
+
+Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation to configure the Gateway or Ingress to manage DNS for the load balancer.
+
+## Create a Load Balancer
+
+To create a load balancer, use the `kubelb loadbalancer create` command.
+
+For example:
+
+```bash
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --hostname my-app.example.com
+```
+
+This will create a Load Balancer resource that forwards traffic to the endpoints `10.0.1.1:8080` and `10.0.1.2:8080` and is accessible at `https://my-app.example.com`.
+
+Specifying a hostname is optional; if it is not provided, KubeLB will generate a random hostname for you, provided a wildcard domain is enabled for the tenant or globally.
+
+![Demo animation](/img/kubelb/v1.2/loadbalancer.gif?classes=shadow,border "Load Balancer Demo")
+
+## Further actions
+
+The CLI also supports:
+
+- Updating the load balancer configuration
+- Deleting the load balancer
+- Getting the load balancer details
+- Listing all the load balancers
diff --git a/content/kubelb/main/cli/references/_index.en.md b/content/kubelb/main/cli/references/_index.en.md
new file mode 100644
index 000000000..44f9eae92
--- /dev/null
+++ b/content/kubelb/main/cli/references/_index.en.md
@@ -0,0 +1,40 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference of the Kubermatic KubeLB CLI commands and flags.
+
+## kubelb
+
+KubeLB CLI - Manage load balancers and create secure tunnels
+
+### Synopsis
+
+KubeLB CLI provides tools to manage KubeLB load balancers and create secure tunnels
+to expose local services through the KubeLB infrastructure.
+
+### Options
+
+```
+  -h, --help                help for kubelb
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb completion](commands/kubelb_completion) - Generate the autocompletion script for the specified shell
+* [kubelb docs](commands/kubelb_docs) - Generate markdown documentation for all commands
+* [kubelb expose](commands/kubelb_expose) - Expose a local port via tunnel
+* [kubelb loadbalancer](commands/kubelb_loadbalancer) - Manage KubeLB load balancers
+* [kubelb status](commands/kubelb_status) - Display current status of KubeLB
+* [kubelb tunnel](commands/kubelb_tunnel) - Manage secure tunnels to expose local services
+* [kubelb version](commands/kubelb_version) - Print the version information
diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion.md b/content/kubelb/main/cli/references/commands/kubelb_completion.md
new file mode 100644
index 000000000..2ff39c182
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_completion.md
@@ -0,0 +1,41 @@
++++
+title = "kubelb completion"
+date = 2025-08-27T00:00:00+01:00
+weight = 200
++++
+
+## kubelb completion
+
+Generate the autocompletion script for the specified shell
+
+### Synopsis
+
+Generate the autocompletion script for kubelb for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+ +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels +* [kubelb completion bash](../kubelb_completion_bash) - Generate the autocompletion script for bash +* [kubelb completion fish](../kubelb_completion_fish) - Generate the autocompletion script for fish +* [kubelb completion powershell](../kubelb_completion_powershell) - Generate the autocompletion script for powershell +* [kubelb completion zsh](../kubelb_completion_zsh) - Generate the autocompletion script for zsh diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md b/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md new file mode 100644 index 000000000..fa713d587 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_completion_bash.md @@ -0,0 +1,60 @@ ++++ +title = "kubelb completion bash" +date = 2025-08-27T00:00:00+01:00 +weight = 210 ++++ + +## kubelb completion bash + +Generate the autocompletion script for bash + +### Synopsis + +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(kubelb completion bash) + +To load completions for every new session, execute once: + +#### Linux + + kubelb completion bash > /etc/bash_completion.d/kubelb + +#### macOS + + kubelb completion bash > $(brew --prefix)/etc/bash_completion.d/kubelb + +You will need to start a new shell for this setup to take effect. 
+ +``` +kubelb completion bash +``` + +### Options + +``` + -h, --help help for bash + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md b/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md new file mode 100644 index 000000000..81cd45c0b --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_completion_fish.md @@ -0,0 +1,51 @@ ++++ +title = "kubelb completion fish" +date = 2025-08-27T00:00:00+01:00 +weight = 220 ++++ + +## kubelb completion fish + +Generate the autocompletion script for fish + +### Synopsis + +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + kubelb completion fish | source + +To load completions for every new session, execute once: + + kubelb completion fish > ~/.config/fish/completions/kubelb.fish + +You will need to start a new shell for this setup to take effect. + +``` +kubelb completion fish [flags] +``` + +### Options + +``` + -h, --help help for fish + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md b/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md new file mode 100644 index 000000000..f01116ed0 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_completion_powershell.md @@ -0,0 +1,48 @@ ++++ +title = "kubelb completion powershell" +date = 2025-08-27T00:00:00+01:00 +weight = 230 ++++ + +## kubelb completion powershell + +Generate the autocompletion script for powershell + +### Synopsis + +Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + kubelb completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+ +``` +kubelb completion powershell [flags] +``` + +### Options + +``` + -h, --help help for powershell + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md b/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md new file mode 100644 index 000000000..4f8ab1f41 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_completion_zsh.md @@ -0,0 +1,62 @@ ++++ +title = "kubelb completion zsh" +date = 2025-08-27T00:00:00+01:00 +weight = 240 ++++ + +## kubelb completion zsh + +Generate the autocompletion script for zsh + +### Synopsis + +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(kubelb completion zsh) + +To load completions for every new session, execute once: + +#### Linux + + kubelb completion zsh > "${fpath[1]}/_kubelb" + +#### macOS + + kubelb completion zsh > $(brew --prefix)/share/zsh/site-functions/_kubelb + +You will need to start a new shell for this setup to take effect. + +``` +kubelb completion zsh [flags] +``` + +### Options + +``` + -h, --help help for zsh + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/main/cli/references/commands/kubelb_docs.md b/content/kubelb/main/cli/references/commands/kubelb_docs.md new file mode 100644 index 000000000..b41a983d4 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_docs.md @@ -0,0 +1,42 @@ ++++ +title = "kubelb docs" +date = 2025-08-27T00:00:00+01:00 +weight = 40 ++++ + +## kubelb docs + +Generate markdown documentation for all commands + +### Synopsis + +Generate markdown documentation for all CLI commands and their parameters. +This creates individual markdown files for each command with complete usage information. 
+ +``` +kubelb docs [flags] +``` + +### Options + +``` + -h, --help help for docs + -o, --output string Output directory for generated documentation (default "./docs") +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/main/cli/references/commands/kubelb_expose.md b/content/kubelb/main/cli/references/commands/kubelb_expose.md new file mode 100644 index 000000000..6b435de09 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_expose.md @@ -0,0 +1,62 @@ ++++ +title = "kubelb expose" +date = 2025-08-27T00:00:00+01:00 +weight = 30 ++++ + +## kubelb expose + +Expose a local port via tunnel + +### Synopsis + +Expose a local port via secure tunnel with auto-generated name. + +This is a convenience command that creates a tunnel with an auto-generated +name and immediately connects to it. + +Examples: + +# Expose port 8080 with auto-generated tunnel name + + kubelb expose 8080 + +# Expose port 3000 with custom hostname + + kubelb expose 3000 --hostname api.example.com + +``` +kubelb expose PORT [flags] +``` + +### Examples + +``` +kubelb expose 8080 --tenant=mytenant +``` + +### Options + +``` + -h, --help help for expose + --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain) + -o, --output string Output format (summary, yaml, json) (default "summary") + --wait Wait for tunnel to be ready (default true) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md new file mode 100644 index 000000000..ea12542a3 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer.md @@ -0,0 +1,40 @@ ++++ +title = "kubelb loadbalancer" +date = 2025-08-27T00:00:00+01:00 +weight = 60 ++++ + +## kubelb loadbalancer + +Manage KubeLB load balancers + +### Synopsis + +Manage KubeLB load balancer configurations + +### Options + +``` + -h, --help help for loadbalancer +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + 
--log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels +* [kubelb loadbalancer create](../kubelb_loadbalancer_create) - Create a load balancer +* [kubelb loadbalancer delete](../kubelb_loadbalancer_delete) - Delete a load balancer +* [kubelb loadbalancer get](../kubelb_loadbalancer_get) - Get a load balancer +* [kubelb loadbalancer list](../kubelb_loadbalancer_list) - List load balancers diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md new file mode 100644 index 000000000..e542a0a56 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_create.md @@ -0,0 +1,69 @@ ++++ +title = "kubelb loadbalancer create" +date = 2025-08-27T00:00:00+01:00 +weight = 70 ++++ + +## kubelb loadbalancer create + +Create a load balancer + +### Synopsis + +Create a new HTTP load balancer with the specified endpoints. + +The load balancer supports HTTP routing and hostname-based access. + +Examples: + +# Create HTTP load balancer with random hostname + + kubelb lb create my-app --endpoints 10.0.1.1:8080 + +# Create HTTP load balancer with custom hostname + + kubelb lb create my-app --endpoints 10.0.1.1:8080 --hostname app.example.com + +# Create HTTP load balancer without a route + + kubelb lb create my-app --endpoints 10.0.1.1:8080 --route=false + +``` +kubelb loadbalancer create NAME [flags] +``` + +### Examples + +``` +kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --tenant=mytenant +``` + +### Options + +``` + -e, --endpoints string Comma-separated list of IP:port pairs (required) + -h, --help help for create + --hostname string Custom hostname for the route + -o, --output string Output format (summary, yaml, json) (default "summary") + -p, --protocol string Protocol (http only) (default "http") + --route Create a route for HTTP traffic (default true) + --type string LoadBalancer type (ClusterIP, LoadBalancer), defaults to ClusterIP (default "ClusterIP") + --wait Wait for load balancer to be ready (default true) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md new file mode 100644 index 000000000..26535b8fa --- 
/dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_delete.md
@@ -0,0 +1,54 @@
++++
+title = "kubelb loadbalancer delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 90
++++
+
+## kubelb loadbalancer delete
+
+Delete a load balancer
+
+### Synopsis
+
+Delete a load balancer by ID.
+
+This command will:
+- Check if the load balancer was created by the CLI
+- Display a warning if it wasn't created by the CLI
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the load balancer resource
+
+
+```
+kubelb loadbalancer delete ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer delete nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -f, --force   Force deletion without confirmation
+  -h, --help    help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers
diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md
new file mode 100644
index 000000000..c8259ea3f
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_get.md
@@ -0,0 +1,46 @@
++++
+title = "kubelb loadbalancer get"
+date = 2025-08-27T00:00:00+01:00
+weight = 80
++++
+
+## kubelb loadbalancer get
+
+Get a load balancer
+
+### Synopsis
+
+Retrieve a load balancer by ID and output its complete YAML specification.
+ +``` +kubelb loadbalancer get ID [flags] +``` + +### Examples + +``` +kubelb loadbalancer get nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md new file mode 100644 index 000000000..385ca74e8 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_loadbalancer_list.md @@ -0,0 +1,47 @@ ++++ +title = "kubelb loadbalancer list" +date = 2025-08-27T00:00:00+01:00 +weight = 85 ++++ + +## kubelb loadbalancer list + +List load balancers + +### Synopsis + +List all load balancers for the tenant. + + +``` +kubelb loadbalancer list [flags] +``` + +### Examples + +``` +kubelb loadbalancer list --tenant=mytenant --kubeconfig=./kubeconfig +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/main/cli/references/commands/kubelb_status.md b/content/kubelb/main/cli/references/commands/kubelb_status.md new file mode 100644 index 000000000..b1bebd066 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_status.md @@ -0,0 +1,47 @@ ++++ +title = "kubelb status" +date = 2025-08-27T00:00:00+01:00 +weight = 20 ++++ + +## kubelb status + +Display current status of KubeLB + +### Synopsis + +Display the current status of KubeLB including version information, configuration, and state + +``` +kubelb status [flags] +``` + +### Examples + +``` + # Display status for current tenant + kubelb status +``` + +### Options + +``` + -h, --help help for status +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for 
the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel.md new file mode 100644 index 000000000..89eb79aec --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel.md @@ -0,0 +1,41 @@ ++++ +title = "kubelb tunnel" +date = 2025-08-27T00:00:00+01:00 +weight = 100 ++++ + +## kubelb tunnel + +Manage secure tunnels to expose local services + +### Synopsis + +Create and manage secure tunnels to expose local services through the KubeLB infrastructure + +### Options + +``` + -h, --help help for tunnel +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels +* [kubelb tunnel connect](../kubelb_tunnel_connect) - Connect to an existing tunnel +* [kubelb tunnel create](../kubelb_tunnel_create) - Create a tunnel +* [kubelb tunnel delete](../kubelb_tunnel_delete) - Delete a tunnel +* [kubelb tunnel get](../kubelb_tunnel_get) - Get a tunnel +* [kubelb tunnel list](../kubelb_tunnel_list) - List tunnels diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md new file mode 100644 index 000000000..7427539ac --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_connect.md @@ -0,0 +1,50 @@ ++++ +title = "kubelb tunnel connect" +date = 2025-08-27T00:00:00+01:00 +weight = 115 ++++ + +## kubelb tunnel connect + +Connect to an existing tunnel + +### Synopsis + +Connect to an existing tunnel to start forwarding traffic. + +This command establishes a secure connection to the tunnel and forwards +traffic from the tunnel to your local service. 
+ +``` +kubelb tunnel connect NAME [flags] +``` + +### Examples + +``` +kubelb tunnel connect my-app --port 8080 --tenant=mytenant +``` + +### Options + +``` + -h, --help help for connect + -p, --port int Local port to forward to (required) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md new file mode 100644 index 000000000..bd164bdce --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_create.md @@ -0,0 +1,64 @@ ++++ +title = "kubelb tunnel create" +date = 2025-08-27T00:00:00+01:00 +weight = 110 ++++ + +## kubelb tunnel create + +Create a tunnel + +### Synopsis + +Create a new secure tunnel to expose a local service. + +The tunnel provides secure access to your local service through the KubeLB infrastructure. + +Examples: + # Create tunnel for local app on port 8080 + kubelb tunnel create my-app --port 8080 + + # Create tunnel with custom hostname + kubelb tunnel create my-app --port 8080 --hostname app.example.com + + # Create tunnel and connect immediately + kubelb tunnel create my-app --port 8080 --connect + + +``` +kubelb tunnel create NAME [flags] +``` + +### Examples + +``` +kubelb tunnel create my-app --port 8080 --tenant=mytenant +``` + +### Options + +``` + --connect Connect to tunnel after creation + -h, --help help for create + --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain) + -o, --output string Output format (summary, yaml, json) (default "summary") + -p, --port int Local port to tunnel (required) + --wait Wait for tunnel to be ready (default true) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md new file mode 100644 index 000000000..e9a9cee37 --- /dev/null +++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_delete.md @@ -0,0 +1,53 @@ ++++ +title = "kubelb tunnel delete" +date = 2025-08-27T00:00:00+01:00 +weight = 130 ++++ + +## kubelb tunnel delete + 
+Delete a tunnel
+
+### Synopsis
+
+Delete a tunnel by name.
+
+This command will:
+- Check if the tunnel exists
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the tunnel resource
+
+
+```
+kubelb tunnel delete NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel delete my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -f, --force   Force deletion without confirmation
+  -h, --help    help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md
new file mode 100644
index 000000000..662ac2f3f
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_get.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel get"
+date = 2025-08-27T00:00:00+01:00
+weight = 120
++++
+
+## kubelb tunnel get
+
+Get a tunnel
+
+### Synopsis
+
+Retrieve a tunnel by name and output its complete YAML specification.
+
+
+```
+kubelb tunnel get NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel get my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -h, --help   help for get
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md b/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md
new file mode 100644
index 000000000..e46291576
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_tunnel_list.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel list"
+date = 2025-08-27T00:00:00+01:00
+weight = 125
++++
+
+## kubelb tunnel list
+
+List tunnels
+
+### Synopsis
+
+List all tunnels for the tenant.
+
+
+```
+kubelb tunnel list [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -h, --help   help for list
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services
diff --git a/content/kubelb/main/cli/references/commands/kubelb_version.md b/content/kubelb/main/cli/references/commands/kubelb_version.md
new file mode 100644
index 000000000..3a5a117fa
--- /dev/null
+++ b/content/kubelb/main/cli/references/commands/kubelb_version.md
@@ -0,0 +1,47 @@
++++
+title = "kubelb version"
+date = 2025-08-27T00:00:00+01:00
+weight = 50
++++
+
+## kubelb version
+
+Print the version information
+
+### Synopsis
+
+Print the version information of the KubeLB CLI
+
+```
+kubelb version [flags]
+```
+
+### Examples
+
+```
+kubelb version
+```
+
+### Options
+
+```
+  -h, --help    help for version
+      --short   Print only the version in short format
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
diff --git a/content/kubelb/main/cli/release-notes/_index.en.md b/content/kubelb/main/cli/release-notes/_index.en.md
new file mode 100644
index 000000000..4d95a6cab
--- /dev/null
+++ b/content/kubelb/main/cli/release-notes/_index.en.md
@@ -0,0 +1,69 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+{{% notice warning %}}
+This document is a work in progress and might not be correct or up to date.
+{{% /notice %}}
+
+## Kubermatic KubeLB v1.x.x
+
+- [v1.x.x](#v1xx)
+  - [Community Edition](#community-edition)
+  - [Enterprise Edition](#enterprise-edition)
+
+## v1.x.x
+
+**GitHub release: [v1.x.x](https://github.com/kubermatic/kubelb/releases/tag/v1.x.x)**
+
+### Highlights
+
+#### Community Edition (CE)
+
+_content_
+
+#### Enterprise Edition (EE)
+
+_content_
+
+### Community Edition
+
+#### Urgent Upgrade Notes
+
+_content_
+
+#### Deprecation
+
+_content_
+
+#### API Changes
+
+_content_
+
+#### Features
+
+_content_
+
+#### Design
+
+_content_
+
+#### Bug or Regression
+
+_content_
+
+#### Other (Cleanup, Flake, or Chore)
+
+_content_
+
+**Full Changelog**:
+
+### Enterprise Edition
+
+**Enterprise Edition includes everything from Community Edition and more. The release notes below are for changes specific to just the Enterprise Edition.**
+
+#### EE Features
+
+_content_
diff --git a/content/kubelb/main/cli/tunneling/_index.en.md b/content/kubelb/main/cli/tunneling/_index.en.md
new file mode 100644
index 000000000..329c1ff2d
--- /dev/null
+++ b/content/kubelb/main/cli/tunneling/_index.en.md
@@ -0,0 +1,127 @@
++++
+title = "Tunneling"
+date = 2025-08-27T00:00:00+01:00
+weight = 10
+enterprise = true
++++
+
+Tunneling allows users to tunnel applications running locally on their workstations or inside VMs and expose them over the internet, without worrying about firewalls, NAT, DNS, or certificates. It is a great way to expose local services to the internet without the complexity of setting up a load balancer and a DNS record yourself.
+
+KubeLB CLI exposes the workload over a secure tunnel with TLS certificates and a DNS record.
+
+These tunnels are designed to be reusable and hence have their own dedicated API type in KubeLB, the `Tunnel` resource. Once a tunnel is created, it is registered with the KubeLB management cluster and can be connected to using the `kubelb tunnel connect` command.
+
+## Tunnels
+
+### Tunnel Configuration
+
+To enable tunneling, you need to configure the KubeLB management cluster to expose the connection management API. The `values.yaml` file can be modified like this:
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  debug: true
+  envoyProxy:
+    # -- Topology defines the deployment topology for Envoy Proxy. Valid values are: shared, dedicated, and global.
+    topology: shared
+    # -- The number of replicas for the Envoy Proxy deployment.
+    replicas: 1
+  # -- Propagate all annotations from the LB resource to the LB service.
+  propagateAllAnnotations: true
+
+  # Tunnel configuration
+  tunnel:
+    enabled: true
+    connectionManager:
+      httpRoute:
+        enabled: true
+        domain: "connection-manager.example.com"
+        gatewayName: "default"
+        gatewayNamespace: "kubelb"
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: "*.apps.example.com,connection-manager.example.com"
+          external-dns.alpha.kubernetes.io/ttl: "300"
+          cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+      ingress:
+        enabled: false
+        className: "nginx"
+        annotations:
+          cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+          nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+          nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+          external-dns.alpha.kubernetes.io/hostname: connection-manager-ingress.example.com
+          external-dns.alpha.kubernetes.io/ttl: "10"
+          nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+        hosts:
+          - host: connection-manager-ingress.example.com
+            paths:
+              - path: /tunnel
+                pathType: Prefix
+              - path: /health
+                pathType: Prefix
+        tls:
+          - secretName: connection-manager-tls
+            hosts:
+              - connection-manager-ingress.example.com
+```
+
+You can use either Ingress or HTTPRoute to expose the connection management API; Gateway API is the preferred option. In this example, `*.apps.example.com` is used as a wildcard domain for these tunnels; you can use any other domain you want.
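+
+Once the route or ingress has been reconciled, a quick sanity check is to query the connection manager's health endpoint (a sketch using the example domain from above; substitute your own):
+
+```bash
+# Assumes the HTTPRoute example above exposes the /health path on your domain
+curl -i https://connection-manager.example.com/health
+```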
+
+Afterwards, you need to configure the connection manager URL at the Config or Tenant level:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  ingress:
+    class: "nginx"
+  gatewayAPI:
+    class: "eg"
+  loadBalancer:
+    limit: 15
+  certificates:
+    defaultClusterIssuer: letsencrypt-staging-dns
+  tunnel:
+    connectionManagerURL: "https://connection-manager.example.com/"
+```
+
+**NOTE: Apart from this, the Gateway or Ingress should be configured to manage DNS for the tunnel. Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation for more details.**
+
+### Provisioning Tunnels
+
+Tunnels are created using either the `kubelb expose` command or the `kubelb tunnel create` command.
+
+```bash
+kubelb expose 1313
+```
+
+![Demo animation](/img/kubelb/v1.2/tunneling.gif?classes=shadow,border "Tunneling Demo")
+
+This will create a tunnel with a generated hostname and forward traffic to port `1313` on the local machine. The ingress point for this traffic is KubeLB's management cluster, hence the traffic is secure and encrypted.
+
+An alternative way to create a tunnel is to use the `kubelb tunnel create` command.
+
+```bash
+kubelb tunnel create my-app --port 1313
+```
+
+This will create a tunnel with a generated hostname, which can then be used through the `kubelb tunnel connect` command.
+
+```bash
+kubelb tunnel connect my-app --port 1313
+```
+
+This will connect to the tunnel and forward traffic to port `1313` on the local machine.
+
+## Further actions
+
+The CLI also supports:
+
+- Deleting the tunnel
+- Getting the tunnel details
+- Listing all the tunnels
+
+For more information, please refer to the [Tunnel API](../../references/api/tunnel/) documentation.
diff --git a/content/kubelb/main/compatibility-matrix/_index.en.md b/content/kubelb/main/compatibility-matrix/_index.en.md
index f5356090b..11195dcff 100644
--- a/content/kubelb/main/compatibility-matrix/_index.en.md
+++ b/content/kubelb/main/compatibility-matrix/_index.en.md
@@ -12,6 +12,7 @@ We are only testing our software with specific versions of the components, we ar
 
 | KubeLB | Kubermatic Kubernetes Platform | Gateway API | Envoy Gateway | NGINX Ingress | Kubernetes |
 |--------|-------------------------------|-------------|---------------|-------------------------|------------|
+| v1.2   | v2.27, v2.28                  | v1.3.0      | v1.3.0        | v1.10.0+                | v1.27+     |
 | v1.1   | v2.26, v2.27                  | v1.1.0      | v1.1.0        | v1.10.0+                | v1.27+     |
 | v1.0   | v2.24, v2.25                  | Not Supported | Not Supported | v1.10.0+              | v1.27+     |
 
diff --git a/content/kubelb/main/installation/management-cluster/_index.en.md b/content/kubelb/main/installation/management-cluster/_index.en.md
index b9fb323db..6a37e7052 100644
--- a/content/kubelb/main/installation/management-cluster/_index.en.md
+++ b/content/kubelb/main/installation/management-cluster/_index.en.md
@@ -32,7 +32,9 @@ imagePullSecrets:
 
 ### Install the helm chart
 
 ```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager-ee/crds/
 ## Create and update values.yaml with the required values.
helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f kubelb-manager-ee/values.yaml --create-namespace ``` @@ -52,15 +54,16 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"quay.io/kubermatic/kubelb-manager-ee"` | | -| image.tag | string | `"v1.1.5"` | | +| image.tag | string | `"v1.2.0"` | | | imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | | +| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. | | kubelb.debug | bool | `true` | | | kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. | | kubelb.enableLeaderElection | bool | `true` | | | kubelb.enableTenantMigration | bool | `true` | | | kubelb.envoyProxy.affinity | object | `{}` | | | kubelb.envoyProxy.nodeSelector | object | `{}` | | -| kubelb.envoyProxy.replicas | int | `3` | The number of replicas for the Envoy Proxy deployment. | +| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. | | kubelb.envoyProxy.resources | object | `{}` | | | kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. | | kubelb.envoyProxy.tolerations | list | `[]` | | @@ -69,6 +72,31 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku | kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. | | kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. | | kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. | +| kubelb.tunnel.connectionManager.affinity | object | `{}` | | +| kubelb.tunnel.connectionManager.healthCheck.enabled | bool | `true` | | +| kubelb.tunnel.connectionManager.healthCheck.livenessInitialDelay | int | `30` | | +| kubelb.tunnel.connectionManager.healthCheck.readinessInitialDelay | int | `10` | | +| kubelb.tunnel.connectionManager.httpAddr | string | `":8080"` | Server addresses | +| kubelb.tunnel.connectionManager.httpRoute.annotations | object | `{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}"}` | Annotations for HTTPRoute | +| kubelb.tunnel.connectionManager.httpRoute.domain | string | `"connection-manager.${DOMAIN}"` | Domain for the HTTPRoute NOTE: Replace ${DOMAIN} with your domain name. 
| +| kubelb.tunnel.connectionManager.httpRoute.enabled | bool | `false` | | +| kubelb.tunnel.connectionManager.httpRoute.gatewayName | string | `"gateway"` | Gateway name to attach to | +| kubelb.tunnel.connectionManager.httpRoute.gatewayNamespace | string | `""` | Gateway namespace | +| kubelb.tunnel.connectionManager.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/kubermatic/kubelb-connection-manager-ee","tag":""}` | Connection manager image configuration | +| kubelb.tunnel.connectionManager.ingress | object | `{"annotations":{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}","nginx.ingress.kubernetes.io/backend-protocol":"HTTP","nginx.ingress.kubernetes.io/proxy-read-timeout":"3600","nginx.ingress.kubernetes.io/proxy-send-timeout":"3600"},"className":"nginx","enabled":false,"hosts":[{"host":"connection-manager.${DOMAIN}","paths":[{"path":"/tunnel","pathType":"Prefix"},{"path":"/health","pathType":"Prefix"}]}],"tls":[{"hosts":["connection-manager.${DOMAIN}"],"secretName":"connection-manager-tls"}]}` | Ingress configuration for external HTTP/2 access | +| kubelb.tunnel.connectionManager.nodeSelector | object | `{}` | | +| kubelb.tunnel.connectionManager.podAnnotations | object | `{}` | Pod configuration | +| kubelb.tunnel.connectionManager.podLabels | object | `{}` | | +| kubelb.tunnel.connectionManager.podSecurityContext.fsGroup | int | `65534` | | +| kubelb.tunnel.connectionManager.podSecurityContext.runAsNonRoot | bool | `true` | | +| kubelb.tunnel.connectionManager.podSecurityContext.runAsUser | int | `65534` | | +| kubelb.tunnel.connectionManager.replicaCount | int | `1` | Number of connection manager replicas | +| kubelb.tunnel.connectionManager.requestTimeout | string | `"30s"` | | +| kubelb.tunnel.connectionManager.resources | object | `{"limits":{"cpu":"500m","memory":"256Mi"},"requests":{"cpu":"250m","memory":"128Mi"}}` | Resource limits | +| kubelb.tunnel.connectionManager.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":65534}` | Security context | +| kubelb.tunnel.connectionManager.service | object | `{"httpPort":8080,"type":"ClusterIP"}` | Service configuration | +| kubelb.tunnel.connectionManager.tolerations | list | `[]` | | +| kubelb.tunnel.enabled | bool | `false` | Enable tunnel functionality | | nameOverride | string | `""` | | | nodeSelector | object | `{}` | | | podAnnotations | object | `{}` | | @@ -102,7 +130,9 @@ helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f ku ### Install the helm chart ```sh -helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.1.5 --untardir "." --untar +helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.2.0 --untardir "." --untar +## Apply CRDs +kubectl apply -f kubelb-manager/crds/ ## Create and update values.yaml with the required values. 
helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubelb-manager/values.yaml --create-namespace
```
@@ -120,15 +150,16 @@ helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubel
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"quay.io/kubermatic/kubelb-manager"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
| imagePullSecrets | list | `[]` | |
+| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. |
| kubelb.debug | bool | `true` | |
| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. |
| kubelb.enableLeaderElection | bool | `true` | |
| kubelb.enableTenantMigration | bool | `true` | |
| kubelb.envoyProxy.affinity | object | `{}` | |
| kubelb.envoyProxy.nodeSelector | object | `{}` | |
-| kubelb.envoyProxy.replicas | int | `3` | The number of replicas for the Envoy Proxy deployment. |
+| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. |
| kubelb.envoyProxy.resources | object | `{}` | |
| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. |
| kubelb.envoyProxy.tolerations | list | `[]` | |
@@ -173,17 +204,98 @@ helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubel
 The examples and tools shared below are for demonstration purposes, you can use any other tools or configurations as per your requirements.
 {{% /notice %}}
-Management cluster is the place where all the components required for Layer 4 and Layer 7 load balancing are installed. The management cluster is responsible for managing the tenant clusters and their load balancing requests/configurations.
+The management cluster acts as the dataplane and central control plane for all your load balancing configurations. It is where all the components required for Layer 4 and Layer 7 load balancing, AI Gateways, MCP Gateways, Agent2Agent Gateways, API Gateways, etc. are deployed. The management cluster is multi-tenant by design, which makes it a perfect fit for managing a fleet of clusters in a scalable, robust, and secure way.
-### Layer 4 Load Balancing
+KubeLB provides an addons chart to simplify the installation of the required components for the management cluster. The chart is already part of the KubeLB manager chart and can be enabled by setting `kubelb-addons.enabled` to `true` in the values.yaml.
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  debug: true
+
+## Addon configuration
+kubelb-addons:
+  enabled: true
+
+  gatewayClass:
+    create: true
+
+  # Ingress Nginx
+  ingress-nginx:
+    enabled: false
+    controller:
+      service:
+        externalTrafficPolicy: Local
+
+  # Envoy Gateway
+  envoy-gateway:
+    enabled: true
+
+  # Cert Manager
+  cert-manager:
+    enabled: true
+    crds:
+      enabled: true
+    config:
+      apiVersion: controller.config.cert-manager.io/v1alpha1
+      kind: ControllerConfiguration
+      enableGatewayAPI: true
+
+  # External DNS
+  external-dns:
+    domainFilters:
+      - example.com
+    extraVolumes:
+      - name: credentials
+        secret:
+          secretName: route53-credentials
+    extraVolumeMounts:
+      - name: credentials
+        mountPath: /.aws
+        readOnly: true
+    env:
+      - name: AWS_SHARED_CREDENTIALS_FILE
+        value: /.aws/credentials
+    txtOwnerId: kubelb-example-aws
+    registry: txt
+    provider: aws
+    policy: sync
+    sources:
+      - service
+      - ingress
+      - gateway-httproute
+      - gateway-grpcroute
+      - gateway-tlsroute
+      - gateway-tcproute
+      - gateway-udproute
+
+  ## AI and Agent2Agent Gateways Integration
+  # KGateway CRDs
+  kgateway-crds:
+    enabled: true
+
+  # KGateway
+  kgateway:
+    enabled: true
+    gateway:
+      aiExtension:
+        enabled: true
+      agentgateway:
+        enabled: true
+```
+
+### TCP/UDP Load Balancing (Layer 4)
 Refer to [Layer 4 Load Balancing Setup]({{< relref "../../tutorials/loadbalancer#setup" >}}) for more details.
-### Layer 7 Load Balancing
+### Application Layer Load Balancing (Layer 7)
+
+For application layer load balancing, **KubeLB supports both Ingress and Gateway API resources**.
-For Layer 7 load balancing, kubeLB supports both Ingress and Gateway API resources.
+Our default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Most of KubeLB's current and upcoming features will prioritize Gateway API over Ingress, with Envoy Gateway being the implementation that we actively support, test, and base our features on.
-Our default recommendation is to use Gateway API and use [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. The features specific to Gateway API that will be built and consumed in KubeLB will be based on Envoy Gateway. While KubeLB supports integration with any Ingress or Gateway API implementation, the only limitation is that we only support native Kubernetes APIs i.e. Ingress and Gateway APIs. Provider specific APIs are not supported by KubeLB and will be completely ignored. Also, we are only testing KubeLB with Envoy Gateway and Nginx Ingress, we can't guarantee the compatibility with other Gateway API or Ingress implementations.
+While KubeLB supports integration with any Ingress or Gateway API implementation, the only limitation is that we only support native Kubernetes APIs, i.e. Ingress and the Gateway API. Provider-specific APIs are not supported by KubeLB and will be completely ignored. Also, since we only test KubeLB with Envoy Gateway and Nginx Ingress, we can't guarantee compatibility with other Gateway API or Ingress implementations.
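+
+As a quick illustration of the native APIs mentioned above, here is a minimal Gateway API example of the kind KubeLB can pick up from a tenant cluster. This is a sketch for illustration only; the resource names, namespace, parent Gateway reference, hostname, and port are placeholders, not values required by KubeLB.
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: my-app
+  namespace: default
+spec:
+  parentRefs:
+    # Assumed to reference a Gateway named "my-gateway" in the same namespace.
+    - name: my-gateway
+  hostnames:
+    - "app.example.com"
+  rules:
+    - matches:
+        - path:
+            type: PathPrefix
+            value: /
+      backendRefs:
+        # Assumed backend Service "my-app" listening on port 8080.
+        - name: my-app
+          port: 8080
+```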
#### Ingress
diff --git a/content/kubelb/main/installation/tenant-cluster/_index.en.md b/content/kubelb/main/installation/tenant-cluster/_index.en.md
index 3e935b3f4..8b9971cb8 100644
--- a/content/kubelb/main/installation/tenant-cluster/_index.en.md
+++ b/content/kubelb/main/installation/tenant-cluster/_index.en.md
@@ -82,7 +82,15 @@ kubelb:
 ## Installation for KubeLB CCM
-{{% notice warning %}} In case if Gateway API needs to be enabled for the cluster. Please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. This is required otherwise due to missing CRDs, kubelb will not be able to start. {{% /notice %}}
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, please set the following fields in the `values.yaml`. This is required; otherwise, due to missing CRDs, KubeLB will not be able to start.
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  installGatewayAPICRDs: true
+```
+
+{{% /notice %}}
 {{< tabs name="KubeLB CCM" >}}
 {{% tab name="Enterprise Edition" %}}
@@ -105,7 +113,9 @@ kubelb:
 ### Install the helm chart
 ```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm-ee/crds/
 ## Create and update values.yaml with the required values.
 helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm-ee/values.yaml --create-namespace
 ```
@@ -125,7 +135,7 @@ helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm
 | fullnameOverride | string | `""` | |
 | image.pullPolicy | string | `"IfNotPresent"` | |
 | image.repository | string | `"quay.io/kubermatic/kubelb-ccm-ee"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
 | imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | |
 | kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
 | kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. |
@@ -178,7 +188,9 @@ helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm
 ### Install the helm chart
 ```sh
-helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.1.5 --untardir "." --untar
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-ccm/crds/
 ## Create and update values.yaml with the required values.
 helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/values.yaml --create-namespace
 ```
@@ -198,7 +210,7 @@ helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/va
 | fullnameOverride | string | `""` | |
 | image.pullPolicy | string | `"IfNotPresent"` | |
 | image.repository | string | `"quay.io/kubermatic/kubelb-ccm"` | |
-| image.tag | string | `"v1.1.5"` | |
+| image.tag | string | `"v1.2.0"` | |
 | imagePullSecrets | list | `[]` | |
 | kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster |
 | kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller.
| diff --git a/content/kubelb/main/references/ce/_index.en.md b/content/kubelb/main/references/ce/_index.en.md index f0e3dcd5c..3c2159c27 100644 --- a/content/kubelb/main/references/ce/_index.en.md +++ b/content/kubelb/main/references/ce/_index.en.md @@ -5,6 +5,8 @@ date = 2024-03-06T12:00:00+02:00 weight = 60 +++ +**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ce/kubelb.k8c.io/v1alpha1)** + ## Packages - [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1) @@ -27,6 +29,8 @@ Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 - [SyncSecretList](#syncsecretlist) - [Tenant](#tenant) - [TenantList](#tenantlist) +- [TenantState](#tenantstate) +- [TenantStateList](#tenantstatelist) #### Addresses @@ -75,6 +79,32 @@ _Appears in:_ - [Addresses](#addresses) +#### AnnotatedResource + +_Underlying type:_ _string_ + +_Validation:_ + +- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute] + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | +| --- | --- | +| `all` | | +| `service` | | +| `ingress` | | +| `gateway` | | +| `httproute` | | +| `grpcroute` | | +| `tcproute` | | +| `udproute` | | +| `tlsroute` | | + #### AnnotationSettings _Appears in:_ @@ -84,8 +114,30 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | + +#### Annotations + +_Underlying type:_ _object_ + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +#### CertificatesSettings + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is only used for load balancer hostname. | | | #### Config @@ -102,6 +154,21 @@ _Appears in:_ | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | | `spec` _[ConfigSpec](#configspec)_ | | | | +#### ConfigDNSSettings + +ConfigDNSSettings defines the global settings for DNS management and automation. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | + #### ConfigList ConfigList contains a list of Config @@ -123,12 +190,30 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | | `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | | | `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | | `ingress` _[IngressSettings](#ingresssettings)_ | | | | | `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | | +| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | + +#### DNSSettings + +DNSSettings defines the settings for DNS management and automation. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | #### EndpointAddress @@ -203,8 +288,21 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | | +| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname. | | | | `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | | +#### HostnameStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | | +| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | | +| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | | + #### IngressSettings IngressSettings defines the settings for the ingress. @@ -325,8 +423,19 @@ _Appears in:_ | --- | --- | --- | --- | | `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| | `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
only needed for layer 4 | | | +| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route (Ingress or HTTPRoute) for the service and expose it with TLS on the given hostname. Currently, only the HTTP protocol is supported. | | |
| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | | +#### LoadBalancerState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | + #### LoadBalancerStatus LoadBalancerStatus defines the observed state of LoadBalancer @@ -339,6 +448,7 @@ _Appears in:_ | --- | --- | --- | --- | | `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | | | `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | | +| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | | #### ResourceState @@ -458,7 +568,7 @@ _Appears in:_ | --- | --- | --- | --- | | `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | | `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | | -| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:

_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).

_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in

_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | | +| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
* Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
* Kubernetes-defined prefixed names:
* 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
* 'kubernetes.io/ws' - WebSocket over cleartext as described in
* 'kubernetes.io/wss' - WebSocket over TLS as described in
* Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
| `port` _integer_ | The port that will be exposed by this service. | | |
| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | | | `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | | @@ -540,11 +650,64 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | | `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | | `ingress` _[IngressSettings](#ingresssettings)_ | | | | | `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[DNSSettings](#dnssettings)_ | | | | +| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | + +#### TenantState + +TenantState is the Schema for the tenants API + +_Appears in:_ + +- [TenantStateList](#tenantstatelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantState` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | | +| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | | + +#### TenantStateList + +TenantStateList contains a list of TenantState + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantStateList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TenantState](#tenantstate) array_ | | | | + +#### TenantStateSpec + +TenantStateSpec defines the desired state of TenantState. + +_Appears in:_ + +- [TenantState](#tenantstate) + +#### TenantStateStatus + +TenantStateStatus defines the observed state of TenantState + +_Appears in:_ + +- [TenantState](#tenantstate) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `version` _[Version](#version)_ | | | | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | | #### TenantStatus @@ -570,3 +733,16 @@ _Appears in:_ | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | | `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | | | `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | | + +#### Version + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `gitVersion` _string_ | | | | +| `gitCommit` _string_ | | | | +| `buildDate` _string_ | | | | +| `edition` _string_ | | | | diff --git a/content/kubelb/main/references/ee/_index.en.md b/content/kubelb/main/references/ee/_index.en.md index 852720608..1a71a06e4 100644 --- a/content/kubelb/main/references/ee/_index.en.md +++ b/content/kubelb/main/references/ee/_index.en.md @@ -6,6 +6,8 @@ weight = 50 enterprise = true +++ +**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ee/kubelb.k8c.io/v1alpha1)** + ## Packages - [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1) @@ -28,6 +30,10 @@ Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 - [SyncSecretList](#syncsecretlist) - [Tenant](#tenant) - [TenantList](#tenantlist) +- [TenantState](#tenantstate) +- [TenantStateList](#tenantstatelist) +- [Tunnel](#tunnel) +- [TunnelList](#tunnellist) #### Addresses @@ -76,6 +82,32 @@ _Appears in:_ - [Addresses](#addresses) +#### AnnotatedResource + +_Underlying type:_ _string_ + +_Validation:_ + +- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute] + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | +| --- | --- | +| `all` | | +| `service` | | +| `ingress` | | +| `gateway` | | +| `httproute` | | +| `grpcroute` | | +| `tcproute` | | +| `udproute` | | +| `tlsroute` | | + #### AnnotationSettings _Appears in:_ @@ -85,8 +117,19 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | + +#### Annotations + +_Underlying type:_ _object_ + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) #### CertificatesSettings @@ -100,7 +143,7 @@ _Appears in:_ | --- | --- | --- | --- | | `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation for a tenant. | | | | `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | | -| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | +| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | #### Config @@ -132,7 +175,7 @@ _Appears in:_ #### ConfigDNSSettings -ConfigDNSSettings defines the global settings for the DNS. +ConfigDNSSettings defines the global settings for DNS management and automation. _Appears in:_ @@ -141,6 +184,10 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation globally for all the tenants. | | | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | #### ConfigList @@ -163,18 +210,20 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | | `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | | | `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | | `ingress` _[IngressSettings](#ingresssettings)_ | | | | | `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | | `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | | | `certificates` _[ConfigCertificatesSettings](#configcertificatessettings)_ | | | | +| `tunnel` _[TunnelSettings](#tunnelsettings)_ | | | | #### DNSSettings -DNSSettings defines the settings for the DNS. +DNSSettings defines the tenant specific settings for DNS management and automation. _Appears in:_ @@ -183,7 +232,11 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation for a tenant. | | | -| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | +| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | #### EndpointAddress @@ -259,6 +312,7 @@ _Appears in:_ | --- | --- | --- | --- | | `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | | | `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | | +| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname and tunneling. | | | | `gateway` _[GatewaySettings](#gatewaysettings)_ | | | | | `disableHTTPRoute` _boolean_ | | | | | `disableGRPCRoute` _boolean_ | | | | @@ -292,6 +346,18 @@ _Appears in:_ | --- | --- | --- | --- | | `limit` _integer_ | Limit is the maximum number of gateways to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +#### HostnameStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | | +| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | | +| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | | + #### IngressSettings IngressSettings defines the settings for the ingress. @@ -413,8 +479,20 @@ _Appears in:_ | --- | --- | --- | --- | | `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| | `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
only needed for layer 4 | | | +| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route (Ingress or HTTPRoute) for the service and expose it with TLS on the given hostname. | | |
| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | | +#### LoadBalancerState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | +| `limit` _integer_ | | | | + #### LoadBalancerStatus LoadBalancerStatus defines the observed state of LoadBalancer @@ -427,6 +505,7 @@ _Appears in:_ | --- | --- | --- | --- | | `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | | | `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | | +| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | | #### ResourceState @@ -546,7 +625,7 @@ _Appears in:_ | --- | --- | --- | --- | | `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | | `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | | -| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:

_Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).

_ Kubernetes-defined prefixed names:
_'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
_ 'kubernetes.io/ws' - WebSocket over cleartext as described in
_'kubernetes.io/wss' - WebSocket over TLS as described in

_ Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | | +| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
* Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
* Kubernetes-defined prefixed names:
* 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
* 'kubernetes.io/ws' - WebSocket over cleartext as described in
* 'kubernetes.io/wss' - WebSocket over TLS as described in
* Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | |
| `port` _integer_ | The port that will be exposed by this service. | | |
| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | | | `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | | @@ -628,14 +707,68 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
This will have a higher precedence than the annotations specified at the Config level. | | | -| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
This will have a higher precedence than the value specified at the Config level. | | | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | | `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | | `ingress` _[IngressSettings](#ingresssettings)_ | | | | | `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | | `dns` _[DNSSettings](#dnssettings)_ | | | | | `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | -| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, applies on all the components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["*.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["*"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | | +| `tunnel` _[TenantTunnelSettings](#tenanttunnelsettings)_ | | | | +| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, this applies to all the components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | | + +#### TenantState + +TenantState is the Schema for the tenants API + +_Appears in:_ + +- [TenantStateList](#tenantstatelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantState` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | | +| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | | + +#### TenantStateList + +TenantStateList contains a list of TenantState + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantStateList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TenantState](#tenantstate) array_ | | | | + +#### TenantStateSpec + +TenantStateSpec defines the desired state of TenantState. + +_Appears in:_ + +- [TenantState](#tenantstate) + +#### TenantStateStatus + +TenantStateStatus defines the observed state of TenantState + +_Appears in:_ + +- [TenantState](#tenantstate) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `version` _[Version](#version)_ | | | | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `tunnel` _[TunnelState](#tunnelstate)_ | | | | +| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | | +| `allowedDomains` _string array_ | | | | #### TenantStatus @@ -645,6 +778,131 @@ _Appears in:_ - [Tenant](#tenant) +#### TenantTunnelSettings + +TenantTunnelSettings defines the settings for the tunnel. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable tunneling for a tenant. | | | + +#### Tunnel + +Tunnel is the Schema for the tunnels API + +_Appears in:_ + +- [TunnelList](#tunnellist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Tunnel` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TunnelSpec](#tunnelspec)_ | | | | +| `status` _[TunnelStatus](#tunnelstatus)_ | | | | + +#### TunnelList + +TunnelList contains a list of Tunnel + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TunnelList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tunnel](#tunnel) array_ | | | | + +#### TunnelPhase + +_Underlying type:_ _string_ + +TunnelPhase represents the phase of tunnel + +_Appears in:_ + +- [TunnelStatus](#tunnelstatus) + +| Field | Description | +| --- | --- | +| `Pending` | TunnelPhasePending means the tunnel is being provisioned
| +| `Ready` | TunnelPhaseReady means the tunnel is ready to accept connections
| +| `Failed` | TunnelPhaseFailed means the tunnel provisioning failed
| +| `Terminating` | TunnelPhaseTerminating means the tunnel is being terminated
| + +#### TunnelResources + +TunnelResources contains references to resources created for the tunnel + +_Appears in:_ + +- [TunnelStatus](#tunnelstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `serviceName` _string_ | ServiceName is the name of the service created for this tunnel | | | +| `routeRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | RouteRef is a reference to the route (HTTPRoute or Ingress) created for this tunnel | | | + +#### TunnelSettings + +TunnelSettings defines the global settings for Tunnel resources. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +| `connectionManagerURL` _string_ | ConnectionManagerURL is the URL of the connection manager service that handles tunnel connections.
This is required if tunneling is enabled.
For example: "" | | | +| `disable` _boolean_ | Disable indicates whether tunneling feature should be disabled. | | | + +#### TunnelSpec + +TunnelSpec defines the desired state of Tunnel + +_Appears in:_ + +- [Tunnel](#tunnel) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the hostname of the tunnel. If not specified, the hostname will be generated by KubeLB. | | | + +#### TunnelState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | +| `limit` _integer_ | | | | +| `connectionManagerURL` _string_ | | | | + +#### TunnelStatus + +TunnelStatus defines the observed state of Tunnel + +_Appears in:_ + +- [Tunnel](#tunnel) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the actual hostname assigned to the tunnel | | | +| `url` _string_ | URL contains the full URL to access the tunnel | | | +| `connectionManagerURL` _string_ | ConnectionManagerURL contains the URL that clients should use to establish tunnel connections | | | +| `phase` _[TunnelPhase](#tunnelphase)_ | Phase represents the current phase of the tunnel | | | +| `resources` _[TunnelResources](#tunnelresources)_ | Resources contains references to the resources created for this tunnel | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | Conditions represents the current conditions of the tunnel | | | + #### UpstreamService UpstreamService is a wrapper over the corev1.Service object. @@ -661,3 +919,16 @@ _Appears in:_ | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | | `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | | | `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | | + +#### Version + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `gitVersion` _string_ | | | | +| `gitCommit` _string_ | | | | +| `buildDate` _string_ | | | | +| `edition` _string_ | | | | diff --git a/content/kubelb/main/tutorials/aigateway/_index.en.md b/content/kubelb/main/tutorials/aigateway/_index.en.md new file mode 100644 index 000000000..148cb8985 --- /dev/null +++ b/content/kubelb/main/tutorials/aigateway/_index.en.md @@ -0,0 +1,239 @@ ++++ +title = "AI & MCP Gateway" +linkTitle = "AI & MCP Gateway" +date = 2023-10-27T10:07:15+02:00 +weight = 7 ++++ + +This tutorial will guide you through setting up an AI and MCP Gateway using KubeLB with KGateway to securely manage Large Language Model (LLM) requests and MCP tool servers. + +## Overview + +KubeLB leverages [KGateway](https://kgateway.dev/), a CNCF Sandbox project (accepted March 2025), to provide advanced AI Gateway capabilities. KGateway is built on Envoy and implements the Kubernetes Gateway API specification, offering: + +- **AI Workload Protection**: Secure applications, models, and data from inappropriate access +- **LLM Traffic Management**: Intelligent routing to LLM providers with load balancing based on model metrics +- **Prompt Engineering**: System-level prompt enrichment and guards +- **Multi-Provider Support**: Works with OpenAI, Anthropic, Google Gemini, Mistral, and local models like Ollama +- **Model Context Protocol (MCP) Gateway**: Federates MCP tool servers into a single, secure endpoint +- **Advanced Security**: Authentication, authorization, rate limiting tailored for AI workloads + +### Key Features + +#### AI-Specific Capabilities + +- **Prompt Guards**: Protect against prompt injection and data leakage +- **Model Failover**: Automatic failover between LLM providers +- **Function Calling**: Support for LLM function/tool calling +- **AI Observability**: Detailed metrics and tracing for AI requests +- **Semantic Caching**: Cache responses based on semantic similarity +- **Token-Based Rate Limiting**: Control costs with token consumption limits + +#### Gateway API Inference Extension + +KGateway supports the Gateway API Inference Extension which introduces: + +- `InferenceModel` CRD: Define LLM models and their endpoints +- `InferencePool` CRD: Group models for load balancing and failover +- Intelligent endpoint picking based on model performance metrics + +## Setup + +### Step 1: Enable KGateway AI Extension + +Update values.yaml for KubeLB manager chart to enable KGateway with AI capabilities: + +```yaml +kubelb: + enableGatewayAPI: true + +kubelb-addons: + enabled: true + + kgateway: + enabled: true + gateway: + aiExtension: + enabled: true +``` + +### Step 2: Create Gateway Specific Resources + +1. Deploy a Gateway resource to handle AI traffic: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: ai-gateway + namespace: kubelb + labels: + app: ai-gateway +spec: + gatewayClassName: kgateway + infrastructure: + parametersRef: + name: ai-gateway + group: gateway.kgateway.dev + kind: GatewayParameters + listeners: + - protocol: HTTP + port: 8080 + name: http + allowedRoutes: + namespaces: + from: All +``` + +2. 
Deploy a GatewayParameters resource to enable the AI extension:

```yaml
apiVersion: gateway.kgateway.dev/v1alpha1
kind: GatewayParameters
metadata:
  name: ai-gateway
  namespace: kubelb
  labels:
    app: ai-gateway
spec:
  kube:
    aiExtension:
      enabled: true
      ports:
        - name: ai-monitoring
          containerPort: 9092
    image:
      registry: cr.kgateway.dev/kgateway-dev
      repository: kgateway-ai-extension
      tag: v2.1.0-main
    service:
      type: LoadBalancer
```

## OpenAI Integration Example

This example shows how to set up secure access to OpenAI through the AI Gateway.

### Step 1: Store OpenAI API Key

Create a Kubernetes secret with your OpenAI API key:

```bash
export OPENAI_API_KEY="sk-..."

kubectl create secret generic openai-secret \
  --from-literal=Authorization="Bearer ${OPENAI_API_KEY}" \
  --namespace kubelb
```

### Step 2: Create Backend Configuration

Define an AI Backend that uses the secret for authentication:

```yaml
apiVersion: gateway.kgateway.dev/v1alpha1
kind: Backend
metadata:
  name: openai
  namespace: kubelb
spec:
  type: AI
  ai:
    llm:
      provider:
        openai:
          authToken:
            kind: SecretRef
            secretRef:
              name: openai-secret
              namespace: kubelb
          model: "gpt-3.5-turbo"
```

### Step 3: Create HTTPRoute

Route traffic to the OpenAI backend:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: openai-route
  namespace: kubelb
spec:
  parentRefs:
    - name: ai-gateway
      namespace: kubelb
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /openai
      filters:
        - type: URLRewrite
          urlRewrite:
            path:
              type: ReplaceFullPath
              replaceFullPath: /v1/chat/completions
      backendRefs:
        - name: openai
          namespace: kubelb
          group: gateway.kgateway.dev
          kind: Backend
```

### Step 4: Test the Configuration

Get the Gateway's external IP:

```bash
kubectl get gateway ai-gateway -n kubelb
export GATEWAY_IP=$(kubectl get svc -n kubelb ai-gateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
```

Send a test request:

```bash
curl -X POST "http://$%7BGATEWAY_IP%7D/openai" \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "Hello, how are you?"}
    ]
  }'
```

## Rate Limiting (Optional)

Add rate limiting to control costs and prevent abuse:

```yaml
apiVersion: gateway.kgateway.dev/v1alpha1
kind: RateLimitPolicy
metadata:
  name: openai-ratelimit
  namespace: kubelb
spec:
  targetRef:
    kind: HTTPRoute
    name: openai-route
    namespace: kubelb
  limits:
    - requests: 100
      unit: hour
```

## MCP Gateway

Similar to the AI Gateway, you can also use agentgateway to connect to one or multiple MCP servers in any environment. 
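To give a rough idea of the shape this takes, MCP servers are modelled through the same `Backend` API that the OpenAI example above uses. The sketch below is illustrative only: the `type: MCP` backend and the `mcp.targets` fields are assumptions based on the upstream kgateway/agentgateway documentation and may differ between versions, so treat the guide linked below as the source of truth.

```yaml
# Hypothetical sketch of exposing an in-cluster MCP server through the gateway.
# The field names (type: MCP, mcp.targets, static host/port) are assumptions
# drawn from the kgateway docs and may vary across kgateway versions.
apiVersion: gateway.kgateway.dev/v1alpha1
kind: Backend
metadata:
  name: mcp-tools
  namespace: kubelb
spec:
  type: MCP
  mcp:
    targets:
      - name: mcp-server
        static:
          host: mcp-server.default.svc.cluster.local
          port: 3000
```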
Please follow this guide to set up the MCP Gateway: [MCP Gateway](https://kgateway.dev/docs/agentgateway/mcp/)

## Further Reading

For advanced configurations and features:

- [KGateway AI Setup Documentation](https://kgateway.dev/docs/ai/setup/)
- [KGateway Authentication Guide](https://kgateway.dev/docs/ai/auth/)
- [Prompt Guards and Security](https://kgateway.dev/docs/ai/prompt-guards/)
- [Multiple LLM Providers](https://kgateway.dev/docs/ai/cloud-providers/)
diff --git a/content/kubelb/main/tutorials/bgp/_index.en.md b/content/kubelb/main/tutorials/bgp/_index.en.md
new file mode 100644
index 000000000..0852763cc
--- /dev/null
+++ b/content/kubelb/main/tutorials/bgp/_index.en.md
@@ -0,0 +1,53 @@
+++
title = "Layer 4 Load balancing with BGP"
linkTitle = "BGP Support"
date = 2025-08-27T10:07:15+02:00
weight = 6
+++

In the Management Cluster, KubeLB offloads the provisioning of the actual load balancers to the load balancing appliance that is being used. This can be the CCM in case of a cloud provider or a self-managed solution like [MetalLB](https://metallb.universe.tf), [Cilium Load Balancer](https://cilium.io/use-cases/load-balancer/) or any other solution.

Due to this generic nature, KubeLB can be used with any load balancing appliance, and the underlying route advertisement protocols such as BGP, OSPF, and L2 are all supported. This tutorial will focus on [BGP](https://networklessons.com/bgp/introduction-to-bgp) but it assumes that the underlying infrastructure of your Kubernetes cluster is already configured to support BGP.

## Setup

We'll use [MetalLB](https://metallb.universe.tf) with BGP for this tutorial. Update the values.yaml file for KubeLB manager to enable metallb:

```yaml
kubelb-addons:
  metallb:
    enabled: true
```

A minimal configuration for MetalLB for demonstration purposes is as follows:

```yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: extern
  namespace: metallb-system
spec:
  addresses:
    - 10.10.255.200-10.10.255.250
  autoAssign: true
  avoidBuggyIPs: true
---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: extern
  namespace: metallb-system
spec:
  ipAddressPools:
    - extern
```

This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type.

Afterwards you can follow the [Layer 4 Load balancing](../loadbalancer#usage-with-kubelb) tutorial to create a `LoadBalancer` service in the tenant cluster.

### Further reading

- [MetalLB BGP Configuration](https://metallb.universe.tf/configuration/_advanced_bgp_configuration/)
- [MetalLB BGP Usage](https://metallb.universe.tf/usage/#bgp)
diff --git a/content/kubelb/main/tutorials/config/_index.en.md b/content/kubelb/main/tutorials/config/_index.en.md
index a676e2068..9ee2a661f 100644
--- a/content/kubelb/main/tutorials/config/_index.en.md
+++ b/content/kubelb/main/tutorials/config/_index.en.md
@@ -60,7 +60,7 @@ These configurations are available at a global level and also at a tenant level. 2. **GatewayAPI.Class**: The class to use for Gateway API resources for tenants in management cluster. 3. **Certificates.DefaultClusterIssuer(EE)**: The default cluster issuer to use for certificate management.
-### Propagate annotations
+### Annotation Settings

KubeLB can propagate annotations from services, ingresses, gateway API objects etc. 
in the tenant cluster to the corresponding LoadBalancer or Route resources in the management cluster. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS. @@ -68,7 +68,7 @@ Annotations are not propagated by default since tenants can make unwanted change The annotation configuration set on the tenant level will override the global annotation configuration for that tenant.
-1. Propagate all annotations
+#### 1. Propagate all annotations

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
@@ -80,7 +80,7 @@ spec:
  propagateAllAnnotations: true
```

-2. Propagate specific annotations
+#### 2. Propagate specific annotations

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
@@ -96,6 +96,29 @@ spec:
  metallb.universe.tf/loadBalancerIPs: "8.8.8.8"
```

+#### 3. Default annotations
+
+Default annotations for resources that KubeLB generates in the management cluster can also be configured. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  defaultAnnotations:
+    service:
+      service.beta.kubernetes.io/aws-load-balancer-internal: true
+    ingress:
+      kubernetes.io/ingress.class: "nginx"
+    gatewayapi:
+      kubernetes.io/ingress.class: "eg"
+    # Will be applied to all resources such as Ingress, Gateway API resources, services, etc.
+    all:
+      internal: true
+```
+
### Configure Envoy Proxy

Sample configuration, inflated with values for demonstration purposes only. All of the values are optional and have sane defaults. For more details check [CRD References]({{< relref "../../references">}})
@@ -182,17 +205,34 @@ spec:
  gatewayAPI:
    class: "eg"
    disable: false
-    # Enterprise Edition Only
+    defaultGateway:
+      name: "default"
+      namespace: "envoy-gateway"
+    # Enterprise Edition Only (all the below options are only available in Enterprise Edition)
    gateway:
      limits: 10
      disableHTTPRoute: false
      disableGRPCRoute: false
-      # Enterprise Edition Only
      disableTCPRoute: false
-      # Enterprise Edition Only
      disableUDPRoute: false
-      # Enterprise Edition Only
      disableTLSRoute: false
```

+### Configure DNS Options
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  dns:
+    # The wildcard domain to use for auto-generated hostnames for Load balancers
+    # In EE Edition, this is also used to generate dynamic hostnames for tunnels.
+    wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for Load balancers and tunnels (in EE Edition)
+    allowExplicitHostnames: false
+```
+
**For more details and options, please go through [CRD References]({{< relref "../../references">}})**
diff --git a/content/kubelb/main/tutorials/gatewayapi/_index.en.md b/content/kubelb/main/tutorials/gatewayapi/_index.en.md
index dff03c946..9bd0d2c4b 100644
--- a/content/kubelb/main/tutorials/gatewayapi/_index.en.md
+++ b/content/kubelb/main/tutorials/gatewayapi/_index.en.md
@@ -2,7 +2,7 @@
title = "Gateway API"
linkTitle = "Gateway API"
date = 2023-10-27T10:07:15+02:00
-weight = 5
+weight = 4
+++

This tutorial will guide you through the process of setting up Layer 7 load balancing with Gateway API. 
@@ -19,19 +19,23 @@ In KubeLB, we treat the admins of management cluster as the Platform provider. H ### Setup -{{% notice warning %}} Ensure that Gateway API is enabled for the cluster. Please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. Gateway API has been disabled by default as due to missing Gateway API CRDs the controller will crash and won't start. {{% /notice %}} - Kubermatic's default recommendation is to use Gateway API and use [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Install Envoy Gateway by following this [guide](https://gateway.envoyproxy.io/docs/install/install-helm/) or any other Gateway API implementation of your choice. -Ensure that `GatewayClass` exists in the management cluster. A minimal configuration for GatewayClass is as follows: +Update values.yaml for KubeLB manager chart to enable the Gateway API addon. ```yaml -apiVersion: gateway.networking.k8s.io/v1 -kind: GatewayClass -metadata: - name: eg -spec: - controllerName: gateway.envoyproxy.io/gatewayclass-controller +kubelb: + enableGatewayAPI: true + +## Addon configuration +kubelb-addons: + enabled: true + # Create the GatewayClass resource in the management cluster. + gatewayClass: + create: true + + envoy-gateway: + enabled: true ``` #### KubeLB Manager Configuration diff --git a/content/kubelb/main/tutorials/ingress/_index.en.md b/content/kubelb/main/tutorials/ingress/_index.en.md index 7265f2467..51a5d84b4 100644 --- a/content/kubelb/main/tutorials/ingress/_index.en.md +++ b/content/kubelb/main/tutorials/ingress/_index.en.md @@ -2,7 +2,7 @@ title = "Ingress" linkTitle = "Ingress" date = 2023-10-27T10:07:15+02:00 -weight = 4 +weight = 5 +++ This tutorial will guide you through the process of setting up Layer 7 load balancing with Ingress. @@ -50,12 +50,16 @@ spec: #### Shared -Install your controller with default configuration. +Update values.yaml for KubeLB manager chart to enable the ingress-nginx addon. -```sh -helm upgrade --install ingress-nginx ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --namespace ingress-nginx --create-namespace +```yaml +kubelb-addons: + enabled: true + ingress-nginx: + enabled: true + controller: + service: + externalTrafficPolicy: Local ``` For details: diff --git a/content/kubelb/main/tutorials/kkp/_index.en.md b/content/kubelb/main/tutorials/kkp/_index.en.md index 3704c116a..a2c4c4e04 100644 --- a/content/kubelb/main/tutorials/kkp/_index.en.md +++ b/content/kubelb/main/tutorials/kkp/_index.en.md @@ -1,7 +1,7 @@ +++ title = "Kubermatic Kubernetes Platform Integration" date = 2023-10-27T10:07:15+02:00 -weight = 15 +weight = 9 enterprise = true +++ diff --git a/content/kubelb/main/tutorials/loadbalancer/_index.en.md b/content/kubelb/main/tutorials/loadbalancer/_index.en.md index 4672fd443..7a2d58459 100644 --- a/content/kubelb/main/tutorials/loadbalancer/_index.en.md +++ b/content/kubelb/main/tutorials/loadbalancer/_index.en.md @@ -99,6 +99,38 @@ spec: This will create a service of type `LoadBalancer` and a deployment. KubeLB CCM will then propagate the request to management cluster, create a LoadBalancer CR there and retrieve the IP address allocated in the management cluster. Eventually the IP address will be assigned to the service in the tenant cluster. +### Load Balancer Hostname Support + +KubeLB now supports assigning a hostname directly to the LoadBalancer resource. This is helpful for simpler configurations where no special routing rules are required for your Ingress or HTTPRoute resources. 
+ +```yaml +apiVersion: kubelb.k8c.io/v1alpha1 +kind: LoadBalancer +metadata: + name: test-lb-hostname + namespace: tenant-dkrqjswsgk + annotations: + kubelb.k8c.io/request-wildcard-domain: "true" +spec: + # hostname: test.example.com + endpoints: + - addresses: + - ip: 91.99.112.254 + ports: + - name: 8080-tcp + port: 31632 + protocol: TCP + ports: + - name: 8080-tcp + port: 8080 + protocol: TCP + type: ClusterIP +``` + +This will create a LoadBalancer resource with the hostname `test.example.com` that can forward traffic to the IP address `91.99.112.254` on port `31632`. The `kubelb.k8c.io/request-wildcard-domain: "true"` annotation is used to request a wildcard domain for the hostname. Otherwise `spec.hostname` can also be used to explicitly set the hostname. + +Please take a look at [DNS Automation](../security/dns/#enable-dns-automation) for more details on how to configure DNS for the hostname. + ### Configurations KubeLB CCM helm chart can be used to further configure the CCM. Some essential options are: diff --git a/content/kubelb/main/tutorials/observability/_index.en.md b/content/kubelb/main/tutorials/observability/_index.en.md index c4f1867d4..743871860 100644 --- a/content/kubelb/main/tutorials/observability/_index.en.md +++ b/content/kubelb/main/tutorials/observability/_index.en.md @@ -2,7 +2,7 @@ title = "Observability" linkTitle = "Observability" date = 2023-10-27T10:07:15+02:00 -weight = 7 +weight = 8 +++ KubeLB is a mission-critical component in the Kubernetes ecosystem, and its observability is crucial for ensuring the stability and reliability of the platform. This guide will walk you through the steps to enable and configure observability for KubeLB. diff --git a/content/kubelb/main/tutorials/security/_index.en.md b/content/kubelb/main/tutorials/security/_index.en.md index 81b527730..310780651 100644 --- a/content/kubelb/main/tutorials/security/_index.en.md +++ b/content/kubelb/main/tutorials/security/_index.en.md @@ -2,7 +2,7 @@ title = "Security" linkTitle = "Security" date = 2023-10-27T10:07:15+02:00 -weight = 6 +weight = 7 +++ This is a guide towards managing DNS, TLS, and other security-related configurations in KubeLB. diff --git a/content/kubelb/main/tutorials/security/cert-management/_index.en.md b/content/kubelb/main/tutorials/security/cert-management/_index.en.md index 2e0331e39..9d28a7b16 100644 --- a/content/kubelb/main/tutorials/security/cert-management/_index.en.md +++ b/content/kubelb/main/tutorials/security/cert-management/_index.en.md @@ -17,32 +17,37 @@ These are minimal examples to get you started quickly. Please refer to the docum {{< tabs name="cert-manager" >}} {{% tab name="Gateway API" %}} -For Gateway API, the feature gate to use Gateway APIs needs to be enabled: - -```bash -helm repo add jetstack https://charts.jetstack.io --force-update -helm upgrade --install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --version v1.15.2 \ - --set crds.enabled=true \ - --set config.apiVersion="controller.config.cert-manager.io/v1alpha1" \ - --set config.kind="ControllerConfiguration" \ - --set config.enableGatewayAPI=true +Update values.yaml for KubeLB manager chart to enable the cert-manager addon. 
+ +```yaml +kubelb-addons: + enabled: true + cert-manager: + enabled: true + crds: + enabled: true + config: + apiVersion: controller.config.cert-manager.io/v1alpha1 + kind: ControllerConfiguration + enableGatewayAPI: true ``` {{% /tab %}} {{% tab name="Ingress" %}} -```bash -helm repo add jetstack https://charts.jetstack.io --force-update -helm upgrade --install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --version v1.15.2 \ - --set crds.enabled=true +Update values.yaml for KubeLB manager chart to enable the cert-manager addon. + +```yaml +kubelb-addons: + enabled: true + cert-manager: + enabled: true + crds: + enabled: true + config: + apiVersion: controller.config.cert-manager.io/v1alpha1 + kind: ControllerConfiguration + enableGatewayAPI: false ``` {{% /tab %}} @@ -76,18 +81,76 @@ Users can then either use [cert-manager annotations](https://cert-manager.io/doc ### Cluster Issuer example +{{% notice info %}} +Due to multi-tenancy, it's recommended to use DNS challenge for certificate management. Gateway API has a limitation and doesn't support wildcard domains with HTTP01 challenge. Similarly, for Ingress, unless you are using single ingress installation for all tenants, you will need to create a separate ClusterIssuer for each tenant. Same is the case for Gateway API since it needs the Gateway name to resolve the certificate challenges. +{{% /notice %}} + +#### Example for DNS challenge with AWS Route53 + ```yaml apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: - name: letsencrypt-staging + name: letsencrypt-production-dns +spec: + acme: + email: user@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-production-dns + solvers: + - dns01: + route53: + region: eu-central-1 + accessKeyIDSecretRef: + name: route53-credentials + key: access-key-id + secretAccessKeySecretRef: + name: route53-credentials + key: secret-access-key +``` + +#### Example for HTTP01 challenge + +{{< tabs name="cert-manager" >}} +{{% tab name="Gateway API" %}} + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + email: user@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: example-issuer-account-key + solvers: + - http01: + gatewayHTTPRoute: + parentRefs: + - kind: Gateway + name: default + namespace: tenant-default + sectionName: http +``` + +{{% /tab %}} +{{% tab name="Ingress" %}} + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production spec: acme: # You must replace this email address with your own. # Let's Encrypt will use this to contact you about expiring # certificates, and issues related to your account. email: user@example.com - server: https://acme-staging-v02.api.letsencrypt.org/directory + server: https://acme-v02.api.letsencrypt.org/directory privateKeySecretRef: # Secret resource that will be used to store the account's private key. name: example-issuer-account-key @@ -98,6 +161,9 @@ spec: ingressClassName: nginx ``` +{{% /tab %}} +{{< /tabs >}} + The additional validation at the tenant level allows us to use a single instance of cert-manager for multiple tenants. Multiple cert-manager installations are not recommended and it's better to have a single instance of cert-manager for all tenants but different ClusterIssuers/Issuers for different tenants, if required. 
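As a minimal sketch of how a tenant workload could consume one of these issuers (assuming the nginx ingress setup and the `letsencrypt-production` ClusterIssuer from the example above; the app name and hostname are illustrative), the standard cert-manager annotation plus a `tls` section on the Ingress is enough:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app
  annotations:
    # Standard cert-manager annotation; the referenced ClusterIssuer must exist.
    cert-manager.io/cluster-issuer: letsencrypt-production
spec:
  ingressClassName: nginx
  rules:
    - host: my-app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
  tls:
    - hosts:
        - my-app.example.com
      # cert-manager will store the issued certificate in this Secret.
      secretName: my-app-tls
```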
## Usage
diff --git a/content/kubelb/main/tutorials/security/dns/_index.en.md b/content/kubelb/main/tutorials/security/dns/_index.en.md
index b58536adc..952a5002a 100644
--- a/content/kubelb/main/tutorials/security/dns/_index.en.md
+++ b/content/kubelb/main/tutorials/security/dns/_index.en.md
@@ -10,41 +10,44 @@ enterprise = true

### Install External-dns

-Install [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters. A sample configuration to use external-dns with AWS Route53 and domain is shown below.
+We leverage [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters. **This is just an example to give you a head start. For more details on setting up external-dns for different providers, visit [Official Documentation](https://kubernetes-sigs.github.io/external-dns).**

-#### Values.yaml
+Update the values.yaml for KubeLB manager chart to enable the external-dns addon.

```yaml
-# do not allow any domain that are now below these base domains
-domainFilters:
-  - example.com
-
-# mount the credential secret we created outside of helm
-extraVolumes:
-  - name: credentials
-    secret:
-      secretName: route53-credentials
-
-extraVolumeMounts:
-  - name: credentials
-    mountPath: /.aws
-    readOnly: true
-
-env:
-  - name: AWS_SHARED_CREDENTIALS_FILE
-    value: /.aws/credentials
-
-# NOTE: Enable/Disable based on your requirements
-sources:
-  - service
-  - ingress
-  - gateway-httproute
-  - gateway-grpcroute
-  - gateway-tlsroute
-  - gateway-tcproute
-  - gateway-udproute
+kubelb-addons:
+  enabled: true
+
+  external-dns:
+    enabled: true
+    domainFilters:
+      - example.com
+    extraVolumes:
+      - name: credentials
+        secret:
+          secretName: route53-credentials
+    extraVolumeMounts:
+      - name: credentials
+        mountPath: /.aws
+        readOnly: true
+    env:
+      - name: AWS_SHARED_CREDENTIALS_FILE
+        value: /.aws/credentials
+    txtOwnerId: kubelb-example-aws
+    registry: txt
+    provider: aws
+    policy: sync
+    sources:
+      - service
+      - ingress
+      # Comment out the below resources if you are not using Gateway API.
+      - gateway-httproute
+      - gateway-grpcroute
+      - gateway-tlsroute
+      - gateway-tcproute
+      - gateway-udproute
```

#### Credentials secret
@@ -65,12 +68,6 @@ metadata:
type: Opaque
```

-#### Install helm chart
-
-```sh
-helm install external-dns oci://registry-1.docker.io/bitnamicharts/external-dns -n external-dns --values values.yaml
-```
-
### Enable DNS automation

DNS can be enabled/disabled at global or tenant level. For automation purposes, you can configure allowed domains for DNS per tenant.
@@ -90,12 +87,71 @@ spec:
      # If not empty, only the domains specified here will have automation for DNS. Everything else will be ignored.
      allowedDomains:
        - "*.shroud.example.com"
+    # The wildcard domain to use for auto-generated hostnames for Load balancers
+    # In EE Edition, this is also used to generate dynamic hostnames for tunnels.
+    wildcardDomain: "*.apps.example.com"
+    # Allow tenants to specify explicit hostnames for Load balancers and tunnels (in EE Edition)
+    allowExplicitHostnames: false
+  gatewayAPI:
+    class: "eg"
+    defaultGateway:
+      name: "default"
+      namespace: "kubelb"
```

Users can then either use [external-dns annotations](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/annotations/annotations.md) or the annotation `kubelb.k8c.io/manage-dns: true` on their resources to automate DNS management.

The additional validation at the tenant level allows us to use a single instance of external-dns for multiple tenants. 
Although, if required, external-dns can be installed per tenant as well.

#### Configure Gateway

The Gateway resource needs to be configured for this automation to work. For example, if you are using Gateway API, you can configure the Gateway resource to manage DNS as follows:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: default
  namespace: kubelb
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-production
spec:
  gatewayClassName: eg
  listeners:
    ## HTTP listener to solve the ACME HTTP-01 challenge for cert-manager
    - name: http
      protocol: HTTP
      port: 80
      allowedRoutes:
        namespaces:
          from: All
    - protocol: HTTPS
      port: 443
      name: https
      hostname: "*.apps.example.com"
      allowedRoutes:
        namespaces:
          from: All
      tls:
        mode: Terminate
        certificateRefs:
          - kind: Secret
            name: eg-https
    # Required in EE for tunneling
    - protocol: HTTPS
      port: 443
      name: https-connection-manager
      hostname: "connection-manager.example.com"
      allowedRoutes:
        namespaces:
          from: All
      tls:
        mode: Terminate
        certificateRefs:
          - kind: Secret
            name: eg-https-connection-manager
```

## Usage

1. Using external-dns annotations:
diff --git a/content/kubelb/v1.2/_index.en.md b/content/kubelb/v1.2/_index.en.md
new file mode 100644
index 000000000..9f15e962f
--- /dev/null
+++ b/content/kubelb/v1.2/_index.en.md
@@ -0,0 +1,44 @@
+++
title = "Kubermatic KubeLB"
date = 2023-10-27T10:07:15+02:00
weight = 6
description = "Learn how you can use Kubermatic KubeLB to centrally provision and manage load balancers across multiple cloud and on-premise environments."
+++

![KubeLB logo](/img/kubelb/common/logo.png?classes=logo-height)

## What is KubeLB?

KubeLB is a project by Kubermatic. It is a Kubernetes native tool responsible for centrally managing Layer 4 and 7 load balancing configurations for Kubernetes clusters across multi-cloud and on-premise environments.

## Motivation and Background

Kubernetes does not offer any implementation for load balancers and in turn relies on the in-tree or out-of-tree cloud provider implementations to take care of provisioning and managing load balancers. This means that if you are not running on a supported cloud provider, your services of type `LoadBalancer` will never be allotted a load balancer IP address. This is an obstacle for bare-metal Kubernetes environments.

There are solutions available like [MetalLB][2], [Cilium][3], etc. that solve this issue. However, these solutions are focused on a single cluster where you have to deploy the application in the same cluster where you want the load balancers. This is not ideal for multi-cluster environments since you have to configure load balancing for each cluster separately, which makes IP address management non-trivial.

For application load balancing, we have the same case where an external application like [nginx-ingress][4] or [envoy gateway][5] needs to be deployed in the cluster. To further secure traffic, additional tools are required for managing DNS, TLS certificates, Web Application Firewall, etc.

KubeLB solves this problem by providing a centralized management solution that can manage the data plane for multiple Kubernetes clusters across multi-cloud and on-premise environments. This enables you to manage a fleet of Kubernetes clusters in a centralized way, ensuring security compliance, enforcing policies, and providing a consistent experience for developers. 
+ +[2]: https://metallb.universe.tf +[3]: https://cilium.io/use-cases/load-balancer/ +[4]: https://kubernetes.github.io/ingress-nginx/ +[5]: https://gateway.envoyproxy.io/ + +## Table of Content + +{{% children depth=5 %}} +{{% /children %}} + +## Further Information + +- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/) +- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf) +- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb) + +Visit [kubermatic.com](https://www.kubermatic.com/) for further information. + +{{% notice tip %}} +For latest updates follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic) +{{% /notice %}} diff --git a/content/kubelb/v1.2/architecture/_index.en.md b/content/kubelb/v1.2/architecture/_index.en.md new file mode 100644 index 000000000..951b94bb1 --- /dev/null +++ b/content/kubelb/v1.2/architecture/_index.en.md @@ -0,0 +1,82 @@ ++++ +title = "Architecture" +date = 2023-10-27T10:07:15+02:00 +weight = 5 ++++ + +KubeLB is an elastically scalable load balancer with a distributed data plane that can span, serve, and scale with apps across various on-premise and cloud locations. The distributed data plane empowers customers to obtain application affinity at the application microservice levels, thus significantly enhancing the overall application performance. In addition, the clean separation of planes also enables the creation of a unified, centralized control plane that significantly alleviates the operational complexity associated with integrating, operating, and managing each ADC appliance across locations individually. + +## Terminology + +In this chapter, you will find the following KubeLB specific terms: + +1. **Management Cluster/Load balancing Cluster** -- A Kubernetes cluster which is responsible for management of all the tenants and their data plane components. Requests for Layer 4 and Layer 7 load balancing are handled by the management cluster. +2. **Tenant Cluster** -- A Kubernetes cluster which acts as a consumer of the load balancer services. Workloads that need Layer 4 or Layer 7 load balancing are created in the tenant cluster. The tenant cluster hosts the KubeLB Cloud Controller Manager (CCM) component which is responsible for propagating the load balancer configurations to the management cluster. Each Kubernetes cluster where the KubeLB CCM is running is considered a unique tenant. This demarcation is based on the fact that the endpoints, simply the Node IPs and node ports, are unique for each Kubernetes cluster. + +## Design and Architecture + +KubeLB follows the **hub and spoke** model in which the "Management Cluster" acts as the hub and the "Tenant Clusters" act as the spokes. The information flow is from the tenant clusters to the management cluster. The agent running in the tenant cluster watches for nodes, services, ingresses, and Gateway API etc. resources and then propagates the configuration to the management cluster. The management cluster then deploys the load balancer and configures it according to the desired specification. Management cluster then uses Envoy Proxy to route traffic to the appropriate endpoints i.e. the node ports open on the nodes of the tenant cluster. + +For security and isolation, the tenants have no access to any native kubernetes resources in the management cluster. The tenants can only interact with the management cluster via the KubeLB CRDs. 
This ensures that they are not exceeding their access level and only perform controlled operations in the management cluster.

![KubeLB Architecture](/img/kubelb/v1.1/kubelb-high-level-architecture.png?classes=shadow,border "KubeLB Architecture")

## Components

KubeLB comprises two components:

### Cloud Controller Manager

The **KubeLB CCM** is deployed in the tenant clusters and acts as an `agent` that watches for changes in layer 4 and layer 7 load balancing components in the tenant cluster, such as nodes, secrets, services, ingresses, Gateway API resources, etc. Based on its configuration and what's allowed, it processes and propagates the required resources to the `manager` cluster.

For Layer 4 load balancing the `LoadBalancer` CRD is used, and for Layer 7 load balancing the `Route` CRD is used.

### Manager

The **KubeLB manager** is responsible for managing the data plane of its tenants. The manager **registers** the tenant clusters as tenants, and then it receives the load balancer configurations from the CCM(s) in the form of `LoadBalancer` or `Route` CRDs. It then deploys the necessary workloads according to the desired specification.

At its core, the KubeLB manager relies on [envoy proxy][1] to load balance the traffic. The manager is responsible for deploying the envoy proxy and configuring it for each load balancer service per tenant, based on the envoy proxy deployment topology.

## Personas

KubeLB targets the following personas:

1. Platform Provider: The Platform Provider is responsible for the overall environment that the cluster runs in, i.e. the cloud provider. The Platform Provider will interact with GatewayClass resources.
2. Platform Operator: The Platform Operator is responsible for overall cluster administration. They manage policies, network access, application permissions and will interact with Gateway resources.
3. Service Operator: The Service Operator is responsible for defining application configuration and service composition. They will interact with HTTPRoute and TLSRoute resources and other typical Kubernetes resources.

Inspired from [Gateway API Personas](https://gateway-api.sigs.k8s.io/#personas).

Service Operator and Platform Operator are more or less the same persona in KubeLB, and they are responsible for defining the load balancer configurations in the tenant cluster. Platform Provider is the "KubeLB provider" and manages the management cluster.

## Concepts

### Envoy Proxy Deployment Topology

KubeLB manager supports two different deployment topologies for envoy proxy:

1. **Shared (default)**: In this topology, a single envoy proxy is deployed per tenant cluster. All load balancer services in a particular tenant cluster are configured to use this envoy proxy. This is the default topology.
2. **Global**: In this topology, a single envoy proxy is deployed per KubeLB manager. All load balancer services in all tenant clusters are configured to use this envoy proxy. Pitfalls: Due to a single envoy proxy deployment, service-level network access is required from the tenant namespace to the controller namespace.

The consumers are not aware of or affected by the topology. This is only an internal detail for the management cluster.

### User experience

One of the most vital considerations while designing KubeLB was the user experience. There should be as little friction and divergence as possible from how the workflows to manage Layer 4 and Layer 7 workloads used to work before KubeLB. 
All the end users need is to configure the CCM with their desired configuration and the CCM will take care of the rest. With the default configuration, all you need is to use the Class **kubelb** for your resources instead of the provider-specific class that the users used to have before.

### Kubernetes Class

Class is a concept in Kubernetes that is used to mark the ownership of a resource. For example, an Ingress with `class: nginx` will be owned by a controller that implements the IngressClass named `nginx`. We have a similar concept in services, ingresses, gateway API resources, etc. KubeLB leverages this concept to provide a seamless experience to the users by simply filtering out and processing the resources that are owned by KubeLB, by default. This behavior can also be changed by overriding the CCM configuration.

## Installation

See the [installation documentation]({{< relref "../installation/">}}) for more details on how to set up and install KubeLB.

[1]: https://github.com/envoyproxy/envoy

## Table of Content

{{% children depth=5 %}}
{{% /children %}}
diff --git a/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md b/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md
new file mode 100644
index 000000000..2c5a171cf
--- /dev/null
+++ b/content/kubelb/v1.2/architecture/application-load-balancing/_index.en.md
@@ -0,0 +1,43 @@
+++
title = "Application Load Balancing"
date = 2023-10-27T10:07:15+02:00
weight = 10
+++

This document explains the architecture for Layer 7 or Application Layer Load Balancing support in KubeLB.

## Background

With KubeLB, we want to build a product that can manage the data plane of a fleet of clusters (tenants) from a centralized point, providing Layer 4 and Layer 7 load balancing capabilities through a single platform.

KubeLB already had support for L4 load balancing and provisioning/managing load balancers for Kubernetes clusters from a central cluster. With v1.1, we want to extend this functionality to managing Application level load balancing including DNS management, TLS management and termination, and other aspects.

### Challenges

Every Kubernetes cluster operates within its isolated network namespace, which offers several advantages. For instance, individual pods can be effortlessly accessed via unique IP addresses. Deploying your load balancing appliance such as nginx-ingress controller or Envoy Gateway would work seamlessly within the cluster because it would run as a pod inside your cluster and, as such, would have access to the same pod-level network as the rest. This enables the load balancing appliance to route and load balance traffic within the cluster.

However, external clusters, the management cluster in our case, cannot have direct access to the pod-network of the tenant Kubernetes clusters. This introduces a limitation in KubeLB that the management cluster cannot directly route traffic from the load balancing appliance hosted on the management cluster to the tenant clusters. To achieve something like this, the LB cluster would need pod-level network access to ALL the consumer clusters. The options to achieve this are:

- Share the network routes of consumer clusters with the ingress controller server via BGP peering.
- Leverage tools like Submariner, Cilium Cluster Mesh, to create stretched clusters. 
These are the options that we want to look into in the future but they do require significant effort and might not be possible to achieve in some cases since KubeLB is simply an "application" that runs in a Kubernetes Cluster. It doesn't, for now, depend on or dictate the infrastructural requirements for that Kubernetes cluster.

### Solution

Considering the limitations, we settled on using services of type `NodePort` to route traffic from the management cluster to the tenants. This offers a high level of isolation since the only infrastructural requirement is to have network access to the tenant cluster nodes within the node port range (default: 30000-32767). This is required for the envoy proxy to be able to connect to the tenant cluster nodes.

This is already a requirement for Layer 4 load balancing so we are not adding any new requirements specifically for this use case. This also means that no additional infrastructural level or network level modifications need to be made to your existing management or tenant clusters.

For layer 7 requests, KubeLB will automatically create a `NodePort` service against your `ClusterIP` service, hence no manual actions are required from the user's perspective. The user experience remains exactly the same as if they had the load balancing appliance installed within their own cluster.

### Lifecycle of a request

1. Developer creates a deployment, service, and Ingress.
2. KubeLB evaluates if the service is of type ClusterIP and generates a NodePort service against it.
3. After validation, KubeLB CCM will propagate these resources from the tenant to the LB cluster using the `Route` CRD.
4. KubeLB manager then copies/creates the corresponding resources in the tenant namespace in the management cluster.
5. KubeLB CCM polls for the updated status of the Ingress, updates the status when available.
6. KubeLB manager starts routing the traffic for your resource.

![KubeLB Architecture](/img/kubelb/v1.1/layer7-architecture.png?classes=shadow,border "KubeLB Architecture")
diff --git a/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md b/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md
new file mode 100644
index 000000000..0095a8f97
--- /dev/null
+++ b/content/kubelb/v1.2/architecture/layer-4-load-balancing/_index.en.md
@@ -0,0 +1,29 @@
+++
title = "Layer 4 Load Balancing"
date = 2023-10-27T10:07:15+02:00
weight = 5
+++

This document explains the architecture for Layer 4 or TCP/UDP Load Balancing support in KubeLB. This feature is used to provision LoadBalancers for a fleet of clusters (tenants) from a centralized platform.

## Background

Kubernetes does not offer an out-of-the-box implementation of load balancers for clusters. The Network & Application level load balancing is delegated to the IaaS platform (GCP, AWS, Azure, etc.). If you're using a cloud provider that doesn't offer load balancing capabilities then you can't provision services of type `LoadBalancer`.

Solutions which are available, e.g. MetalLB, focus on a single cluster. There are significant downsides to this since the individual cluster admin needs to be aware of and understand how networking works in their cluster to be able to configure an appliance such as MetalLB.

Another common use case was using something like F5 for load balancing. Managing and delegating it to individual clusters had massive administrative overheads.

### Solution

KubeLB focuses on managing the load balancers from a centralized point. 
So instead of having appliances running on each individual cluster, an agent, the `Cloud Controller Manager`, runs on the tenant cluster and propagates all load balancing requests to the management cluster. The KubeLB manager running in the management cluster is then responsible for provisioning the actual load balancers and routing traffic back to the tenant workloads.

### Lifecycle of a request

1. Developer creates a service of type LoadBalancer.
2. After validation, KubeLB CCM will propagate these resources from the tenant to the LB cluster using the `LoadBalancer` CRD.
3. KubeLB manager then copies/creates the corresponding resources in the tenant namespace in the management cluster.
4. KubeLB CCM polls for the updated status of the service, updates the status when available.
5. KubeLB manager starts routing the traffic for your resource.

![KubeLB Architecture](/img/kubelb/common/architecture.png "KubeLB Architecture")
diff --git a/content/kubelb/v1.2/ce-ee-matrix/_index.en.md b/content/kubelb/v1.2/ce-ee-matrix/_index.en.md
new file mode 100644
index 000000000..9d14bcee8
--- /dev/null
+++ b/content/kubelb/v1.2/ce-ee-matrix/_index.en.md
@@ -0,0 +1,41 @@
+++
title = "Community vs Enterprise Edition"
date = 2024-03-15T00:00:00+01:00
weight = 10
+++

KubeLB is available in two versions: Community and Enterprise.

- **Community Edition (CE)**: Free, open source version that is available to the public. The CE is stable, production-ready software available at
- **Enterprise Edition (EE)**: Only available through an active subscription. In addition to commercial support and SLAs for the product, the EE version contains a larger feature set in comparison to the CE version.

{{% notice note %}}
[Get in touch with Kubermatic](mailto:sales@kubermatic.com) to find out more about the KubeLB Enterprise offering.
{{% /notice %}}

## Feature Matrix

| Feature | EE (Enterprise Edition) | CE (Community Edition) |
|-------------------------------|--------------------------|-------------------------|
| Ingress | ✔️ | ✔️ |
| Gateway API v1 | ✔️ | ✔️ |
| Bring your own secrets (certificates) | ✔️ | ✔️ |
| Tunneling support through CLI | ✔️ | ❌ |
| Gateway API beta/alpha (TLS/TCP/UDP routes) | ✔️ | ❌ |
| Multiple Gateways | ✔️ | ❌ |
| DNS automation | ✔️ | ❌ |
| Certificate Management | ✔️ | ❌ |
| Limits for LoadBalancers, Gateways | ✔️ | ❌ |

{{% notice note %}}
KubeLB supports the following products for Ingress and Gateway API resources:

- [Ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for **Ingress** resources.
- [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for **Gateway API** resources.

While other products might work for Ingress and Gateway API resources, we are not testing them and can't guarantee compatibility.
{{% /notice %}}

## Support Policy

For support policy, please refer to the [KubeLB Support Policy](../support-policy/). 
diff --git a/content/kubelb/v1.2/cli/_index.en.md b/content/kubelb/v1.2/cli/_index.en.md
new file mode 100644
index 000000000..1058a4084
--- /dev/null
+++ b/content/kubelb/v1.2/cli/_index.en.md
@@ -0,0 +1,62 @@
+++
title = "KubeLB CLI"
date = 2025-08-27T10:07:15+02:00
weight = 30
description = "Learn how you can use KubeLB CLI to provision Load Balancers and tunnels to expose local workloads"
+++

![KubeLB CLI](/img/kubelb/common/logo.png?classes=logo-height)

## KubeLB CLI

KubeLB CLI is a command line tool that has been introduced to complement KubeLB and make it easier to manage load balancing configurations for multiple tenants in Kube and non-Kube based environments.

The source code is open source and available at [kubermatic/kubelb-cli](https://github.com/kubermatic/kubelb-cli).

{{% notice note %}}
KubeLB CLI is currently in the beta feature stage and is not yet ready for production use. We are actively working on the feature set and taking feedback from the community and our customers to improve the CLI.
{{% /notice %}}

## Installation

### Manual Installation

Users can download the pre-compiled binaries from the [releases page](https://github.com/kubermatic/kubelb-cli/releases) for their system and copy them to the desired location.

{{% notice note %}}
KubeLB CLI is currently available for Linux, macOS, and Windows.
{{% /notice %}}

### Install using `go install`

If you have Go installed, you can also build the binary from the source code using the following command:

```bash
go install github.com/kubermatic/kubelb-cli@v0.1.0
```

### Configuration

KubeLB CLI needs the tenant-scoped kubeconfig and the tenant name to be configured either via environment variables or through the CLI flags. Environment variables are preferred as you don't have to specify them for each command.

```bash
export KUBECONFIG=/path/to/kubeconfig
export TENANT_NAME=my-tenant
```

## Table of Content

{{% children depth=5 %}}
{{% /children %}}

## Further Information

- [Introducing KubeLB](https://www.kubermatic.com/products/kubelb/)
- [KubeLB Whitepaper](https://www.kubermatic.com/static/KubeLB-Cloud-Native-Multi-Tenant-Load-Balancer.pdf)
- [KubeLB - GitHub Repository](https://github.com/kubermatic/kubelb)

Visit [kubermatic.com](https://www.kubermatic.com/) for further information.

{{% notice tip %}}
For latest updates follow us on Twitter [@Kubermatic](https://twitter.com/Kubermatic)
{{% /notice %}}
diff --git a/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md b/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md
new file mode 100644
index 000000000..a40e2097b
--- /dev/null
+++ b/content/kubelb/v1.2/cli/compatibility-matrix/_index.en.md
@@ -0,0 +1,21 @@
+++
title = "Compatibility Matrix"
date = 2025-08-27T00:00:00+01:00
weight = 30
+++

KubeLB CLI uses the Kubernetes management cluster that has KubeLB installed as its source of truth for the load balancing configurations.

Since it has been introduced alongside KubeLB v1.2, it has a hard dependency on the KubeLB management cluster being at least v1.2.

{{% notice note %}}
KubeLB CLI is currently in the beta feature stage and is not yet ready for production use. We are actively working on the feature set and taking feedback from the community and our customers to improve the CLI. 
+{{% /notice %}}
+
+| KubeLB CLI | KubeLB Management Cluster |
+|------------|---------------------------|
+| v0.1.0 | v1.2+ |
+
+## Support Policy
+
+For support policy, please refer to the [KubeLB Support Policy](../../support-policy/). diff --git a/content/kubelb/v1.2/cli/loadbalancing/_index.en.md b/content/kubelb/v1.2/cli/loadbalancing/_index.en.md new file mode 100644 index 000000000..36f51a059 --- /dev/null +++ b/content/kubelb/v1.2/cli/loadbalancing/_index.en.md @@ -0,0 +1,36 @@
++++
+title = "Load Balancing"
+date = 2025-08-27T00:00:00+01:00
+weight = 20
++++
+
+KubeLB CLI can be used to quickly provision load balancers that can be public or private, depending on your load balancing configuration and needs. KubeLB then takes care of securing your endpoint with TLS certificates, automatically creating DNS records, and managing the load balancing configurations.
+
+## Prerequisites
+
+Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation to configure the Gateway or Ingress to manage DNS for the load balancer.
+
+## Create a Load Balancer
+
+To create a load balancer, you can use the `kubelb loadbalancer create` command.
+
+For example:
+
+```bash
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --hostname my-app.example.com
+```
+
+This will create a LoadBalancer resource that forwards traffic to the endpoints `10.0.1.1:8080` and `10.0.1.2:8080` and will be accessible at `https://my-app.example.com`.
+
+Specifying a hostname is optional; if it is not provided, KubeLB will generate a random hostname for you, provided the wildcard domain is enabled for the tenant or globally.
+
+![Demo animation](/img/kubelb/v1.2/loadbalancer.gif?classes=shadow,border "Load Balancer Demo")
+
+## Further actions
+
+Further actions include:
+
+- Updating the load balancer configuration
+- Deleting the load balancer
+- Getting the load balancer details
+- Listing all the load balancers diff --git a/content/kubelb/v1.2/cli/references/_index.en.md b/content/kubelb/v1.2/cli/references/_index.en.md new file mode 100644 index 000000000..44f9eae92 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/_index.en.md @@ -0,0 +1,40 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference of the Kubermatic KubeLB CLI commands and flags.
+
+## kubelb
+
+KubeLB CLI - Manage load balancers and create secure tunnels
+
+### Synopsis
+
+KubeLB CLI provides tools to manage KubeLB load balancers and create secure tunnels
+to expose local services through the KubeLB infrastructure.
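As a quick orientation before the flag reference below: the tenant scope can be supplied per command via flags, or once via the environment variables documented on the CLI overview page. A minimal sketch; the tenant name and kubeconfig path are placeholders:

```bash
# Scope a single command explicitly:
kubelb status --tenant=my-tenant --kubeconfig=./kubeconfig

# Or export the scope once and omit the flags afterwards:
export KUBECONFIG=./kubeconfig
export TENANT_NAME=my-tenant
kubelb status
```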
+ +### Options + +``` + -h, --help help for kubelb + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](commands/kubelb_completion) - Generate the autocompletion script for the specified shell +* [kubelb docs](commands/kubelb_docs) - Generate markdown documentation for all commands +* [kubelb expose](commands/kubelb_expose) - Expose a local port via tunnel +* [kubelb loadbalancer](commands/kubelb_loadbalancer) - Manage KubeLB load balancers +* [kubelb status](commands/kubelb_status) - Display current status of KubeLB +* [kubelb tunnel](commands/kubelb_tunnel) - Manage secure tunnels to expose local services +* [kubelb version](commands/kubelb_version) - Print the version information diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md new file mode 100644 index 000000000..2ff39c182 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion.md @@ -0,0 +1,41 @@ ++++ +title = "kubelb completion" +date = 2025-08-27T00:00:00+01:00 +weight = 200 ++++ + +## kubelb completion + +Generate the autocompletion script for the specified shell + +### Synopsis + +Generate the autocompletion script for kubelb for the specified shell. +See each sub-command's help for details on how to use the generated script. 
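For a quick, non-persistent test, the generated script can be sourced directly into the current session; bash is shown here, and the per-shell pages below cover persistent setups for each shell:

```bash
# Load kubelb completions for the current bash session only.
source <(kubelb completion bash)
```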
+ +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels +* [kubelb completion bash](../kubelb_completion_bash) - Generate the autocompletion script for bash +* [kubelb completion fish](../kubelb_completion_fish) - Generate the autocompletion script for fish +* [kubelb completion powershell](../kubelb_completion_powershell) - Generate the autocompletion script for powershell +* [kubelb completion zsh](../kubelb_completion_zsh) - Generate the autocompletion script for zsh diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md new file mode 100644 index 000000000..fa713d587 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_bash.md @@ -0,0 +1,60 @@ ++++ +title = "kubelb completion bash" +date = 2025-08-27T00:00:00+01:00 +weight = 210 ++++ + +## kubelb completion bash + +Generate the autocompletion script for bash + +### Synopsis + +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(kubelb completion bash) + +To load completions for every new session, execute once: + +#### Linux + + kubelb completion bash > /etc/bash_completion.d/kubelb + +#### macOS + + kubelb completion bash > $(brew --prefix)/etc/bash_completion.d/kubelb + +You will need to start a new shell for this setup to take effect. 
+ +``` +kubelb completion bash +``` + +### Options + +``` + -h, --help help for bash + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md new file mode 100644 index 000000000..81cd45c0b --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_fish.md @@ -0,0 +1,51 @@ ++++ +title = "kubelb completion fish" +date = 2025-08-27T00:00:00+01:00 +weight = 220 ++++ + +## kubelb completion fish + +Generate the autocompletion script for fish + +### Synopsis + +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + kubelb completion fish | source + +To load completions for every new session, execute once: + + kubelb completion fish > ~/.config/fish/completions/kubelb.fish + +You will need to start a new shell for this setup to take effect. + +``` +kubelb completion fish [flags] +``` + +### Options + +``` + -h, --help help for fish + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md new file mode 100644 index 000000000..f01116ed0 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_powershell.md @@ -0,0 +1,48 @@ ++++ +title = "kubelb completion powershell" +date = 2025-08-27T00:00:00+01:00 +weight = 230 ++++ + +## kubelb completion powershell + +Generate the autocompletion script for powershell + +### Synopsis + +Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + kubelb completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+ +``` +kubelb completion powershell [flags] +``` + +### Options + +``` + -h, --help help for powershell + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md new file mode 100644 index 000000000..4f8ab1f41 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_completion_zsh.md @@ -0,0 +1,62 @@ ++++ +title = "kubelb completion zsh" +date = 2025-08-27T00:00:00+01:00 +weight = 240 ++++ + +## kubelb completion zsh + +Generate the autocompletion script for zsh + +### Synopsis + +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(kubelb completion zsh) + +To load completions for every new session, execute once: + +#### Linux + + kubelb completion zsh > "${fpath[1]}/_kubelb" + +#### macOS + + kubelb completion zsh > $(brew --prefix)/share/zsh/site-functions/_kubelb + +You will need to start a new shell for this setup to take effect. + +``` +kubelb completion zsh [flags] +``` + +### Options + +``` + -h, --help help for zsh + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb completion](../kubelb_completion) - Generate the autocompletion script for the specified shell diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md b/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md new file mode 100644 index 000000000..b41a983d4 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_docs.md @@ -0,0 +1,42 @@ ++++ +title = "kubelb docs" +date = 2025-08-27T00:00:00+01:00 +weight = 40 ++++ + +## kubelb docs + +Generate markdown documentation for all commands + +### Synopsis + +Generate markdown documentation for all CLI commands and their parameters. +This creates individual markdown files for each command with complete usage information. 
+
+```
+kubelb docs [flags]
+```
+
+### Options
+
+```
+  -h, --help            help for docs
+  -o, --output string   Output directory for generated documentation (default "./docs")
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md b/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md new file mode 100644 index 000000000..6b435de09 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_expose.md @@ -0,0 +1,62 @@
++++
+title = "kubelb expose"
+date = 2025-08-27T00:00:00+01:00
+weight = 30
++++
+
+## kubelb expose
+
+Expose a local port via tunnel
+
+### Synopsis
+
+Expose a local port via secure tunnel with auto-generated name.
+
+This is a convenience command that creates a tunnel with an auto-generated
+name and immediately connects to it.
+
+Examples:
+
+    # Expose port 8080 with auto-generated tunnel name
+
+    kubelb expose 8080
+
+    # Expose port 3000 with custom hostname
+
+    kubelb expose 3000 --hostname api.example.com
+
+```
+kubelb expose PORT [flags]
+```
+
+### Examples
+
+```
+kubelb expose 8080 --tenant=mytenant
+```
+
+### Options
+
+```
+  -h, --help              help for expose
+      --hostname string   Custom hostname for the tunnel (default: auto-assigned wildcard domain)
+  -o, --output string     Output format (summary, yaml, json) (default "summary")
+      --wait              Wait for tunnel to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md new file mode 100644 index 000000000..ea12542a3 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer.md @@ -0,0 +1,40 @@
++++
+title = "kubelb loadbalancer"
+date = 2025-08-27T00:00:00+01:00
+weight = 60
++++
+
+## kubelb loadbalancer
+
+Manage KubeLB load balancers
+
+### Synopsis
+
+Manage KubeLB load balancer configurations
+
+### Options
+
+```
+  -h, --help   help for loadbalancer
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      
--log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels
+* [kubelb loadbalancer create](../kubelb_loadbalancer_create) - Create a load balancer
+* [kubelb loadbalancer delete](../kubelb_loadbalancer_delete) - Delete a load balancer
+* [kubelb loadbalancer get](../kubelb_loadbalancer_get) - Get a load balancer
+* [kubelb loadbalancer list](../kubelb_loadbalancer_list) - List load balancers diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md new file mode 100644 index 000000000..e542a0a56 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_create.md @@ -0,0 +1,69 @@
++++
+title = "kubelb loadbalancer create"
+date = 2025-08-27T00:00:00+01:00
+weight = 70
++++
+
+## kubelb loadbalancer create
+
+Create a load balancer
+
+### Synopsis
+
+Create a new HTTP load balancer with the specified endpoints.
+
+The load balancer supports HTTP routing and hostname-based access.
+
+Examples:
+
+    # Create HTTP load balancer with random hostname
+
+    kubelb lb create my-app --endpoints 10.0.1.1:8080
+
+    # Create HTTP load balancer with custom hostname
+
+    kubelb lb create my-app --endpoints 10.0.1.1:8080 --hostname app.example.com
+
+    # Create HTTP load balancer without a route
+
+    kubelb lb create my-app --endpoints 10.0.1.1:8080 --route=false
+
+```
+kubelb loadbalancer create NAME [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer create my-app --endpoints 10.0.1.1:8080,10.0.1.2:8080 --tenant=mytenant
+```
+
+### Options
+
+```
+  -e, --endpoints string   Comma-separated list of IP:port pairs (required)
+  -h, --help               help for create
+      --hostname string    Custom hostname for the route
+  -o, --output string      Output format (summary, yaml, json) (default "summary")
+  -p, --protocol string    Protocol (http only) (default "http")
+      --route              Create a route for HTTP traffic (default true)
+      --type string        LoadBalancer type (ClusterIP, LoadBalancer), defaults to ClusterIP (default "ClusterIP")
+      --wait               Wait for load balancer to be ready (default true)
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md new file mode 100644 index 000000000..26535b8fa --- 
/dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_delete.md @@ -0,0 +1,54 @@
++++
+title = "kubelb loadbalancer delete"
+date = 2025-08-27T00:00:00+01:00
+weight = 90
++++
+
+## kubelb loadbalancer delete
+
+Delete a load balancer
+
+### Synopsis
+
+Delete a load balancer by ID.
+
+This command will:
+- Check if the load balancer was created by the CLI
+- Display a warning if it wasn't created by the CLI
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the load balancer resource
+
+
+```
+kubelb loadbalancer delete ID [flags]
+```
+
+### Examples
+
+```
+kubelb loadbalancer delete nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -f, --force   Force deletion without confirmation
+  -h, --help    help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md new file mode 100644 index 000000000..c8259ea3f --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_get.md @@ -0,0 +1,46 @@
++++
+title = "kubelb loadbalancer get"
+date = 2025-08-27T00:00:00+01:00
+weight = 80
++++
+
+## kubelb loadbalancer get
+
+Get a load balancer
+
+### Synopsis
+
+Retrieve a load balancer by ID and output its complete YAML specification.
+ +``` +kubelb loadbalancer get ID [flags] +``` + +### Examples + +``` +kubelb loadbalancer get nginx-loadbalancer --tenant=mytenant --kubeconfig=./kubeconfig +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md new file mode 100644 index 000000000..385ca74e8 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_loadbalancer_list.md @@ -0,0 +1,47 @@ ++++ +title = "kubelb loadbalancer list" +date = 2025-08-27T00:00:00+01:00 +weight = 85 ++++ + +## kubelb loadbalancer list + +List load balancers + +### Synopsis + +List all load balancers for the tenant. + + +``` +kubelb loadbalancer list [flags] +``` + +### Examples + +``` +kubelb loadbalancer list --tenant=mytenant --kubeconfig=./kubeconfig +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb loadbalancer](../kubelb_loadbalancer) - Manage KubeLB load balancers diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_status.md b/content/kubelb/v1.2/cli/references/commands/kubelb_status.md new file mode 100644 index 000000000..b1bebd066 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_status.md @@ -0,0 +1,47 @@ ++++ +title = "kubelb status" +date = 2025-08-27T00:00:00+01:00 +weight = 20 ++++ + +## kubelb status + +Display current status of KubeLB + +### Synopsis + +Display the current status of KubeLB including version information, configuration, and state + +``` +kubelb status [flags] +``` + +### Examples + +``` + # Display status for current tenant + kubelb status +``` + +### Options + +``` + -h, --help help for status +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for 
the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md new file mode 100644 index 000000000..89eb79aec --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel.md @@ -0,0 +1,41 @@ ++++ +title = "kubelb tunnel" +date = 2025-08-27T00:00:00+01:00 +weight = 100 ++++ + +## kubelb tunnel + +Manage secure tunnels to expose local services + +### Synopsis + +Create and manage secure tunnels to expose local services through the KubeLB infrastructure + +### Options + +``` + -h, --help help for tunnel +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels +* [kubelb tunnel connect](../kubelb_tunnel_connect) - Connect to an existing tunnel +* [kubelb tunnel create](../kubelb_tunnel_create) - Create a tunnel +* [kubelb tunnel delete](../kubelb_tunnel_delete) - Delete a tunnel +* [kubelb tunnel get](../kubelb_tunnel_get) - Get a tunnel +* [kubelb tunnel list](../kubelb_tunnel_list) - List tunnels diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md new file mode 100644 index 000000000..7427539ac --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_connect.md @@ -0,0 +1,50 @@ ++++ +title = "kubelb tunnel connect" +date = 2025-08-27T00:00:00+01:00 +weight = 115 ++++ + +## kubelb tunnel connect + +Connect to an existing tunnel + +### Synopsis + +Connect to an existing tunnel to start forwarding traffic. + +This command establishes a secure connection to the tunnel and forwards +traffic from the tunnel to your local service. 
+ +``` +kubelb tunnel connect NAME [flags] +``` + +### Examples + +``` +kubelb tunnel connect my-app --port 8080 --tenant=mytenant +``` + +### Options + +``` + -h, --help help for connect + -p, --port int Local port to forward to (required) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md new file mode 100644 index 000000000..bd164bdce --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_create.md @@ -0,0 +1,64 @@ ++++ +title = "kubelb tunnel create" +date = 2025-08-27T00:00:00+01:00 +weight = 110 ++++ + +## kubelb tunnel create + +Create a tunnel + +### Synopsis + +Create a new secure tunnel to expose a local service. + +The tunnel provides secure access to your local service through the KubeLB infrastructure. + +Examples: + # Create tunnel for local app on port 8080 + kubelb tunnel create my-app --port 8080 + + # Create tunnel with custom hostname + kubelb tunnel create my-app --port 8080 --hostname app.example.com + + # Create tunnel and connect immediately + kubelb tunnel create my-app --port 8080 --connect + + +``` +kubelb tunnel create NAME [flags] +``` + +### Examples + +``` +kubelb tunnel create my-app --port 8080 --tenant=mytenant +``` + +### Options + +``` + --connect Connect to tunnel after creation + -h, --help help for create + --hostname string Custom hostname for the tunnel (default: auto-assigned wildcard domain) + -o, --output string Output format (summary, yaml, json) (default "summary") + -p, --port int Local port to tunnel (required) + --wait Wait for tunnel to be ready (default true) +``` + +### Options inherited from parent commands + +``` + --kubeconfig string Path to the kubeconfig for the tenant + --log-file string Log to file instead of stderr + --log-format string Log format (cli, json, text) - defaults to cli + --log-level string Log level (error, warn, info, debug, trace) - overrides verbosity + -q, --quiet Suppress non-essential output (equivalent to --v=0) + -t, --tenant string Name of the tenant + --timeout duration Timeout for the command (e.g., 30s, 5m) (default 4m0s) + -v, --v int Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1) +``` + +### SEE ALSO + +* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md new file mode 100644 index 000000000..e9a9cee37 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_delete.md @@ -0,0 +1,53 @@ ++++ +title = "kubelb tunnel delete" +date = 2025-08-27T00:00:00+01:00 +weight = 130 ++++ + +## kubelb tunnel delete + 
+Delete a tunnel
+
+### Synopsis
+
+Delete a tunnel by name.
+
+This command will:
+- Check if the tunnel exists
+- Ask for confirmation before deletion (unless --force is used)
+- Delete the tunnel resource
+
+
+```
+kubelb tunnel delete NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel delete my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -f, --force   Force deletion without confirmation
+  -h, --help    help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md new file mode 100644 index 000000000..662ac2f3f --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_get.md @@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel get"
+date = 2025-08-27T00:00:00+01:00
+weight = 120
++++
+
+## kubelb tunnel get
+
+Get a tunnel
+
+### Synopsis
+
+Retrieve a tunnel by name and output its complete YAML specification.
+
+
+```
+kubelb tunnel get NAME [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel get my-app --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -h, --help   help for get
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md new file mode 100644 index 000000000..e46291576 --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_tunnel_list.md @@ -0,0 +1,47 @@
++++
+title = "kubelb tunnel list"
+date = 2025-08-27T00:00:00+01:00
+weight = 125
++++
+
+## kubelb tunnel list
+
+List tunnels
+
+### Synopsis
+
+List all tunnels for the tenant.
+
+
+```
+kubelb tunnel list [flags]
+```
+
+### Examples
+
+```
+kubelb tunnel list --tenant=mytenant --kubeconfig=./kubeconfig
+```
+
+### Options
+
+```
+  -h, --help   help for list
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb tunnel](../kubelb_tunnel) - Manage secure tunnels to expose local services diff --git a/content/kubelb/v1.2/cli/references/commands/kubelb_version.md b/content/kubelb/v1.2/cli/references/commands/kubelb_version.md new file mode 100644 index 000000000..3a5a117fa --- /dev/null +++ b/content/kubelb/v1.2/cli/references/commands/kubelb_version.md @@ -0,0 +1,47 @@
++++
+title = "kubelb version"
+date = 2025-08-27T00:00:00+01:00
+weight = 50
++++
+
+## kubelb version
+
+Print the version information
+
+### Synopsis
+
+Print the version information of the KubeLB CLI
+
+```
+kubelb version [flags]
+```
+
+### Examples
+
+```
+kubelb version
+```
+
+### Options
+
+```
+  -h, --help    help for version
+      --short   Print only the version in short format
+```
+
+### Options inherited from parent commands
+
+```
+      --kubeconfig string   Path to the kubeconfig for the tenant
+      --log-file string     Log to file instead of stderr
+      --log-format string   Log format (cli, json, text) - defaults to cli
+      --log-level string    Log level (error, warn, info, debug, trace) - overrides verbosity
+  -q, --quiet               Suppress non-essential output (equivalent to --v=0)
+  -t, --tenant string       Name of the tenant
+      --timeout duration    Timeout for the command (e.g., 30s, 5m) (default 4m0s)
+  -v, --v int               Verbosity level (0-4): 0=errors only, 1=basic info, 2=detailed status, 3=debug info, 4=trace (default 1)
+```
+
+### SEE ALSO
+
+* [kubelb](../kubelb) - KubeLB CLI - Manage load balancers and create secure tunnels diff --git a/content/kubelb/v1.2/cli/release-notes/_index.en.md b/content/kubelb/v1.2/cli/release-notes/_index.en.md new file mode 100644 index 000000000..3a6fbcf49 --- /dev/null +++ b/content/kubelb/v1.2/cli/release-notes/_index.en.md @@ -0,0 +1,31 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+
+## Kubermatic KubeLB CLI v0.1.0
+
+- [Kubermatic KubeLB CLI v0.1.0](#kubermatic-kubelb-cli-v010)
+- [v0.1.0](#v010)
+  - [Highlights](#highlights)
+    - [Community Edition (CE)](#community-edition-ce)
+    - [Enterprise Edition (EE)](#enterprise-edition-ee)
+
+## v0.1.0
+
+**GitHub release: [v0.1.0](https://github.com/kubermatic/kubelb-cli/releases/tag/v0.1.0)**
+
+### Highlights
+
+#### Community Edition (CE)
+
+- Support for provisioning load balancers with hostnames. The hostnames are secured with TLS certificates, and the DNS and traffic policies are managed by KubeLB.
+- A status command has been introduced to get the status of the tenant. This includes the load balancer limit, allowed domains, wildcard domain, etc.
+- The version command can be used to get the version of the CLI.
+- Added supply chain security with SBOMs and cosign signatures for the CLI.
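The cosign signatures mentioned above can, in principle, be checked with standard cosign keyless verification. The asset names and signing identity below are assumptions based on common release setups, not confirmed specifics of this project; check the release page for the actual file names:

```bash
# Sketch: keyless verification of a (hypothetical) signed checksums file.
# File names and the identity regexp are assumptions; adjust to the real release assets.
cosign verify-blob \
  --certificate checksums.txt.pem \
  --signature checksums.txt.sig \
  --certificate-identity-regexp '^https://github.com/kubermatic/kubelb-cli' \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  checksums.txt
```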
+
+#### Enterprise Edition (EE)
+
+- Tunneling has been introduced to allow users to tunnel locally running applications on their workstations or inside VMs and expose them over the internet without worrying about firewalls, NAT, DNS, and certificate issues. diff --git a/content/kubelb/v1.2/cli/tunneling/_index.en.md b/content/kubelb/v1.2/cli/tunneling/_index.en.md new file mode 100644 index 000000000..329c1ff2d --- /dev/null +++ b/content/kubelb/v1.2/cli/tunneling/_index.en.md @@ -0,0 +1,127 @@
++++
+title = "Tunneling"
+date = 2025-08-27T00:00:00+01:00
+weight = 10
+enterprise = true
++++
+
+Tunneling allows users to tunnel locally running applications on their workstations or inside VMs and expose them over the internet without worrying about firewalls, NAT, DNS, and certificate issues. It is a great way to expose your local services to the internet without having to worry about the complexities of setting up a load balancer and a DNS record.
+
+KubeLB CLI will expose the workload on a secure tunnel with TLS certificates and a DNS record.
+
+These tunnels are designed to be reusable and hence have their own dedicated API type in KubeLB, i.e. `Tunnel`. Once a tunnel is created, it's registered with the KubeLB management cluster and can be connected to using the `kubelb tunnel connect` command.
+
+## Tunnels
+
+### Tunnel Configuration
+
+To enable tunneling, you need to configure the KubeLB management cluster to expose the connection management API. The values.yaml file can be modified like this:
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  debug: true
+  envoyProxy:
+    # -- Topology defines the deployment topology for Envoy Proxy. Valid values are: shared, dedicated, and global.
+    topology: shared
+    # -- The number of replicas for the Envoy Proxy deployment.
+    replicas: 1
+  # -- Propagate all annotations from the LB resource to the LB service.
+  propagateAllAnnotations: true
+
+  # Tunnel configuration
+  tunnel:
+    enabled: true
+    connectionManager:
+      httpRoute:
+        enabled: true
+        domain: "connection-manager.example.com"
+        gatewayName: "default"
+        gatewayNamespace: "kubelb"
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: "*.apps.example.com,connection-manager.example.com"
+          external-dns.alpha.kubernetes.io/ttl: "300"
+          cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+      ingress:
+        enabled: false
+        className: "nginx"
+        annotations:
+          cert-manager.io/cluster-issuer: "letsencrypt-production-dns"
+          nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+          nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+          external-dns.alpha.kubernetes.io/hostname: connection-manager-ingress.example.com
+          external-dns.alpha.kubernetes.io/ttl: "10"
+          nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+        hosts:
+          - host: connection-manager-ingress.example.com
+            paths:
+              - path: /tunnel
+                pathType: Prefix
+              - path: /health
+                pathType: Prefix
+        tls:
+          - secretName: connection-manager-tls
+            hosts:
+              - connection-manager-ingress.example.com
+```
+
+You can use either Ingress or HTTPRoute to expose the connection management API; Gateway API is the preferred way to expose it. In this example, `*.apps.example.com` is used as a wildcard domain for these tunnels; you can use any other domain you want.
+
+Afterwards, you need to configure the connection manager URL at the Config or Tenant level:
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Config
+metadata:
+  name: default
+  namespace: kubelb
+spec:
+  ingress:
+    class: "nginx"
+  gatewayAPI:
+    class: "eg"
+  loadBalancer:
+    limit: 15
+  certificates:
+    defaultClusterIssuer: letsencrypt-staging-dns
+  tunnel:
+    connectionManagerURL: "https://connection-manager.example.com"
+```
+
+**NOTE: Apart from this, the Gateway or Ingress should be configured to manage DNS for the tunnel. Please refer to the [DNS](../../tutorials/security/dns/#enable-dns-automation) documentation for more details.**
+
+### Provisioning Tunnels
+
+Tunnels are created using either the `kubelb expose` command or the `kubelb tunnel create` command.
+
+```bash
+kubelb expose 1313
+```
+
+![Demo animation](/img/kubelb/v1.2/tunneling.gif?classes=shadow,border "Tunneling Demo")
+
+This will create a tunnel with a generated hostname and will forward traffic to port `1313` on the local machine. The ingress point for this traffic is KubeLB's management cluster, and hence the traffic is secure and encrypted.
+
+An alternative way to create a tunnel is to use the `kubelb tunnel create` command.
+
+```bash
+kubelb tunnel create my-app --port 1313
+```
+
+This will create a tunnel with a generated hostname that can be used through the `kubelb tunnel connect` command.
+
+```bash
+kubelb tunnel connect my-app --port 1313
+```
+
+This will connect to the tunnel and forward traffic to port `1313` on the local machine. The ingress point for this traffic is KubeLB's management cluster, and hence the traffic is secure and encrypted.
+
+## Further actions
+
+Further actions include:
+
+- Deleting the tunnel
+- Getting the tunnel details
+- Listing all the tunnels
+
+For more information, please refer to the [Tunnel API](../../references/api/tunnel/) documentation. diff --git a/content/kubelb/v1.2/compatibility-matrix/_index.en.md b/content/kubelb/v1.2/compatibility-matrix/_index.en.md new file mode 100644 index 000000000..11195dcff --- /dev/null +++ b/content/kubelb/v1.2/compatibility-matrix/_index.en.md @@ -0,0 +1,21 @@
++++
+title = "Compatibility Matrix"
+date = 2024-03-15T00:00:00+01:00
+weight = 30
++++
+
+Currently, we don't have any hard dependencies on specific components or their versions. This matrix reflects the component versions that each KubeLB release is tested with.
+
+We only test our software with specific versions of the components; we do not enforce these versions, but they are the ones tested. KubeLB should work with other versions of Kubernetes, Gateway API, and Envoy Gateway as well, but we can't guarantee it.
+
+**KubeLB supports [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) for Ingress resources. [Envoy Gateway](https://gateway.envoyproxy.io/) is supported for Gateway API resources.
While other products might work for Ingress and Gateway API resources, we do not test them and cannot guarantee compatibility.**
+
+| KubeLB | Kubermatic Kubernetes Platform | Gateway API | Envoy Gateway | NGINX Ingress | Kubernetes |
+|--------|-------------------------------|-------------|---------------|-------------------------|------------|
+| v1.2 | v2.27, v2.28 | v1.3.0 | v1.3.0 | v1.10.0+ | v1.27+ |
+| v1.1 | v2.26, v2.27 | v1.1.0 | v1.1.0 | v1.10.0+ | v1.27+ |
+| v1.0 | v2.24, v2.25 | Not Supported | Not Supported | v1.10.0+ | v1.27+ |
+
+## Support Policy
+
+For support policy, please refer to the [KubeLB Support Policy](../support-policy/). diff --git a/content/kubelb/v1.2/installation/_index.en.md b/content/kubelb/v1.2/installation/_index.en.md new file mode 100644 index 000000000..16ff62eab --- /dev/null +++ b/content/kubelb/v1.2/installation/_index.en.md @@ -0,0 +1,16 @@
++++
+title = "Installation"
+date = 2018-04-28T12:07:15+02:00
+weight = 15
++++
+
+This chapter offers guidance on how to install KubeLB and set up the tenant and management clusters.
+
+{{% notice tip %}}
+It is also recommended to first make yourself familiar with our [architecture documentation]({{< ref "../architecture/" >}}).
+{{% /notice %}}
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}} diff --git a/content/kubelb/v1.2/installation/management-cluster/_index.en.md b/content/kubelb/v1.2/installation/management-cluster/_index.en.md new file mode 100644 index 000000000..6a37e7052 --- /dev/null +++ b/content/kubelb/v1.2/installation/management-cluster/_index.en.md @@ -0,0 +1,314 @@
++++
+title = "Install KubeLB Manager and Set Up the Management Cluster"
+linkTitle = "Setup Management Cluster"
+date = 2023-10-27T10:07:15+02:00
+weight = 20
++++
+
+## Requirements
+
+* Service type `LoadBalancer` implementation. This can be a cloud solution or a self-managed implementation like [MetalLB](https://metallb.universe.tf).
+* Network access to the tenant cluster nodes within the node port range (default: 30000-32767). This is required for the Envoy proxy to be able to connect to the tenant cluster nodes.
+
+## Installation of the KubeLB manager
+
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, please set `kubelb.enableGatewayAPI` to `true` in the `values.yaml`. Otherwise, KubeLB will not be able to start due to missing CRDs. {{% /notice %}}
+
+{{< tabs name="KubeLB Manager" >}}
+{{% tab name="Enterprise Edition" %}}
+
+### Prerequisites
+
+* Create a namespace **kubelb** for the KubeLB manager to be deployed in.
+* Create **imagePullSecrets** for the chart to pull the image from the registry in the kubelb namespace.
+
+At this point, a minimal values.yaml should look like this:
+
+```yaml
+imagePullSecrets:
+  - name: 
+```
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager-ee --version=v1.2.0 --untardir "." --untar
+## Apply CRDs
+kubectl apply -f kubelb-manager-ee/crds/
+## Create and update values.yaml with the required values.
+helm upgrade --install kubelb-manager kubelb-manager-ee --namespace kubelb -f kubelb-manager-ee/values.yaml --create-namespace +``` + +### KubeLB Manager EE Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `10` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| autoscaling.targetMemoryUtilizationPercentage | int | `80` | | +| cert-manager.enabled | bool | `false` | Enable cert-manager. | +| external-dns.enabled | bool | `false` | Enable External-DNS. | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"quay.io/kubermatic/kubelb-manager-ee"` | | +| image.tag | string | `"v1.2.0"` | | +| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | | +| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. | +| kubelb.debug | bool | `true` | | +| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. | +| kubelb.enableLeaderElection | bool | `true` | | +| kubelb.enableTenantMigration | bool | `true` | | +| kubelb.envoyProxy.affinity | object | `{}` | | +| kubelb.envoyProxy.nodeSelector | object | `{}` | | +| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. | +| kubelb.envoyProxy.resources | object | `{}` | | +| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. | +| kubelb.envoyProxy.tolerations | list | `[]` | | +| kubelb.envoyProxy.topology | string | `"shared"` | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global. | +| kubelb.envoyProxy.useDaemonset | bool | `false` | Use DaemonSet for Envoy Proxy deployment instead of Deployment. | +| kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. | +| kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. | +| kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. | +| kubelb.tunnel.connectionManager.affinity | object | `{}` | | +| kubelb.tunnel.connectionManager.healthCheck.enabled | bool | `true` | | +| kubelb.tunnel.connectionManager.healthCheck.livenessInitialDelay | int | `30` | | +| kubelb.tunnel.connectionManager.healthCheck.readinessInitialDelay | int | `10` | | +| kubelb.tunnel.connectionManager.httpAddr | string | `":8080"` | Server addresses | +| kubelb.tunnel.connectionManager.httpRoute.annotations | object | `{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}"}` | Annotations for HTTPRoute | +| kubelb.tunnel.connectionManager.httpRoute.domain | string | `"connection-manager.${DOMAIN}"` | Domain for the HTTPRoute NOTE: Replace ${DOMAIN} with your domain name. 
| +| kubelb.tunnel.connectionManager.httpRoute.enabled | bool | `false` | | +| kubelb.tunnel.connectionManager.httpRoute.gatewayName | string | `"gateway"` | Gateway name to attach to | +| kubelb.tunnel.connectionManager.httpRoute.gatewayNamespace | string | `""` | Gateway namespace | +| kubelb.tunnel.connectionManager.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/kubermatic/kubelb-connection-manager-ee","tag":""}` | Connection manager image configuration | +| kubelb.tunnel.connectionManager.ingress | object | `{"annotations":{"cert-manager.io/cluster-issuer":"letsencrypt-prod","external-dns.alpha.kubernetes.io/hostname":"connection-manager.${DOMAIN}","nginx.ingress.kubernetes.io/backend-protocol":"HTTP","nginx.ingress.kubernetes.io/proxy-read-timeout":"3600","nginx.ingress.kubernetes.io/proxy-send-timeout":"3600"},"className":"nginx","enabled":false,"hosts":[{"host":"connection-manager.${DOMAIN}","paths":[{"path":"/tunnel","pathType":"Prefix"},{"path":"/health","pathType":"Prefix"}]}],"tls":[{"hosts":["connection-manager.${DOMAIN}"],"secretName":"connection-manager-tls"}]}` | Ingress configuration for external HTTP/2 access | +| kubelb.tunnel.connectionManager.nodeSelector | object | `{}` | | +| kubelb.tunnel.connectionManager.podAnnotations | object | `{}` | Pod configuration | +| kubelb.tunnel.connectionManager.podLabels | object | `{}` | | +| kubelb.tunnel.connectionManager.podSecurityContext.fsGroup | int | `65534` | | +| kubelb.tunnel.connectionManager.podSecurityContext.runAsNonRoot | bool | `true` | | +| kubelb.tunnel.connectionManager.podSecurityContext.runAsUser | int | `65534` | | +| kubelb.tunnel.connectionManager.replicaCount | int | `1` | Number of connection manager replicas | +| kubelb.tunnel.connectionManager.requestTimeout | string | `"30s"` | | +| kubelb.tunnel.connectionManager.resources | object | `{"limits":{"cpu":"500m","memory":"256Mi"},"requests":{"cpu":"250m","memory":"128Mi"}}` | Resource limits | +| kubelb.tunnel.connectionManager.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":65534}` | Security context | +| kubelb.tunnel.connectionManager.service | object | `{"httpPort":8080,"type":"ClusterIP"}` | Service configuration | +| kubelb.tunnel.connectionManager.tolerations | list | `[]` | | +| kubelb.tunnel.enabled | bool | `false` | Enable tunnel functionality | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| podLabels | object | `{}` | | +| podSecurityContext.runAsNonRoot | bool | `true` | | +| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| rbac.allowLeaderElectionRole | bool | `true` | | +| rbac.allowMetricsReaderRole | bool | `true` | | +| rbac.allowProxyRole | bool | `true` | | +| rbac.enabled | bool | `true` | | +| replicaCount | int | `1` | | +| resources.limits.cpu | string | `"500m"` | | +| resources.limits.memory | string | `"512Mi"` | | +| resources.requests.cpu | string | `"100m"` | | +| resources.requests.memory | string | `"128Mi"` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.runAsUser | int | `65532` | | +| service.port | int | `8001` | | +| service.protocol | string | `"TCP"` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| 
serviceAccount.name | string | `""` | | +| serviceMonitor.enabled | bool | `false` | | +| tolerations | list | `[]` | | + +{{% /tab %}} +{{% tab name="Community Edition" %}} + +### Install the helm chart + +```sh +helm pull oci://quay.io/kubermatic/helm-charts/kubelb-manager --version=v1.2.0 --untardir "." --untar +## Apply CRDs +kubectl apply -f kubelb-manager/crds/ +## Create and update values.yaml with the required values. +helm upgrade --install kubelb-manager kubelb-manager --namespace kubelb -f kubelb-manager/values.yaml --create-namespace +``` + +### KubeLB Manager CE Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `10` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| autoscaling.targetMemoryUtilizationPercentage | int | `80` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"quay.io/kubermatic/kubelb-manager"` | | +| image.tag | string | `"v1.2.0"` | | +| imagePullSecrets | list | `[]` | | +| kkpintegration.rbac | bool | `false` | Create RBAC for KKP integration. | +| kubelb.debug | bool | `true` | | +| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. | +| kubelb.enableLeaderElection | bool | `true` | | +| kubelb.enableTenantMigration | bool | `true` | | +| kubelb.envoyProxy.affinity | object | `{}` | | +| kubelb.envoyProxy.nodeSelector | object | `{}` | | +| kubelb.envoyProxy.replicas | int | `2` | The number of replicas for the Envoy Proxy deployment. | +| kubelb.envoyProxy.resources | object | `{}` | | +| kubelb.envoyProxy.singlePodPerNode | bool | `true` | Deploy single pod per node. | +| kubelb.envoyProxy.tolerations | list | `[]` | | +| kubelb.envoyProxy.topology | string | `"shared"` | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global. | +| kubelb.envoyProxy.useDaemonset | bool | `false` | Use DaemonSet for Envoy Proxy deployment instead of Deployment. | +| kubelb.propagateAllAnnotations | bool | `false` | Propagate all annotations from the LB resource to the LB service. | +| kubelb.propagatedAnnotations | object | `{}` | Allowed annotations that will be propagated from the LB resource to the LB service. | +| kubelb.skipConfigGeneration | bool | `false` | Set to true to skip the generation of the Config CR. Useful when the config CR needs to be managed manually. 
|
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | |
+| podLabels | object | `{}` | |
+| podSecurityContext.runAsNonRoot | bool | `true` | |
+| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| rbac.allowLeaderElectionRole | bool | `true` | |
+| rbac.allowMetricsReaderRole | bool | `true` | |
+| rbac.allowProxyRole | bool | `true` | |
+| rbac.enabled | bool | `true` | |
+| replicaCount | int | `1` | |
+| resources.limits.cpu | string | `"500m"` | |
+| resources.limits.memory | string | `"512Mi"` | |
+| resources.requests.cpu | string | `"100m"` | |
+| resources.requests.memory | string | `"128Mi"` | |
+| securityContext.allowPrivilegeEscalation | bool | `false` | |
+| securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| securityContext.runAsUser | int | `65532` | |
+| service.port | int | `8001` | |
+| service.protocol | string | `"TCP"` | |
+| service.type | string | `"ClusterIP"` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| tolerations | list | `[]` | |
+
+{{% /tab %}}
+{{< /tabs >}}
+
+## Setup the management cluster
+
+{{% notice note %}}
+The examples and tools below are for demonstration purposes; you can use any other tools or configurations that suit your requirements.
+{{% /notice %}}
+
+The management cluster acts as the data plane and central control plane for all of your load balancing configuration. It is where all the components required for Layer 4 and Layer 7 load balancing, as well as AI, MCP, Agent2Agent, and API gateways, are deployed. The management cluster is multi-tenant by design, which makes it a perfect fit for managing a fleet of clusters in a scalable, robust, and secure way.
+
+KubeLB ships an addons chart that simplifies the installation of the required components in the management cluster. The chart is bundled with the KubeLB manager chart and can be enabled by setting `kubelb-addons.enabled` to `true` in the `values.yaml`.
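+
+If you only need the chart scaffolding, a minimal sketch of the toggle looks like this (all keys are taken from the fuller example that follows; enable the bundled components selectively as required):
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+
+## Addon configuration
+kubelb-addons:
+  enabled: true
+
+  gatewayClass:
+    create: true
+```
+
+A fuller example that also enables Envoy Gateway, cert-manager, ExternalDNS, and the kgateway components for AI and Agent2Agent gateways: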
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  debug: true
+
+## Addon configuration
+kubelb-addons:
+  enabled: true
+
+  gatewayClass:
+    create: true
+
+  # Ingress Nginx
+  ingress-nginx:
+    enabled: false
+    controller:
+      service:
+        externalTrafficPolicy: Local
+
+  # Envoy Gateway
+  envoy-gateway:
+    enabled: true
+
+  # Cert Manager
+  cert-manager:
+    enabled: true
+    crds:
+      enabled: true
+    config:
+      apiVersion: controller.config.cert-manager.io/v1alpha1
+      kind: ControllerConfiguration
+      enableGatewayAPI: true
+
+  # External DNS
+  external-dns:
+    domainFilters:
+      - example.com
+    extraVolumes:
+      - name: credentials
+        secret:
+          secretName: route53-credentials
+    extraVolumeMounts:
+      - name: credentials
+        mountPath: /.aws
+        readOnly: true
+    env:
+      - name: AWS_SHARED_CREDENTIALS_FILE
+        value: /.aws/credentials
+    txtOwnerId: kubelb-example-aws
+    registry: txt
+    provider: aws
+    policy: sync
+    sources:
+      - service
+      - ingress
+      - gateway-httproute
+      - gateway-grpcroute
+      - gateway-tlsroute
+      - gateway-tcproute
+      - gateway-udproute
+
+  ## AI and Agent2Agent Gateways Integration
+  # KGateway CRDs
+  kgateway-crds:
+    enabled: true
+
+  # KGateway
+  kgateway:
+    enabled: true
+    gateway:
+      aiExtension:
+        enabled: true
+      agentgateway:
+        enabled: true
+```
+
+### TCP/UDP Load Balancing (Layer 4)
+
+Refer to [Layer 4 Load Balancing Setup]({{< relref "../../tutorials/loadbalancer#setup" >}}) for more details.
+
+### Application Layer Load Balancing (Layer 7)
+
+For application layer load balancing, **KubeLB supports both Ingress and Gateway API resources**.
+
+Our default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Current and upcoming KubeLB features will prioritize Gateway API over Ingress, with Envoy Gateway being the implementation that we actively support, test, and base our features on.
+
+While KubeLB can integrate with any Ingress or Gateway API implementation, the one limitation is that only the native Kubernetes APIs, i.e. Ingress and Gateway API, are supported. Provider-specific APIs are not supported by KubeLB and will be ignored entirely. Also, since we only test KubeLB with Envoy Gateway and Nginx Ingress, we cannot guarantee compatibility with other Gateway API or Ingress implementations.
+
+#### Ingress
+
+Refer to [Ingress Setup]({{< relref "../../tutorials/ingress#setup" >}}) for more details.
+
+#### Gateway API
+
+Refer to [Gateway API Setup]({{< relref "../../tutorials/gatewayapi#setup" >}}) for more details.
+
+### Certificate Management (Enterprise Edition)
+
+Refer to [Certificate Management Setup]({{< relref "../../tutorials/security/cert-management#setup" >}}) for more details.
+
+### DNS Management (Enterprise Edition)
+
+Refer to [DNS Management Setup]({{< relref "../../tutorials/security/dns#setup" >}}) for more details.
diff --git a/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md b/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md
new file mode 100644
index 000000000..8b9971cb8
--- /dev/null
+++ b/content/kubelb/v1.2/installation/tenant-cluster/_index.en.md
@@ -0,0 +1,296 @@
++++
+title = "Install KubeLB CCM and setup Tenant Cluster"
+linkTitle = "Setup Tenant Cluster"
+date = 2023-10-27T10:07:15+02:00
+weight = 20
++++
+
+## Requirements
+
+* Access to the Kubernetes API of the KubeLB management cluster.
+* The cluster is registered as a tenant in the KubeLB management cluster (a minimal sketch of the registration object is shown below).
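+
+For reference, a minimal sketch of what such a tenant registration object on the management cluster can look like (field names follow the Tenant CRD documented in the references section; the tenant name `my-tenant` is a placeholder, and the registration workflow itself is covered in the [tenant registration]({{< relref "../../tutorials/tenants">}}) guidelines):
+
+```yaml
+apiVersion: kubelb.k8c.io/v1alpha1
+kind: Tenant
+metadata:
+  name: my-tenant
+spec:
+  # Optional per-tenant settings; L4 load balancing stays enabled by default.
+  loadBalancer:
+    disable: false
+```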
+## Pre-requisites
+
+* Create a namespace **kubelb** for the CCM to be deployed in.
+* The agent expects a **Secret** with a kubeconfig file named **`kubelb`** to access the management/load balancing cluster.
+  * First, register the tenant in the LB cluster by following the [tenant registration]({{< relref "../../tutorials/tenants">}}) guidelines.
+  * Fetch the generated kubeconfig and create a secret from the management cluster by using these commands:
+
+    ```sh
+    # Replace with the tenant cluster kubeconfig path
+    TENANT_KUBECONFIG=~/.kube/
+    # Replace with the tenant name
+    TENANT_NAME=tenant-shroud
+    KUBELB_KUBECONFIG=$(kubectl get secret kubelb-ccm-kubeconfig -n $TENANT_NAME --template={{.data.kubelb}})
+    # At this point we have the kubeconfig in base64 encoded format.
+    # Switch the context to the tenant cluster
+    export KUBECONFIG=$TENANT_KUBECONFIG
+    kubectl --namespace kubelb create secret generic kubelb-cluster --from-literal=kubelb="$(echo $KUBELB_KUBECONFIG | base64 -d)"
+    ```
+
+* The name of the secret can be overridden using `.Values.kubelb.clusterSecretName`, if required. Otherwise the secret needs to be named `kubelb-cluster` and look like:
+
+  ```sh
+  kubectl get secrets -o yaml kubelb-cluster
+  ```
+
+  ```yaml
+  apiVersion: v1
+  data:
+    kubelb: xxx-base64-encoded-xxx
+  kind: Secret
+  metadata:
+    name: kubelb-cluster
+    namespace: kubelb
+  type: Opaque
+  ```
+
+* Update the `tenantName` in the `values.yaml` to a unique identifier for the tenant. This is used to identify the tenant in the manager cluster. Tenants are registered in the management cluster by the Platform Provider, and the name is prefixed with `tenant-`. So, for example, a tenant named `my-tenant` will be registered as `tenant-my-tenant`. **NOTE: Automation is in place, so the tenant name is accepted both with and without the `tenant-` prefix.**
+
+At this point a minimal `values.yaml` should look like this:
+
+```yaml
+kubelb:
+  clusterSecretName: kubelb-cluster
+  tenantName: <tenant-name>
+```
+
+{{% notice info %}}
+
+**Important configuration for private clusters!**
+If your cluster only uses internal IPs for its nodes (check the following example output), you need to change the value of `kubelb.nodeAddressType` to `InternalIP`:
+
+```bash
+kubectl get nodes -o wide
+```
+
+```
+NAME     STATUS   ROLES           AGE    VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE   KERNEL-VERSION       CONTAINER-RUNTIME
+node-x   Ready    control-plane   208d   v1.29.9   10.66.99.222   <none>        Ubuntu     5.15.0-121-generic   containerd://1.6.33
+```
+
+Adjust `values.yaml`:
+
+```yaml
+kubelb:
+  # -- Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP.
+  nodeAddressType: InternalIP
+```
+
+{{% /notice %}}
+
+## Installation for KubeLB CCM
+
+{{% notice warning %}} If Gateway API needs to be enabled for the cluster, set the following fields in the `values.yaml`. This is required because KubeLB will otherwise fail to start due to missing CRDs.
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+  installGatewayAPICRDs: true
+```
+
+{{% /notice %}}
+
+{{< tabs name="KubeLB CCM" >}}
+{{% tab name="Enterprise Edition" %}}
+
+### Prerequisites
+
+* Create a namespace **kubelb** for the CCM to be deployed in.
+* Create **imagePullSecrets** in the kubelb namespace so that the chart can pull the image from the registry.
+
+At this point a minimal `values.yaml` should look like this:
+
+```yaml
+imagePullSecrets:
+  - name: <image-pull-secret-name>
+kubelb:
+  clusterSecretName: kubelb-cluster
+  tenantName: <tenant-name>
+```
+
+### Install the helm chart
+
+```sh
+helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm-ee --version=v1.2.0 --untardir "." 
--untar +## Apply CRDs +kubectl apply -f kubelb-ccm-ee/crds/ +## Create and update values.yaml with the required values. +helm upgrade --install kubelb-ccm kubelb-ccm-ee --namespace kubelb -f kubelb-ccm-ee/values.yaml --create-namespace +``` + +### KubeLB CCM EE Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `10` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| autoscaling.targetMemoryUtilizationPercentage | int | `80` | | +| extraVolumeMounts | list | `[]` | | +| extraVolumes | list | `[]` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"quay.io/kubermatic/kubelb-ccm-ee"` | | +| image.tag | string | `"v1.2.0"` | | +| imagePullSecrets[0].name | string | `"kubermatic-quay.io"` | | +| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster | +| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. | +| kubelb.disableGatewayController | bool | `false` | disableGatewayController specifies whether to disable the Gateway Controller. | +| kubelb.disableHTTPRouteController | bool | `false` | disableHTTPRouteController specifies whether to disable the HTTPRoute Controller. | +| kubelb.disableIngressController | bool | `false` | disableIngressController specifies whether to disable the Ingress Controller. | +| kubelb.disableTCPRouteController | bool | `false` | disableTCPRouteController specifies whether to disable the TCPRoute Controller. | +| kubelb.disableTLSRouteController | bool | `false` | disableTLSRouteController specifies whether to disable the TLSRoute Controller. | +| kubelb.disableUDPRouteController | bool | `false` | disableUDPRouteController specifies whether to disable the UDPRoute Controller. | +| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. | +| kubelb.enableLeaderElection | bool | `true` | Enable the leader election. | +| kubelb.enableSecretSynchronizer | bool | `false` | Enable to automatically convert Secrets labelled with `kubelb.k8c.io/managed-by: kubelb` to Sync Secrets. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. | +| kubelb.gatewayAPICRDsChannel | string | `"experimental"` | gatewayAPICRDsChannel specifies the channel for the Gateway API CRDs. Options are `standard` and `experimental`. | +| kubelb.installGatewayAPICRDs | bool | `false` | installGatewayAPICRDs Installs and manages the Gateway API CRDs using gateway crd controller. | +| kubelb.nodeAddressType | string | `"ExternalIP"` | Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP. | +| kubelb.tenantName | string | `nil` | Name of the tenant, must be unique against a load balancer cluster. | +| kubelb.useGatewayClass | bool | `true` | useGatewayClass specifies whether to target resources with `kubelb` gateway class or all resources. | +| kubelb.useIngressClass | bool | `true` | useIngressClass specifies whether to target resources with `kubelb` ingress class or all resources. 
| +| kubelb.useLoadBalancerClass | bool | `false` | useLoadBalancerClass specifies whether to target services of type LoadBalancer with `kubelb` load balancer class or all services of type LoadBalancer. | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| podLabels | object | `{}` | | +| podSecurityContext.runAsNonRoot | bool | `true` | | +| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| rbac.allowLeaderElectionRole | bool | `true` | | +| rbac.allowMetricsReaderRole | bool | `true` | | +| rbac.allowProxyRole | bool | `true` | | +| rbac.enabled | bool | `true` | | +| replicaCount | int | `1` | | +| resources.limits.cpu | string | `"500m"` | | +| resources.limits.memory | string | `"512Mi"` | | +| resources.requests.cpu | string | `"100m"` | | +| resources.requests.memory | string | `"128Mi"` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.runAsUser | int | `65532` | | +| service.port | int | `8443` | | +| service.protocol | string | `"TCP"` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| serviceMonitor.enabled | bool | `false` | | +| tolerations | list | `[]` | | + +{{% /tab %}} +{{% tab name="Community Edition" %}} + +### Install the helm chart + +```sh +helm pull oci://quay.io/kubermatic/helm-charts/kubelb-ccm --version=v1.2.0 --untardir "." --untar +## Apply CRDs +kubectl apply -f kubelb-ccm/crds/ +## Create and update values.yaml with the required values. +helm upgrade --install kubelb-ccm kubelb-ccm --namespace kubelb -f kubelb-ccm/values.yaml --create-namespace +``` + +### KubeLB CCM Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `10` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| autoscaling.targetMemoryUtilizationPercentage | int | `80` | | +| extraVolumeMounts | list | `[]` | | +| extraVolumes | list | `[]` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"quay.io/kubermatic/kubelb-ccm"` | | +| image.tag | string | `"v1.2.0"` | | +| imagePullSecrets | list | `[]` | | +| kubelb.clusterSecretName | string | `"kubelb-cluster"` | Name of the secret that contains kubeconfig for the loadbalancer cluster | +| kubelb.disableGRPCRouteController | bool | `false` | disableGRPCRouteController specifies whether to disable the GRPCRoute Controller. | +| kubelb.disableGatewayController | bool | `false` | disableGatewayController specifies whether to disable the Gateway Controller. | +| kubelb.disableHTTPRouteController | bool | `false` | disableHTTPRouteController specifies whether to disable the HTTPRoute Controller. | +| kubelb.disableIngressController | bool | `false` | disableIngressController specifies whether to disable the Ingress Controller. | +| kubelb.enableGatewayAPI | bool | `false` | enableGatewayAPI specifies whether to enable the Gateway API and Gateway Controllers. By default Gateway API is disabled since without Gateway APIs installed the controller cannot start. | +| kubelb.enableLeaderElection | bool | `true` | Enable the leader election. 
| +| kubelb.enableSecretSynchronizer | bool | `false` | Enable to automatically convert Secrets labelled with `kubelb.k8c.io/managed-by: kubelb` to Sync Secrets. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. | +| kubelb.gatewayAPICRDsChannel | string | `"standard"` | gatewayAPICRDsChannel specifies the channel for the Gateway API CRDs. Options are `standard` and `experimental`. | +| kubelb.installGatewayAPICRDs | bool | `false` | installGatewayAPICRDs Installs and manages the Gateway API CRDs using gateway crd controller. | +| kubelb.nodeAddressType | string | `"ExternalIP"` | Address type to use for routing traffic to node ports. Values are ExternalIP, InternalIP. | +| kubelb.tenantName | string | `nil` | Name of the tenant, must be unique against a load balancer cluster. | +| kubelb.useGatewayClass | bool | `true` | useGatewayClass specifies whether to target resources with `kubelb` gateway class or all resources. | +| kubelb.useIngressClass | bool | `true` | useIngressClass specifies whether to target resources with `kubelb` ingress class or all resources. | +| kubelb.useLoadBalancerClass | bool | `false` | useLoadBalancerClass specifies whether to target services of type LoadBalancer with `kubelb` load balancer class or all services of type LoadBalancer. | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| podLabels | object | `{}` | | +| podSecurityContext.runAsNonRoot | bool | `true` | | +| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| rbac.allowLeaderElectionRole | bool | `true` | | +| rbac.allowMetricsReaderRole | bool | `true` | | +| rbac.allowProxyRole | bool | `true` | | +| rbac.enabled | bool | `true` | | +| replicaCount | int | `1` | | +| resources.limits.cpu | string | `"500m"` | | +| resources.limits.memory | string | `"512Mi"` | | +| resources.requests.cpu | string | `"100m"` | | +| resources.requests.memory | string | `"128Mi"` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.runAsUser | int | `65532` | | +| service.port | int | `8443` | | +| service.protocol | string | `"TCP"` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| serviceMonitor.enabled | bool | `false` | | +| tolerations | list | `[]` | | + +{{% /tab %}} +{{< /tabs >}} + +## Setup the tenant cluster + +### Install Gateway API CRDs + +Starting from KubeLB v1.2.0, the Gateway API CRDs can be installed using the `installGatewayAPICRDs` flag. 
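+
+The channel of the installed CRDs can also be pinned explicitly via `kubelb.gatewayAPICRDsChannel` (see the values tables above); a minimal sketch:
+
+```yaml
+kubelb:
+  installGatewayAPICRDs: true
+  # Options are `standard` and `experimental`.
+  gatewayAPICRDsChannel: standard
+```
+
+The tabs below show minimal values for each edition: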
+
+{{< tabs name="Gateway APIs" >}}
+{{% tab name="Enterprise Edition" %}}
+
+```yaml
+imagePullSecrets:
+  - name: <image-pull-secret-name>
+kubelb:
+  clusterSecretName: kubelb-cluster
+  tenantName: <tenant-name>
+  # This will install the experimental channel of the Gateway API CRDs
+  installGatewayAPICRDs: true
+  enableGatewayAPI: true
+```
+
+For more details: [Experimental Install](https://gateway-api.sigs.k8s.io/guides/#install-experimental-channel)
+{{% /tab %}}
+{{% tab name="Community Edition" %}}
+
+```yaml
+kubelb:
+  clusterSecretName: kubelb-cluster
+  tenantName: <tenant-name>
+  # This will install the standard channel of the Gateway API CRDs
+  installGatewayAPICRDs: true
+  enableGatewayAPI: true
+```
+
+For more details: [Standard Install](https://gateway-api.sigs.k8s.io/guides/#install-standard-channel)
+
+{{% /tab %}}
+{{< /tabs >}}
diff --git a/content/kubelb/v1.2/references/_index.en.md b/content/kubelb/v1.2/references/_index.en.md
new file mode 100644
index 000000000..fa3b4e4f7
--- /dev/null
+++ b/content/kubelb/v1.2/references/_index.en.md
@@ -0,0 +1,12 @@
++++
+title = "References"
+date = 2024-03-06T12:00:00+02:00
+weight = 50
++++
+
+This section contains a reference of the Kubermatic KubeLB Custom Resource Definitions.
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/references/ce/_index.en.md b/content/kubelb/v1.2/references/ce/_index.en.md
new file mode 100644
index 000000000..3c2159c27
--- /dev/null
+++ b/content/kubelb/v1.2/references/ce/_index.en.md
@@ -0,0 +1,748 @@
++++
+title = "KubeLB Community Edition CRD References"
+linkTitle = "Community Edition"
+date = 2024-03-06T12:00:00+02:00
+weight = 60
++++
+
+**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ce/kubelb.k8c.io/v1alpha1)**
+
+## Packages
+
+- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1)
+
+## kubelb.k8c.io/v1alpha1
+
+Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 API group
+
+### Resource Types
+
+- [Addresses](#addresses)
+- [AddressesList](#addresseslist)
+- [Config](#config)
+- [ConfigList](#configlist)
+- [LoadBalancer](#loadbalancer)
+- [LoadBalancerList](#loadbalancerlist)
+- [Route](#route)
+- [RouteList](#routelist)
+- [SyncSecret](#syncsecret)
+- [SyncSecretList](#syncsecretlist)
+- [Tenant](#tenant)
+- [TenantList](#tenantlist)
+- [TenantState](#tenantstate)
+- [TenantStateList](#tenantstatelist)
+
+#### Addresses
+
+Addresses is the Schema for the addresses API
+
+_Appears in:_
+
+- [AddressesList](#addresseslist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `Addresses` | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[AddressesSpec](#addressesspec)_ | | | |
+| `status` _[AddressesStatus](#addressesstatus)_ | | | |
+
+#### AddressesList
+
+AddressesList contains a list of Addresses
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | |
+| `kind` _string_ | `AddressesList` | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| | | +| `items` _[Addresses](#addresses) array_ | | | | + +#### AddressesSpec + +AddressesSpec defines the desired state of Addresses + +_Appears in:_ + +- [Addresses](#addresses) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `addresses` _[EndpointAddress](#endpointaddress) array_ | Addresses contains a list of addresses. | | MinItems: 1
| + +#### AddressesStatus + +AddressesStatus defines the observed state of Addresses + +_Appears in:_ + +- [Addresses](#addresses) + +#### AnnotatedResource + +_Underlying type:_ _string_ + +_Validation:_ + +- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute] + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | +| --- | --- | +| `all` | | +| `service` | | +| `ingress` | | +| `gateway` | | +| `httproute` | | +| `grpcroute` | | +| `tcproute` | | +| `udproute` | | +| `tlsroute` | | + +#### AnnotationSettings + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | + +#### Annotations + +_Underlying type:_ _object_ + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +#### CertificatesSettings + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is only used for load balancer hostname. | | | + +#### Config + +Config is the object that represents the Config for the KubeLB management controller. + +_Appears in:_ + +- [ConfigList](#configlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Config` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ConfigSpec](#configspec)_ | | | | + +#### ConfigDNSSettings + +ConfigDNSSettings defines the global settings for DNS management and automation. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | + +#### ConfigList + +ConfigList contains a list of Config + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `ConfigList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Config](#config) array_ | | | | + +#### ConfigSpec + +ConfigSpec defines the desired state of the Config + +_Appears in:_ + +- [Config](#config) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | | +| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | +| `ingress` _[IngressSettings](#ingresssettings)_ | | | | +| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | | +| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | + +#### DNSSettings + +DNSSettings defines the settings for DNS management and automation. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer resources at LoadBalancer.Spec.Hostname. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname is set. | | | + +#### EndpointAddress + +EndpointAddress is a tuple that describes single IP address. + +_Appears in:_ + +- [AddressesSpec](#addressesspec) +- [LoadBalancerEndpoints](#loadbalancerendpoints) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ip` _string_ | The IP of this endpoint.
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
or link-local multicast (224.0.0.0/24). | | MinLength: 7<br />
| +| `hostname` _string_ | The Hostname of this endpoint | | | + +#### EndpointPort + +EndpointPort is a tuple that describes a single port. + +_Appears in:_ + +- [LoadBalancerEndpoints](#loadbalancerendpoints) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port. This must match the 'name' field in the
corresponding ServicePort.
Must be a DNS_LABEL.
Optional only if one port is defined. | | | +| `port` _integer_ | The port number of the endpoint. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
| + +#### EnvoyProxy + +EnvoyProxy defines the desired state of the EnvoyProxy + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `topology` _[EnvoyProxyTopology](#envoyproxytopology)_ | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global.
DEPRECATION NOTICE: The value "dedicated" is deprecated and will be removed in a future release. Dedicated topology will now default to shared topology. | shared | Enum: [shared dedicated global]
| +| `useDaemonset` _boolean_ | UseDaemonset defines whether Envoy Proxy will run as daemonset. By default, Envoy Proxy will run as deployment.
If set to true, Replicas will be ignored. | | | +| `replicas` _integer_ | Replicas defines the number of replicas for Envoy Proxy. This field is ignored if UseDaemonset is set to true. | 3 | Minimum: 1
| +| `singlePodPerNode` _boolean_ | SinglePodPerNode defines whether Envoy Proxy pods will be spread across nodes. This ensures that multiple replicas are not running on the same node. | | | +| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is used to select nodes to run Envoy Proxy. If specified, the node must have all the indicated labels. | | | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | Tolerations is used to schedule Envoy Proxy pods on nodes with matching taints. | | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | Resources defines the resource requirements for Envoy Proxy. | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#affinity-v1-core)_ | Affinity is used to schedule Envoy Proxy pods on nodes with matching affinity. | | | + +#### EnvoyProxyTopology + +_Underlying type:_ _string_ + +_Appears in:_ + +- [EnvoyProxy](#envoyproxy) + +| Field | Description | +| --- | --- | +| `shared` | | +| `dedicated` | | +| `global` | | + +#### GatewayAPISettings + +GatewayAPISettings defines the settings for the gateway API. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | | +| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | | + +#### HostnameStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | | +| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | | +| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | | + +#### IngressSettings + +IngressSettings defines the settings for the ingress. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the ingress to use.
This has higher precedence than the value specified in the Config. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable Ingress for a tenant. | | | + +#### KubernetesSource + +_Appears in:_ + +- [RouteSource](#routesource) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `resource` _[Unstructured](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#unstructured-unstructured-v1)_ | | | EmbeddedResource: \{\}
| +| `services` _[UpstreamService](#upstreamservice) array_ | Services contains the list of services that are used as the source for the Route. | | | + +#### LoadBalancer + +LoadBalancer is the Schema for the loadbalancers API + +_Appears in:_ + +- [LoadBalancerList](#loadbalancerlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `LoadBalancer` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[LoadBalancerSpec](#loadbalancerspec)_ | | | | +| `status` _[LoadBalancerStatus](#loadbalancerstatus)_ | | | | + +#### LoadBalancerEndpoints + +LoadBalancerEndpoints is a group of addresses with a common set of ports. The +expanded set of endpoints is the Cartesian product of Addresses x Ports. +For example, given: + + { + Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], + Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] + } + +The resulting set of endpoints can be viewed as: + + a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], + b: [ 10.10.1.1:309, 10.10.2.2:309 ] + +_Appears in:_ + +- [LoadBalancerSpec](#loadbalancerspec) +- [RouteSpec](#routespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name is the name of the endpoints. | | | +| `addresses` _[EndpointAddress](#endpointaddress) array_ | IP addresses which offer the related ports that are marked as ready. These endpoints
should be considered safe for load balancers and clients to utilize. | | MinItems: 1
| +| `addressesReference` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | AddressesReference is a reference to the Addresses object that contains the IP addresses.
If this field is set, the Addresses field will be ignored. | | | +| `ports` _[EndpointPort](#endpointport) array_ | Port numbers available on the related IP addresses.
This field is ignored for routes that are using kubernetes resources as the source. | | MinItems: 1
| + +#### LoadBalancerList + +LoadBalancerList contains a list of LoadBalancer + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `LoadBalancerList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[LoadBalancer](#loadbalancer) array_ | | | | + +#### LoadBalancerPort + +LoadBalancerPort contains information on service's port. + +_Appears in:_ + +- [LoadBalancerSpec](#loadbalancerspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a Spec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
| +| `port` _integer_ | The port that will be exposed by the LoadBalancer. | | | + +#### LoadBalancerSettings + +LoadBalancerSettings defines the settings for the load balancers. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the load balancer to use.
This has higher precedence than the value specified in the Config. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable L4 load balancing for a tenant. | | | + +#### LoadBalancerSpec + +LoadBalancerSpec defines the desired state of LoadBalancer + +_Appears in:_ + +- [LoadBalancer](#loadbalancer) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| +| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
only needed for layer 4 | | | +| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route (Ingress or HTTPRoute) for the service and expose it with TLS on the given hostname. Currently, only the HTTP protocol is supported. | | | +| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid<br />
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | | + +#### LoadBalancerState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | + +#### LoadBalancerStatus + +LoadBalancerStatus defines the observed state of LoadBalancer + +_Appears in:_ + +- [LoadBalancer](#loadbalancer) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | | +| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | | +| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | | + +#### ResourceState + +_Appears in:_ + +- [RouteResourcesStatus](#routeresourcesstatus) +- [RouteServiceStatus](#routeservicestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | APIVersion is the API version of the resource. | | | +| `name` _string_ | Name is the name of the resource. | | | +| `namespace` _string_ | Namespace is the namespace of the resource. | | | +| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | | +| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | + +#### Route + +Route is the object that represents a route in the cluster. + +_Appears in:_ + +- [RouteList](#routelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Route` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[RouteSpec](#routespec)_ | | | | +| `status` _[RouteStatus](#routestatus)_ | | | | + +#### RouteList + +RouteList contains a list of Routes + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `RouteList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Route](#route) array_ | | | | + +#### RouteResourcesStatus + +_Appears in:_ + +- [RouteStatus](#routestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `source` _string_ | | | | +| `services` _object (keys:string, values:[RouteServiceStatus](#routeservicestatus))_ | | | | +| `route` _[ResourceState](#resourcestate)_ | | | | + +#### RouteServiceStatus + +_Appears in:_ + +- [RouteResourcesStatus](#routeresourcesstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | APIVersion is the API version of the resource. | | | +| `name` _string_ | Name is the name of the resource. | | | +| `namespace` _string_ | Namespace is the namespace of the resource. | | | +| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | | +| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. 
| | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `ports` _[ServicePort](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#serviceport-v1-core) array_ | | | | + +#### RouteSource + +_Appears in:_ + +- [RouteSpec](#routespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `kubernetes` _[KubernetesSource](#kubernetessource)_ | Kubernetes contains the information about the Kubernetes source.
This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. | | | + +#### RouteSpec + +RouteSpec defines the desired state of the Route. + +_Appears in:_ + +- [Route](#route) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| +| `source` _[RouteSource](#routesource)_ | Source contains the information about the source of the route. This is used when the route is created from external sources. | | | + +#### RouteStatus + +RouteStatus defines the observed state of the Route. + +_Appears in:_ + +- [Route](#route) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `resources` _[RouteResourcesStatus](#routeresourcesstatus)_ | Resources contains the list of resources that are created/processed as a result of the Route. | | | + +#### ServicePort + +ServicePort contains information on service's port. + +_Appears in:_ + +- [ServiceStatus](#servicestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | | +| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
- Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and the IANA Service Names registry).<br />
- Kubernetes-defined prefixed names:<br />
- 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in RFC 9113<br />
- 'kubernetes.io/ws' - WebSocket over cleartext as described in RFC 6455<br />
- 'kubernetes.io/wss' - WebSocket over TLS as described in RFC 6455<br />
- Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | | +| `port` _integer_ | The port that will be exposed by this service. | | | +| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | | +| `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | | +| `upstreamTargetPort` _integer_ | | | | + +#### ServiceStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ports` _[ServicePort](#serviceport) array_ | | | | + +#### SyncSecret + +SyncSecret is a wrapper over Kubernetes Secret object. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. + +_Appears in:_ + +- [SyncSecretList](#syncsecretlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `SyncSecret` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `immutable` _boolean_ | | | | +| `data` _object (keys:string, values:integer array)_ | | | | +| `stringData` _object (keys:string, values:string)_ | | | | +| `type` _[SecretType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secrettype-v1-core)_ | | | | + +#### SyncSecretList + +SyncSecretList contains a list of SyncSecrets + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `SyncSecretList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[SyncSecret](#syncsecret) array_ | | | | + +#### Tenant + +Tenant is the Schema for the tenants API + +_Appears in:_ + +- [TenantList](#tenantlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Tenant` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantSpec](#tenantspec)_ | | | | +| `status` _[TenantStatus](#tenantstatus)_ | | | | + +#### TenantList + +TenantList contains a list of Tenant + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tenant](#tenant) array_ | | | | + +#### TenantSpec + +TenantSpec defines the desired state of Tenant + +_Appears in:_ + +- [Tenant](#tenant) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | +| `ingress` _[IngressSettings](#ingresssettings)_ | | | | +| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[DNSSettings](#dnssettings)_ | | | | +| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | + +#### TenantState + +TenantState is the Schema for the tenants API + +_Appears in:_ + +- [TenantStateList](#tenantstatelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantState` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | | +| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | | + +#### TenantStateList + +TenantStateList contains a list of TenantState + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantStateList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TenantState](#tenantstate) array_ | | | | + +#### TenantStateSpec + +TenantStateSpec defines the desired state of TenantState. + +_Appears in:_ + +- [TenantState](#tenantstate) + +#### TenantStateStatus + +TenantStateStatus defines the observed state of TenantState + +_Appears in:_ + +- [TenantState](#tenantstate) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `version` _[Version](#version)_ | | | | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | | + +#### TenantStatus + +TenantStatus defines the observed state of Tenant + +_Appears in:_ + +- [Tenant](#tenant) + +#### UpstreamService + +UpstreamService is a wrapper over the corev1.Service object. +This is required as kubebuilder:validation:EmbeddedResource marker adds the x-kubernetes-embedded-resource to the array instead of +the elements within it. Which results in a broken CRD; validation error. Without this marker, the embedded resource is not properly +serialized to the CRD. + +_Appears in:_ + +- [KubernetesSource](#kubernetessource) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | | +| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | | + +#### Version + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `gitVersion` _string_ | | | | +| `gitCommit` _string_ | | | | +| `buildDate` _string_ | | | | +| `edition` _string_ | | | | diff --git a/content/kubelb/v1.2/references/ee/_index.en.md b/content/kubelb/v1.2/references/ee/_index.en.md new file mode 100644 index 000000000..1a71a06e4 --- /dev/null +++ b/content/kubelb/v1.2/references/ee/_index.en.md @@ -0,0 +1,934 @@ ++++ +title = "KubeLB Enterprise Edition CRD References" +linkTitle = "Enterprise Edition" +date = 2024-03-06T12:00:00+02:00 +weight = 50 +enterprise = true ++++ + +**Source: [kubelb.k8c.io/v1alpha1](https://github.com/kubermatic/kubelb/tree/main/api/ee/kubelb.k8c.io/v1alpha1)** + +## Packages + +- [kubelb.k8c.io/v1alpha1](#kubelbk8ciov1alpha1) + +## kubelb.k8c.io/v1alpha1 + +Package v1alpha1 contains API Schema definitions for the kubelb.k8c.io v1alpha1 API group + +### Resource Types + +- [Addresses](#addresses) +- [AddressesList](#addresseslist) +- [Config](#config) +- [ConfigList](#configlist) +- [LoadBalancer](#loadbalancer) +- [LoadBalancerList](#loadbalancerlist) +- [Route](#route) +- [RouteList](#routelist) +- [SyncSecret](#syncsecret) +- [SyncSecretList](#syncsecretlist) +- [Tenant](#tenant) +- [TenantList](#tenantlist) +- [TenantState](#tenantstate) +- [TenantStateList](#tenantstatelist) +- [Tunnel](#tunnel) +- [TunnelList](#tunnellist) + +#### Addresses + +Addresses is the Schema for the addresses API + +_Appears in:_ + +- [AddressesList](#addresseslist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Addresses` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[AddressesSpec](#addressesspec)_ | | | | +| `status` _[AddressesStatus](#addressesstatus)_ | | | | + +#### AddressesList + +AddressesList contains a list of Addresses + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `AddressesList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Addresses](#addresses) array_ | | | | + +#### AddressesSpec + +AddressesSpec defines the desired state of Addresses + +_Appears in:_ + +- [Addresses](#addresses) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `addresses` _[EndpointAddress](#endpointaddress) array_ | Addresses contains a list of addresses. | | MinItems: 1
| + +#### AddressesStatus + +AddressesStatus defines the observed state of Addresses + +_Appears in:_ + +- [Addresses](#addresses) + +#### AnnotatedResource + +_Underlying type:_ _string_ + +_Validation:_ + +- Enum: [all service ingress gateway httproute grpcroute tcproute udproute tlsroute] + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | +| --- | --- | +| `all` | | +| `service` | | +| `ingress` | | +| `gateway` | | +| `httproute` | | +| `grpcroute` | | +| `tcproute` | | +| `udproute` | | +| `tlsroute` | | + +#### AnnotationSettings + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | + +#### Annotations + +_Underlying type:_ _object_ + +_Appears in:_ + +- [AnnotationSettings](#annotationsettings) +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +#### CertificatesSettings + +CertificatesSettings defines the settings for the certificates. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation for a tenant. | | | +| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | | +| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated Certificate management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | + +#### Config + +Config is the object that represents the Config for the KubeLB management controller. + +_Appears in:_ + +- [ConfigList](#configlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Config` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ConfigSpec](#configspec)_ | | | | + +#### ConfigCertificatesSettings + +ConfigCertificatesSettings defines the global settings for the certificates. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | Disable is a flag that can be used to disable certificate automation globally for all the tenants. | | | +| `defaultClusterIssuer` _string_ | DefaultClusterIssuer is the Cluster Issuer to use for the certificates by default. This is applied when the cluster issuer is not specified in the annotations on the resource itself. | | | + +#### ConfigDNSSettings + +ConfigDNSSettings defines the global settings for DNS management and automation. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation globally for all the tenants. | | | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | + +#### ConfigList + +ConfigList contains a list of Config + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `ConfigList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Config](#config) array_ | | | | + +#### ConfigSpec + +ConfigSpec defines the desired state of the Config + +_Appears in:_ + +- [Config](#config) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `envoyProxy` _[EnvoyProxy](#envoyproxy)_ | EnvoyProxy defines the desired state of the Envoy Proxy | | | +| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | +| `ingress` _[IngressSettings](#ingresssettings)_ | | | | +| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[ConfigDNSSettings](#configdnssettings)_ | | | | +| `certificates` _[ConfigCertificatesSettings](#configcertificatessettings)_ | | | | +| `tunnel` _[TunnelSettings](#tunnelsettings)_ | | | | + +#### DNSSettings + +DNSSettings defines the tenant specific settings for DNS management and automation. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | Disable is a flag that can be used to disable DNS automation for a tenant. | | | +| `allowedDomains` _string array_ | AllowedDomains is a list of allowed domains for automated DNS management. Has a higher precedence than the value specified in the Config.
If empty, the value specified in `tenant.spec.allowedDomains` will be used.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level. | | | +| `wildcardDomain` _string_ | WildcardDomain is the domain that will be used as the base domain to create wildcard DNS records for DNS resources.
This is only used for determining the hostname for LoadBalancer and Tunnel resources. | | | +| `allowExplicitHostnames` _boolean_ | AllowExplicitHostnames is a flag that can be used to allow explicit hostnames to be used for DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useDNSAnnotations` _boolean_ | UseDNSAnnotations is a flag that can be used to add DNS annotations to DNS resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | +| `useCertificateAnnotations` _boolean_ | UseCertificateAnnotations is a flag that can be used to add Certificate annotations to Certificate resources.
This is only used when LoadBalancer.Spec.Hostname or Tunnel.Spec.Hostname is set. | | | + +#### EndpointAddress + +EndpointAddress is a tuple that describes single IP address. + +_Appears in:_ + +- [AddressesSpec](#addressesspec) +- [LoadBalancerEndpoints](#loadbalancerendpoints) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ip` _string_ | The IP of this endpoint.
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
or link-local multicast (224.0.0.0/24). | | MinLength: 7
| +| `hostname` _string_ | The Hostname of this endpoint | | | + +#### EndpointPort + +EndpointPort is a tuple that describes a single port. + +_Appears in:_ + +- [LoadBalancerEndpoints](#loadbalancerendpoints) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port. This must match the 'name' field in the
corresponding ServicePort.
Must be a DNS_LABEL.
Optional only if one port is defined. | | | +| `port` _integer_ | The port number of the endpoint. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
| + +#### EnvoyProxy + +EnvoyProxy defines the desired state of the EnvoyProxy + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `topology` _[EnvoyProxyTopology](#envoyproxytopology)_ | Topology defines the deployment topology for Envoy Proxy. Valid values are: shared and global.
DEPRECATION NOTICE: The value "dedicated" is deprecated and will be removed in a future release. Dedicated topology will now default to shared topology. | shared | Enum: [shared dedicated global]
| +| `useDaemonset` _boolean_ | UseDaemonset defines whether Envoy Proxy will run as a DaemonSet. By default, Envoy Proxy will run as a Deployment.
If set to true, Replicas will be ignored. | | | +| `replicas` _integer_ | Replicas defines the number of replicas for Envoy Proxy. This field is ignored if UseDaemonset is set to true. | 3 | Minimum: 1
| +| `singlePodPerNode` _boolean_ | SinglePodPerNode defines whether Envoy Proxy pods will be spread across nodes. This ensures that multiple replicas are not running on the same node. | | | +| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is used to select nodes to run Envoy Proxy. If specified, the node must have all the indicated labels. | | | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | Tolerations is used to schedule Envoy Proxy pods on nodes with matching taints. | | | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | Resources defines the resource requirements for Envoy Proxy. | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#affinity-v1-core)_ | Affinity is used to schedule Envoy Proxy pods on nodes with matching affinity. | | | + +#### EnvoyProxyTopology + +_Underlying type:_ _string_ + +_Appears in:_ + +- [EnvoyProxy](#envoyproxy) + +| Field | Description | +| --- | --- | +| `shared` | | +| `dedicated` | | +| `global` | | + +#### GatewayAPISettings + +GatewayAPISettings defines the settings for the gateway API. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the gateway API to use. This can be used to specify a specific gateway API implementation.
This has higher precedence than the value specified in the Config. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable Gateway API for a tenant. | | | +| `defaultGateway` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | DefaultGateway is the default gateway reference to use for the tenant. This is only used for load balancer hostname and tunneling. | | | +| `gateway` _[GatewaySettings](#gatewaysettings)_ | | | | +| `disableHTTPRoute` _boolean_ | | | | +| `disableGRPCRoute` _boolean_ | | | | +| `disableTCPRoute` _boolean_ | | | | +| `disableUDPRoute` _boolean_ | | | | +| `disableTLSRoute` _boolean_ | | | | + +#### GatewayAPIsSettings + +_Appears in:_ + +- [GatewayAPISettings](#gatewayapisettings) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disableHTTPRoute` _boolean_ | | | | +| `disableGRPCRoute` _boolean_ | | | | +| `disableTCPRoute` _boolean_ | | | | +| `disableUDPRoute` _boolean_ | | | | +| `disableTLSRoute` _boolean_ | | | | + +#### GatewaySettings + +GatewaySettings defines the settings for the gateway resource. + +_Appears in:_ + +- [GatewayAPISettings](#gatewayapisettings) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `limit` _integer_ | Limit is the maximum number of gateways to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | + +#### HostnameStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the hostname of the load-balancer. | | | +| `tlsEnabled` _boolean_ | TLSEnabled is true if certificate is created for the hostname. | | | +| `dnsRecordCreated` _boolean_ | DNSRecordCreated is true if DNS record is created for the hostname. | | | + +#### IngressSettings + +IngressSettings defines the settings for the ingress. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the ingress to use.
This has higher precedence than the value specified in the Config. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable Ingress for a tenant. | | | + +#### KubernetesSource + +_Appears in:_ + +- [RouteSource](#routesource) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `resource` _[Unstructured](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#unstructured-unstructured-v1)_ | | | EmbeddedResource: \{\}
| +| `services` _[UpstreamService](#upstreamservice) array_ | Services contains the list of services that are used as the source for the Route. | | | + +#### LoadBalancer + +LoadBalancer is the Schema for the loadbalancers API + +_Appears in:_ + +- [LoadBalancerList](#loadbalancerlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `LoadBalancer` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[LoadBalancerSpec](#loadbalancerspec)_ | | | | +| `status` _[LoadBalancerStatus](#loadbalancerstatus)_ | | | | + +#### LoadBalancerEndpoints + +LoadBalancerEndpoints is a group of addresses with a common set of ports. The +expanded set of endpoints is the Cartesian product of Addresses x Ports. +For example, given: + + { + Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], + Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] + } + +The resulting set of endpoints can be viewed as: + + a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], + b: [ 10.10.1.1:309, 10.10.2.2:309 ] + +_Appears in:_ + +- [LoadBalancerSpec](#loadbalancerspec) +- [RouteSpec](#routespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name is the name of the endpoints. | | | +| `addresses` _[EndpointAddress](#endpointaddress) array_ | IP addresses which offer the related ports that are marked as ready. These endpoints
should be considered safe for load balancers and clients to utilize. | | MinItems: 1
| +| `addressesReference` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | AddressesReference is a reference to the Addresses object that contains the IP addresses.
If this field is set, the Addresses field will be ignored. | | | +| `ports` _[EndpointPort](#endpointport) array_ | Port numbers available on the related IP addresses.
This field is ignored for routes that are using kubernetes resources as the source. | | MinItems: 1
| + +#### LoadBalancerList + +LoadBalancerList contains a list of LoadBalancer + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `LoadBalancerList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[LoadBalancer](#loadbalancer) array_ | | | | + +#### LoadBalancerPort + +LoadBalancerPort contains information on service's port. + +_Appears in:_ + +- [LoadBalancerSpec](#loadbalancerspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a Spec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Defaults to "TCP". | | Enum: [TCP UDP]
| +| `port` _integer_ | The port that will be exposed by the LoadBalancer. | | | + +#### LoadBalancerSettings + +LoadBalancerSettings defines the settings for the load balancers. + +_Appears in:_ + +- [ConfigSpec](#configspec) +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `class` _string_ | Class is the class of the load balancer to use.
This has higher precedence than the value specified in the Config. | | | +| `limit` _integer_ | Limit is the maximum number of load balancers to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable L4 load balancing for a tenant. | | | + +#### LoadBalancerSpec + +LoadBalancerSpec defines the desired state of LoadBalancer + +_Appears in:_ + +- [LoadBalancer](#loadbalancer) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| +| `ports` _[LoadBalancerPort](#loadbalancerport) array_ | The list of ports that are exposed by the load balancer service.
Only needed for layer 4. | | | +| `hostname` _string_ | Hostname is the domain name at which the load balancer service will be accessible.
When hostname is set, KubeLB will create a route (Ingress or HTTPRoute) for the service and expose it with TLS on the given hostname. | | | +| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicetype-v1-core)_ | type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
"ExternalName" maps to the specified externalName.
"ClusterIP" allocates a cluster-internal IP address for load-balancing to
endpoints. Endpoints are determined by the selector or if that is not
specified, by manual construction of an Endpoints object. If clusterIP is
"None", no virtual IP is allocated and the endpoints are published as a
set of endpoints rather than a stable IP.
"NodePort" builds on ClusterIP and allocates a port on every node which
routes to the clusterIP.
"LoadBalancer" builds on NodePort and creates an
external load-balancer (if supported in the current cloud) which routes
to the clusterIP.
More info: | ClusterIP | | + +#### LoadBalancerState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | +| `limit` _integer_ | | | | + +#### LoadBalancerStatus + +LoadBalancerStatus defines the observed state of LoadBalancer + +_Appears in:_ + +- [LoadBalancer](#loadbalancer) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `loadBalancer` _[LoadBalancerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#loadbalancerstatus-v1-core)_ | LoadBalancer contains the current status of the load-balancer,
if one is present. | | | +| `service` _[ServiceStatus](#servicestatus)_ | Service contains the current status of the LB service. | | | +| `hostname` _[HostnameStatus](#hostnamestatus)_ | Hostname contains the status for hostname resources. | | | + +#### ResourceState + +_Appears in:_ + +- [RouteResourcesStatus](#routeresourcesstatus) +- [RouteServiceStatus](#routeservicestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | APIVersion is the API version of the resource. | | | +| `name` _string_ | Name is the name of the resource. | | | +| `namespace` _string_ | Namespace is the namespace of the resource. | | | +| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | | +| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | + +#### Route + +Route is the object that represents a route in the cluster. + +_Appears in:_ + +- [RouteList](#routelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Route` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[RouteSpec](#routespec)_ | | | | +| `status` _[RouteStatus](#routestatus)_ | | | | + +#### RouteList + +RouteList contains a list of Routes + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `RouteList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Route](#route) array_ | | | | + +#### RouteResourcesStatus + +_Appears in:_ + +- [RouteStatus](#routestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `source` _string_ | | | | +| `services` _object (keys:string, values:[RouteServiceStatus](#routeservicestatus))_ | | | | +| `route` _[ResourceState](#resourcestate)_ | | | | + +#### RouteServiceStatus + +_Appears in:_ + +- [RouteResourcesStatus](#routeresourcesstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | APIVersion is the API version of the resource. | | | +| `name` _string_ | Name is the name of the resource. | | | +| `namespace` _string_ | Namespace is the namespace of the resource. | | | +| `generatedName` _string_ | GeneratedName is the generated name of the resource. | | | +| `status` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#rawextension-runtime-pkg)_ | Status is the actual status of the resource. 
| | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `ports` _[ServicePort](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#serviceport-v1-core) array_ | | | | + +#### RouteSource + +_Appears in:_ + +- [RouteSpec](#routespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `kubernetes` _[KubernetesSource](#kubernetessource)_ | Kubernetes contains the information about the Kubernetes source.
This field is automatically populated by the KubeLB CCM and in most cases, users should not set this field manually. | | | + +#### RouteSpec + +RouteSpec defines the desired state of the Route. + +_Appears in:_ + +- [Route](#route) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `endpoints` _[LoadBalancerEndpoints](#loadbalancerendpoints) array_ | Sets of addresses and ports that comprise an exposed user service on a cluster. | | MinItems: 1
| +| `source` _[RouteSource](#routesource)_ | Source contains the information about the source of the route. This is used when the route is created from external sources. | | | + +#### RouteStatus + +RouteStatus defines the observed state of the Route. + +_Appears in:_ + +- [Route](#route) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `resources` _[RouteResourcesStatus](#routeresourcesstatus)_ | Resources contains the list of resources that are created/processed as a result of the Route. | | | + +#### ServicePort + +ServicePort contains information on service's port. + +_Appears in:_ + +- [ServiceStatus](#servicestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | The name of this port within the service. This must be a DNS_LABEL.
All ports within a ServiceSpec must have unique names. When considering
the endpoints for a Service, this must match the 'name' field in the
EndpointPort.
Optional if only one ServicePort is defined on this service. | | | +| `protocol` _[Protocol](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#protocol-v1-core)_ | The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
Default is TCP. | | | +| `appProtocol` _string_ | The application protocol for this port.
This is used as a hint for implementations to offer richer behavior for protocols that they understand.
This field follows standard Kubernetes label syntax.
Valid values are either:
* Un-prefixed protocol names - reserved for IANA standard service names (as per
RFC-6335 and ).
* Kubernetes-defined prefixed names:
* 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in
* 'kubernetes.io/ws' - WebSocket over cleartext as described in
* 'kubernetes.io/wss' - WebSocket over TLS as described in
* Other protocols should use implementation-defined prefixed names such as
mycompany.com/my-custom-protocol. | | | +| `port` _integer_ | The port that will be exposed by this service. | | | +| `targetPort` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#intorstring-intstr-util)_ | Number or name of the port to access on the pods targeted by the service.
Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
If this is a string, it will be looked up as a named port in the
target Pod's container ports. If this is not specified, the value
of the 'port' field is used (an identity map).
This field is ignored for services with clusterIP=None, and should be
omitted or set equal to the 'port' field.
More info: | | | +| `nodePort` _integer_ | The port on each node on which this service is exposed when type is
NodePort or LoadBalancer. Usually assigned by the system. If a value is
specified, in-range, and not in use it will be used, otherwise the
operation will fail. If not specified, a port will be allocated if this
Service requires one. If this field is specified when creating a
Service which does not need it, creation will fail. This field will be
wiped when updating a Service to no longer need it (e.g. changing type
from NodePort to ClusterIP).
More info: | | | +| `upstreamTargetPort` _integer_ | | | | + +#### ServiceStatus + +_Appears in:_ + +- [LoadBalancerStatus](#loadbalancerstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ports` _[ServicePort](#serviceport) array_ | | | | + +#### SyncSecret + +SyncSecret is a wrapper over Kubernetes Secret object. This is used to sync secrets from tenants to the LB cluster in a controlled and secure way. + +_Appears in:_ + +- [SyncSecretList](#syncsecretlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `SyncSecret` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `immutable` _boolean_ | | | | +| `data` _object (keys:string, values:integer array)_ | | | | +| `stringData` _object (keys:string, values:string)_ | | | | +| `type` _[SecretType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secrettype-v1-core)_ | | | | + +#### SyncSecretList + +SyncSecretList contains a list of SyncSecrets + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `SyncSecretList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[SyncSecret](#syncsecret) array_ | | | | + +#### Tenant + +Tenant is the Schema for the tenants API + +_Appears in:_ + +- [TenantList](#tenantlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Tenant` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantSpec](#tenantspec)_ | | | | +| `status` _[TenantStatus](#tenantstatus)_ | | | | + +#### TenantList + +TenantList contains a list of Tenant + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tenant](#tenant) array_ | | | | + +#### TenantSpec + +TenantSpec defines the desired state of Tenant + +_Appears in:_ + +- [Tenant](#tenant) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `propagatedAnnotations` _map[string]string_ | PropagatedAnnotations defines the list of annotations(key-value pairs) that will be propagated to the LoadBalancer service. Keep the `value` field empty in the key-value pair to allow any value.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `propagateAllAnnotations` _boolean_ | PropagateAllAnnotations defines whether all annotations will be propagated to the LoadBalancer service. If set to true, PropagatedAnnotations will be ignored.
Tenant configuration has higher precedence than the value specified at the Config level. | | | +| `defaultAnnotations` _object (keys:[AnnotatedResource](#annotatedresource), values:[Annotations](#annotations))_ | DefaultAnnotations defines the list of annotations(key-value pairs) that will be set on the load balancing resources if not already present. A special key `all` can be used to apply the same
set of annotations to all resources.
Tenant configuration has higher precedence than the annotations specified at the Config level. | | | +| `loadBalancer` _[LoadBalancerSettings](#loadbalancersettings)_ | | | | +| `ingress` _[IngressSettings](#ingresssettings)_ | | | | +| `gatewayAPI` _[GatewayAPISettings](#gatewayapisettings)_ | | | | +| `dns` _[DNSSettings](#dnssettings)_ | | | | +| `certificates` _[CertificatesSettings](#certificatessettings)_ | | | | +| `tunnel` _[TenantTunnelSettings](#tenanttunnelsettings)_ | | | | +| `allowedDomains` _string array_ | List of allowed domains for the tenant. This is used to restrict the domains that can be used
for the tenant. If specified, applies on all the components such as Ingress, GatewayAPI, DNS, certificates, etc.
Examples:
- ["_.example.com"] -> this allows subdomains at the root level such as example.com and test.example.com but won't allow domains at one level above like test.test.example.com
- ["**.example.com"] -> this allows all subdomains of example.com such as test.dns.example.com and dns.example.com
- ["example.com"] -> this allows only example.com
- ["**"] or ["_"] -> this allows all domains
Note: "**" was added as a special case to allow any levels of subdomains that come before it. "*" works for only 1 level.
Default: value is ["**"] and all domains are allowed. | [**] | | + +#### TenantState + +TenantState is the Schema for the tenants API + +_Appears in:_ + +- [TenantStateList](#tenantstatelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantState` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TenantStateSpec](#tenantstatespec)_ | | | | +| `status` _[TenantStateStatus](#tenantstatestatus)_ | | | | + +#### TenantStateList + +TenantStateList contains a list of TenantState + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TenantStateList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TenantState](#tenantstate) array_ | | | | + +#### TenantStateSpec + +TenantStateSpec defines the desired state of TenantState. + +_Appears in:_ + +- [TenantState](#tenantstate) + +#### TenantStateStatus + +TenantStateStatus defines the observed state of TenantState + +_Appears in:_ + +- [TenantState](#tenantstate) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `version` _[Version](#version)_ | | | | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta)_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | | | | +| `tunnel` _[TunnelState](#tunnelstate)_ | | | | +| `loadBalancer` _[LoadBalancerState](#loadbalancerstate)_ | | | | +| `allowedDomains` _string array_ | | | | + +#### TenantStatus + +TenantStatus defines the observed state of Tenant + +_Appears in:_ + +- [Tenant](#tenant) + +#### TenantTunnelSettings + +TenantTunnelSettings defines the settings for the tunnel. + +_Appears in:_ + +- [TenantSpec](#tenantspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +| `disable` _boolean_ | Disable is a flag that can be used to disable tunneling for a tenant. | | | + +#### Tunnel + +Tunnel is the Schema for the tunnels API + +_Appears in:_ + +- [TunnelList](#tunnellist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `Tunnel` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TunnelSpec](#tunnelspec)_ | | | | +| `status` _[TunnelStatus](#tunnelstatus)_ | | | | + +#### TunnelList + +TunnelList contains a list of Tunnel + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `kubelb.k8c.io/v1alpha1` | | | +| `kind` _string_ | `TunnelList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tunnel](#tunnel) array_ | | | | + +#### TunnelPhase + +_Underlying type:_ _string_ + +TunnelPhase represents the phase of tunnel + +_Appears in:_ + +- [TunnelStatus](#tunnelstatus) + +| Field | Description | +| --- | --- | +| `Pending` | TunnelPhasePending means the tunnel is being provisioned
| +| `Ready` | TunnelPhaseReady means the tunnel is ready to accept connections
| +| `Failed` | TunnelPhaseFailed means the tunnel provisioning failed
| +| `Terminating` | TunnelPhaseTerminating means the tunnel is being terminated
| + +#### TunnelResources + +TunnelResources contains references to resources created for the tunnel + +_Appears in:_ + +- [TunnelStatus](#tunnelstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `serviceName` _string_ | ServiceName is the name of the service created for this tunnel | | | +| `routeRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectreference-v1-core)_ | RouteRef is a reference to the route (HTTPRoute or Ingress) created for this tunnel | | | + +#### TunnelSettings + +TunnelSettings defines the global settings for Tunnel resources. + +_Appears in:_ + +- [ConfigSpec](#configspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `limit` _integer_ | Limit is the maximum number of tunnels to create.
If the limit is set lower than the number of resources that already exist, the limit will disallow creation of new resources but will not delete existing resources. The reason behind this
is that it is not possible for KubeLB to know which resources are safe to remove. | | | +| `connectionManagerURL` _string_ | ConnectionManagerURL is the URL of the connection manager service that handles tunnel connections.
This is required if tunneling is enabled.
For example: "" | | | +| `disable` _boolean_ | Disable indicates whether tunneling feature should be disabled. | | | + +#### TunnelSpec + +TunnelSpec defines the desired state of Tunnel + +_Appears in:_ + +- [Tunnel](#tunnel) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the hostname of the tunnel. If not specified, the hostname will be generated by KubeLB. | | | + +#### TunnelState + +_Appears in:_ + +- [TenantStateStatus](#tenantstatestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `disable` _boolean_ | | | | +| `limit` _integer_ | | | | +| `connectionManagerURL` _string_ | | | | + +#### TunnelStatus + +TunnelStatus defines the observed state of Tunnel + +_Appears in:_ + +- [Tunnel](#tunnel) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname contains the actual hostname assigned to the tunnel | | | +| `url` _string_ | URL contains the full URL to access the tunnel | | | +| `connectionManagerURL` _string_ | ConnectionManagerURL contains the URL that clients should use to establish tunnel connections | | | +| `phase` _[TunnelPhase](#tunnelphase)_ | Phase represents the current phase of the tunnel | | | +| `resources` _[TunnelResources](#tunnelresources)_ | Resources contains references to the resources created for this tunnel | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#condition-v1-meta) array_ | Conditions represents the current conditions of the tunnel | | | + +#### UpstreamService + +UpstreamService is a wrapper over the corev1.Service object. +This is required as kubebuilder:validation:EmbeddedResource marker adds the x-kubernetes-embedded-resource to the array instead of +the elements within it. Which results in a broken CRD; validation error. Without this marker, the embedded resource is not properly +serialized to the CRD. + +_Appears in:_ + +- [KubernetesSource](#kubernetessource) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ServiceSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicespec-v1-core)_ | Spec defines the behavior of a service.
| | | +| `status` _[ServiceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#servicestatus-v1-core)_ | Most recently observed status of the service.
Populated by the system.
Read-only.
More info: | | |
+
+#### Version
+
+_Appears in:_
+
+- [TenantStateStatus](#tenantstatestatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `gitVersion` _string_ | | | |
+| `gitCommit` _string_ | | | |
+| `buildDate` _string_ | | | |
+| `edition` _string_ | | | |
diff --git a/content/kubelb/v1.2/release-notes/_index.en.md b/content/kubelb/v1.2/release-notes/_index.en.md
new file mode 100644
index 000000000..f91bf57e0
--- /dev/null
+++ b/content/kubelb/v1.2/release-notes/_index.en.md
@@ -0,0 +1,94 @@
++++
+title = "Release Notes"
+date = 2024-03-15T00:00:00+01:00
+weight = 60
++++
+
+## Kubermatic KubeLB v1.2
+
+- [v1.2.0](#v120)
+  - [Community Edition](#community-edition)
+  - [Enterprise Edition](#enterprise-edition)
+
+**Full Changelog**:
+
+## v1.2.0
+
+**GitHub release: [v1.2.0](https://github.com/kubermatic/kubelb/releases/tag/v1.2.0)**
+
+### Highlights
+
+#### Community Edition (CE)
+
+- Support for Load Balancer Hostname has been introduced. This allows users to specify a hostname for the load balancer.
+- Default Annotations can now be configured for services, Ingress, and Gateway API resources in the management cluster.
+- KubeLB Addons chart has been introduced to simplify the installation of the required components for the management cluster.
+  - Tools such as ingress-nginx, external-dns, cert-manager, etc. can now be installed through a single KubeLB management chart.
+  - KubeLB Addons chart will ship versions of components that we are actively testing and supporting.
+- TenantState API has been introduced to share tenant status with the KubeLB consumers, i.e. through the CCM or CLI. This simplifies sharing details such as load balancer limit, allowed domains, wildcard domain, etc. with the consumers.
+- KubeLB CCM can now install Gateway API CRDs by itself, removing the need to install them manually.
+- KubeLB now maintains the required RBAC attached to the kubeconfig for KKP integration. `kkpintegration.rbac: true` can be used to manage the RBAC using the KubeLB Helm chart.
+
+#### Enterprise Edition (EE)
+
+- Tunneling support has been introduced in the Management Cluster. The server side and control plane components for tunneling are shipped with the Enterprise Edition of KubeLB.
+- AI and MCP Gateway Integration has been introduced. As running your AI, MCP, and Agent2Agent tooling alongside your data plane is a common use case, we are now leveraging [kgateway](https://kgateway.dev/) to solidify this integration.
+
+### Community Edition
+
+#### API Changes
+
+- Enterprise Edition APIs for KubeLB are now available at k8c.io/kubelb/api/ee/kubelb.k8c.io/v1alpha1 ([#101](https://github.com/kubermatic/kubelb/pull/101))
+
+#### Features
+
+- Support for adding default annotations to the load balancing resources ([#78](https://github.com/kubermatic/kubelb/pull/78))
+- KubeLB now maintains the required RBAC attached to the kubeconfig for KKP integration. `kkpintegration.rbac: true` can be used to manage the RBAC using the KubeLB Helm chart ([#79](https://github.com/kubermatic/kubelb/pull/79))
+- Envoy: no_traffic_interval for upstream endpoints health check has been reduced to 5s from the default of 60s. 
Envoy will start sending health checks to a new cluster after 5s now ([#106](https://github.com/kubermatic/kubelb/pull/106))
+- KubeLB CCM will now automatically install Kubernetes Gateway API CRDs using the following flags:
+  - --install-gateway-api-crds: Installs and manages the Gateway API CRDs using the Gateway CRD controller.
+  - --gateway-api-crds-channel: Specifies the channel for Gateway API CRDs, with possible values of 'standard' or 'experimental'. ([#110](https://github.com/kubermatic/kubelb/pull/110))
+- Improve validations for cluster-name in CCM ([#111](https://github.com/kubermatic/kubelb/pull/111))
+- Gracefully handle nodes that don't have an IP address assigned while computing Addresses ([#111](https://github.com/kubermatic/kubelb/pull/111))
+- LoadBalancer resources can now be directly assigned a hostname/URL ([#113](https://github.com/kubermatic/kubelb/pull/113))
+- TenantState API has been introduced to share tenant status with the KubeLB consumers, i.e. through the CCM or CLI ([#117](https://github.com/kubermatic/kubelb/pull/117))
+- Dedicated addons chart has been introduced for KubeLB at `oci://quay.io/kubermatic/helm-charts/kubelb-addons`. ([#122](https://github.com/kubermatic/kubelb/pull/122))
+- KubeLB is now built using Go 1.25 ([#126](https://github.com/kubermatic/kubelb/pull/126))
+- Update kube-rbac-proxy to v0.19.1 ([#128](https://github.com/kubermatic/kubelb/pull/128))
+- Add metallb to kubelb-addons ([#130](https://github.com/kubermatic/kubelb/pull/130))
+
+#### Design
+
+- Restructure repository and make Enterprise Edition APIs available at k8c.io/kubelb/api/ee/kubelb.k8c.io/v1alpha1 ([#101](https://github.com/kubermatic/kubelb/pull/101))
+
+#### Bug or Regression
+
+- Fix annotation handling for services ([#82](https://github.com/kubermatic/kubelb/pull/82))
+- Don't modify IngressClassName if it's not set in the configuration ([#88](https://github.com/kubermatic/kubelb/pull/88))
+- Fix an issue with KubeLB not respecting the already allocated NodePort in the management cluster for load balancers with a large number of open NodePorts ([#91](https://github.com/kubermatic/kubelb/pull/91))
+- Before removing RBAC for a tenant, ensure that all routes, load balancers, and syncsecrets are cleaned up ([#92](https://github.com/kubermatic/kubelb/pull/92))
+- Update health checks for Envoy upstream endpoints:
+  - UDP health checking has been removed due to limited support in Envoy
+  - TCP health checking has been updated to perform a connect-only health check ([#103](https://github.com/kubermatic/kubelb/pull/103))
+- Use arbitrary ports as target port for load balancer services ([#119](https://github.com/kubermatic/kubelb/pull/119))
+
+#### Other (Cleanup, Flake, or Chore)
+
+- Upgrade to Go 1.24.1 ([#87](https://github.com/kubermatic/kubelb/pull/87))
+- Upgrade to EnvoyProxy v1.33.1 ([#87](https://github.com/kubermatic/kubelb/pull/87))
+- Sort IPs in `addresses` Endpoint to reduce updates ([#93](https://github.com/kubermatic/kubelb/pull/93))
+- KubeLB is now built using Go 1.24.6 ([#118](https://github.com/kubermatic/kubelb/pull/118))
+- Add additional columns for TenantState and Tunnel CRDs ([#124](https://github.com/kubermatic/kubelb/pull/124))
+
+**Full Changelog**:
+
+### Enterprise Edition
+
+**Enterprise Edition includes everything from Community Edition and more. 
The release notes below are for changes specific to just the Enterprise Edition.**
+
+#### EE Features
+
+- Default annotations support for Alpha/Beta Gateway API resources like TLSRoute, TCPRoute, and UDPRoute.
+- More fine-grained load balancer hostname support.
+- Tunneling support has been introduced in the Management Cluster. With the newly introduced KubeLB CLI, users can now expose workloads/applications running in their local workstations or VMs in closed networks to the outside world. Since all the traffic is routed through the KubeLB management cluster, security, observability, and other features are available and applied by default based on your configuration.
+- AI and MCP Gateway Integration has been introduced. As running your AI, MCP, and Agent2Agent tooling alongside your data plane is a common use case, we are now leveraging [kgateway](https://kgateway.dev/) to solidify this integration.
diff --git a/content/kubelb/v1.2/support-policy/_index.en.md b/content/kubelb/v1.2/support-policy/_index.en.md
new file mode 100644
index 000000000..d2604c8ed
--- /dev/null
+++ b/content/kubelb/v1.2/support-policy/_index.en.md
@@ -0,0 +1,27 @@
++++
+title = "Support Policy"
+date = 2024-03-15T00:00:00+01:00
+weight = 40
++++
+
+KubeLB has an open-source community edition and an enterprise edition. The community edition is free to use and is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).
+
+The enterprise edition, backed by an active subscription for [KubeLB](https://www.kubermatic.com/products/kubelb/), provides comprehensive enterprise-grade support, guaranteed SLAs, priority issue resolution, and direct access to our expert engineering team. Our enterprise customers benefit from personalized technical guidance, architectural consulting, and our commitment to their production success.
+
+## Enterprise Edition Support
+
+By default, our support covers the following:
+
+- Debugging for issues related to KubeLB
+- Enhancing documentation
+- Fixing bugs that block the usage of the platform
+
+What is not covered:
+
+- Issues related to the underlying Kubernetes cluster and infrastructure.
+- Custom configurations for the underlying product suite including ingress-nginx, Envoy Gateway, External DNS, and Cert Manager. KubeLB only provides you with sane default configurations and an integration for those products.
+- Issues related to misconfigured Ingress or Gateway API resources by the KubeLB users (tenant clusters). For example, misconfigured TLS certificates or missing hostnames in the Ingress or HTTPRoute resources.
+
+{{% notice info %}}
+**Discover our enterprise-grade support offerings and customized solutions for your organization's needs. 
[Contact our solutions team](mailto:sales@kubermatic.com) to explore how we can help ensure your success.**
+{{% /notice %}}
diff --git a/content/kubelb/v1.2/tutorials/_index.en.md b/content/kubelb/v1.2/tutorials/_index.en.md
new file mode 100644
index 000000000..d59eef078
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/_index.en.md
@@ -0,0 +1,18 @@
++++
+title = "Guides"
+linkTitle = "Tutorials"
+date = 2023-10-27T10:07:15+02:00
+description = "Get familiar with KubeLB and read step-by-step instructions to handle important scenarios"
+weight = 20
+chapter = true
++++
+
+
+# Guides
+
+Get familiar with KubeLB and read step-by-step instructions to handle important scenarios.
+
+## Table of Contents
+
+{{% children depth=5 %}}
+{{% /children %}}
diff --git a/content/kubelb/v1.2/tutorials/aigateway/_index.en.md b/content/kubelb/v1.2/tutorials/aigateway/_index.en.md
new file mode 100644
index 000000000..148cb8985
--- /dev/null
+++ b/content/kubelb/v1.2/tutorials/aigateway/_index.en.md
@@ -0,0 +1,239 @@
++++
+title = "AI & MCP Gateway"
+linkTitle = "AI & MCP Gateway"
+date = 2023-10-27T10:07:15+02:00
+weight = 7
++++
+
+This tutorial will guide you through setting up an AI and MCP Gateway using KubeLB with KGateway to securely manage Large Language Model (LLM) requests and MCP tool servers.
+
+## Overview
+
+KubeLB leverages [KGateway](https://kgateway.dev/), a CNCF Sandbox project (accepted March 2025), to provide advanced AI Gateway capabilities. KGateway is built on Envoy and implements the Kubernetes Gateway API specification, offering:
+
+- **AI Workload Protection**: Secure applications, models, and data from inappropriate access
+- **LLM Traffic Management**: Intelligent routing to LLM providers with load balancing based on model metrics
+- **Prompt Engineering**: System-level prompt enrichment and guards
+- **Multi-Provider Support**: Works with OpenAI, Anthropic, Google Gemini, Mistral, and local models like Ollama
+- **Model Context Protocol (MCP) Gateway**: Federates MCP tool servers into a single, secure endpoint
+- **Advanced Security**: Authentication, authorization, rate limiting tailored for AI workloads
+
+### Key Features
+
+#### AI-Specific Capabilities
+
+- **Prompt Guards**: Protect against prompt injection and data leakage
+- **Model Failover**: Automatic failover between LLM providers
+- **Function Calling**: Support for LLM function/tool calling
+- **AI Observability**: Detailed metrics and tracing for AI requests
+- **Semantic Caching**: Cache responses based on semantic similarity
+- **Token-Based Rate Limiting**: Control costs with token consumption limits
+
+#### Gateway API Inference Extension
+
+KGateway supports the Gateway API Inference Extension, which introduces:
+
+- `InferenceModel` CRD: Define LLM models and their endpoints
+- `InferencePool` CRD: Group models for load balancing and failover
+- Intelligent endpoint picking based on model performance metrics
+
+## Setup
+
+### Step 1: Enable KGateway AI Extension
+
+Update the values.yaml of the KubeLB manager chart to enable KGateway with AI capabilities:
+
+```yaml
+kubelb:
+  enableGatewayAPI: true
+
+kubelb-addons:
+  enabled: true
+
+  kgateway:
+    enabled: true
+    gateway:
+      aiExtension:
+        enabled: true
+```
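+
+Applying the updated values is a regular Helm upgrade of the manager chart. A minimal sketch, assuming the release is named `kubelb-manager` and the chart is consumed from Kubermatic's OCI registry (adjust the release name, chart reference, and version to your environment):
+
+```bash
+# Roll out the values from above to the KubeLB manager release.
+helm upgrade --install kubelb-manager \
+  oci://quay.io/kubermatic/helm-charts/kubelb-manager \
+  --namespace kubelb --create-namespace \
+  -f values.yaml
+```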
+
+### Step 2: Create Gateway-Specific Resources
+
+1. Deploy a Gateway resource to handle AI traffic:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: ai-gateway
+  namespace: kubelb
+  labels:
+    app: ai-gateway
+spec:
+  gatewayClassName: kgateway
+  infrastructure:
+    parametersRef:
+      name: ai-gateway
+      group: gateway.kgateway.dev
+      kind: GatewayParameters
+  listeners:
+  - protocol: HTTP
+    port: 8080
+    name: http
+    allowedRoutes:
+      namespaces:
+        from: All
+```
+
+2. Deploy a GatewayParameters resource to enable the AI extension:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: GatewayParameters
+metadata:
+  name: ai-gateway
+  namespace: kubelb
+  labels:
+    app: ai-gateway
+spec:
+  kube:
+    aiExtension:
+      enabled: true
+      ports:
+      - name: ai-monitoring
+        containerPort: 9092
+      image:
+        registry: cr.kgateway.dev/kgateway-dev
+        repository: kgateway-ai-extension
+        tag: v2.1.0-main
+    service:
+      type: LoadBalancer
+```
+
+## OpenAI Integration Example
+
+This example shows how to set up secure access to OpenAI through the AI Gateway.
+
+### Step 1: Store OpenAI API Key
+
+Create a Kubernetes secret with your OpenAI API key:
+
+```bash
+export OPENAI_API_KEY="sk-..."
+
+kubectl create secret generic openai-secret \
+  --from-literal=Authorization="Bearer ${OPENAI_API_KEY}" \
+  --namespace kubelb
+```
+
+### Step 2: Create Backend Configuration
+
+Define an AI Backend that uses the secret for authentication:
+
+```yaml
+apiVersion: gateway.kgateway.dev/v1alpha1
+kind: Backend
+metadata:
+  name: openai
+  namespace: kubelb
+spec:
+  type: AI
+  ai:
+    llm:
+      provider:
+        openai:
+          authToken:
+            kind: SecretRef
+            secretRef:
+              name: openai-secret
+              namespace: kubelb
+          model: "gpt-3.5-turbo"
+```
+
+### Step 3: Create HTTPRoute
+
+Route traffic to the OpenAI backend:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: openai-route
+  namespace: kubelb
+spec:
+  parentRefs:
+  - name: ai-gateway
+    namespace: kubelb
+  rules:
+  - matches:
+    - path:
+        type: PathPrefix
+        value: /openai
+    filters:
+    - type: URLRewrite
+      urlRewrite:
+        path:
+          type: ReplaceFullPath
+          replaceFullPath: /v1/chat/completions
+    backendRefs:
+    - name: openai
+      namespace: kubelb
+      group: gateway.kgateway.dev
+      kind: Backend
+```
+
+### Step 4: Test the Configuration
+
+Get the Gateway's external IP:
+
+```bash
+kubectl get gateway ai-gateway -n kubelb
+export GATEWAY_IP=$(kubectl get svc -n kubelb ai-gateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+```
+
+Send a test request:
+
+```bash
+curl -X POST "http://$%7BGATEWAY_IP%7D/openai" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "messages": [
+      {"role": "user", "content": "Hello, how are you?"}
+    ]
+  }'
+```
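+
+If the route and credentials are wired up correctly, the gateway returns a standard OpenAI-style chat completion payload. As a quick sanity check, you can print just the assistant's reply (an illustrative sketch; assumes `jq` is installed on your workstation):
+
+```bash
+# Send the same request and extract only the generated message content.
+curl -s -X POST "http://$%7BGATEWAY_IP%7D/openai" \
+  -H "Content-Type: application/json" \
+  -d '{"messages": [{"role": "user", "content": "Hello, how are you?"}]}' \
+  | jq -r '.choices[0].message.content'
+```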
Please follow this guide to set up the MCP Gateway: [MCP Gateway](https://kgateway.dev/docs/agentgateway/mcp/)

## Further Reading

For advanced configurations and features:

- [KGateway AI Setup Documentation](https://kgateway.dev/docs/ai/setup/)
- [KGateway Authentication Guide](https://kgateway.dev/docs/ai/auth/)
- [Prompt Guards and Security](https://kgateway.dev/docs/ai/prompt-guards/)
- [Multiple LLM Providers](https://kgateway.dev/docs/ai/cloud-providers/)

diff --git a/content/kubelb/v1.2/tutorials/bgp/_index.en.md b/content/kubelb/v1.2/tutorials/bgp/_index.en.md new file mode 100644 index 000000000..0852763cc --- /dev/null +++ b/content/kubelb/v1.2/tutorials/bgp/_index.en.md @@ -0,0 +1,53 @@

+++
title = "Layer 4 Load balancing with BGP"
linkTitle = "BGP Support"
date = 2025-08-27T10:07:15+02:00
weight = 6
+++

In the Management Cluster, KubeLB offloads the provisioning of the actual load balancers to the load balancing appliance that is being used. This can be the CCM in the case of a cloud provider, or a self-managed solution like [MetalLB](https://metallb.universe.tf), [Cilium Load Balancer](https://cilium.io/use-cases/load-balancer/), or any other solution.

Due to this generic nature, KubeLB can be used with any load balancing appliance, and the underlying route advertisement protocols such as BGP, OSPF, and L2 are all supported. This tutorial focuses on [BGP](https://networklessons.com/bgp/introduction-to-bgp), but it assumes that the underlying infrastructure of your Kubernetes cluster is already configured to support BGP.

## Setup

We'll use [MetalLB](https://metallb.universe.tf) with BGP for this tutorial. Update the values.yaml file for the KubeLB manager to enable MetalLB:

```yaml
kubelb-addons:
  metallb:
    enabled: true
```

A minimal configuration for MetalLB for demonstration purposes is as follows:

```yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: extern
  namespace: metallb-system
spec:
  addresses:
  - 10.10.255.200-10.10.255.250
  autoAssign: true
  avoidBuggyIPs: true
---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: extern
  namespace: metallb-system
spec:
  ipAddressPools:
  - extern
```

This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type.

Afterwards, you can follow the [Layer 4 Load balancing](../loadbalancer#usage-with-kubelb) tutorial to create a `LoadBalancer` service in the tenant cluster.

### Further reading

- [MetalLB BGP Configuration](https://metallb.universe.tf/configuration/_advanced_bgp_configuration/)
- [MetalLB BGP Usage](https://metallb.universe.tf/usage/#bgp)

diff --git a/content/kubelb/v1.2/tutorials/config/_index.en.md b/content/kubelb/v1.2/tutorials/config/_index.en.md new file mode 100644 index 000000000..9ee2a661f --- /dev/null +++ b/content/kubelb/v1.2/tutorials/config/_index.en.md @@ -0,0 +1,238 @@

+++
title = "KubeLB Management Cluster Configuration"
linkTitle = "Management Configuration"
date = 2023-10-27T10:07:15+02:00
weight = 1
+++

We have a dedicated CRD, `Config`, that can be used to manage the configuration for the KubeLB manager in the management cluster.
The following is an example of a `Config` CRD:

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  envoyProxy:
    replicas: 3
    topology: shared
```

Users can skip creation of the **Config** object via Helm by applying the following modification to the **values.yaml** file for the Helm chart:

```yaml
kubelb:
  skipConfigGeneration: true
```

This decouples the `Config` object from the Helm chart so that users can manage it separately. This is recommended, since coupling the `Config` CRD to the Helm chart means that the admin would need to upgrade the Helm chart whenever the `Config` CRD has to be updated.

**NOTE: The Config CR named `default` is mandatory for the KubeLB manager to work.**

## Configuration Options

{{% notice note %}}
Tenant configuration has a higher precedence than the global configuration and overrides the global configuration values for the tenant if the fields are available in both the tenant and global configuration.
{{% /notice %}}

### Essential configurations

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  ingress:
    class: "nginx"
  gatewayAPI:
    class: "eg"
  # Enterprise Edition only
  certificates:
    defaultClusterIssuer: "letsencrypt-prod"
```

These configurations are available at a global level and also at a tenant level. The tenant level configurations will override the global configurations for that tenant. It's important to configure these options at one of those levels since they perform essential functions for KubeLB.

1. **Ingress.Class**: The class to use for Ingress resources for tenants in the management cluster.
2. **GatewayAPI.Class**: The class to use for Gateway API resources for tenants in the management cluster.
3. **Certificates.DefaultClusterIssuer(EE)**: The default cluster issuer to use for certificate management.

### Annotation Settings

KubeLB can propagate annotations from services, ingresses, Gateway API objects, etc. in the tenant cluster to the corresponding LoadBalancer or Route resources in the management cluster. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.

Annotations are not propagated by default since tenants can make unwanted changes to the LoadBalancer configuration. Since each tenant is treated as a separate entity, the KubeLB manager cluster needs to be configured to allow the propagation of specific annotations.

The annotation configuration set on the tenant level will override the global annotation configuration for that tenant.

#### 1. Propagate all annotations

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  propagateAllAnnotations: true
```

#### 2. Propagate specific annotations

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  propagatedAnnotations:
    # If the value is empty, any value can be configured for propagation.
    metallb.universe.tf/allow-shared-ip: ""
    # Since the value is explicitly provided, only this value will be allowed for propagation.
    metallb.universe.tf/loadBalancerIPs: "8.8.8.8"
```

#### 3. Default annotations
Default annotations for resources that KubeLB generates in the management cluster can also be configured. This is useful for setting annotations that are required by the cloud provider to configure the LoadBalancers. For example, the `service.beta.kubernetes.io/aws-load-balancer-internal` annotation is used to create an internal LoadBalancer in AWS.

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  defaultAnnotations:
    service:
      # Annotation values must be strings.
      service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    ingress:
      kubernetes.io/ingress.class: "nginx"
    gatewayapi:
      kubernetes.io/ingress.class: "eg"
    # Will be applied to all resources such as Ingress, Gateway API resources, services, etc.
    all:
      internal: "true"
```

### Configure Envoy Proxy

A sample configuration, populated with values for demonstration purposes only. All of the values are optional and have sane defaults. For more details, check [CRD References]({{< relref "../../references">}}).

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  envoyProxy:
    replicas: 3
    # Immutable, cannot be changed after configuration.
    topology: shared
    useDaemonset: false
    singlePodPerNode: false
    nodeSelector:
      kubernetes.io/os: linux
    tolerations:
      - effect: NoSchedule
        operator: Exists
    # Can be used to configure requests/limits for envoy proxy
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 200m
        memory: 256Mi
    # Configure affinity for envoy proxy
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: kubernetes.io/os
                  operator: In
                  values:
                    - linux
```

### Configure LoadBalancer Options

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  loadBalancer:
    # The class to use for LB service in the management cluster
    class: "metallb.universe.tf/metallb"
    disable: false
    # Enterprise Edition Only
    limit: 5
```

### Configure Ingress Options

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  ingress:
    # The class to use for Ingress resources in the management cluster
    class: "nginx"
    disable: false
```

### Configure Gateway API Options

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  gatewayAPI:
    class: "eg"
    disable: false
    defaultGateway:
      name: "default"
      namespace: "envoy-gateway"
    # Enterprise Edition Only (all the below options are only available in Enterprise Edition)
    gateway:
      limits: 10
      disableHTTPRoute: false
      disableGRPCRoute: false
      disableTCPRoute: false
      disableUDPRoute: false
      disableTLSRoute: false
```

### Configure DNS Options

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  dns:
    # The wildcard domain to use for auto-generated hostnames for Load balancers.
    # In EE Edition, this is also used to generate dynamic hostnames for tunnels.
    wildcardDomain: "*.apps.example.com"
    # Allow tenants to specify explicit hostnames for Load balancers and tunnels (in EE Edition).
    allowExplicitHostnames: false
```

**For more details and options, please go through [CRD References]({{< relref "../../references">}})**

diff --git a/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md b/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md new file mode 100644 index 000000000..9bd0d2c4b --- /dev/null +++ b/content/kubelb/v1.2/tutorials/gatewayapi/_index.en.md @@ -0,0 +1,195 @@

+++
title = "Gateway API"
linkTitle = "Gateway API"
date = 2023-10-27T10:07:15+02:00
weight = 4
+++

This tutorial will guide you through the process of setting up Layer 7 load balancing with Gateway API.

Gateway API targets three personas:

1. Platform Provider: The Platform Provider is responsible for the overall environment that the cluster runs in, i.e. the cloud provider. The Platform Provider will interact with GatewayClass resources.
2. Platform Operator: The Platform Operator is responsible for overall cluster administration. They manage policies, network access, application permissions, and will interact with Gateway resources.
3. Service Operator: The Service Operator is responsible for defining application configuration and service composition. They will interact with HTTPRoute and TLSRoute resources and other typical Kubernetes resources.

In KubeLB, we treat the admins of the management cluster as the Platform Provider. Hence, they are responsible for creating the `GatewayClass` resource. Tenants are the Service Operators. For the Platform Operator, this role could vary based on your configurations for the management cluster. In Enterprise edition, users can set the limit of Gateways to 0 to shift the role of "Platform Operator" to the "Platform Provider". Otherwise, by default, the Platform Operator role is assigned to the tenants.

### Setup

Kubermatic's default recommendation is to use Gateway API with [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. Install Envoy Gateway by following this [guide](https://gateway.envoyproxy.io/docs/install/install-helm/), or install any other Gateway API implementation of your choice.

Update values.yaml for the KubeLB manager chart to enable the Gateway API addon.

```yaml
kubelb:
  enableGatewayAPI: true

## Addon configuration
kubelb-addons:
  enabled: true
  # Create the GatewayClass resource in the management cluster.
  gatewayClass:
    create: true

  envoy-gateway:
    enabled: true
```

#### KubeLB Manager Configuration

Update the KubeLB manager configuration to use `eg` as the Gateway Class name, either at a global or tenant level:

#### Global

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Config
metadata:
  name: default
  namespace: kubelb
spec:
  gatewayAPI:
    # Name of the Gateway Class.
    class: "eg"
```

#### Tenant

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Tenant
metadata:
  name: shroud
spec:
  gatewayAPI:
    # Name of the Gateway Class.
    class: "eg"
```

**Leave it empty if you named your Gateway Class `kubelb`.**

### Usage with KubeLB

#### Gateway resource

Once you have created the GatewayClass, the next resource that is required is the Gateway. In the CE version, the Gateway needs to be created in the tenant cluster. However, in Enterprise edition, the Gateway can exist in the management cluster or the tenant cluster.
In Enterprise edition, users can set the limit of Gateways to 0 to shift the role of "Platform Operator" to the "Platform Provider". Otherwise, by default, the Platform Operator role is assigned to the tenants. + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: kubelb +spec: + gatewayClassName: kubelb + listeners: + - name: http + protocol: HTTP + port: 80 +``` + +It is recommended to create the Gateway in tenant cluster directly since the Gateway Object needs to be modified regularly to attach new routes etc. In cases where the Gateway exists in management cluster, set the `use-gateway-class` argument for CCM to false. + +{{% notice warning %}} +Community Edition only one gateway is allowed per tenant and that has to be named `kubelb`. +{{% /notice %}} + +#### HTTPRoute resource + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: backend +--- +apiVersion: v1 +kind: Service +metadata: + name: backend + labels: + app: backend + service: backend +spec: + ports: + - name: http + port: 3000 + targetPort: 3000 + selector: + app: backend +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend +spec: + replicas: 1 + selector: + matchLabels: + app: backend + version: v1 + template: + metadata: + labels: + app: backend + version: v1 + spec: + serviceAccountName: backend + containers: + - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e + imagePullPolicy: IfNotPresent + name: backend + ports: + - containerPort: 3000 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: backend +spec: + parentRefs: + - name: kubelb + hostnames: + - "www.example.com" + rules: + - backendRefs: + - group: "" + kind: Service + name: backend + port: 3000 + weight: 1 + matches: + - path: + type: PathPrefix + value: / +``` + +### Support + +The following resources are supported in CE and EE version: + +- Community Edition: + - HTTPRoute + - GRPCRoute +- Enterprise Edition: + - HTTPRoute + - GRPCRoute + - TCPRoute + - UDPRoute + - TLSRoute + +**For more details on how to use them and example, please refer to examples from [Envoy Gateway Documentation](https://gateway.envoyproxy.io/docs/tasks/)** + +### Limitations + +- ReferenceGrants, BackendTLSPolicy are not supported in KubeLB, yet. diff --git a/content/kubelb/v1.2/tutorials/ingress/_index.en.md b/content/kubelb/v1.2/tutorials/ingress/_index.en.md new file mode 100644 index 000000000..51a5d84b4 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/ingress/_index.en.md @@ -0,0 +1,157 @@ ++++ +title = "Ingress" +linkTitle = "Ingress" +date = 2023-10-27T10:07:15+02:00 +weight = 5 ++++ + +This tutorial will guide you through the process of setting up Layer 7 load balancing with Ingress. + +Kubermatic's default recommendation is to use Gateway API and use [Envoy Gateway](https://gateway.envoyproxy.io/) as the Gateway API implementation. The features specific to Gateway API that will be built and consumed in KubeLB will be based on Envoy Gateway. Although this is not a strict binding and our consumers are free to use any Ingress or Gateway API implementation. The only limitation is that we only support native Kubernetes APIs i.e. Ingress and Gateway APIs. Provider specific APIs are not supported by KubeLB and will be completely ignored. 
Although KubeLB supports Ingress, we strongly encourage you to use Gateway API instead, as Ingress has been [feature frozen](https://kubernetes.io/docs/concepts/services-networking/ingress/#:~:text=Note%3A-,Ingress%20is%20frozen,-.%20New%20features%20are) in Kubernetes and all new development is happening in the Gateway API space. The biggest advantage of Gateway API is that it is more flexible, has extensible APIs, and is **multi-tenant compliant** by default. Ingress doesn't support multi-tenancy.

### Setup

There are two modes in which Ingress can be set up in the management cluster:

#### Per tenant (Recommended)

Install your controller in the following way and scope it down to a specific namespace. This is the recommended approach as it allows you to have a single controller per tenant, and the IP for the ingress controller is not shared across tenants.

Install the **Ingress Controller** in the tenant namespace. Replace **TENANT_NAME** with the name of the tenant. This has to be unique to ensure that any cluster level resource that is installed doesn't conflict with existing resources. The following example is for a tenant named `shroud`:

```sh
TENANT_NAME=shroud
TENANT_NAMESPACE=tenant-$TENANT_NAME

helm upgrade --install ingress-nginx-${TENANT_NAME} ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ${TENANT_NAMESPACE} \
  --create-namespace \
  --set controller.scope.enabled=true \
  --set controller.scope.namespace=${TENANT_NAMESPACE} \
  --set controller.ingressClassResource.name=nginx-${TENANT_NAME}
```

The next step would be to configure the tenant to use the new ingress controller:

```yaml
apiVersion: kubelb.k8c.io/v1alpha1
kind: Tenant
metadata:
  name: ${TENANT_NAME}
spec:
  ingress:
    class: "nginx-${TENANT_NAME}"
```

#### Shared

Update values.yaml for the KubeLB manager chart to enable the ingress-nginx addon.

```yaml
kubelb-addons:
  enabled: true
  ingress-nginx:
    enabled: true
    controller:
      service:
        externalTrafficPolicy: Local
```

### Usage with KubeLB

In the tenant cluster, create the following resources:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: backend
spec:
  ingressClassName: kubelb
  rules:
    # Replace with your domain
    - host: "demo.example.com"
      http:
        paths:
          - path: /backend
            pathType: Exact
            backend:
              service:
                name: backend
                port:
                  number: 3000
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: backend
---
apiVersion: v1
kind: Service
metadata:
  name: backend
  labels:
    app: backend
    service: backend
spec:
  ports:
    - name: http
      port: 3000
      targetPort: 3000
  selector:
    app: backend
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: backend
      version: v1
  template:
    metadata:
      labels:
        app: backend
        version: v1
    spec:
      serviceAccountName: backend
      containers:
        - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e
          imagePullPolicy: IfNotPresent
          name: backend
          ports:
            - containerPort: 3000
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
```

This will create an Ingress resource, a service, and a deployment. KubeLB CCM will create a service of type `NodePort` against your service to ensure connectivity from the management cluster.
Note that the ingress class is `kubelb`; this is required for KubeLB to manage the Ingress resources. This behavior can, however, be changed by following the [Ingress configuration](#configurations).

### Configurations

The KubeLB CCM Helm chart can be used to further configure the CCM. Some essential options are:

```yaml
kubelb:
  # Set to false to watch all resources irrespective of the Ingress class.
  useIngressClass: true
```

diff --git a/content/kubelb/v1.2/tutorials/kkp/_index.en.md b/content/kubelb/v1.2/tutorials/kkp/_index.en.md new file mode 100644 index 000000000..a2c4c4e04 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/kkp/_index.en.md @@ -0,0 +1,48 @@

+++
title = "Kubermatic Kubernetes Platform Integration"
date = 2023-10-27T10:07:15+02:00
weight = 9
enterprise = true
+++

## Kubermatic Kubernetes Platform (Enterprise Edition Only)

Starting with KKP v2.24, KubeLB Enterprise Edition is integrated into the Kubermatic Kubernetes Platform (KKP). This means that you can use KubeLB to provision load balancers for your KKP clusters. KKP will take care of configurations and deployments for you in the user cluster. Admins mainly need to create the KubeLB manager cluster and configure KKP to use it.

## Prerequisites

To configure KubeLB for KKP, you first need a KubeLB management cluster and its Kubeconfig. KubeLB requires access to certain resources like Tenants, LoadBalancer, Routes, etc. for the KKP integration to work. Instead of using the admin Kubeconfig, we can use a Kubeconfig with the necessary RBAC permissions to access the required resources.

1. Create a KubeLB management cluster with the following settings in the `values.yaml` file for the `kubelb-management` chart:

```yaml
kkpintegration:
  rbac: true
```

2. Install the [kubectl-view-serviceaccount-kubeconfig](https://github.com/superbrothers/kubectl-view-serviceaccount-kubeconfig-plugin?tab=readme-ov-file#install-the-plugin) plugin.
3. Use the following command to generate a Kubeconfig for the service account `kubelb-kkp` in the `kubelb` namespace:

```bash
kubectl view-serviceaccount-kubeconfig kubelb-kkp -n kubelb --admin
```

4. Create a file `kubelb-secret.yaml` containing the required secret:

```bash
kubectl create secret generic kubelb-management-cluster \
  --namespace=kubermatic \
  --from-literal=kubeconfig="$(kubectl view-serviceaccount-kubeconfig kubelb-kkp -n kubelb --admin)" \
  --dry-run=client -o yaml > kubelb-secret.yaml
```

5. Apply the file `kubelb-secret.yaml` to the `kubermatic` namespace in your KKP cluster.

```bash
kubectl apply -f kubelb-secret.yaml
```

For further configuration, please refer to the [official KKP documentation](https://docs.kubermatic.com/kubermatic/latest/tutorials-howtos/kubelb).

{{% notice note %}}
To use the KubeLB enterprise offering, you need to have a valid license. Please [contact sales](mailto:sales@kubermatic.com) for more information.
{{% /notice %}}

diff --git a/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md b/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md new file mode 100644 index 000000000..7a2d58459 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/loadbalancer/_index.en.md @@ -0,0 +1,145 @@

+++
title = "Layer 4 Load balancing"
linkTitle = "Layer 4 Load balancing"
date = 2023-10-27T10:07:15+02:00
weight = 3
+++

This tutorial will guide you through the process of setting up a Layer 4 LoadBalancer using KubeLB.
+ +### Setup + +For layer 4 load balancing, either the kubernetes cluster should be on a cloud, using it's CCM, that supports the `LoadBalancer` service type or a self-managed solution like [MetalLB](https://metallb.universe.tf) should be installed. [This guide](https://metallb.universe.tf/installation/#installation-with-helm) can be followed to install and configure MetalLB on the management cluster. + +A minimal configuration for MetalLB for demonstration purposes is as follows: + +```yaml +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: extern + namespace: metallb-system +spec: + ipAddressPools: + - extern +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: extern + namespace: metallb-system +spec: + addresses: + - 10.10.255.200-10.10.255.250 +``` + +This configures an address pool `extern` with an IP range from 10.10.255.200 to 10.10.255.250. This IP range can be used by the tenant clusters to allocate IP addresses for the `LoadBalancer` service type. + +Further reading: + +### Usage with KubeLB + +In the tenant cluster, create a service of type `LoadBalancer` and a deployment: + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: backend +--- +apiVersion: v1 +kind: Service +metadata: + name: backend + labels: + app: backend + service: backend +spec: + ports: + - name: http + port: 3000 + targetPort: 3000 + selector: + app: backend + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend +spec: + replicas: 1 + selector: + matchLabels: + app: backend + version: v1 + template: + metadata: + labels: + app: backend + version: v1 + spec: + serviceAccountName: backend + containers: + - image: gcr.io/k8s-staging-gateway-api/echo-basic:v20231214-v1.0.0-140-gf544a46e + imagePullPolicy: IfNotPresent + name: backend + ports: + - containerPort: 3000 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +``` + +This will create a service of type `LoadBalancer` and a deployment. KubeLB CCM will then propagate the request to management cluster, create a LoadBalancer CR there and retrieve the IP address allocated in the management cluster. Eventually the IP address will be assigned to the service in the tenant cluster. + +### Load Balancer Hostname Support + +KubeLB now supports assigning a hostname directly to the LoadBalancer resource. This is helpful for simpler configurations where no special routing rules are required for your Ingress or HTTPRoute resources. + +```yaml +apiVersion: kubelb.k8c.io/v1alpha1 +kind: LoadBalancer +metadata: + name: test-lb-hostname + namespace: tenant-dkrqjswsgk + annotations: + kubelb.k8c.io/request-wildcard-domain: "true" +spec: + # hostname: test.example.com + endpoints: + - addresses: + - ip: 91.99.112.254 + ports: + - name: 8080-tcp + port: 31632 + protocol: TCP + ports: + - name: 8080-tcp + port: 8080 + protocol: TCP + type: ClusterIP +``` + +This will create a LoadBalancer resource with the hostname `test.example.com` that can forward traffic to the IP address `91.99.112.254` on port `31632`. The `kubelb.k8c.io/request-wildcard-domain: "true"` annotation is used to request a wildcard domain for the hostname. Otherwise `spec.hostname` can also be used to explicitly set the hostname. + +Please take a look at [DNS Automation](../security/dns/#enable-dns-automation) for more details on how to configure DNS for the hostname. 
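To sanity-check the result, you can inspect the LoadBalancer resource and exercise the hostname. A minimal check, reusing the tenant namespace, resource name, hostname, and listener port from the example above:

```bash
# Inspect the LoadBalancer resource (kubelb.k8c.io/v1alpha1) in the tenant namespace
# to confirm the hostname was assigned.
kubectl get loadbalancers.kubelb.k8c.io test-lb-hostname -n tenant-dkrqjswsgk -o yaml

# Once DNS for the hostname resolves, traffic on the exposed port should reach the endpoint.
curl http://test.example.com:8080
```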
### Configurations

The KubeLB CCM Helm chart can be used to further configure the CCM. Some essential options are:

```yaml
kubelb:
  # Use ExternalIP or InternalIP in the management cluster to route traffic back to the node ports of the tenant cluster.
  nodeAddressType: ExternalIP
  # This can be enabled to use KubeLB in a cluster where another load balancer provider is already running. When enabled, KubeLB will only manage
  # services of type LoadBalancer that are using the `kubelb` LoadBalancerClass.
  useLoadBalancerClass: false
```

diff --git a/content/kubelb/v1.2/tutorials/observability/_index.en.md b/content/kubelb/v1.2/tutorials/observability/_index.en.md new file mode 100644 index 000000000..743871860 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/observability/_index.en.md @@ -0,0 +1,26 @@

+++
title = "Observability"
linkTitle = "Observability"
date = 2023-10-27T10:07:15+02:00
weight = 8
+++

KubeLB is a mission-critical component in the Kubernetes ecosystem, and its observability is crucial for ensuring the stability and reliability of the platform. This guide will walk you through the steps to enable and configure observability for KubeLB.

KubeLB itself doesn't restrict platform providers to specific observability tools, since we are well aware that different customers run different monitoring, logging, alerting, and tracing stacks based on their own requirements. It does, however, offer Grafana dashboards that can be plugged into your existing monitoring stack.

## Grafana Dashboard [WIP]

This is a work in progress and can be tracked against [Monitoring and Alerting](https://github.com/kubermatic/kubelb/issues/56).

## Alerting and Recording rules [WIP]

This is a work in progress and can be tracked against [Monitoring and Alerting](https://github.com/kubermatic/kubelb/issues/56).

## Recommended Tools

Our suggested tools for observability are:

1. [Gateway Observability](https://gateway.envoyproxy.io/docs/tasks/observability/gateway-observability/): This is the default MLA stack provided by Envoy Gateway. Since it's designed specifically for Envoy Gateway and Gateway APIs, it offers a comprehensive set of observability features tailored to the needs of Envoy Gateway users.
2. [Hubble UI](https://docs.cilium.io/en/stable/gettingstarted/hubble_setup/): When using Cilium as the CNI, Hubble UI provides a user-friendly interface for visualizing and analyzing network traffic in your Kubernetes cluster.
3. [Kiali](https://kiali.io/docs/installation/installation-guide/): When using Istio as the service mesh, Kiali is a powerful tool for visualizing and analyzing the traffic flow within your Istio-based applications.

diff --git a/content/kubelb/v1.2/tutorials/security/_index.en.md b/content/kubelb/v1.2/tutorials/security/_index.en.md new file mode 100644 index 000000000..310780651 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/security/_index.en.md @@ -0,0 +1,13 @@

+++
title = "Security"
linkTitle = "Security"
date = 2023-10-27T10:07:15+02:00
weight = 7
+++

This guide covers managing DNS, TLS, and other security-related configurations in KubeLB.
+ +## Table of Content + +{{% children depth=5 %}} +{{% /children %}} diff --git a/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md b/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md new file mode 100644 index 000000000..9d28a7b16 --- /dev/null +++ b/content/kubelb/v1.2/tutorials/security/cert-management/_index.en.md @@ -0,0 +1,261 @@ ++++ +title = "Certificate Management" +linkTitle = "Certificate Management" +date = 2023-10-27T10:07:15+02:00 +weight = 1 +enterprise = true ++++ + +## Setup + +### Install Cert-Manager + +Install [cert-manager](https://cert-manager.io) to manage certificates for your tenants. + +These are minimal examples to get you started quickly. Please refer to the documentation of [cert-manager](https://cert-manager.io/docs/installation/helm/) for further details and configurations. + +{{< tabs name="cert-manager" >}} +{{% tab name="Gateway API" %}} + +Update values.yaml for KubeLB manager chart to enable the cert-manager addon. + +```yaml +kubelb-addons: + enabled: true + cert-manager: + enabled: true + crds: + enabled: true + config: + apiVersion: controller.config.cert-manager.io/v1alpha1 + kind: ControllerConfiguration + enableGatewayAPI: true +``` + +{{% /tab %}} +{{% tab name="Ingress" %}} + +Update values.yaml for KubeLB manager chart to enable the cert-manager addon. + +```yaml +kubelb-addons: + enabled: true + cert-manager: + enabled: true + crds: + enabled: true + config: + apiVersion: controller.config.cert-manager.io/v1alpha1 + kind: ControllerConfiguration + enableGatewayAPI: false +``` + +{{% /tab %}} +{{< /tabs >}} + +### Configure Tenant + +Certificate management can be enabled/disabled at global or tenant level. For automation purposes, you can configure allowed domains and default issuer for the certificates at the tenant level. + +```yaml +apiVersion: kubelb.k8c.io/v1alpha1 +kind: Tenant +metadata: + name: shroud +spec: + # These domains are allowed to be used for Ingress, Gateway API, DNS, and certs. + allowedDomains: + - "kube.example.com" + - "*.kube.example.com" + - "*.shroud.example.com" + certificates: + # can also be configured in the `Config` resource at a global level. + # Default issuer to use if `kubelb.k8c.io/manage-certificates` annotation is added to the cluster. + defaultClusterIssuer: "letsencrypt-staging" + # If not empty, only the domains specified here will have automation for Certificates. Everything else will be ignored. + allowedDomains: + - "*.shroud.example.com" +``` + +Users can then either use [cert-manager annotations](https://cert-manager.io/docs/usage/ingress/) or the annotation `kubelb.k8c.io/manage-certificates: true` on their resources to automate certificate management. + +### Cluster Issuer example + +{{% notice info %}} +Due to multi-tenancy, it's recommended to use DNS challenge for certificate management. Gateway API has a limitation and doesn't support wildcard domains with HTTP01 challenge. Similarly, for Ingress, unless you are using single ingress installation for all tenants, you will need to create a separate ClusterIssuer for each tenant. Same is the case for Gateway API since it needs the Gateway name to resolve the certificate challenges. 
+{{% /notice %}} + +#### Example for DNS challenge with AWS Route53 + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production-dns +spec: + acme: + email: user@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-production-dns + solvers: + - dns01: + route53: + region: eu-central-1 + accessKeyIDSecretRef: + name: route53-credentials + key: access-key-id + secretAccessKeySecretRef: + name: route53-credentials + key: secret-access-key +``` + +#### Example for HTTP01 challenge + +{{< tabs name="cert-manager" >}} +{{% tab name="Gateway API" %}} + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + email: user@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: example-issuer-account-key + solvers: + - http01: + gatewayHTTPRoute: + parentRefs: + - kind: Gateway + name: default + namespace: tenant-default + sectionName: http +``` + +{{% /tab %}} +{{% tab name="Ingress" %}} + +```yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: user@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource that will be used to store the account's private key. + name: example-issuer-account-key + # Add a single challenge solver, HTTP01 using nginx + solvers: + - http01: + ingress: + ingressClassName: nginx +``` + +{{% /tab %}} +{{< /tabs >}} + +The additional validation at the tenant level allows us to use a single instance of cert-manager for multiple tenants. Multiple cert-manager installations are not recommended and it's better to have a single instance of cert-manager for all tenants but different ClusterIssuers/Issuers for different tenants, if required. + +## Usage + +In tenant cluster, create the following resources. Based on your requirements: + +1. Use cert-manager with known issuer: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example + annotations: + cert-manager.io/issuer: foo +spec: + gatewayClassName: kubelb + listeners: + - name: http + hostname: example.com + port: 443 + protocol: HTTPS + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - name: example-com-tls +``` + +2. Leave the issuer up to the management cluster: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example + annotations: + kubelb.k8c.io/manage-certificates: true +spec: + gatewayClassName: kubelb + listeners: + - name: http + hostname: example.com + port: 443 + protocol: HTTPS + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - name: example-com-tls +``` + +3. 
Use custom certificates: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example + namespace: default +spec: + gatewayClassName: kubelb + listeners: + - name: http + hostname: example.com + port: 443 + protocol: HTTPS + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - name: custom-certificate +--- +kind: SyncSecret +apiVersion: kubelb.k8c.io/v1alpha1 +data: + tls.crt: ZnJhbmtsYW1wYXJkCg== + tls.key: ZnJhbmtsYW1wYXJkCg== +metadata: + annotations: + name: custom-certificate + namespace: default +type: kubernetes.io/tls +--- +``` + +This will then sync the secret to the management cluster in a secure way. Refer to [Bring your own Certificates]({{< relref "../secrets" >}}) for more details. + +**For more use cases, view [cert-manager documentation](https://cert-manager.io/docs/usage/gateway/)** diff --git a/content/kubelb/v1.2/tutorials/security/dns/_index.en.md b/content/kubelb/v1.2/tutorials/security/dns/_index.en.md new file mode 100644 index 000000000..952a5002a --- /dev/null +++ b/content/kubelb/v1.2/tutorials/security/dns/_index.en.md @@ -0,0 +1,216 @@ ++++ +title = "DNS Management" +linkTitle = "DNS Management" +date = 2023-10-27T10:07:15+02:00 +weight = 1 +enterprise = true ++++ + +## Setup + +### Install External-dns + +We leverage [External-dns](https://bitnami.com/stack/external-dns/helm) to manage DNS records for the tenant clusters. + +**This is just an example to give you a headstart. For more details on setting up external-dns for different providers, visit [Official Documentation](https://kubernetes-sigs.github.io/external-dns).** + +Update the values.yaml for KubeLB manager chart to enable the external-dns addon. + +```yaml +kubelb-addons: + enabled: true + + external-dns: + enabled: true + domainFilters: + - example.com + extraVolumes: + - name: credentials + secret: + secretName: route53-credentials + extraVolumeMounts: + - name: credentials + mountPath: /.aws + readOnly: true + env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: /.aws/credentials + txtOwnerId: kubelb-example-aws + registry: txt + provider: aws + policy: sync + sources: + - service + - ingress + # Comment out the below resources if you are not using Gateway API. + - gateway-httproute + - gateway-grpcroute + - gateway-tlsroute + - gateway-tcproute + - gateway-udproute +``` + +#### Credentials secret + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: external-dns +--- +apiVersion: v1 +data: + credentials: W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gTk9UVEhBVERVTUIKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gTUFZQkVJVFNBU0VDUkVU +kind: Secret +metadata: + name: route53-credentials + namespace: external-dns +type: Opaque +``` + +### Enable DNS automation + +DNS can be enabled/disabled at global or tenant level. For automation purposes, you can configure allowed domains for DNS per tenant. + +```yaml +apiVersion: kubelb.k8c.io/v1alpha1 +kind: Tenant +metadata: + name: shroud +spec: + # These domains are allowed to be used for Ingress, Gateway API, DNS, and certs. + allowedDomains: + - "kube.example.com" + - "*.kube.example.com" + - "*.shroud.example.com" + dns: + # If not empty, only the domains specified here will have automation for DNS. Everything else will be ignored. + allowedDomains: + - "*.shroud.example.com" + # The wildcard domain to use for auto-generated hostnames for Load balancers + # In EE Edition, this is also use to generated dynamic hostnames for tunnels. 
+ wildcardDomain: "*.apps.example.com" + # Allow tenants to specify explicit hostnames for Load balancers and tunnels(in EE Edition) + allowExplicitHostnames: false + gatewayAPI: + class: "eg" + defaultGateway: + name: "default" + namespace: "kubelb" +``` + +Users can then either use [external-dns annotations](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/annotations/annotations.md) or the annotation `kubelb.k8c.io/manage-dns: true` on their resources to automate DNS management. + +The additional validation at the tenant level allows us to use a single instance of external-dns for multiple tenants. Although, if required, external-dns can be installed per tenant as well. + +#### Configure Gateway + +Gateway resource needs to be configured for this automation to work. For example, if you are using Gateway API, you can configure the Gateway resource to manage DNS as follows: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: default + namespace: kubelb + annotations: + cert-manager.io/cluster-issuer: letsencrypt-production +spec: + gatewayClassName: eg + listeners: + ## HTTP listener to solve DNS challenge for cert-manager + - name: http + protocol: HTTP + port: 80 + allowedRoutes: + namespaces: + from: All + - protocol: HTTPS + port: 443 + name: https + hostname: "*.apps.example.com" + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - kind: Secret + name: eg-https + # Required in EE for tunneling + - protocol: HTTPS + port: 443 + name: https-connection-manager + hostname: "connection-manager.example.com" + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - kind: Secret + name: eg-https-connection-manager +``` + +## Usage + +1. Using external-dns annotations: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example + annotations: + external-dns.alpha.kubernetes.io/hostname: example.com +spec: + gatewayClassName: kubelb + listeners: + - name: http + hostname: example.com + port: 443 + protocol: HTTPS + allowedRoutes: + namespaces: + from: All +``` + +2. Delegate DNS management to KubeLB: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example + annotations: + kubelb.k8c.io/manage-dns: true +spec: + gatewayClassName: kubelb + listeners: + - name: http + hostname: example.com + port: 443 + protocol: HTTPS + allowedRoutes: + namespaces: + from: All +``` + +3. Services can also be annotated to manage DNS: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: backend + annotations: + external-dns.alpha.kubernetes.io/hostname: backend.example.com +spec: + ports: + - name: http + port: 3000 + targetPort: 3000 + selector: + app: backend + type: LoadBalancer +``` diff --git a/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md b/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md new file mode 100644 index 000000000..45cc905ff --- /dev/null +++ b/content/kubelb/v1.2/tutorials/security/secrets/_index.en.md @@ -0,0 +1,47 @@ ++++ +title = "Bring your own Secrets" +linkTitle = "Bring your own Secrets" +date = 2023-10-27T10:07:15+02:00 +weight = 6 ++++ + +To propagate secrets from tenant to management cluster. KubeLB has introduced a custom resource `SyncSecret` which is merely a wrapper over the native Kubernetes secret. The custom resource helps us ensure that we are not exposing any secrets from the LB cluster to the tenants. 
+ +## SyncSecret Example + +### Native Kubernetes Secret + +``` +kind: Secret +apiVersion: v1 +metadata: + name: mongodb-credentials +stringData: + mongodb-password: "123456" + mongodb-root-password: "123456" +type: Opaque +``` + +### Converted to a Sync Secret + +``` +kind: SyncSecret +apiVersion: kubelb.k8c.io/v1alpha1 +metadata: + name: mongodb-credentials +stringData: + mongodb-password: "123456" + mongodb-root-password: "123456" +type: Opaque +``` + +### Automation + +To automate the process of creating SyncSecrets from kubernetes secrets, re-deploy the kubeLB CCM with the following modifications: + +```yaml +kubelb: + enableSecretSynchronizer: true +``` + +This would assign CRUD access for secrets to KubeLB controller and enable a syncer that can convert secrets labelled with `kubelb.k8c.io/managed-by: kubelb` to SyncSecrets. diff --git a/content/kubelb/v1.2/tutorials/tenants/_index.en.md b/content/kubelb/v1.2/tutorials/tenants/_index.en.md new file mode 100644 index 000000000..af6a2445e --- /dev/null +++ b/content/kubelb/v1.2/tutorials/tenants/_index.en.md @@ -0,0 +1,73 @@ ++++ +title = "Tenants" +linkTitle = "Tenants" +date = 2023-10-27T10:07:15+02:00 +weight = 2 ++++ + +Tenants represent the consumers of the load balancer services in the management cluster. They can be individual users, teams, or applications that have their workloads, access control, and quotas isolated by using the tenant concept in management cluster. Tenants are represented by the tenant CRD and have a dedicated namespace `tenant-` in the management cluster. Each Kubernetes cluster where the KubeLB CCM is running is considered a unique tenant. This demarcation is based on the fact that the endpoints, simply the Node IPs and node ports, are unique for each Kubernetes cluster. + +{{% notice note %}} +Tenant configuration has a higher precedence than the global configuration and overrides the global configuration values for the tenant if the fields are available in both the tenant and global configuration. +{{% /notice %}} + +## Kubermatic Kubernetes Platform (Enterprise Edition Only) + +For details, go through [KKP integration details]({{< relref "../../tutorials/kkp">}}) + +## Usage + +For usage outside of KKP please follow the guide along. This guide assumes that the KubeLB manager cluster has been configured by following the [installation guide](../../installation/). + +### KubeLB Tenant + +With KubeLB v1.1, the process to register a new tenant has been simplified. Instead of running scripts to register a new tenant, the user can now create a `Tenant` CRD. + +```yaml +apiVersion: kubelb.k8c.io/v1alpha1 +kind: Tenant +metadata: + name: shroud +spec: + propagatedAnnotations: null + # Propagate all annotations to the resources. + propagateAllAnnotations: true + loadBalancer: + class: "metallb.universe.tf/metallb" + # Enterprise Edition Only + limit: 10 + ingress: + class: "nginx" + gatewayAPI: + class: "eg" + # All of the below configurations are Enterprise Edition Only + dns: + allowedDomains: + - "*.example.com" + certificates: + defaultClusterIssuer: "letsencrypt-prod" + allowedDomains: + - "*.example.com" + allowedDomains: + # All subdomains of example.com are allowed but at a single lower level. For example, kube.example.com, test.example.com, etc. + - "*.example.com" + # All subdomains of kube.com are allowed but at any lower level. For example, example.kube.com, test.tenant1.prod.kube.com etc. 
+ - "**.kube.com" +``` + +With this CR we are creating a tenant named `shroud` with the following configurations: + +* **propagateAllAnnotations: true** - Propagate all annotations to the resources. +* **loadBalancer.class: metallb.universe.tf/metallb** - The class to use for LoadBalancer resources for tenants in the management cluster. +* **loadBalancer.limit: 10** - The limit of LoadBalancer resources that can be created by the tenant. +* **ingress.class: nginx** - The class to use for Ingress resources for tenants in the management cluster. +* **gatewayAPI.class: eg** - The class to use for Gateway API resources for tenants in the management cluster. +* For DNS configuration, we have allowed domains `*.example.com`. +* For Certificates configuration, we have the default cluster issuer `letsencrypt-prod` and allowed domains `*.example.com`. +* For Ingress and Gateway API, we have allowed domains `*.example.com` and `**.kube.com`. + +{{% notice info %}} +The tenant name provided to the consumers is the name of the namespace that is created in the management cluster against the tenant CRD. So the tenant **shroud** will be represented by the namespace **tenant-shroud** in the management cluster. For the CCM, tenantName of **tenant-shroud** needs to be used. +{{% /notice %}} + +**For more details and options, please go through [CRD References]({{< relref "../../references">}})** diff --git a/content/kubeone/main/architecture/_index.en.md b/content/kubeone/main/architecture/_index.en.md index 2bdbfd79e..af010052f 100644 --- a/content/kubeone/main/architecture/_index.en.md +++ b/content/kubeone/main/architecture/_index.en.md @@ -2,7 +2,7 @@ title = "Architecture" date = 2021-02-10T09:00:00+02:00 description = "Learn about the architecture of Kubermatic KubeOne and how you can automate cluster operations on all environments" -weight = 2 +weight = 3 +++ Kubermatic KubeOne automates cluster operations on all your cloud, on-prem, diff --git a/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md b/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md index 35fc05729..b7b9aa29f 100644 --- a/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md +++ b/content/kubeone/main/architecture/compatibility/supported-versions/_index.en.md @@ -14,17 +14,17 @@ support policy in the [Version Skew Policy document][upstream-supported-versions In the following table you can find the supported Kubernetes versions for the current KubeOne version. -| KubeOne \ Kubernetes | 1.33 | 1.32 | 1.31 | 1.30 | 1.29[^1] | +| KubeOne \ Kubernetes | 1.34 | 1.33 | 1.32 | 1.31 | 1.30[^1] | | -------------------- | ---- | ---- | ---- | -----| -------- | -| v1.11 | ✓ | ✓ | ✓ | - | - | -| v1.10 | - | ✓ | ✓ | ✓ | - | -| v1.9 | - | - | ✓ | ✓ | ✓ | +| v1.12 | ✓ | ✓ | ✓ | - | - | +| v1.11 | - | ✓ | ✓ | ✓ | - | +| v1.10 | - | - | ✓ | ✓ | ✓ | -[^1]: Kubernetes 1.29 has reached End-of-Life (EOL) and is not supported any longer. +[^1]: Kubernetes 1.30 has reached End-of-Life (EOL) and is not supported any longer. We strongly recommend upgrading to a newer supported Kubernetes release as soon as possible. We recommend using a Kubernetes release that's not older than one minor release -than the latest Kubernetes release. For example, with 1.32 being the latest -release, we recommend running at least Kubernetes 1.31. +than the latest Kubernetes release. For example, with 1.34 being the latest +release, we recommend running at least Kubernetes 1.33. 
[upstream-supported-versions]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions diff --git a/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md index abeacc163..2e5cdcc95 100644 --- a/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md +++ b/content/kubeone/main/architecture/operating-system-manager/usage/_index.en.md @@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 addons: enable: true operatingSystemManager: @@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: "1.29.4" + kubernetes: "1.34.1" cloudProvider: aws: {} addons: diff --git a/content/kubeone/main/cheat-sheets/_index.en.md b/content/kubeone/main/cheat-sheets/_index.en.md index 9fae4b91a..da8ea9121 100644 --- a/content/kubeone/main/cheat-sheets/_index.en.md +++ b/content/kubeone/main/cheat-sheets/_index.en.md @@ -2,7 +2,7 @@ title = "Cheat Sheets" date = 2021-02-10T09:00:00+02:00 description = "Keep the most important concepts of Kubermatic KubeOne handy for quick reference" -weight = 7 +weight = 8 chapter = true +++ diff --git a/content/kubeone/main/creating-clusters/_index.en.md b/content/kubeone/main/creating-clusters/_index.en.md new file mode 100644 index 000000000..bc38556bd --- /dev/null +++ b/content/kubeone/main/creating-clusters/_index.en.md @@ -0,0 +1,5 @@ ++++ +title = "Creating a Kubernetes Cluster" +url = "/kubeone/main/tutorials/creating-clusters/" +weight = 2 ++++ \ No newline at end of file diff --git a/content/kubeone/main/examples/_index.en.md b/content/kubeone/main/examples/_index.en.md index 756b51b92..0ea68a23d 100644 --- a/content/kubeone/main/examples/_index.en.md +++ b/content/kubeone/main/examples/_index.en.md @@ -2,7 +2,7 @@ title = "Examples" date = 2021-02-10T09:00:00+02:00 description = "A collection of select concepts and scenarios to help you master Kubermatic KubeOne" -weight = 6 +weight = 7 chapter = true +++ diff --git a/content/kubeone/main/guides/_index.en.md b/content/kubeone/main/guides/_index.en.md index 8f1262064..fdf8ce6a4 100644 --- a/content/kubeone/main/guides/_index.en.md +++ b/content/kubeone/main/guides/_index.en.md @@ -2,7 +2,7 @@ title = "Guides" date = 2021-02-10T09:00:00+02:00 description = "Get familiar with Kubermatic KubeOne and read step-by-step instructions to handle important scenarios" -weight = 4 +weight = 5 chapter = true +++ diff --git a/content/kubeone/main/guides/addons/_index.en.md b/content/kubeone/main/guides/addons/_index.en.md index 725b59df0..ed7443363 100644 --- a/content/kubeone/main/guides/addons/_index.en.md +++ b/content/kubeone/main/guides/addons/_index.en.md @@ -64,7 +64,7 @@ the `addons` config: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 cloudProvider: aws: {} # Addons are Kubernetes manifests to be deployed after provisioning the cluster @@ -113,7 +113,7 @@ Example: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 addons: enable: true @@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster 
versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 addons: enable: true @@ -180,7 +180,7 @@ you can use it to override globally defined parameters. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 addons: enable: true diff --git a/content/kubeone/main/guides/autoscaler-addon/_index.en.md b/content/kubeone/main/guides/autoscaler-addon/_index.en.md index cc76f595c..ec64c242f 100644 --- a/content/kubeone/main/guides/autoscaler-addon/_index.en.md +++ b/content/kubeone/main/guides/autoscaler-addon/_index.en.md @@ -33,7 +33,7 @@ kubeone.yaml apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.34.1' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.34.1' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments: ```bash $ kubectl get machinedeployments -n kube-system NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE -kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h +kb-cluster-eu-west-3a 1 1 aws ubuntu 1.34.1 10h +kb-cluster-eu-west-3b 1 1 aws ubuntu 1.34.1 10h +kb-cluster-eu-west-3c 1 1 aws ubuntu 1.34.1 10h ``` ### Step 2: Annotate Machinedeployments @@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne [step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}} [embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}} [ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md -[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler \ No newline at end of file +[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler diff --git a/content/kubeone/main/guides/cis-benchmarking/_index.en.md b/content/kubeone/main/guides/cis-benchmarking/_index.en.md deleted file mode 100644 index 141d454ce..000000000 --- a/content/kubeone/main/guides/cis-benchmarking/_index.en.md +++ /dev/null @@ -1,42 +0,0 @@ -+++ -title = "CIS Benchmarking" -date = 2024-03-06T12:00:00+02:00 -+++ - -[CIS Benchmark for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes) is a guide that consists of secure configuration guidelines and best practices developed for Kubernetes. - -In this document, information how it can be run on a Kubernetes cluster created using KubeOne and what to expect as the result is described. - -## Tooling - -[kube-bench](https://github.com/aquasecurity/kube-bench) is used to create the assessment. - -### Installation -{{% notice note %}} -There are [multiple ways](https://github.com/aquasecurity/kube-bench/blob/main/docs/running.md) to run `kube-bench`. Below method describes how it's running via logging to a master and worker node to run it. 
-{{% /notice %}} - -```bash -# make sure you run those commands as root user: -KUBE_BENCH_VERSION="0.7.2" -KUBE_BENCH_URL="https://github.com/aquasecurity/kube-bench/releases/download/v$%7BKUBE_BENCH_VERSION%7D/kube-bench_$%7BKUBE_BENCH_VERSION%7D_linux_amd64.tar.gz" - -mkdir /root/kube-bench -cd /root/kube-bench -curl -L ${KUBE_BENCH_URL} -o kube-bench_${KUBE_BENCH_VERSION}_linux_amd64.tar.gz -tar xvf kube-bench_${KUBE_BENCH_VERSION}_linux_amd64.tar.gz -``` - -### Run on controlplane node - -```bash -cd /root/kube-bench -./kube-bench -D ./cfg/ run --targets=controlplane,master,etcd,node --benchmark=cis-1.8 -``` - -### Run on a worker node - -```bash -cd /root/kube-bench -./kube-bench -D ./cfg/ run --targets=node --benchmark=cis-1.8 -``` diff --git a/content/kubeone/main/guides/encryption-providers/_index.en.md b/content/kubeone/main/guides/encryption-providers/_index.en.md index 09c42aed9..54c78d6ee 100644 --- a/content/kubeone/main/guides/encryption-providers/_index.en.md +++ b/content/kubeone/main/guides/encryption-providers/_index.en.md @@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' features: # enable encryption providers encryptionProviders: @@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' features: # enable encryption providers encryptionProviders: @@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' features: encryptionProviders: enable: true @@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: kms-test versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' cloudProvider: aws: {} features: diff --git a/content/kubeone/main/guides/mirror-registries/_index.en.md b/content/kubeone/main/guides/mirror-registries/_index.en.md index 49f7a580f..4ff32c862 100644 --- a/content/kubeone/main/guides/mirror-registries/_index.en.md +++ b/content/kubeone/main/guides/mirror-registries/_index.en.md @@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds: -* Buy docker subscribtion. +* Buy docker subscription. How to use docker.io credentials is covered in the [section above][using-credentials]. * Setup own pull-through caching proxy. * Use public pull-through caching proxy. diff --git a/content/kubeone/main/guides/registry-configuration/_index.en.md b/content/kubeone/main/guides/registry-configuration/_index.en.md index 5ef593084..98d74d7c6 100644 --- a/content/kubeone/main/guides/registry-configuration/_index.en.md +++ b/content/kubeone/main/guides/registry-configuration/_index.en.md @@ -37,36 +37,58 @@ This guide assumes that: If you don't have an image registry, you can check out the [Docker Registry][docker-reg-guide] as a possible solution. -## Preloading Images +## Mirroring Images with `kubeone mirror-images` -Another prerequisites for this guide to work is that your image registry has -all images needed for your cluster to work preloaded. +KubeOne provides a built-in command `kubeone mirror-images` to simplify mirroring all required images (Kubernetes core components, CNI plugins, etc.) to your private registry. This command replaces the older `image-loader.sh` script and supports advanced filtering and multi-version mirroring. 
-To make this task easier, we provide the image loader script that: +### Prerequisites -* pulls all images used by components deployed by KubeOne (CNI, - metrics-server...) and Kubeadm (Kubernetes core components and CoreDNS) -* re-tag those images so the image registry (e.g. `docker.io`) is replaced - with the image registry provided by the user -* push re-tagged images to your (mirror) image registry +1. **Registry Setup**: Ensure your registry is accessible by all cluster nodes and supports TLS if using containerd. +2. **Authentication**: The registry must allow unauthenticated access (support for credentials is planned for future releases). +3. **KubeOne CLI**: Use KubeOne v1.5.0 or newer. -The image loader script (`image-loader.sh`) comes in the KubeOne release -archive, under the `hack` directory. It can also be found on [GitHub in the -`hack` directory][img-loader]. If you're downloading the script from GitHub, -it's recommended to switch to the appropriate tag depending on which KubeOne -version you're using. +### Usage -Once you have downloaded the script, you can run it in the following way. -Make sure to replace `KUBERNETES_VERSION` with the Kubernetes version you plan -to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with -the address to your image registry. +The `kubeone mirror-images` command pulls, re-tags, and pushes images to your registry. Use the following syntax (the target registry address is passed as the final, positional argument): +```bash +kubeone mirror-images \ + [--filter base,optional,control-plane] \ + [--kubernetes-versions v1.34.1,v1.33.5] \ + [--insecure] \ + <registry-address> ``` -KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh + +#### Key Flags +- `--filter`: Select image groups (comma-separated): + - `base`: Core images (OSM, DNS Cache, Calico, Machine-Controller). + - `optional`: Add-ons like CCMs and CSI Drivers. + - `control-plane`: Only Kubernetes core components (kube-apiserver, etcd, etc.). +- `--kubernetes-versions`: Specify versions (comma-separated). If omitted, **all KubeOne-supported versions are mirrored**. +- `--insecure`: Allow pushing to registries served over plain HTTP and skip TLS verification (useful for local/insecure setups). + +### Examples + +#### 1. Mirror All Base Images for Specific Versions +```bash +kubeone mirror-images \ + --filter base \ + --kubernetes-versions v1.34.1,v1.33.5 \ + registry.example.com:5000 +``` + +#### 2. Mirror Only Control-Plane Images for All Supported Versions +```bash +kubeone mirror-images \ + --filter control-plane \ + registry.example.com:5000 ``` -The preloading process can take a several minutes, depending on your -connection speed. +### Benefits of `kubeone mirror-images` +- **Simpler Workflow**: No need to manually download or manage scripts. +- **Multi-Version Support**: Mirror images for multiple Kubernetes versions in one command. +- **Granular Control**: Use filters to mirror only the images you need. +- **Automated Retagging**: Handles registry prefixes (e.g., `docker.io` → `registry.example.com`).
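+
+If you want to double-check the result, you can query the target registry directly. This is a minimal sketch, assuming the registry implements the standard Docker Registry HTTP API v2 and is reachable at `registry.example.com:5000`; both the address and the repository path below are placeholders you need to adapt to your setup:
+
+```bash
+# List all repositories currently stored in the mirror registry
+curl -s https://registry.example.com:5000/v2/_catalog | jq .
+
+# List the tags of a single mirrored repository; the exact repository
+# path depends on how the source registry prefix was rewritten
+curl -s https://registry.example.com:5000/v2/kube-apiserver/tags/list | jq .
+```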
## Overriding Image Registries @@ -77,7 +99,7 @@ stanza to your KubeOne configuration file, such as: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.34.1 cloudProvider: aws: {} registryConfiguration: diff --git a/content/kubeone/main/references/_index.en.md index 71dfb7b6e..2187b9ca8 100644 --- a/content/kubeone/main/references/_index.en.md +++ b/content/kubeone/main/references/_index.en.md @@ -1,7 +1,7 @@ +++ title = "References" date = 2021-02-10T09:00:00+02:00 -weight = 5 +weight = 6 chapter = true +++ diff --git a/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md index bd2e3b309..1f375ded2 100644 --- a/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md +++ b/content/kubeone/main/references/kubeone-cluster-v1beta2/_index.en.md @@ -1,6 +1,6 @@ +++ title = "v1beta2 API Reference" -date = 2025-06-12T15:55:08+03:00 +date = 2025-10-14T16:55:58+03:00 weight = 11 +++ ## v1beta2 @@ -159,6 +159,8 @@ CanalSpec defines the Canal CNI plugin | ----- | ----------- | ------ | -------- | | bundle | Bundle inline PEM encoded global CA | string | false | | file | File is a path to the CA bundle file, used as a replacement for Bundle | string | false | +| certificateValidityPeriod | CertificateValidityPeriod specifies the validity period for a non-CA certificate generated by kubeadm. Default value: 8760h (365 days * 24 hours = 1 year) | *metav1.Duration | false | +| caCertificateValidityPeriod | CACertificateValidityPeriod specifies the validity period for a CA certificate generated by kubeadm. Default value: 87600h (365 days * 24 hours * 10 = 10 years) | *metav1.Duration | false | [Back to Group](#v1beta2) @@ -260,6 +262,7 @@ ContainerdRegistry defines endpoints and security for given container registry | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | | mirrors | List of registry mirrors to use | []string | false | +| overridePath | Configure override_path | bool | false | | tlsConfig | TLSConfig for the registry | *[ContainerdTLSConfig](#containerdtlsconfig) | false | | auth | Registry authentication | *[ContainerdRegistryAuthConfig](#containerdregistryauthconfig) | false | @@ -489,6 +492,7 @@ HostConfig describes a single control plane or worker node. | bastionPort | BastionPort is SSH port to use when connecting to the bastion if it's configured in .Bastion. Default value is 22. | int | false | | bastionUser | BastionUser is system login name to use when connecting to bastion host. Default value is \"root\". | string | false | | bastionHostPublicKey | BastionHostPublicKey if not empty, will be used to verify bastion SSH public key | []byte | false | +| bastionPrivateKeyFile | BastionPrivateKeyFile is the path to a file with a PRIVATE AND CLEARTEXT SSH key. Default value is \"\". | string | false | | hostname | Hostname is the hostname(1) of the host. Default value is populated at the runtime via running `hostname -f` command over ssh. | string | false | | isLeader | IsLeader indicates this host as a session leader. Default value is populated at the runtime. | bool | false | | taints | Taints are taints applied to nodes. Those taints are only applied when the node is being provisioned. If not provided (i.e. nil) for control plane nodes, it defaults to TaintEffectNoSchedule with key\n node-role.kubernetes.io/control-plane\nExplicitly empty (i.e.
[]corev1.Taint{}) means no taints will be applied (this is default for worker nodes). | [][corev1.Taint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#taint-v1-core) | false | @@ -531,6 +535,7 @@ KubeOneCluster is KubeOne Cluster API Schema | ----- | ----------- | ------ | -------- | | name | Name is the name of the cluster. | string | true | | controlPlane | ControlPlane describes the control plane nodes and how to access them. | [ControlPlaneConfig](#controlplaneconfig) | true | +| kubeletConfig | KubeletConfig used to generate cluster's KubeletConfiguration that will be used along with kubeadm | [KubeletConfig](#kubeletconfig) | false | | apiEndpoint | APIEndpoint are pairs of address and port used to communicate with the Kubernetes API. | [APIEndpoint](#apiendpoint) | true | | cloudProvider | CloudProvider configures the cloud provider specific features. | [CloudProviderSpec](#cloudproviderspec) | true | | versions | Versions defines which Kubernetes version will be installed. | [VersionConfig](#versionconfig) | true | @@ -576,6 +581,10 @@ KubeletConfig provides some kubelet configuration options | kubeReserved | KubeReserved configure --kube-reserved command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false | | evictionHard | EvictionHard configure --eviction-hard command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false | | maxPods | MaxPods configures maximum number of pods per node. If not provided, default value provided by kubelet will be used (max. 110 pods per node) | *int32 | false | +| imageGCHighThresholdPercent | ImageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated by dividing this field value by 100, so this field must be between 0 and 100, inclusive. When specified, the value must be greater than imageGCLowThresholdPercent. Default: 85 | *int32 | false | +| imageGCLowThresholdPercent | ImageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive. When specified, the value must be less than imageGCHighThresholdPercent. Default: 80 | *int32 | false | +| imageMinimumGCAge | ImageMinimumGCAge is the minimum age for an unused image before it is garbage collected. Default: \"2m\" | metav1.Duration | false | +| imageMaximumGCAge | ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. The default of this field is \"0s\", which disables this field--meaning images won't be garbage collected based on being unused for too long. Default: \"0s\" (disabled) | metav1.Duration | false | [Back to Group](#v1beta2) @@ -695,6 +704,7 @@ OperatingSystemManagerConfig configures kubermatic operating-system-manager depl | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | | deploy | Deploy | bool | false | +| enableNonRootDeviceOwnership | EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. 
| bool | false | [Back to Group](#v1beta2) diff --git a/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md index 1eaf83c80..8ad3c73e8 100644 --- a/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md +++ b/content/kubeone/main/references/kubeone-cluster-v1beta3/_index.en.md @@ -1,6 +1,6 @@ +++ title = "v1beta3 API Reference" -date = 2025-06-12T15:55:08+03:00 +date = 2025-10-14T16:55:58+03:00 weight = 11 +++ ## v1beta3 @@ -172,6 +172,8 @@ CanalSpec defines the Canal CNI plugin | ----- | ----------- | ------ | -------- | | bundle | Bundle inline PEM encoded global CA | string | false | | file | File is a path to the CA bundle file, used as a replacement for Bundle | string | false | +| certificateValidityPeriod | CertificateValidityPeriod specifies the validity period for a non-CA certificate generated by kubeadm. Default value: 8760h (365 days * 24 hours = 1 year) | *metav1.Duration | false | +| caCertificateValidityPeriod | CACertificateValidityPeriod specifies the validity period for a CA certificate generated by kubeadm. Default value: 87600h (365 days * 24 hours * 10 = 10 years) | *metav1.Duration | false | [Back to Group](#v1beta3) @@ -262,6 +264,7 @@ ContainerdRegistry defines endpoints and security for given container registry | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | | mirrors | List of registry mirrors to use | []string | false | +| overridePath | Configure override_path | bool | false | | tlsConfig | TLSConfig for the registry | *[ContainerdTLSConfig](#containerdtlsconfig) | false | | auth | Registry authentication | *[ContainerdRegistryAuthConfig](#containerdregistryauthconfig) | false | @@ -491,6 +494,7 @@ HostConfig describes a single control plane or worker node. | bastionPort | BastionPort is SSH port to use when connecting to the bastion if it's configured in .Bastion. Default value is 22. | int | false | | bastionUser | BastionUser is system login name to use when connecting to bastion host. Default value is \"root\". | string | false | | bastionHostPublicKey | BastionHostPublicKey if not empty, will be used to verify bastion SSH public key | []byte | false | +| bastionPrivateKeyFile | BastionPrivateKeyFile is the path to a file with a PRIVATE AND CLEARTEXT SSH key. Default value is \"\". | string | false | | hostname | Hostname is the hostname(1) of the host. Default value is populated at the runtime via running `hostname -f` command over ssh. | string | false | | isLeader | IsLeader indicates this host as a session leader. Default value is populated at the runtime. | bool | false | | taints | Taints are taints applied to nodes. Those taints are only applied when the node is being provisioned. If not provided (i.e. nil) for control plane nodes, it defaults to TaintEffectNoSchedule with key\n node-role.kubernetes.io/control-plane\nExplicitly empty (i.e. []corev1.Taint{}) means no taints will be applied (this is default for worker nodes). | [][corev1.Taint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#taint-v1-core) | false | @@ -535,6 +539,7 @@ KubeOneCluster is KubeOne Cluster API Schema | controlPlane | ControlPlane describes the control plane nodes and how to access them. | [ControlPlaneConfig](#controlplaneconfig) | true | | apiEndpoint | APIEndpoint are pairs of address and port used to communicate with the Kubernetes API.
| [APIEndpoint](#apiendpoint) | true | | cloudProvider | CloudProvider configures the cloud provider specific features. | [CloudProviderSpec](#cloudproviderspec) | true | +| kubeletConfig | KubeletConfig used to generate cluster's KubeletConfiguration that will be used along with kubeadm | [KubeletConfig](#kubeletconfig) | false | | versions | Versions defines which Kubernetes version will be installed. | [VersionConfig](#versionconfig) | true | | containerRuntime | ContainerRuntime defines which container runtime will be installed | [ContainerRuntimeConfig](#containerruntimeconfig) | false | | clusterNetwork | ClusterNetwork configures the in-cluster networking. | [ClusterNetworkConfig](#clusternetworkconfig) | false | @@ -578,6 +583,10 @@ KubeletConfig provides some kubelet configuration options | evictionHard | EvictionHard configure --eviction-hard command-line flag of the kubelet. See more at: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ | map[string]string | false | | maxPods | MaxPods configures maximum number of pods per node. If not provided, default value provided by kubelet will be used (max. 110 pods per node) | *int32 | false | | podPidsLimit | PodPidsLimit configures the maximum number of processes running in a Pod If not provided, default value provided by kubelet will be used -1 See more about pid-limiting at: https://kubernetes.io/docs/concepts/policy/pid-limiting/ | *int64 | false | +| imageGCHighThresholdPercent | ImageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated by dividing this field value by 100, so this field must be between 0 and 100, inclusive. When specified, the value must be greater than imageGCLowThresholdPercent. Default: 85 | *int32 | false | +| imageGCLowThresholdPercent | ImageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive. When specified, the value must be less than imageGCHighThresholdPercent. Default: 80 | *int32 | false | +| imageMinimumGCAge | ImageMinimumGCAge is the minimum age for an unused image before it is garbage collected. Default: \"2m\" | metav1.Duration | false | +| imageMaximumGCAge | ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. The default of this field is \"0s\", which disables this field--meaning images won't be garbage collected based on being unused for too long. Default: \"0s\" (disabled) | metav1.Duration | false | [Back to Group](#v1beta3) @@ -697,6 +706,7 @@ OperatingSystemManagerConfig configures kubermatic operating-system-manager depl | Field | Description | Scheme | Required | | ----- | ----------- | ------ | -------- | | deploy | Deploy | bool | false | +| enableNonRootDeviceOwnership | EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. 
| bool | false | [Back to Group](#v1beta3) diff --git a/content/kubeone/main/security/_index.en.md new file mode 100644 index 000000000..8eca98cb6 --- /dev/null +++ b/content/kubeone/main/security/_index.en.md @@ -0,0 +1,13 @@ ++++ +title = "Security" +date = 2025-09-19T09:00:00+02:00 +weight = 6 +chapter = true ++++ + +# Security + +## Table of Contents + +{{% children depth=5 %}} +{{% /children %}} diff --git a/content/kubeone/main/security/cis-benchmarking/_index.en.md new file mode 100644 index 000000000..d1d54adcf --- /dev/null +++ b/content/kubeone/main/security/cis-benchmarking/_index.en.md @@ -0,0 +1,28 @@ ++++ +title = "CIS Benchmarking" +date = 2024-03-06T12:00:00+02:00 +weight = 10 ++++ + +The [CIS Benchmark for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes) is a set of secure configuration guidelines and best practices developed for Kubernetes. + +This document describes how to run the benchmark on a Kubernetes cluster created using KubeOne and what results to expect. + +## Tooling + +[Trivy](https://github.com/aquasecurity/trivy) is the tool used to run the benchmark. + +### Installation + +To install Trivy, follow the official [installation instructions](https://trivy.dev/latest/getting-started/installation/). + +### Running the Benchmark + +```bash +trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule +``` + +## Table of Contents + +{{% children depth=5 %}} +{{% /children %}} diff --git a/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/_index.en.md new file mode 100644 index 000000000..c5a99d2c7 --- /dev/null +++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/_index.en.md @@ -0,0 +1,1067 @@ ++++ +title = "Benchmark on Kubernetes 1.33 with KubeOne 1.11.2" +date = 2025-09-19T16:39:34+02:00 ++++ + +This guide helps you evaluate the security of a Kubernetes cluster created using KubeOne against each control in the CIS Kubernetes Benchmark. + +This guide corresponds to the following versions of KubeOne, CIS Benchmarks, and Kubernetes: + +| KubeOne Version | Kubernetes Version | CIS Benchmark Version | +| ---------------- | ------------------ | --------------------- | +| 1.11.2 | 1.33.4 | CIS-1.23 | + +## Testing Methodology + +### Running the Benchmark + +[Trivy](https://github.com/aquasecurity/trivy) was used to run the benchmark. + +```bash +trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule +``` + +### Results + +Summary Report for compliance: CIS Kubernetes Benchmarks v1.23 + +Each control in the CIS Kubernetes Benchmark was evaluated. These are the possible results for each control: + +🟢 **Pass:** The cluster passes the audit/control outlined in the benchmark. + +🔵 **Pass (Additional Configuration Required):** The cluster passes the audit/control outlined in the benchmark with some extra configuration. Links to the relevant documentation are provided. + +🔴 **Fail:** The cluster fails the audit/control; a fix is planned for a future KubeOne release. + +## Control Type: Control Plane Components + +### 1.1.
Control Plane Node Configuration Files + +#### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.1.13: Ensure that the admin.conf file permissions are set to 600 + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 1.1.14: Ensure that the admin.conf file ownership is set to root:root + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600 + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +### 1.2. 
API Server + +#### 1.2.1: Ensure that the --anonymous-auth argument is set to false + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.2: Ensure that the --token-auth-file parameter is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.3: Ensure that the --DenyServiceExternalIPs is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.4: Ensure that the --kubelet-https argument is set to true + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.5: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.6: Ensure that the --kubelet-certificate-authority argument is set as appropriate + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.7: Ensure that the --authorization-mode argument is not set to AlwaysAllow + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.8: Ensure that the --authorization-mode argument includes Node + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.9: Ensure that the --authorization-mode argument includes RBAC + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.10: Ensure that the admission control plugin EventRateLimit is set + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.11: Ensure that the admission control plugin AlwaysAdmit is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.12: Ensure that the admission control plugin AlwaysPullImages is set + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.13: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.2.14: Ensure that the admission control plugin ServiceAccount is set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.15: Ensure that the admission control plugin NamespaceLifecycle is set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.16: Ensure that the admission control plugin NodeRestriction is set + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.17: Ensure that the --secure-port argument is not set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.18: Ensure that the --profiling argument is set to false + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.19: Ensure that the --audit-log-path argument is set + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.20: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.21: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.22: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to 
provide a fix in a future KubeOne release_ + +--- + +#### 1.2.24: Ensure that the --service-account-lookup argument is set to true + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.25: Ensure that the --service-account-key-file argument is set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.26: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.27: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.2.28: Ensure that the --client-ca-file argument is set appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.29: Ensure that the --etcd-cafile argument is set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.30: Ensure that the --encryption-provider-config argument is set as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +### 1.3. Controller Manager + +#### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1 + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +### 1.4. Scheduler + +#### 1.4.1: Ensure that the --profiling argument is set to false + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1 + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +## Control Type: Etcd + +#### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 2.2: Ensure that the --client-cert-auth argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.3: Ensure that the --auto-tls argument is not set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.5: Ensure that the --peer-client-cert-auth argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.6: Ensure that the --peer-auto-tls argument is not set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +## Control Type: Control Plane Configuration + +### 3.1. Authentication and Authorization + +#### 3.1.1: Client certificate authentication should not be used for users (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +### 3.2. 
Logging + +#### 3.2.1: Ensure that a minimal audit policy is created (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +#### 3.2.2: Ensure that the audit policy covers key security concerns (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +## Control Type: Worker Nodes + +### 4.1. Worker Node Configuration Files + +#### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.1.2: Ensure that the kubelet service file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +### 4.2. 
Kubelet + +#### 4.2.1: Ensure that the --anonymous-auth argument is set to false + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.4: Verify that the --read-only-port argument is set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.6: Ensure that the --protect-kernel-defaults argument is set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.7: Ensure that the --make-iptables-util-chains argument is set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.8: Ensure that the --hostname-override argument is not set + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.2.9: Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.10: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate + +**Severity:** CRITICAL + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.2.11: Ensure that the --rotate-certificates argument is not set to false + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.12: Verify that the RotateKubeletServerCertificate argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.13: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +## Control Type: Policies + +### 5.1. RBAC and Service Accounts + +#### 5.1.1: Ensure that the cluster-admin role is only used where required + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.2: Minimize access to secrets + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.3: Minimize wildcard use in Roles and ClusterRoles + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.6: Ensure that Service Account Tokens are only mounted where necessary + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 5.1.8: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +### 5.2. 
Pod Security Standards + +#### 5.2.2: Minimize the admission of privileged containers + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.3: Minimize the admission of containers wishing to share the host process ID namespace + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.4: Minimize the admission of containers wishing to share the host IPC namespace + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 5.2.5: Minimize the admission of containers wishing to share the host network namespace + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.6: Minimize the admission of containers with allowPrivilegeEscalation + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.7: Minimize the admission of root containers + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.8: Minimize the admission of containers with the NET_RAW capability + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.9: Minimize the admission of containers with added capabilities + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.10: Minimize the admission of containers with capabilities assigned + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.11: Minimize the admission of containers with capabilities assigned + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 5.2.12: Minimize the admission of HostPath volumes + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.13: Minimize the admission of containers which use HostPorts + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +### 5.3. Network Policies and CNI + +#### 5.3.1: Ensure that the CNI in use supports Network Policies (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.3.2: Ensure that all Namespaces have Network Policies defined + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +### 5.4. Secrets Management + +#### 5.4.1: Prefer using secrets as files over secrets as environment variables (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.4.2: Consider external secret storage (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +### 5.5. Extensible Admission Control + +#### 5.5.1: Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +### 5.7. 
General Policies + +#### 5.7.1: Create administrative boundaries between resources using namespaces (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.7.2: Ensure that the seccomp profile is set to docker/default in your pod definitions + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.7.3: Apply Security Context to Your Pods and Containers + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.7.4: The default namespace should not be used + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +## References + +[audit-logging]: {{< ref "../../../tutorials/creating-clusters-oidc/#audit-logging" >}} +[encryption-providers]: {{< ref "../../../guides/encryption-providers/" >}} +[oidc]: {{< ref "../../../tutorials/creating-clusters-oidc/" >}} +[anon-req]: +[eventratelimit]: +[securitycontextdeny]: diff --git a/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json new file mode 100644 index 000000000..608377bc1 --- /dev/null +++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.11-k8s1.33/result.json @@ -0,0 +1,694 @@ +{ + "ID": "k8s-cis-1.23", + "Title": "CIS Kubernetes Benchmarks v1.23", + "SummaryControls": [ + { + "ID": "1.1.1", + "Name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.2", + "Name": "Ensure that the API server pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.3", + "Name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.4", + "Name": "Ensure that the controller manager pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.5", + "Name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.6", + "Name": "Ensure that the scheduler pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.7", + "Name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.8", + "Name": "Ensure that the etcd pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.9", + "Name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.10", + "Name": "Ensure that the Container Network Interface file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.11", + "Name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.12", + "Name": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.1.13", + "Name": "Ensure that the admin.conf file permissions are set to 600", + "Severity": "CRITICAL", + 
"TotalFail": 0 + }, + { + "ID": "1.1.14", + "Name": "Ensure that the admin.conf file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.15", + "Name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.16", + "Name": "Ensure that the scheduler.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.17", + "Name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.18", + "Name": "Ensure that the controller-manager.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.19", + "Name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.20", + "Name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.21", + "Name": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.2.1", + "Name": "Ensure that the --anonymous-auth argument is set to false", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.2.2", + "Name": "Ensure that the --token-auth-file parameter is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.3", + "Name": "Ensure that the --DenyServiceExternalIPs is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.4", + "Name": "Ensure that the --kubelet-https argument is set to true", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.5", + "Name": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.6", + "Name": "Ensure that the --kubelet-certificate-authority argument is set as appropriate", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.7", + "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.8", + "Name": "Ensure that the --authorization-mode argument includes Node", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.9", + "Name": "Ensure that the --authorization-mode argument includes RBAC", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.10", + "Name": "Ensure that the admission control plugin EventRateLimit is set", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "1.2.11", + "Name": "Ensure that the admission control plugin AlwaysAdmit is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.12", + "Name": "Ensure that the admission control plugin AlwaysPullImages is set", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.2.13", + "Name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.2.14", + "Name": "Ensure that the admission control plugin ServiceAccount is set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.15", + "Name": "Ensure that the admission control plugin NamespaceLifecycle is set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.16", + "Name": "Ensure that the admission control plugin 
NodeRestriction is set", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.17", + "Name": "Ensure that the --secure-port argument is not set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.18", + "Name": "Ensure that the --profiling argument is set to false", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.19", + "Name": "Ensure that the --audit-log-path argument is set", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.20", + "Name": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.21", + "Name": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.22", + "Name": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.24", + "Name": "Ensure that the --service-account-lookup argument is set to true", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.25", + "Name": "Ensure that the --service-account-key-file argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.26", + "Name": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.27", + "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.2.28", + "Name": "Ensure that the --client-ca-file argument is set appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.29", + "Name": "Ensure that the --etcd-cafile argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.30", + "Name": "Ensure that the --encryption-provider-config argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.3.1", + "Name": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.3", + "Name": "Ensure that the --use-service-account-credentials argument is set to true", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.4", + "Name": "Ensure that the --service-account-private-key-file argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.5", + "Name": "Ensure that the --root-ca-file argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.6", + "Name": "Ensure that the RotateKubeletServerCertificate argument is set to true", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.3.7", + "Name": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.4.1", + "Name": "Ensure that the --profiling argument is set to false", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.4.2", + "Name": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.1", + "Name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "2.2", + "Name": "Ensure that the --client-cert-auth argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.3", + "Name": "Ensure that the --auto-tls argument is not set to true", + 
"Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.4", + "Name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.5", + "Name": "Ensure that the --peer-client-cert-auth argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.6", + "Name": "Ensure that the --peer-auto-tls argument is not set to true", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "3.1.1", + "Name": "Client certificate authentication should not be used for users (Manual)", + "Severity": "HIGH" + }, + { + "ID": "3.2.1", + "Name": "Ensure that a minimal audit policy is created (Manual)", + "Severity": "HIGH" + }, + { + "ID": "3.2.2", + "Name": "Ensure that the audit policy covers key security concerns (Manual)", + "Severity": "HIGH" + }, + { + "ID": "4.1.1", + "Name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "4.1.2", + "Name": "Ensure that the kubelet service file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.3", + "Name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.4", + "Name": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.5", + "Name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "4.1.6", + "Name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.7", + "Name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.1.8", + "Name": "Ensure that the client certificate authorities file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.1.9", + "Name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.10", + "Name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.1", + "Name": "Ensure that the --anonymous-auth argument is set to false", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.2", + "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.3", + "Name": "Ensure that the --client-ca-file argument is set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.4", + "Name": "Verify that the --read-only-port argument is set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.5", + "Name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.6", + "Name": "Ensure that the --protect-kernel-defaults argument is set to true", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.7", + "Name": "Ensure that the --make-iptables-util-chains argument is set to true", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": 
"4.2.8", + "Name": "Ensure that the --hostname-override argument is not set", + "Severity": "HIGH", + "TotalFail": 6 + }, + { + "ID": "4.2.9", + "Name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.10", + "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 1 + }, + { + "ID": "4.2.11", + "Name": "Ensure that the --rotate-certificates argument is not set to false", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.12", + "Name": "Verify that the RotateKubeletServerCertificate argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.13", + "Name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "5.1.1", + "Name": "Ensure that the cluster-admin role is only used where required", + "Severity": "HIGH", + "TotalFail": 2 + }, + { + "ID": "5.1.2", + "Name": "Minimize access to secrets", + "Severity": "HIGH", + "TotalFail": 15 + }, + { + "ID": "5.1.3", + "Name": "Minimize wildcard use in Roles and ClusterRoles", + "Severity": "HIGH", + "TotalFail": 8 + }, + { + "ID": "5.1.6", + "Name": "Ensure that Service Account Tokens are only mounted where necessary", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.1.8", + "Name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.2.2", + "Name": "Minimize the admission of privileged containers", + "Severity": "HIGH", + "TotalFail": 8 + }, + { + "ID": "5.2.3", + "Name": "Minimize the admission of containers wishing to share the host process ID namespace", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "5.2.4", + "Name": "Minimize the admission of containers wishing to share the host IPC namespace", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.2.5", + "Name": "Minimize the admission of containers wishing to share the host network namespace", + "Severity": "HIGH", + "TotalFail": 15 + }, + { + "ID": "5.2.6", + "Name": "Minimize the admission of containers with allowPrivilegeEscalation", + "Severity": "HIGH", + "TotalFail": 31 + }, + { + "ID": "5.2.7", + "Name": "Minimize the admission of root containers", + "Severity": "MEDIUM", + "TotalFail": 35 + }, + { + "ID": "5.2.8", + "Name": "Minimize the admission of containers with the NET_RAW capability", + "Severity": "MEDIUM", + "TotalFail": 2 + }, + { + "ID": "5.2.9", + "Name": "Minimize the admission of containers with added capabilities", + "Severity": "LOW", + "TotalFail": 39 + }, + { + "ID": "5.2.10", + "Name": "Minimize the admission of containers with capabilities assigned", + "Severity": "LOW", + "TotalFail": 39 + }, + { + "ID": "5.2.11", + "Name": "Minimize the admission of containers with capabilities assigned", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.2.12", + "Name": "Minimize the admission of HostPath volumes", + "Severity": "MEDIUM", + "TotalFail": 18 + }, + { + "ID": "5.2.13", + "Name": "Minimize the admission of containers which use HostPorts", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.3.1", + "Name": "Ensure that the CNI in use supports Network Policies (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.3.2", + "Name": "Ensure that all Namespaces have Network Policies defined", + "Severity": 
"MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.4.1", + "Name": "Prefer using secrets as files over secrets as environment variables (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.4.2", + "Name": "Consider external secret storage (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.5.1", + "Name": "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.7.1", + "Name": "Create administrative boundaries between resources using namespaces (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.7.2", + "Name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "Severity": "MEDIUM", + "TotalFail": 19 + }, + { + "ID": "5.7.3", + "Name": "Apply Security Context to Your Pods and Containers", + "Severity": "HIGH", + "TotalFail": 124 + }, + { + "ID": "5.7.4", + "Name": "The default namespace should not be used", + "Severity": "MEDIUM", + "TotalFail": 0 + } + ] +} diff --git a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.7-k8s1.27/_index.en.md b/content/kubeone/main/security/cis-benchmarking/kubeone1.7-k8s1.27/_index.en.md similarity index 100% rename from content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.7-k8s1.27/_index.en.md rename to content/kubeone/main/security/cis-benchmarking/kubeone1.7-k8s1.27/_index.en.md diff --git a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md b/content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md similarity index 97% rename from content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md rename to content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md index 47dbb97d9..3123adc14 100644 --- a/content/kubeone/main/guides/cis-benchmarking/cis1.8-kubeone1.8-k8s1.29/_index.en.md +++ b/content/kubeone/main/security/cis-benchmarking/kubeone1.8-k8s1.29/_index.en.md @@ -22,47 +22,57 @@ Each control in the CIS Kubernetes Benchmark was evaluated. These are the possib 🔴 **Fail:** The audit/control will be fixed in a future KubeOne release. ## Control Type: master + ### 1.1. Control Plane Node Configuration Files + #### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual) **Result:** 🔴 Fail @@ -70,16 +80,19 @@ Each control in the CIS Kubernetes Benchmark was evaluated. 
These are the possib _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root (Manual) **Result:** 🟢 Pass --- + #### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) **Result:** 🟢 Pass @@ -87,52 +100,63 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne runs the etcd cluster as containers and there is no `etcd` user and group on the system --- + #### 1.1.13: Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.14: Ensure that the admin.conf file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual) **Result:** 🟢 Pass --- + #### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) **Result:** 🟢 Pass --- + ### 1.2. API Server + #### 1.2.1: Ensure that the --anonymous-auth argument is set to false (Manual) **Result:** 🔴 Fail @@ -140,11 +164,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 1.2.2: Ensure that the --token-auth-file parameter is not set (Automated) **Result:** 🟢 Pass --- + #### 1.2.3: Ensure that the --DenyServiceExternalIPs is set (Manual) **Result:** 🔴 Fail @@ -152,31 +178,37 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 1.2.4: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.5: Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.6: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) **Result:** 🟢 Pass --- + #### 1.2.7: Ensure that the --authorization-mode argument includes Node (Automated) **Result:** 🟢 Pass --- + #### 1.2.8: Ensure that the --authorization-mode argument includes RBAC (Automated) **Result:** 🟢 Pass --- + #### 1.2.9: Ensure that the admission control plugin EventRateLimit is set (Manual) **Result:** 🔴 Fail @@ -184,11 +216,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** The EventRateLimit admission control plugin is in Alpha state; please see [here][eventratelimit]. Supporting Alpha features is under consideration.
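Should you still want to evaluate the plugin, it is configured through the API server's admission configuration file. The following is a minimal sketch: the limit type and the QPS/burst/cache values are illustrative assumptions, not KubeOne defaults, and the plugin would additionally have to be listed in the API server's `--enable-admission-plugins` flag.

```yaml
# Sketch of an admission configuration enabling EventRateLimit (Alpha).
# All limit values below are example numbers, not recommendations.
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: EventRateLimit
    configuration:
      apiVersion: eventratelimit.admission.k8s.io/v1alpha1
      kind: Configuration
      limits:
        - type: Namespace   # rate-limit Event requests per namespace
          qps: 50
          burst: 100
          cacheSize: 2000
```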
--- + #### 1.2.10: Ensure that the admission control plugin AlwaysAdmit is not set (Automated) **Result:** 🟢 Pass --- + #### 1.2.11: Ensure that the admission control plugin AlwaysPullImages is set (Manual) **Result:** 🔴 Fail @@ -196,6 +230,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 1.2.12: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) **Result:** 🔴 Fail @@ -203,16 +238,19 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** SecurityContextDeny admission control plugin is deprecated as of [Kubernetes 1.27][securitycontextdeny], hence it is not enabled. --- + #### 1.2.13: Ensure that the admission control plugin ServiceAccount is set (Automated) **Result:** 🟢 Pass --- + #### 1.2.14: Ensure that the admission control plugin NamespaceLifecycle is set (Automated) **Result:** 🟢 Pass --- + #### 1.2.15: Ensure that the admission control plugin NodeRestriction is set (Automated) **Result:** 🔴 Fail @@ -220,11 +258,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 1.2.16: Ensure that the --profiling argument is set to false (Automated) **Result:** 🟢 Pass --- + #### 1.2.17: Ensure that the --audit-log-path argument is set (Automated) **Result:** 🔵 Pass (Additional Configuration Required) @@ -232,6 +272,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + #### 1.2.18: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) **Result:** 🔵 Pass (Additional Configuration Required) @@ -239,6 +280,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + #### 1.2.19: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) **Result:** 🔵 Pass (Additional Configuration Required) @@ -246,6 +288,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + #### 1.2.20: Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) **Result:** 🔵 Pass (Additional Configuration Required) @@ -253,6 +296,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + #### 1.2.21: Ensure that the --request-timeout argument is set as appropriate (Manual) **Result:** 🟢 Pass @@ -260,36 +304,43 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** The timeout is set to 60 seconds by default. Setting this timeout too high can exhaust the API server's resources, making it prone to a Denial-of-Service attack.
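For reference, this control maps to a single kube-apiserver flag. The sketch below shows an explicit setting; the duration is an arbitrary example, and on kubeadm-based control planes such as the ones KubeOne provisions, the flag is normally injected through the API server's static Pod manifest rather than invoked by hand.

```bash
# Example only: set an explicit API server request timeout (default: 1m0s).
kube-apiserver --request-timeout=2m0s
```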
--- + #### 1.2.22: Ensure that the --service-account-lookup argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 1.2.23: Ensure that the --service-account-key-file argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.24: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.25: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.26: Ensure that the --client-ca-file argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.27: Ensure that the --etcd-cafile argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.2.28: Ensure that the --encryption-provider-config argument is set as appropriate (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -297,6 +348,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Encryption configuration can be enabled as described [here][encryption-providers] --- + #### 1.2.29: Ensure that encryption providers are appropriately configured (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -304,97 +356,121 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Encryption configuration can be enabled as described [here][encryption-providers] --- + #### 1.2.30: Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) **Result:** 🟢 Pass --- + ### 1.3. Controller Manager + #### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) **Result:** 🟢 Pass --- + #### 1.3.2: Ensure that the --profiling argument is set to false (Automated) **Result:** 🟢 Pass --- + #### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) **Result:** 🟢 Pass --- + ### 1.4. Scheduler + #### 1.4.1: Ensure that the --profiling argument is set to false (Automated) **Result:** 🟢 Pass --- + #### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) **Result:** 🟢 Pass --- + ## Control Type: etcd + ### 2. Etcd Node Configuration + #### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 2.2: Ensure that the --client-cert-auth argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 2.3: Ensure that the --auto-tls argument is not set to true (Automated) **Result:** 🟢 Pass --- + #### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 2.5: Ensure that the --peer-client-cert-auth argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 2.6: Ensure that the --peer-auto-tls argument is not set to true (Automated) **Result:** 🟢 Pass --- + #### 2.7: Ensure that a unique Certificate Authority is used for etcd (Manual) **Result:** 🟢 Pass --- + ## Control Type: controlplane + ### 3.1. 
Authentication and Authorization + #### 3.1.1: Client certificate authentication should not be used for users (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -402,6 +478,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne can be configured with OIDC authentication as described [here][oidc] --- + #### 3.1.2: Service account token authentication should not be used for users (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -409,6 +486,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne can be configured with OIDC authentication as described [here][oidc] --- + #### 3.1.3: Bootstrap token authentication should not be used for users (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -416,7 +494,9 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne can be configured with OIDC authentication as described [here][oidc] --- + ### 3.2. Logging + #### 3.2.1: Ensure that a minimal audit policy is created (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -424,6 +504,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + #### 3.2.2: Ensure that the audit policy covers key security concerns (Manual) **Result:** 🔵 Pass (Additional Configuration Required) @@ -431,18 +512,23 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** Audit logging is not enabled by default, it can be configured as described [here][audit-logging] --- + ## Control Type: node + ### 4.1. Worker Node Configuration Files + #### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 4.1.2: Ensure that the kubelet service file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual) **Result:** 🟢 Pass @@ -450,6 +536,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne does not contain `/etc/kubernetes/proxy.conf` file --- + #### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root (Manual) **Result:** 🟢 Pass @@ -457,67 +544,81 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** KubeOne does not contain `/etc/kubernetes/proxy.conf` file --- + #### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + #### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual) **Result:** 🟢 Pass --- + #### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root (Manual) **Result:** 🟢 Pass --- + #### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated) **Result:** 🟢 Pass --- + #### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated) **Result:** 🟢 Pass --- + ### 4.2. 
Kubelet + #### 4.2.1: Ensure that the --anonymous-auth argument is set to false (Automated) **Result:** 🟢 Pass --- + #### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) **Result:** 🟢 Pass --- + #### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate (Automated) **Result:** 🟢 Pass --- + #### 4.2.4: Verify that the --read-only-port argument is set to 0 (Manual) **Result:** 🟢 Pass --- + #### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) **Result:** 🟢 Pass --- + #### 4.2.6: Ensure that the --make-iptables-util-chains argument is set to true (Automated) **Result:** 🟢 Pass --- + #### 4.2.7: Ensure that the --hostname-override argument is not set (Manual) **Result:** 🔴 Fail @@ -525,11 +626,13 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 4.2.8: Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual) **Result:** 🟢 Pass --- + #### 4.2.9: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) **Result:** 🟢 Pass @@ -537,16 +640,19 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ **Details:** `--tls-cert-file` and `--tls-private-key-file` options are provided to Kubelet --- + #### 4.2.10: Ensure that the --rotate-certificates argument is not set to false (Automated) **Result:** 🟢 Pass --- + #### 4.2.11: Verify that the RotateKubeletServerCertificate argument is set to true (Manual) **Result:** 🟢 Pass --- + #### 4.2.12: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) **Result:** 🔴 Fail @@ -554,6 +660,7 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ _The issue is under investigation to provide a fix in a future KubeOne release_ --- + #### 4.2.13: Ensure that a limit is set on pod PIDs (Manual) **Result:** 🔴 Fail @@ -565,6 +672,6 @@ _The issue is under investigation to provide a fix in a future KubeOne release_ [audit-logging]: {{< ref "../../../tutorials/creating-clusters-oidc/#audit-logging" >}} [encryption-providers]: {{< ref "../../../guides/encryption-providers/" >}} [oidc]: {{< ref "../../../tutorials/creating-clusters-oidc/" >}} -[anon-req]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests -[eventratelimit]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit -[securitycontextdeny]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#securitycontextdeny +[anon-req]: +[eventratelimit]: +[securitycontextdeny]: diff --git a/content/kubeone/main/security/system-logs/_index.en.md b/content/kubeone/main/security/system-logs/_index.en.md new file mode 100644 index 000000000..b968e9fcb --- /dev/null +++ b/content/kubeone/main/security/system-logs/_index.en.md @@ -0,0 +1,136 @@ ++++ +title = "Personally Identifiable Information Analysis: Kubernetes and KubeOne System Logs" +date = 2024-03-06T12:00:00+02:00 +weight = 10 ++++ + +This document provides a comprehensive analysis of potential Personally Identifiable Information (PII) and personal data (indirect identifiers) that may be present in system logs from Kubernetes clusters deployed using KubeOne. 
+ +**Target Audience**: Platform operators, security teams, compliance officers + +**Prerequisites**: Basic understanding of Kubernetes and KubeOne + +While KubeOne inherently tries to avoid logging any PII, there are some cases where it is unavoidable and outside the control of the platform operator. The source may be a component that KubeOne ships or one of the underlying Kubernetes components. + +## PII Categories (GDPR-Aligned) + +System logs from Kubernetes clusters may contain the following types of PII: + +### Direct Identifiers + +* **Usernames**: Kubernetes usernames, system usernames, service account names +* **Email addresses**: From TLS certificate subjects (CN, O, OU), OIDC claims, audit logs, or user labels +* **IP addresses**: Client IPs + +### Indirect Identifiers + +* **Resource names**: Pod names, namespace names, deployment names containing user/org identifiers + * Example: `webapp-john-deployment`, `john-doe-dev` namespace +* **Hostnames**: Node hostnames with user or organizational patterns + * Example: `worker-john-prod-01.company.com` +* **Labels and annotations**: Custom metadata that may include user data + * Example: `owner=john.doe@company.com` +* **Volume paths**: Mount paths revealing directory structures with usernames + * Example: `/home/john/data:/data` + +### Cloud Provider Identifiers + +* **Account IDs**: AWS account IDs, Azure subscription IDs, GCP project IDs +* **Resource IDs**: Instance IDs, VPC IDs, volume IDs, subnet IDs, security group IDs +* **DNS names**: Load balancer DNS, instance DNS names +* **Geographic data**: Availability zones, regions + +### Operational Data That May Reveal Personal Data + +* **DNS queries**: Service/pod names in DNS lookups +* **HTTP/gRPC metadata**: URLs, headers, cookies (if Layer 7 visibility is enabled in the CNI) +* **Error messages**: Often contain detailed context with resource IDs and user identifiers +* **Audit logs**: Comprehensive request/response data including full user context + +## Risk Assessment Matrix + +| Component | User Identity | IP Addresses | Credentials | Cloud IDs | Risk Level | +|-----------|---------------|--------------|-------------|-----------|------------| +| kube-apiserver | ✅ High | ✅ High | ✅ High | ❌ No | 🔴 **HIGH** | +| kubelet | ⚠️ Medium | ✅ High | ✅ High | ❌ No | 🔴 **HIGH** | +| etcd | ✅ High | ⚠️ Medium | ✅ High | ❌ No | 🔴 **HIGH** | +| Cloud Controller Managers | ❌ No | ✅ High | ✅ High | ✅ High | 🔴 **HIGH** | +| CSI Drivers | ❌ No | ⚠️ Medium | ✅ High | ✅ High | 🔴 **HIGH** | +| Secrets Store CSI | ❌ No | ❌ No | ✅ High | ⚠️ Low | 🔴 **HIGH** | +| Cilium | ⚠️ Medium | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM-HIGH** | +| kube-controller-manager | ⚠️ Low | ⚠️ Medium | ⚠️ Medium | ⚠️ Medium | 🟡 **MEDIUM** | +| kube-scheduler | ⚠️ Low | ❌ No | ❌ No | ❌ No | 🟡 **MEDIUM** | +| kube-proxy | ❌ No | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM** | +| CoreDNS | ⚠️ Low | ⚠️ Medium | ❌ No | ❌ No | 🟡 **MEDIUM** | +| Canal | ❌ No | ✅ High | ❌ No | ❌ No | 🟡 **MEDIUM** | +| WeaveNet | ❌ No | ✅ High | ⚠️ Low | ❌ No | 🟡 **MEDIUM** | +| cluster-autoscaler | ⚠️ Low | ⚠️ Low | ⚠️ Low | ✅ High | 🟡 **MEDIUM** | +| NodeLocalDNS | ⚠️ Low | ⚠️ Medium | ❌ No | ❌ No | 🟡 **MEDIUM** | +| metrics-server | ⚠️ Low | ❌ No | ❌ No | ❌ No | 🟢 **LOW-MEDIUM** | +| machine-controller | ⚠️ Low | ❌ No | ⚠️ Low | ✅ High | 🟢 **LOW** | +| operating-system-manager | ⚠️ Low | ❌ No | ❌ No | ⚠️ Low | 🟢 **LOW** | + +**Legend**: + +* ✅ High: Frequent and detailed PII exposure +* ⚠️ Medium: Moderate PII exposure +* ❌ No: Minimal or no PII exposure + +### Understanding 
Risk Context + +While the risk matrix provides a helpful overview of potential PII exposure, the risk is not always proportional to a component's rating: a component rated low-risk can still produce high-exposure logs, particularly when its output is combined with that of a high-risk component. + +Consider, for example, a component that logs a full Kubernetes resource when validation fails. The resource itself may contain PII; even though the log message never refers to the personal-data fields directly, the entire resource, including those fields, ends up in the logs. Always review and sanitize logs before sharing them anywhere. + +## Log Filtering and Sanitization + +### Automated PII Filtering + +Implement automated filtering in your log aggregation pipeline to remove PII and personal data from the logs. + +#### Use External Tools for PII Redaction + +* [Presidio](https://microsoft.github.io/presidio/) - A set of tools for data protection and privacy +* [Azure Purview](https://learn.microsoft.com/en-us/purview/information-protection) - A cloud-based data governance service that helps you manage and protect your sensitive data + +### Manual PII Filtering: Common Patterns to Filter + +```regex +# Email addresses +[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,} + +# IPv4 addresses +\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b + +# Basic Auth in URLs +https?://[^:]+:[^@]+@ +```
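The patterns above can be wired into standard tooling before logs leave your environment. Below is a minimal sketch using GNU sed (the non-capturing group is rewritten as a plain group for POSIX ERE); the file names and `[REDACTED-*]` tokens are illustrative assumptions, and a dedicated tool such as Presidio remains preferable at scale.

```bash
# Sketch: redact common PII patterns from a log file before sharing it.
# "cluster.log" and the [REDACTED-*] tokens are placeholder choices.
sed -E \
  -e 's/[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/[REDACTED-EMAIL]/g' \
  -e 's/\b([0-9]{1,3}\.){3}[0-9]{1,3}\b/[REDACTED-IP]/g' \
  -e 's#(https?://)[^:@[:space:]]+:[^@[:space:]]+@#\1[REDACTED-CREDENTIALS]@#g' \
  cluster.log > cluster-redacted.log
```

Note that blanket IPv4 redaction also rewrites non-personal addresses (for example, cluster-internal service IPs), so expect some over-redaction.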
+ +## Best Practices + +### Before sharing logs with Kubermatic Support + +1. Identify the time range needed (minimize data exposure) +2. Export only relevant namespaces/components +3. Run a PII redaction tool or script +4. Manually review the first 100 lines to verify redaction +5. Obtain approval from the data protection officer (if required) + +## Conclusion + +### Key Points + +1. Kubernetes logs contain significant PII, especially from kube-apiserver, kubelet, etcd, and all cloud provider components +2. Higher log verbosity (v=4-5) dramatically increases PII exposure +3. Cloud provider account identifiers are prevalent in Cloud Controller Managers (CCMs) and CSI drivers +4. Automated filtering tools are essential for safe log sharing at scale +5. Manual review is still necessary to catch context-specific PII + +## Additional Resources + +### GDPR and Privacy + +* [GDPR Official Text](https://gdpr-info.eu/) +* [Article 29 Working Party Opinion on Personal Data](https://ec.europa.eu/justice/article-29/documentation/opinion-recommendation/index_en.htm) diff --git a/content/kubeone/main/tutorials/_index.en.md b/content/kubeone/main/tutorials/_index.en.md index 6dca2a05e..203c45d2a 100644 --- a/content/kubeone/main/tutorials/_index.en.md +++ b/content/kubeone/main/tutorials/_index.en.md @@ -2,7 +2,7 @@ title = "Tutorials & How-tos" date = 2021-02-10T09:00:00+02:00 description = "Read and learn the functions and tasks you can perform in Kubermatic KubeOne" -weight = 3 +weight = 4 chapter = true +++ diff --git a/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md index 6ed6e8c72..0b12a00b2 100644 --- a/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md +++ b/content/kubeone/main/tutorials/creating-clusters-baremetal/_index.en.md @@ -135,8 +135,8 @@ The following infrastructure requirements **must** be satisfied to successfully provision a Kubernetes cluster using KubeOne: * You need the appropriate number of instances dedicated for the control plane - * You need **even** number of instances with a minimum of **three** instances - for the Highly-Available control plane + * You need an **odd** number of instances with a minimum of **three** instances + for the highly-available control plane * If you decide to use a single-node control plane instead, one instance is enough; however, a highly-available control plane is strongly advised, especially in production environments @@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: bm-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' cloudProvider: none: {} @@ -233,7 +233,7 @@ controlPlane: sshUsername: root sshPrivateKeyFile: '/home/me/.ssh/id_rsa' taints: - - key: "node-role.kubernetes.io/master" + - key: "node-role.kubernetes.io/control-plane" effect: "NoSchedule" staticWorkers: @@ -298,11 +298,11 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 - + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4 - + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.34.1 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.34.1 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.34.1 + + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.34.1 + + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.34.1 Do you want to proceed (yes/no): ``` @@ -356,11 +356,11 @@ You should see output such as the following one. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 -ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.34.1 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.34.1 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.34.1 +ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.34.1 +ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.34.1 ``` ## Conclusion diff --git a/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md index f23d89388..0444e5958 100644 --- a/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md +++ b/content/kubeone/main/tutorials/creating-clusters-oidc/_index.en.md @@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' cloudProvider: hetzner: {} @@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' cloudProvider: hetzner: {} @@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.34.1' cloudProvider: hetzner: {} diff --git a/content/kubeone/main/tutorials/creating-clusters/_index.en.md b/content/kubeone/main/tutorials/creating-clusters/_index.en.md index 2361fe2f1..699c92446 100644 --- a/content/kubeone/main/tutorials/creating-clusters/_index.en.md +++ b/content/kubeone/main/tutorials/creating-clusters/_index.en.md @@ -585,7 +585,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: aws: {} @@ -613,7 +613,7 @@ with your cluster name in the cloud-config example below. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: azure: {} external: true @@ -648,7 +648,7 @@ and fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: digitalocean: {} external: true @@ -666,7 +666,7 @@ configs. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: gce: {} external: true @@ -697,7 +697,7 @@ The Hetzner CCM fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: hetzner: {} external: true @@ -715,7 +715,7 @@ replace the placeholder values. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: nutanix: {} addons: @@ -745,7 +745,7 @@ cloud-config section.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: openstack: {} external: true @@ -767,7 +767,7 @@ cloudProvider: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: openstack: {} external: true @@ -791,7 +791,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: vmwareCloudDirector: {} @@ -810,7 +810,7 @@ automatically by KubeOne.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: vsphere: {} external: true @@ -858,18 +858,18 @@ In the following table, you can find a list of supported Kubernetes version for latest KubeOne versions (you can run `kubeone version` to find the version that you're running). -| KubeOne \ Kubernetes | 1.33 | 1.32 | 1.31 | 1.30 | 1.29[^1] | +| KubeOne \ Kubernetes | 1.34 | 1.33 | 1.32 | 1.31 | 1.30[^1] | | -------------------- | ---- | ---- | ---- | -----| -------- | -| v1.11 | ✓ | ✓ | ✓ | - | - | -| v1.10 | - | ✓ | ✓ | ✓ | - | -| v1.9 | - | - | ✓ | ✓ | ✓ | +| v1.12 | ✓ | ✓ | ✓ | - | - | +| v1.11 | - | ✓ | ✓ | ✓ | - | +| v1.10 | - | - | ✓ | ✓ | ✓ | -[^1]: Kubernetes 1.29 has reached End-of-Life (EOL) and is not supported any longer. +[^1]: Kubernetes 1.30 has reached End-of-Life (EOL) and is not supported any longer. We strongly recommend upgrading to a newer supported Kubernetes release as soon as possible. We recommend using a Kubernetes release that's not older than one minor release -than the latest Kubernetes release. For example, with 1.33 being the latest -release, we recommend running at least Kubernetes 1.32. +than the latest Kubernetes release. For example, with 1.34 being the latest +release, we recommend running at least Kubernetes 1.33. Now, we're ready to provision the cluster! This is done by running the `kubeone apply` command and providing it the configuration manifest and the @@ -897,9 +897,9 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.34.1 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.34.1 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.34.1 + ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists @@ -977,12 +977,12 @@ cluster. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 +ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.34.1 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.34.1 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.34.1 +ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.34.1 +ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.34.1 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.34.1 ``` ## Conclusion diff --git a/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md index abeacc163..23b99a368 100644 --- a/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md +++ b/content/kubeone/v1.10/architecture/operating-system-manager/usage/_index.en.md @@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 addons: enable: true operatingSystemManager: @@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: "1.29.4" + kubernetes: "1.32.9" cloudProvider: aws: {} addons: diff --git a/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md b/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md index cdaf9d331..320ba12a3 100644 --- a/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md +++ b/content/kubeone/v1.10/examples/addons-calico-vxlan/_index.en.md @@ -13,7 +13,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 cloudProvider: aws: {} diff --git a/content/kubeone/v1.10/guides/addons/_index.en.md b/content/kubeone/v1.10/guides/addons/_index.en.md index b6e654c0e..8d1e79afa 100644 --- a/content/kubeone/v1.10/guides/addons/_index.en.md +++ b/content/kubeone/v1.10/guides/addons/_index.en.md @@ -64,7 +64,7 @@ the `addons` config: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 cloudProvider: aws: {} # Addons are Kubernetes manifests to be deployed after provisioning the cluster @@ -113,7 +113,7 @@ Example: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 addons: enable: true @@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 addons: enable: true @@ -180,7 +180,7 @@ you can use it to override globally defined parameters. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 addons: enable: true diff --git a/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md index cc76f595c..6c9fb9f28 100644 --- a/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md +++ b/content/kubeone/v1.10/guides/autoscaler-addon/_index.en.md @@ -33,7 +33,7 @@ kubeone.yaml apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.32.9' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.32.9' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments: ```bash $ kubectl get machinedeployments -n kube-system NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE -kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h +kb-cluster-eu-west-3a 1 1 aws ubuntu 1.32.9 10h +kb-cluster-eu-west-3b 1 1 aws ubuntu 1.32.9 10h +kb-cluster-eu-west-3c 1 1 aws ubuntu 1.32.9 10h ``` ### Step 2: Annotate Machinedeployments @@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne [step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}} [embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}} [ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md -[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler \ No newline at end of file +[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler diff --git a/content/kubeone/v1.10/guides/encryption-providers/_index.en.md b/content/kubeone/v1.10/guides/encryption-providers/_index.en.md index 09c42aed9..cb6519b7e 100644 --- a/content/kubeone/v1.10/guides/encryption-providers/_index.en.md +++ b/content/kubeone/v1.10/guides/encryption-providers/_index.en.md @@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' features: # enable encryption providers encryptionProviders: @@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' features: # enable encryption providers encryptionProviders: @@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' features: encryptionProviders: enable: true @@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: kms-test versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: aws: {} features: diff --git a/content/kubeone/v1.10/guides/mirror-registries/_index.en.md b/content/kubeone/v1.10/guides/mirror-registries/_index.en.md index 49f7a580f..4ff32c862 100644 --- 
a/content/kubeone/v1.10/guides/mirror-registries/_index.en.md +++ b/content/kubeone/v1.10/guides/mirror-registries/_index.en.md @@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml docker.io registry introduced pretty low rate limits for unauthenticated requests. There are a few workarounds: -* Buy docker subscribtion. +* Buy docker subscription. How to use docker.io credentials is covered in the [section above][using-credentials]. * Set up your own pull-through caching proxy. * Use a public pull-through caching proxy. diff --git a/content/kubeone/v1.10/guides/registry-configuration/_index.en.md b/content/kubeone/v1.10/guides/registry-configuration/_index.en.md index 2d93ff12a..90b35f607 100644 --- a/content/kubeone/v1.10/guides/registry-configuration/_index.en.md +++ b/content/kubeone/v1.10/guides/registry-configuration/_index.en.md @@ -62,7 +62,7 @@ to use (without the `v` prefix), as well as replace the `TARGET_REGISTRY` with the address to your image registry. ``` -KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh +KUBERNETES_VERSION=1.32.9 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh ``` The preloading process can take several minutes, depending on your @@ -77,7 +77,7 @@ stanza to your KubeOne configuration file, such as: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.32.9 cloudProvider: aws: {} registryConfiguration: diff --git a/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md index 35a887a5b..3a9b1497e 100644 --- a/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md +++ b/content/kubeone/v1.10/tutorials/creating-clusters-baremetal/_index.en.md @@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: bm-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: none: {} @@ -297,11 +297,11 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 - + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4 - + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.32.9 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.32.9 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.32.9 + + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.32.9 + + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.32.9 Do you want to proceed (yes/no): ``` @@ -355,11 +355,11 @@ You should see output such as the following one. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 -ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.32.9 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.32.9 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.32.9 +ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.32.9 +ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.32.9 ``` ## Conclusion diff --git a/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md index f23d89388..97b45a83e 100644 --- a/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md +++ b/content/kubeone/v1.10/tutorials/creating-clusters-oidc/_index.en.md @@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: hetzner: {} @@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: hetzner: {} @@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: hetzner: {} diff --git a/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md index 02a7602eb..93ceb3cb4 100644 --- a/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md +++ b/content/kubeone/v1.10/tutorials/creating-clusters/_index.en.md @@ -615,7 +615,7 @@ supported provider. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: aws: {} external: true @@ -642,7 +642,7 @@ with your cluster name in the cloud-config example below. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: azure: {} external: true @@ -677,7 +677,7 @@ and fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: digitalocean: {} external: true @@ -695,7 +695,7 @@ configs. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: gce: {} external: true @@ -726,7 +726,7 @@ The Hetzner CCM fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: hetzner: {} external: true @@ -744,7 +744,7 @@ replace the placeholder values. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: nutanix: {} addons: @@ -774,7 +774,7 @@ cloud-config section.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: openstack: {} external: true @@ -796,7 +796,7 @@ cloudProvider: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: openstack: {} external: true @@ -824,7 +824,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: equinixmetal: {} @@ -845,7 +845,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: vmwareCloudDirector: {} @@ -864,7 +864,7 @@ automatically by KubeOne.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.32.9' cloudProvider: vsphere: {} external: true @@ -950,9 +950,9 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.32.9 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.32.9 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.32.9 + ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists @@ -1030,12 +1030,12 @@ cluster. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 +ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.32.9 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.32.9 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.32.9 +ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.32.9 +ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.32.9 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.32.9 ``` ## Conclusion diff --git a/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md index abeacc163..627110b1a 100644 --- a/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md +++ b/content/kubeone/v1.11/architecture/operating-system-manager/usage/_index.en.md @@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 addons: enable: true operatingSystemManager: @@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: "1.29.4" + kubernetes: "1.33.5" cloudProvider: aws: {} addons: diff --git a/content/kubeone/v1.11/guides/addons/_index.en.md b/content/kubeone/v1.11/guides/addons/_index.en.md index 725b59df0..5f08591f1 100644 --- a/content/kubeone/v1.11/guides/addons/_index.en.md +++ b/content/kubeone/v1.11/guides/addons/_index.en.md @@ -64,7 +64,7 @@ the `addons` config: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 cloudProvider: aws: {} # Addons are Kubernetes manifests to be deployed after provisioning the cluster @@ -113,7 +113,7 @@ Example: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 addons: enable: true @@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 addons: enable: true @@ -180,7 +180,7 @@ you can use it to override globally defined parameters. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 addons: enable: true diff --git a/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md index cc76f595c..7521eabcd 100644 --- a/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md +++ b/content/kubeone/v1.11/guides/autoscaler-addon/_index.en.md @@ -33,7 +33,7 @@ kubeone.yaml apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.33.5' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.33.5' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments: ```bash $ kubectl get machinedeployments -n kube-system NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE -kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h +kb-cluster-eu-west-3a 1 1 aws ubuntu 1.33.5 10h +kb-cluster-eu-west-3b 1 1 aws ubuntu 1.33.5 10h +kb-cluster-eu-west-3c 1 1 aws ubuntu 1.33.5 10h ``` ### Step 2: Annotate Machinedeployments @@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne [step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}} [embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}} [ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md -[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler \ No newline at end of file +[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler diff --git a/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md new file mode 100644 index 000000000..c5a99d2c7 --- /dev/null +++ b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/_index.en.md @@ -0,0 +1,1067 @@ ++++ +title = "Benchmark on Kubernetes 1.33 with KubeOne 1.11.2" +date = 2025-09-19T16:39:34+02:00 ++++ + +This guide helps you evaluate the security of a Kubernetes cluster created using KubeOne against each control in the CIS Kubernetes Benchmark. + +This guide corresponds to the following versions of KubeOne, CIS Benchmarks, and Kubernetes: + +| KubeOne Version | Kubernetes Version | CIS Benchmark Version | +| ---------------- | ------------------ | --------------------- | +| 1.11.2 | 1.33.4 | CIS-1.23 | + +## Testing Methodology + +### Running the Benchmark + +[Trivy](https://github.com/aquasecurity/trivy) was used to run the benchmark. + +```bash +trivy k8s --compliance=k8s-cis-1.23 --report summary --timeout=1h --tolerations node-role.kubernetes.io/control-plane="":NoSchedule +``` + +### Results + +Summary Report for compliance: CIS Kubernetes Benchmarks v1.23 + +Each control in the CIS Kubernetes Benchmark was evaluated. 
These are the possible results for each control:
+
+🟢 **Pass:** The cluster passes the audit/control outlined in the benchmark.
+
+🔵 **Pass (Additional Configuration Required):** The cluster passes the audit/control outlined in the benchmark after some extra configuration; the relevant documentation is linked.
+
+🔴 **Fail:** The cluster currently fails the audit/control; a fix is planned for a future KubeOne release.
+
+## Control Type: Control Plane Components
+
+### 1.1. Control Plane Node Configuration Files
+
+#### 1.1.1: Ensure that the API server pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.2: Ensure that the API server pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.3: Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.4: Ensure that the controller manager pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.5: Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.6: Ensure that the scheduler pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.7: Ensure that the etcd pod specification file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.8: Ensure that the etcd pod specification file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.9: Ensure that the Container Network Interface file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.10: Ensure that the Container Network Interface file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.11: Ensure that the etcd data directory permissions are set to 700 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.12: Ensure that the etcd data directory ownership is set to etcd:etcd
+
+**Severity:** LOW
+
+**Result:** 🔴 Fail
+
+_The issue is under investigation to provide a fix in a future KubeOne release_
+
+---
+
+#### 1.1.13: Ensure that the admin.conf file permissions are set to 600
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.14: Ensure that the admin.conf file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.15: Ensure that the scheduler.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.16: Ensure that the scheduler.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.17: Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.18: Ensure that the controller-manager.conf file ownership is set to root:root
+
+**Severity:** HIGH
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.19: Ensure that the Kubernetes PKI directory and file ownership is set to root:root
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.20: Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive
+
+**Severity:** CRITICAL
+
+**Result:** 🟢 Pass
+
+---
+
+#### 1.1.21: Ensure that the Kubernetes PKI key file permissions are set to 600 + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +### 1.2. API Server + +#### 1.2.1: Ensure that the --anonymous-auth argument is set to false + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.2: Ensure that the --token-auth-file parameter is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.3: Ensure that the --DenyServiceExternalIPs is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.4: Ensure that the --kubelet-https argument is set to true + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.5: Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.6: Ensure that the --kubelet-certificate-authority argument is set as appropriate + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.7: Ensure that the --authorization-mode argument is not set to AlwaysAllow + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.8: Ensure that the --authorization-mode argument includes Node + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.9: Ensure that the --authorization-mode argument includes RBAC + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.10: Ensure that the admission control plugin EventRateLimit is set + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.11: Ensure that the admission control plugin AlwaysAdmit is not set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.12: Ensure that the admission control plugin AlwaysPullImages is set + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.13: Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.2.14: Ensure that the admission control plugin ServiceAccount is set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.15: Ensure that the admission control plugin NamespaceLifecycle is set + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.16: Ensure that the admission control plugin NodeRestriction is set + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.17: Ensure that the --secure-port argument is not set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 1.2.18: Ensure that the --profiling argument is set to false + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.19: Ensure that the --audit-log-path argument is set + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.20: Ensure that the --audit-log-maxage argument is set to 30 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.21: Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.22: Ensure that 
the --audit-log-maxsize argument is set to 100 or as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.2.24: Ensure that the --service-account-lookup argument is set to true + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.25: Ensure that the --service-account-key-file argument is set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.26: Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.27: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.2.28: Ensure that the --client-ca-file argument is set appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.29: Ensure that the --etcd-cafile argument is set as appropriate + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +#### 1.2.30: Ensure that the --encryption-provider-config argument is set as appropriate + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +### 1.3. Controller Manager + +#### 1.3.1: Ensure that the --terminated-pod-gc-threshold argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.3: Ensure that the --use-service-account-credentials argument is set to true + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.4: Ensure that the --service-account-private-key-file argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.5: Ensure that the --root-ca-file argument is set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.3.6: Ensure that the RotateKubeletServerCertificate argument is set to true + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 1.3.7: Ensure that the --bind-address argument is set to 127.0.0.1 + +**Severity:** LOW + +**Result:** 🟢 Pass + +--- + +### 1.4. Scheduler + +#### 1.4.1: Ensure that the --profiling argument is set to false + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 1.4.2: Ensure that the --bind-address argument is set to 127.0.0.1 + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +## Control Type: Etcd + +#### 2.1: Ensure that the --cert-file and --key-file arguments are set as appropriate + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 2.2: Ensure that the --client-cert-auth argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.3: Ensure that the --auto-tls argument is not set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.4: Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.5: Ensure that the --peer-client-cert-auth argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 2.6: Ensure that the --peer-auto-tls argument is not set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +## Control Type: Control Plane Configuration + +### 3.1. 
Authentication and Authorization + +#### 3.1.1: Client certificate authentication should not be used for users (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +### 3.2. Logging + +#### 3.2.1: Ensure that a minimal audit policy is created (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +#### 3.2.2: Ensure that the audit policy covers key security concerns (Manual) + +**Severity:** HIGH + +**Result:** Manual check required + +--- + +## Control Type: Worker Nodes + +### 4.1. Worker Node Configuration Files + +#### 4.1.1: Ensure that the kubelet service file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.1.2: Ensure that the kubelet service file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.3: If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.4: If proxy kubeconfig file exists ensure ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.5: Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.1.6: Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.7: Ensure that the certificate authorities file permissions are set to 600 or more restrictive + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.1.8: Ensure that the client certificate authorities file ownership is set to root:root + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.1.9: If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.1.10: If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +### 4.2. 
Kubelet + +#### 4.2.1: Ensure that the --anonymous-auth argument is set to false + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.2: Ensure that the --authorization-mode argument is not set to AlwaysAllow + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.3: Ensure that the --client-ca-file argument is set as appropriate + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.4: Verify that the --read-only-port argument is set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.5: Ensure that the --streaming-connection-idle-timeout argument is not set to 0 + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.6: Ensure that the --protect-kernel-defaults argument is set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.7: Ensure that the --make-iptables-util-chains argument is set to true + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.8: Ensure that the --hostname-override argument is not set + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.2.9: Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 4.2.10: Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate + +**Severity:** CRITICAL + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 4.2.11: Ensure that the --rotate-certificates argument is not set to false + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.12: Verify that the RotateKubeletServerCertificate argument is set to true + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +#### 4.2.13: Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers + +**Severity:** CRITICAL + +**Result:** 🟢 Pass + +--- + +## Control Type: Policies + +### 5.1. RBAC and Service Accounts + +#### 5.1.1: Ensure that the cluster-admin role is only used where required + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.2: Minimize access to secrets + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.3: Minimize wildcard use in Roles and ClusterRoles + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.1.6: Ensure that Service Account Tokens are only mounted where necessary + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 5.1.8: Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +### 5.2. 
Pod Security Standards + +#### 5.2.2: Minimize the admission of privileged containers + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.3: Minimize the admission of containers wishing to share the host process ID namespace + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.4: Minimize the admission of containers wishing to share the host IPC namespace + +**Severity:** HIGH + +**Result:** 🟢 Pass + +--- + +#### 5.2.5: Minimize the admission of containers wishing to share the host network namespace + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.6: Minimize the admission of containers with allowPrivilegeEscalation + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.7: Minimize the admission of root containers + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.8: Minimize the admission of containers with the NET_RAW capability + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.9: Minimize the admission of containers with added capabilities + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.10: Minimize the admission of containers with capabilities assigned + +**Severity:** LOW + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.11: Minimize the admission of containers with capabilities assigned + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +#### 5.2.12: Minimize the admission of HostPath volumes + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.2.13: Minimize the admission of containers which use HostPorts + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +### 5.3. Network Policies and CNI + +#### 5.3.1: Ensure that the CNI in use supports Network Policies (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.3.2: Ensure that all Namespaces have Network Policies defined + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +### 5.4. Secrets Management + +#### 5.4.1: Prefer using secrets as files over secrets as environment variables (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.4.2: Consider external secret storage (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +### 5.5. Extensible Admission Control + +#### 5.5.1: Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +### 5.7. 
General Policies + +#### 5.7.1: Create administrative boundaries between resources using namespaces (Manual) + +**Severity:** MEDIUM + +**Result:** Manual check required + +--- + +#### 5.7.2: Ensure that the seccomp profile is set to docker/default in your pod definitions + +**Severity:** MEDIUM + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.7.3: Apply Security Context to Your Pods and Containers + +**Severity:** HIGH + +**Result:** 🔴 Fail + +_The issue is under investigation to provide a fix in a future KubeOne release_ + +--- + +#### 5.7.4: The default namespace should not be used + +**Severity:** MEDIUM + +**Result:** 🟢 Pass + +--- + +## References + +[audit-logging]: {{< ref "../../../tutorials/creating-clusters-oidc/#audit-logging" >}} +[encryption-providers]: {{< ref "../../../guides/encryption-providers/" >}} +[oidc]: {{< ref "../../../tutorials/creating-clusters-oidc/" >}} +[anon-req]: +[eventratelimit]: +[securitycontextdeny]: diff --git a/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json new file mode 100644 index 000000000..608377bc1 --- /dev/null +++ b/content/kubeone/v1.11/guides/cis-benchmarking/cis1.23-kubeone1.11-k8s1.33/result.json @@ -0,0 +1,694 @@ +{ + "ID": "k8s-cis-1.23", + "Title": "CIS Kubernetes Benchmarks v1.23", + "SummaryControls": [ + { + "ID": "1.1.1", + "Name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.2", + "Name": "Ensure that the API server pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.3", + "Name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.4", + "Name": "Ensure that the controller manager pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.5", + "Name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.6", + "Name": "Ensure that the scheduler pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.7", + "Name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.8", + "Name": "Ensure that the etcd pod specification file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.9", + "Name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.10", + "Name": "Ensure that the Container Network Interface file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.11", + "Name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.12", + "Name": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.1.13", + "Name": "Ensure that the admin.conf file permissions are set to 600", + 
"Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.14", + "Name": "Ensure that the admin.conf file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.15", + "Name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.16", + "Name": "Ensure that the scheduler.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.17", + "Name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.18", + "Name": "Ensure that the controller-manager.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.1.19", + "Name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.20", + "Name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.1.21", + "Name": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "1.2.1", + "Name": "Ensure that the --anonymous-auth argument is set to false", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.2.2", + "Name": "Ensure that the --token-auth-file parameter is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.3", + "Name": "Ensure that the --DenyServiceExternalIPs is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.4", + "Name": "Ensure that the --kubelet-https argument is set to true", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.5", + "Name": "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.6", + "Name": "Ensure that the --kubelet-certificate-authority argument is set as appropriate", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.7", + "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.8", + "Name": "Ensure that the --authorization-mode argument includes Node", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.9", + "Name": "Ensure that the --authorization-mode argument includes RBAC", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.10", + "Name": "Ensure that the admission control plugin EventRateLimit is set", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "1.2.11", + "Name": "Ensure that the admission control plugin AlwaysAdmit is not set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.12", + "Name": "Ensure that the admission control plugin AlwaysPullImages is set", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.2.13", + "Name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.2.14", + "Name": "Ensure that the admission control plugin ServiceAccount is set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.15", + "Name": "Ensure that the admission control plugin NamespaceLifecycle is set", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.16", + "Name": "Ensure that 
the admission control plugin NodeRestriction is set", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.17", + "Name": "Ensure that the --secure-port argument is not set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "1.2.18", + "Name": "Ensure that the --profiling argument is set to false", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.19", + "Name": "Ensure that the --audit-log-path argument is set", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.20", + "Name": "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.21", + "Name": "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.22", + "Name": "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.2.24", + "Name": "Ensure that the --service-account-lookup argument is set to true", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.25", + "Name": "Ensure that the --service-account-key-file argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.26", + "Name": "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.27", + "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.2.28", + "Name": "Ensure that the --client-ca-file argument is set appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.29", + "Name": "Ensure that the --etcd-cafile argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.2.30", + "Name": "Ensure that the --encryption-provider-config argument is set as appropriate", + "Severity": "LOW", + "TotalFail": 3 + }, + { + "ID": "1.3.1", + "Name": "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.3", + "Name": "Ensure that the --use-service-account-credentials argument is set to true", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.4", + "Name": "Ensure that the --service-account-private-key-file argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.5", + "Name": "Ensure that the --root-ca-file argument is set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.3.6", + "Name": "Ensure that the RotateKubeletServerCertificate argument is set to true", + "Severity": "MEDIUM", + "TotalFail": 3 + }, + { + "ID": "1.3.7", + "Name": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Severity": "LOW", + "TotalFail": 0 + }, + { + "ID": "1.4.1", + "Name": "Ensure that the --profiling argument is set to false", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "1.4.2", + "Name": "Ensure that the --bind-address argument is set to 127.0.0.1", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.1", + "Name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "2.2", + "Name": "Ensure that the --client-cert-auth argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.3", + "Name": "Ensure that the --auto-tls argument is 
not set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.4", + "Name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.5", + "Name": "Ensure that the --peer-client-cert-auth argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "2.6", + "Name": "Ensure that the --peer-auto-tls argument is not set to true", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "3.1.1", + "Name": "Client certificate authentication should not be used for users (Manual)", + "Severity": "HIGH" + }, + { + "ID": "3.2.1", + "Name": "Ensure that a minimal audit policy is created (Manual)", + "Severity": "HIGH" + }, + { + "ID": "3.2.2", + "Name": "Ensure that the audit policy covers key security concerns (Manual)", + "Severity": "HIGH" + }, + { + "ID": "4.1.1", + "Name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "4.1.2", + "Name": "Ensure that the kubelet service file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.3", + "Name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.4", + "Name": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.5", + "Name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "4.1.6", + "Name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.7", + "Name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.1.8", + "Name": "Ensure that the client certificate authorities file ownership is set to root:root", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.1.9", + "Name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.1.10", + "Name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.1", + "Name": "Ensure that the --anonymous-auth argument is set to false", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.2", + "Name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.3", + "Name": "Ensure that the --client-ca-file argument is set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.4", + "Name": "Verify that the --read-only-port argument is set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.5", + "Name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.6", + "Name": "Ensure that the --protect-kernel-defaults argument is set to true", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.7", + "Name": "Ensure that the --make-iptables-util-chains argument is set to true", + "Severity": "HIGH", + "TotalFail": 0 
+ }, + { + "ID": "4.2.8", + "Name": "Ensure that the --hostname-override argument is not set", + "Severity": "HIGH", + "TotalFail": 6 + }, + { + "ID": "4.2.9", + "Name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "4.2.10", + "Name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "Severity": "CRITICAL", + "TotalFail": 1 + }, + { + "ID": "4.2.11", + "Name": "Ensure that the --rotate-certificates argument is not set to false", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.12", + "Name": "Verify that the RotateKubeletServerCertificate argument is set to true", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "4.2.13", + "Name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "Severity": "CRITICAL", + "TotalFail": 0 + }, + { + "ID": "5.1.1", + "Name": "Ensure that the cluster-admin role is only used where required", + "Severity": "HIGH", + "TotalFail": 2 + }, + { + "ID": "5.1.2", + "Name": "Minimize access to secrets", + "Severity": "HIGH", + "TotalFail": 15 + }, + { + "ID": "5.1.3", + "Name": "Minimize wildcard use in Roles and ClusterRoles", + "Severity": "HIGH", + "TotalFail": 8 + }, + { + "ID": "5.1.6", + "Name": "Ensure that Service Account Tokens are only mounted where necessary", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.1.8", + "Name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.2.2", + "Name": "Minimize the admission of privileged containers", + "Severity": "HIGH", + "TotalFail": 8 + }, + { + "ID": "5.2.3", + "Name": "Minimize the admission of containers wishing to share the host process ID namespace", + "Severity": "HIGH", + "TotalFail": 3 + }, + { + "ID": "5.2.4", + "Name": "Minimize the admission of containers wishing to share the host IPC namespace", + "Severity": "HIGH", + "TotalFail": 0 + }, + { + "ID": "5.2.5", + "Name": "Minimize the admission of containers wishing to share the host network namespace", + "Severity": "HIGH", + "TotalFail": 15 + }, + { + "ID": "5.2.6", + "Name": "Minimize the admission of containers with allowPrivilegeEscalation", + "Severity": "HIGH", + "TotalFail": 31 + }, + { + "ID": "5.2.7", + "Name": "Minimize the admission of root containers", + "Severity": "MEDIUM", + "TotalFail": 35 + }, + { + "ID": "5.2.8", + "Name": "Minimize the admission of containers with the NET_RAW capability", + "Severity": "MEDIUM", + "TotalFail": 2 + }, + { + "ID": "5.2.9", + "Name": "Minimize the admission of containers with added capabilities", + "Severity": "LOW", + "TotalFail": 39 + }, + { + "ID": "5.2.10", + "Name": "Minimize the admission of containers with capabilities assigned", + "Severity": "LOW", + "TotalFail": 39 + }, + { + "ID": "5.2.11", + "Name": "Minimize the admission of containers with capabilities assigned", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.2.12", + "Name": "Minimize the admission of HostPath volumes", + "Severity": "MEDIUM", + "TotalFail": 18 + }, + { + "ID": "5.2.13", + "Name": "Minimize the admission of containers which use HostPorts", + "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.3.1", + "Name": "Ensure that the CNI in use supports Network Policies (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.3.2", + "Name": "Ensure that all Namespaces have Network Policies defined", 
+ "Severity": "MEDIUM", + "TotalFail": 0 + }, + { + "ID": "5.4.1", + "Name": "Prefer using secrets as files over secrets as environment variables (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.4.2", + "Name": "Consider external secret storage (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.5.1", + "Name": "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.7.1", + "Name": "Create administrative boundaries between resources using namespaces (Manual)", + "Severity": "MEDIUM" + }, + { + "ID": "5.7.2", + "Name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "Severity": "MEDIUM", + "TotalFail": 19 + }, + { + "ID": "5.7.3", + "Name": "Apply Security Context to Your Pods and Containers", + "Severity": "HIGH", + "TotalFail": 124 + }, + { + "ID": "5.7.4", + "Name": "The default namespace should not be used", + "Severity": "MEDIUM", + "TotalFail": 0 + } + ] +} diff --git a/content/kubeone/v1.11/guides/encryption-providers/_index.en.md b/content/kubeone/v1.11/guides/encryption-providers/_index.en.md index 09c42aed9..b4bdb07f7 100644 --- a/content/kubeone/v1.11/guides/encryption-providers/_index.en.md +++ b/content/kubeone/v1.11/guides/encryption-providers/_index.en.md @@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' features: # enable encryption providers encryptionProviders: @@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' features: # enable encryption providers encryptionProviders: @@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' features: encryptionProviders: enable: true @@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: kms-test versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' cloudProvider: aws: {} features: diff --git a/content/kubeone/v1.11/guides/mirror-registries/_index.en.md b/content/kubeone/v1.11/guides/mirror-registries/_index.en.md index 49f7a580f..4ff32c862 100644 --- a/content/kubeone/v1.11/guides/mirror-registries/_index.en.md +++ b/content/kubeone/v1.11/guides/mirror-registries/_index.en.md @@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds: -* Buy docker subscribtion. +* Buy docker subscription. How to use docker.io credentials is covered in the [section above][using-credentials]. * Setup own pull-through caching proxy. * Use public pull-through caching proxy. diff --git a/content/kubeone/v1.11/guides/registry-configuration/_index.en.md b/content/kubeone/v1.11/guides/registry-configuration/_index.en.md index 5ef593084..5e74b245e 100644 --- a/content/kubeone/v1.11/guides/registry-configuration/_index.en.md +++ b/content/kubeone/v1.11/guides/registry-configuration/_index.en.md @@ -37,36 +37,58 @@ This guide assumes that: If you don't have an image registry, you can check out the [Docker Registry][docker-reg-guide] as a possible solution. -## Preloading Images +## Mirroring Images with `kubeone mirror-images` -Another prerequisites for this guide to work is that your image registry has -all images needed for your cluster to work preloaded. 
+KubeOne provides a built-in command `kubeone mirror-images` to simplify mirroring all required images (Kubernetes core components, CNI plugins, etc.) to your private registry. This command replaces the older `image-loader.sh` script and supports advanced filtering and multi-version mirroring.
-To make this task easier, we provide the image loader script that:
+### Prerequisites
-* pulls all images used by components deployed by KubeOne (CNI,
-  metrics-server...) and Kubeadm (Kubernetes core components and CoreDNS)
-* re-tag those images so the image registry (e.g. `docker.io`) is replaced
-  with the image registry provided by the user
-* push re-tagged images to your (mirror) image registry
+1. **Registry Setup**: Ensure your registry is reachable from all cluster nodes and supports TLS if using containerd.
+2. **Authentication**: The registry must allow unauthenticated access (support for credentials is planned for future releases).
+3. **KubeOne CLI**: Use KubeOne v1.5.0 or newer.
-The image loader script (`image-loader.sh`) comes in the KubeOne release
-archive, under the `hack` directory. It can also be found on [GitHub in the
-`hack` directory][img-loader]. If you're downloading the script from GitHub,
-it's recommended to switch to the appropriate tag depending on which KubeOne
-version you're using.
+### Usage
-Once you have downloaded the script, you can run it in the following way.
-Make sure to replace `KUBERNETES_VERSION` with the Kubernetes version you plan
-to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with
-the address to your image registry.
+The `kubeone mirror-images` command pulls, re-tags, and pushes images to your registry. Use the following syntax, where `<registry-address>` is the address of your registry (as in the examples below):
+```bash
+kubeone mirror-images \
+  [--filter base,optional,control-plane] \
+  [--kubernetes-versions v1.33.5,v1.32.9] \
+  [--insecure] \
+  <registry-address>
 ```
-KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh
+
+#### Key Flags:
+- `--filter`: Select image groups (comma-separated):
+  - `base`: Core images (OSM, DNS Cache, Calico, Machine-Controller).
+  - `optional`: Add-ons like CCMs and CSI Drivers.
+  - `control-plane`: Only Kubernetes core components (kube-apiserver, etcd, etc.).
+- `--kubernetes-versions`: Specify versions (comma-separated). If omitted, **all KubeOne-supported versions are mirrored**.
+- `--insecure`: Skip TLS verification and allow pushing to registries using plain HTTP (useful for local/insecure setups).
+
+### Examples
+
+#### 1. Mirror All Base Images for Specific Versions
+```bash
+kubeone mirror-images \
+  --filter base \
+  --kubernetes-versions v1.33.5,v1.32.9 \
+  registry.example.com:5000
+```
+
+#### 2. Mirror Only Control-Plane Images for All Supported Versions
+```bash
+kubeone mirror-images \
+  --filter control-plane \
+  registry.example.com:5000
 ```
-The preloading process can take a several minutes, depending on your
-connection speed.
+### Benefits of `kubeone mirror-images`
+- **Simpler Workflow**: No need to manually download or manage scripts.
+- **Multi-Version Support**: Mirror images for multiple Kubernetes versions in one command.
+- **Granular Control**: Use filters to mirror only the images you need.
+- **Automated Retagging**: Handles registry prefixes (e.g., `docker.io` → `registry.example.com`).
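+
+### Example: Mirroring to a Local Insecure Registry
+
+As a minimal sketch combining the flags documented above: the `127.0.0.1:5000` address is a placeholder for your own registry, and `--insecure` is only needed because that registry serves plain HTTP.
+
+```bash
+# Hypothetical local registry on plain HTTP at 127.0.0.1:5000;
+# --insecure skips TLS verification, as described under "Key Flags" above.
+kubeone mirror-images \
+  --filter base,control-plane \
+  --kubernetes-versions v1.33.5 \
+  --insecure \
+  127.0.0.1:5000
+```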
## Overriding Image Registries @@ -77,7 +99,7 @@ stanza to your KubeOne configuration file, such as: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.33.5 cloudProvider: aws: {} registryConfiguration: diff --git a/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md index 6ed6e8c72..9d92bca63 100644 --- a/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md +++ b/content/kubeone/v1.11/tutorials/creating-clusters-baremetal/_index.en.md @@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: bm-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' cloudProvider: none: {} @@ -298,11 +298,11 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 - + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4 - + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.33.5 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.33.5 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.33.5 + + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.33.5 + + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.33.5 Do you want to proceed (yes/no): ``` @@ -356,11 +356,11 @@ You should see output such as the following one. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 -ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.33.5 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.33.5 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.33.5 +ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.33.5 +ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.33.5 ``` ## Conclusion diff --git a/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md index f23d89388..402b2b8a9 100644 --- a/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md +++ b/content/kubeone/v1.11/tutorials/creating-clusters-oidc/_index.en.md @@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' cloudProvider: hetzner: {} @@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' cloudProvider: hetzner: {} @@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.33.5' cloudProvider: hetzner: {} diff --git a/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md index 2361fe2f1..6bad11cad 100644 --- a/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md +++ b/content/kubeone/v1.11/tutorials/creating-clusters/_index.en.md @@ -585,7 +585,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: aws: {} @@ -613,7 +613,7 @@ with your cluster name in the cloud-config example below. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: azure: {} external: true @@ -648,7 +648,7 @@ and fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: digitalocean: {} external: true @@ -666,7 +666,7 @@ configs. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: gce: {} external: true @@ -697,7 +697,7 @@ The Hetzner CCM fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: hetzner: {} external: true @@ -715,7 +715,7 @@ replace the placeholder values. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: nutanix: {} addons: @@ -745,7 +745,7 @@ cloud-config section.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: openstack: {} external: true @@ -767,7 +767,7 @@ cloudProvider: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: openstack: {} external: true @@ -791,7 +791,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.33.5' cloudProvider: vmwareCloudDirector: {} @@ -810,7 +810,7 @@ automatically by KubeOne.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.33.2' + kubernetes: '1.34.1' cloudProvider: vsphere: {} external: true @@ -897,9 +897,9 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.33.5 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.33.5 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.33.5 + ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists @@ -977,12 +977,12 @@ cluster. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 +ip-172-31-220-166.eu-west-3.compute.internal Ready 38m v1.33.5 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.33.5 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.33.5 +ip-172-31-221-18.eu-west-3.compute.internal Ready 38m v1.33.5 +ip-172-31-222-211.eu-west-3.compute.internal Ready 38m v1.33.5 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.33.5 ``` ## Conclusion diff --git a/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md b/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md index abeacc163..d165b23fe 100644 --- a/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md +++ b/content/kubeone/v1.9/architecture/operating-system-manager/usage/_index.en.md @@ -13,7 +13,7 @@ To fallback to legacy user-data from Machine Controller, we can disable OSM for apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 addons: enable: true operatingSystemManager: @@ -163,7 +163,7 @@ The variable `initial_machinedeployment_operating_system_profile` can also be co apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: "1.29.4" + kubernetes: "1.31.13" cloudProvider: aws: {} addons: diff --git a/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md b/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md index cdaf9d331..3e4136212 100644 --- a/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md +++ b/content/kubeone/v1.9/examples/addons-calico-vxlan/_index.en.md @@ -13,7 +13,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 cloudProvider: aws: {} diff --git a/content/kubeone/v1.9/guides/addons/_index.en.md b/content/kubeone/v1.9/guides/addons/_index.en.md index 4f439c6a7..d0fcf2e7f 100644 --- a/content/kubeone/v1.9/guides/addons/_index.en.md +++ b/content/kubeone/v1.9/guides/addons/_index.en.md @@ -64,7 +64,7 @@ the `addons` config: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 cloudProvider: aws: {} # Addons are Kubernetes manifests to be deployed after provisioning the cluster @@ -113,7 +113,7 @@ Example: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 addons: enable: true @@ -145,7 +145,7 @@ To delete embedded addon from the cluster, use the new `delete` field from the apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 addons: enable: true @@ -180,7 +180,7 @@ you can use it to override globally defined parameters. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 addons: enable: true diff --git a/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md b/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md index cc76f595c..f778ead63 100644 --- a/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md +++ b/content/kubeone/v1.9/guides/autoscaler-addon/_index.en.md @@ -33,7 +33,7 @@ kubeone.yaml apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.31.13' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -52,7 +52,7 @@ If you're running a cluster with nodes in the multiple zones for the HA purposes apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' ## kubernetes version + kubernetes: '1.31.13' ## kubernetes version cloudProvider: ## This field is sourced automatically if terraform is used for the cluster aws: {} addons: @@ -146,9 +146,9 @@ Run the following kubectl command to inspect the available Machinedeployments: ```bash $ kubectl get machinedeployments -n kube-system NAME REPLICAS AVAILABLE-REPLICAS PROVIDER OS KUBELET AGE -kb-cluster-eu-west-3a 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3b 1 1 aws ubuntu 1.20.4 10h -kb-cluster-eu-west-3c 1 1 aws ubuntu 1.20.4 10h +kb-cluster-eu-west-3a 1 1 aws ubuntu 1.31.13 10h +kb-cluster-eu-west-3b 1 1 aws ubuntu 1.31.13 10h +kb-cluster-eu-west-3c 1 1 aws ubuntu 1.31.13 10h ``` ### Step 2: Annotate Machinedeployments @@ -237,4 +237,4 @@ That is it! You have successfully deployed Kubernetes autoscaler on the KubeOne [step-5]: {{< ref "../../tutorials/creating-clusters/#step-5" >}} [embedded-addons]: {{< ref "../../guides/addons/#overriding-embedded-eddons" >}} [ca-faq]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md -[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler \ No newline at end of file +[ca-faq-what-is]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-is-cluster-autoscaler diff --git a/content/kubeone/v1.9/guides/encryption-providers/_index.en.md b/content/kubeone/v1.9/guides/encryption-providers/_index.en.md index 09c42aed9..2c47248cc 100644 --- a/content/kubeone/v1.9/guides/encryption-providers/_index.en.md +++ b/content/kubeone/v1.9/guides/encryption-providers/_index.en.md @@ -34,7 +34,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' features: # enable encryption providers encryptionProviders: @@ -82,7 +82,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' features: # enable encryption providers encryptionProviders: @@ -140,7 +140,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: k1-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' features: encryptionProviders: enable: true @@ -175,7 +175,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: kms-test versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: aws: {} features: diff --git a/content/kubeone/v1.9/guides/mirror-registries/_index.en.md b/content/kubeone/v1.9/guides/mirror-registries/_index.en.md index 49f7a580f..4ff32c862 100644 --- 
a/content/kubeone/v1.9/guides/mirror-registries/_index.en.md +++ b/content/kubeone/v1.9/guides/mirror-registries/_index.en.md @@ -98,7 +98,7 @@ kubeone apply --manifest kubeone.yaml --credentials credentials.yaml docker.io registry introduced pretty low rate limits for unauthenticated requests. There are few workarounds: -* Buy docker subscribtion. +* Buy docker subscription. How to use docker.io credentials is covered in the [section above][using-credentials]. * Setup own pull-through caching proxy. * Use public pull-through caching proxy. diff --git a/content/kubeone/v1.9/guides/registry-configuration/_index.en.md b/content/kubeone/v1.9/guides/registry-configuration/_index.en.md index b7b39eed4..4e534997e 100644 --- a/content/kubeone/v1.9/guides/registry-configuration/_index.en.md +++ b/content/kubeone/v1.9/guides/registry-configuration/_index.en.md @@ -62,7 +62,7 @@ to use (without the `v` prefix), as well as, replace the `TARGET_REGISTRY` with the address to your image registry. ``` -KUBERNETES_VERSION=1.29.4 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh +KUBERNETES_VERSION=1.31.13 TARGET_REGISTRY=127.0.0.1:5000 ./image-loader.sh ``` The preloading process can take a several minutes, depending on your @@ -77,7 +77,7 @@ stanza to your KubeOne configuration file, such as: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: 1.29.4 + kubernetes: 1.31.13 cloudProvider: aws: {} registryConfiguration: diff --git a/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md index 6ffdf326a..47f42cb76 100644 --- a/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md +++ b/content/kubeone/v1.9/tutorials/creating-clusters-baremetal/_index.en.md @@ -222,7 +222,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster name: bm-cluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: none: {} @@ -301,11 +301,11 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 - + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.20.4 - + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.31.13 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.31.13 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.31.13 + + join worker node "ip-172-31-223-103.eu-west-3.compute.internal" (172.31.223.103) using 1.31.13 + + join worker node "ip-172-31-224-178.eu-west-3.compute.internal" (172.31.224.178) using 1.31.13 Do you want to proceed (yes/no): ``` @@ -359,11 +359,11 @@ You should see output such as the following one. 
``` NAME STATUS ROLES AGE VERSION -ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.20.4 -ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.20.4 -ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.20.4 -ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.20.4 -ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.20.4 +ip-172-31-220-51.eu-west-3.compute.internal Ready master 43m v1.31.13 +ip-172-31-221-177.eu-west-3.compute.internal Ready master 42m v1.31.13 +ip-172-31-222-48.eu-west-3.compute.internal Ready master 41m v1.31.13 +ip-172-31-223-103.eu-west-3.compute.internal Ready 38m v1.31.13 +ip-172-31-224-178.eu-west-3.compute.internal Ready 38m v1.31.13 ``` ## Conclusion diff --git a/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md index f23d89388..f59694024 100644 --- a/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md +++ b/content/kubeone/v1.9/tutorials/creating-clusters-oidc/_index.en.md @@ -47,7 +47,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: hetzner: {} @@ -331,7 +331,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: hetzner: {} @@ -482,7 +482,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: hetzner: {} diff --git a/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md b/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md index 6cdb8a3ed..711ebc4ae 100644 --- a/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md +++ b/content/kubeone/v1.9/tutorials/creating-clusters/_index.en.md @@ -615,7 +615,7 @@ supported provider. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: aws: {} external: true @@ -642,7 +642,7 @@ with your cluster name in the cloud-config example below. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: azure: {} external: true @@ -677,7 +677,7 @@ and fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: digitalocean: {} external: true @@ -695,7 +695,7 @@ configs. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: gce: {} external: true @@ -726,7 +726,7 @@ The Hetzner CCM fetches information about nodes from the API. apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: hetzner: {} external: true @@ -744,7 +744,7 @@ replace the placeholder values. 
apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: nutanix: {} addons: @@ -774,7 +774,7 @@ cloud-config section.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: openstack: {} external: true @@ -796,7 +796,7 @@ cloudProvider: apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: openstack: {} external: true @@ -824,7 +824,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: equinixmetal: {} @@ -845,7 +845,7 @@ apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: vmwareCloudDirector: {} @@ -864,7 +864,7 @@ automatically by KubeOne.** apiVersion: kubeone.k8c.io/v1beta2 kind: KubeOneCluster versions: - kubernetes: '1.29.4' + kubernetes: '1.31.13' cloudProvider: vsphere: {} external: true @@ -954,9 +954,9 @@ INFO[11:37:28 CEST] Determine operating system… INFO[11:37:30 CEST] Running host probes… The following actions will be taken: Run with --verbose flag for more information. - + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.20.4 - + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.20.4 - + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.20.4 + + initialize control plane node "ip-172-31-220-51.eu-west-3.compute.internal" (172.31.220.51) using 1.31.13 + + join control plane node "ip-172-31-221-177.eu-west-3.compute.internal" (172.31.221.177) using 1.31.13 + + join control plane node "ip-172-31-222-48.eu-west-3.compute.internal" (172.31.222.48) using 1.31.13 + ensure machinedeployment "marko-1-eu-west-3a" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3b" with 1 replica(s) exists + ensure machinedeployment "marko-1-eu-west-3c" with 1 replica(s) exists @@ -1034,12 +1034,12 @@ cluster. 
```
 NAME                                           STATUS   ROLES    AGE   VERSION
-ip-172-31-220-166.eu-west-3.compute.internal   Ready    <none>   38m   v1.20.4
-ip-172-31-220-51.eu-west-3.compute.internal    Ready    master   43m   v1.20.4
-ip-172-31-221-177.eu-west-3.compute.internal   Ready    master   42m   v1.20.4
-ip-172-31-221-18.eu-west-3.compute.internal    Ready    <none>   38m   v1.20.4
-ip-172-31-222-211.eu-west-3.compute.internal   Ready    <none>   38m   v1.20.4
-ip-172-31-222-48.eu-west-3.compute.internal    Ready    master   41m   v1.20.4
+ip-172-31-220-166.eu-west-3.compute.internal   Ready    <none>   38m   v1.31.13
+ip-172-31-220-51.eu-west-3.compute.internal    Ready    master   43m   v1.31.13
+ip-172-31-221-177.eu-west-3.compute.internal   Ready    master   42m   v1.31.13
+ip-172-31-221-18.eu-west-3.compute.internal    Ready    <none>   38m   v1.31.13
+ip-172-31-222-211.eu-west-3.compute.internal   Ready    <none>   38m   v1.31.13
+ip-172-31-222-48.eu-west-3.compute.internal    Ready    master   41m   v1.31.13
 ```

 ## Conclusion
diff --git a/content/kubermatic-virtualization/_index.md b/content/kubermatic-virtualization/_index.md
new file mode 100644
index 000000000..b9b17e94d
--- /dev/null
+++ b/content/kubermatic-virtualization/_index.md
@@ -0,0 +1,7 @@
++++
+title = "Kubermatic Virtualization Docs"
+description = "Seamlessly modernize your infrastructure by building your private cloud entirely with Kubernetes"
+sitemapexclude = true
++++
+
+Seamlessly modernize your infrastructure by building your private cloud entirely with Kubernetes
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/_index.en.md b/content/kubermatic-virtualization/main/_index.en.md
new file mode 100644
index 000000000..81ee5fd83
--- /dev/null
+++ b/content/kubermatic-virtualization/main/_index.en.md
@@ -0,0 +1,32 @@
++++
+title = ""
+date = 2025-07-18T16:06:34+02:00
++++
+
+## What is Kubermatic Virtualization (Kube-V)?
+Kubermatic Virtualization (Kube-V) provides a unified platform that enables organizations to seamlessly orchestrate and manage both traditional virtual machines (VMs) and modern containerized applications.
+
+It extends the powerful automation and operational benefits of Kubernetes to your VM-based workloads, allowing for a more consistent and efficient approach to infrastructure management.
+
+Kubermatic Virtualization delivers Kubernetes-native management by unifying VM and container orchestration: it integrates virtual machines (VMs) directly into Kubernetes as native, first-class objects, so you can manage, scale, and deploy VMs with the same familiar Kubernetes tools, APIs, and workflows you already use for your containerized applications.
+## Features
+Kubermatic Virtualization offers a comprehensive set of features designed to modernize infrastructure and streamline operations by converging virtual machine and container management.
+
+### Streamlined Transition and Unified Control
+
+* Effortless Migration: Tools are provided to simplify the migration of existing VMs from diverse environments to the unified platform, making infrastructure modernization more accessible.
+* Centralized Operations: Gain single-pane-of-glass management for the entire lifecycle of both VMs and containers. This includes everything from creation, networking, and storage to scaling and monitoring, all accessible from a centralized interface or command-line tools.
+
+### Infrastructure Modernization and Efficiency
+
+* Gradual Modernization Path: Integrate VMs into a cloud-native environment, offering a practical pathway to modernize legacy applications without the immediate need for extensive refactoring into containers. You can run new containerized applications alongside existing virtualized ones.
+* Optimized Resource Use: By running VMs and containers on the same underlying physical infrastructure, organizations can achieve better hardware resource utilization and significantly reduce operational overhead.
+
+### Enhanced Development and Reliability
+
+* Improved Developer Experience: Developers can leverage familiar, native Kubernetes tools and workflows for managing both VMs and containers, which minimizes learning curves and speeds up development cycles.
+* Automated Workflows (CI/CD): Integrate VMs seamlessly into Kubernetes-native CI/CD pipelines, enabling automated testing and deployment processes.
+* Built-in Resilience: Benefit from the platform's inherent high availability and fault tolerance features, including automated restarts and live migration of VMs between nodes, ensuring continuous application uptime.
+* Integrated Networking and Storage: VMs natively use the platform's software-defined networking (SDN) and storage capabilities, providing consistent network policies, enhanced security, and streamlined storage management.
+
+See [kubermatic.com](https://www.kubermatic.com/).
diff --git a/content/kubermatic-virtualization/main/architecture/_index.en.md b/content/kubermatic-virtualization/main/architecture/_index.en.md
new file mode 100644
index 000000000..7f70fdd10
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/_index.en.md
@@ -0,0 +1,38 @@
++++
+title = "Architecture"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
+
++++
+
+## Architecture Overview
+Kubermatic-Virtualization (Kube-V) is an advanced platform engineered to construct private cloud infrastructures founded
+entirely on Kubernetes. Its core design principle is the seamless integration of Kubernetes-native workloads (containers)
+and traditional virtualized workloads (Virtual Machines - VMs) under a unified management umbrella. Kube-V achieves this
+by building upon Kubernetes as its foundational layer and incorporating KubeVirt to orchestrate and manage VMs alongside
+containerized applications.
+
+Here's a breakdown of the architecture and how these components interact:
+### Host Nodes
+Host nodes can run any popular Linux-based operating system, such as Ubuntu or Rocky Linux, provided hardware (or nested)
+virtualization is enabled to run KVM-based virtual machines.
+
+### Kubernetes
+The foundation, providing the orchestration, scheduling, and management plane for all workloads. In addition, it introduces
+a declarative API and custom resources (CRDs).
+
+### KubeVirt
+An extension to Kubernetes that enables running and managing VMs as native Kubernetes objects. It utilizes Kubernetes pods
+as the execution unit: each running VM is encapsulated within a standard Kubernetes pod, specifically a virt-launcher pod.
+
+### OVN (Open Virtual Network)
+The network fabric, providing advanced SDN (Software-Defined Networking) capabilities for VMs and Pods, replacing or
+augmenting the default CNI (Container Network Interface). The network fabric introduces VPCs (Virtual Private Clouds) as
+isolated operational environments, segmented through subnets and network policies.
+
+### CSI Drivers
+A standardized interface that allows Kubernetes to connect to various storage systems, providing persistent storage for
+VMs and containers. Kube-V is agnostic about the underlying storage infrastructure: any CSI driver can be
+used to enable dynamic provisioning, attachment, and management of persistent volumes for VMs and Pods.
+
+![Kubermatic-Virtualization](kube-v-architecture.png)
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md
new file mode 100644
index 000000000..451b0ea62
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Compatibility"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
++++
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md
new file mode 100644
index 000000000..3fc97b05b
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/kubev-components-versioning/_index.en.md
@@ -0,0 +1,23 @@
++++
+title = "Kubermatic Virtualization Components"
+date = 2025-07-18T16:06:34+02:00
+weight = 5
++++
+
+The following list applies only to the Kube-V version that is currently available. Kubermatic places a strong emphasis
+on the security and reliability of the software it provides and therefore releases updates regularly that also include component updates.
+
+
+| Kube-V Component | Version |
+|:---------------------------------:|:-------:|
+| Kubernetes | v1.33.0 |
+| KubeVirt | v1.5.2 |
+| Containerized Data Importer (CDI) | v1.62.0 |
+| KubeOVN | v1.13.2 |
+| KubeOne | v1.11.1 |
+| Kyverno | v1.14.4 |
+| Cert Manager | v1.18.2 |
+| MetalLB | v0.15.2 |
+| Multus CNI | v4.2.2 |
+| Longhorn | v1.9.1 |
+
diff --git a/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md b/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md
new file mode 100644
index 000000000..d4ea5d2a6
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/compatibility/operating-system/_index.en.md
@@ -0,0 +1,21 @@
++++
+title = "Operating Systems"
+date = 2025-07-18T16:06:34+02:00
+weight = 3
++++
+
+## Supported Operating Systems
+
+The following operating systems are supported:
+
+* Ubuntu 20.04 (Focal)
+* Ubuntu 22.04 (Jammy Jellyfish)
+* Ubuntu 24.04 (Noble Numbat)
+* Rocky Linux 8
+* RHEL 8.0, 8.1, 8.2, 8.3, 8.4
+* Flatcar
+
+{{% notice warning %}}
+The minimum kernel version for Kubernetes 1.32 clusters is 4.19. Some operating system versions, such as RHEL 8,
+do not meet this requirement and therefore do not support Kubernetes 1.32 or newer.
+{{% /notice %}}
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md
new file mode 100644
index 000000000..1ea21ecb8
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/_index.en.md
@@ -0,0 +1,7 @@
++++
+title = "Concepts"
+date = 2025-07-18T16:06:34+02:00
+weight = 1
++++
+
+Get to know the concepts behind Kubermatic Virtualization (Kube-V).
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md new file mode 100644 index 000000000..989f60e28 --- /dev/null +++ b/content/kubermatic-virtualization/main/architecture/concepts/compute/_index.en.md @@ -0,0 +1,5 @@ ++++ +title = "Compute" +date = 2025-07-18T16:06:34+02:00 +weight = 15 ++++ diff --git a/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md new file mode 100644 index 000000000..a912c3f48 --- /dev/null +++ b/content/kubermatic-virtualization/main/architecture/concepts/compute/virtual-machines/_index.en.md @@ -0,0 +1,241 @@ ++++ +title = "VirtualMachines Resources" +date = 2025-07-18T16:06:34+02:00 +weight = 15 ++++ + +## VirtualMachines +As the name suggests, a VirtualMachine(VM) represents a long-running, stateful virtual machine. It's similar to a +Kubernetes Deployment for Pods, meaning you define the desired state (e.g., "this VM should be running," "it should +have 2 CPUs and 4GB RAM") and Kubermatic-Virtualization ensures that state is maintained. It allows you to start, stop, and configure VMs. + +Here is an example of how users can create a VM: +```yaml +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: my-vm-with-http-data-volume +spec: + runStrategy: RerunOnFailure + template: + metadata: + labels: + app: my-vm-with-http-data-volume + annotations: + kubevirt.io/allow-pod-bridge-network-live-migration: "true" + spec: + domain: + cpu: + cores: 1 + memory: + guest: 2Gi + devices: + disks: + - name: rootdisk + disk: + bus: virtio + interfaces: + - name: default + masquerade: {} + volumes: + - name: rootdisk + dataVolume: + name: my-http-data-volume + networks: + - name: default + pod: {} + dataVolumeTemplates: + - metadata: + name: my-http-data-volume + spec: + sourceRef: + kind: DataSource + name: my-http-datasource + apiGroup: cdi.kubevirt.io + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi # <--- IMPORTANT: Adjust to your desired disk size + # storageClassName: my-storage-class # <--- OPTIONAL: Uncomment and replace with your StorageClass name if needed +--- +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataSource +metadata: + name: my-http-datasource +spec: + source: + http: + url: "http://example.com/path/to/your/image.qcow2" # <--- IMPORTANT: Replace with the actual URL of your disk image + # certConfig: # <--- OPTIONAL: Uncomment and configure if your HTTP server uses a custom CA + # caBundle: "base64encodedCABundle" + # secretRef: + # name: "my-http-cert-secret" + # cert: + # secretRef: + # name: "my-http-cert-secret" + # key: + # secretRef: + # name: "my-http-key-secret" +``` +### 1. `VirtualMachine` (apiVersion: `kubevirt.io/v1`) + +This is the main KubeVirt resource that defines your virtual machine. + +- **`spec.template.spec.domain.devices.disks`**: + Defines the disk attached to the VM. We reference `rootdisk` here, which is backed by our DataVolume. + +- **`spec.template.spec.volumes`**: + Links the `rootdisk` to a `dataVolume` named `my-http-data-volume`. + +- **`spec.dataVolumeTemplates`**: + This is the crucial part. It defines a template for a DataVolume that will be created automatically when the VM is started. + +--- + +### 2. 
`DataVolumeTemplate` (within `VirtualMachine.spec.dataVolumeTemplates`) + +- **`metadata.name`**: + The name of the DataVolume that will be created (referenced in `spec.template.spec.volumes`). + +- **`spec.sourceRef`**: + Points to a `DataSource` resource that defines the actual source of the disk image. A `DataSource` is used here to encapsulate HTTP details. + +- **`spec.pvc`**: + Defines the characteristics of the PersistentVolumeClaim (PVC) that will be created for this DataVolume: + + - **`accessModes`**: Typically `ReadWriteOnce` for VM disks. + - **`resources.requests.storage`**: + ⚠️ **Crucially, set this to the desired size of your VM's disk.** It should be at least as large as your source image. + - **`storageClassName`**: *(Optional)* Specify a StorageClass if needed; otherwise, the default will be used. + +--- + +### 3. `DataSource` (apiVersion: `cdi.kubevirt.io/v1beta1`) + +This is a CDI (Containerized Data Importer) resource that encapsulates the details of where your disk image comes from. + +- **`metadata.name`**: + The name of the `DataSource` (referenced in `dataVolumeTemplate.spec.sourceRef`). + +- **`spec.source.http.url`**: + 🔗 This is where you put the direct URL to your disk image (e.g., a `.qcow2`, `.raw`, etc. file). + +- **`spec.source.http.certConfig`**: *(Optional)* + If your HTTP server uses a custom CA or requires client certificates, configure them here. + +--- + +### VirtualMachinePools +KubeVirt's VirtualMachinePool is a powerful resource that allows you to manage a group of identical Virtual Machines (VMs) +as a single unit, similar to how a Kubernetes Deployment manages a set of Pods. It's designed for scenarios where you need +multiple, consistent, and often ephemeral VMs that can scale up or down based on demand. 
+
+Here's a breakdown of the key aspects of KubeVirt VirtualMachinePools:
+
+
+```yaml
+apiVersion: pool.kubevirt.io/v1alpha1
+kind: VirtualMachinePool
+metadata:
+  name: my-vm-http-pool
+spec:
+  replicas: 3 # <--- IMPORTANT: Number of VMs in the pool
+  selector:
+    matchLabels:
+      app: my-vm-http-pool-member
+  virtualMachineTemplate:
+    metadata:
+      labels:
+        app: my-vm-http-pool-member
+      annotations:
+        kubevirt.io/allow-pod-bridge-network-live-migration: "true"
+    spec:
+      runStrategy: RerunOnFailure # Or Always, Halted, Manual
+      template:
+        spec:
+          domain:
+            cpu:
+              cores: 1
+            memory:
+              guest: 2Gi
+            devices:
+              disks:
+                - name: rootdisk
+                  disk:
+                    bus: virtio
+              interfaces:
+                - name: default
+                  masquerade: {}
+          volumes:
+            - name: rootdisk
+              dataVolume:
+                name: my-pool-vm-data-volume # This name will have a unique suffix appended by KubeVirt
+          networks:
+            - name: default
+              pod: {}
+      dataVolumeTemplates:
+        - metadata:
+            name: my-pool-vm-data-volume # This name will be the base for the unique DataVolume names
+          spec:
+            sourceRef:
+              kind: DataSource
+              name: my-http-datasource
+              apiGroup: cdi.kubevirt.io
+            pvc:
+              accessModes:
+                - ReadWriteOnce
+              resources:
+                requests:
+                  storage: 10Gi # <--- IMPORTANT: Adjust to your desired disk size for each VM
+              # storageClassName: my-storage-class # <--- OPTIONAL: Uncomment and replace with your StorageClass name if needed
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataSource
+metadata:
+  name: my-http-datasource
+spec:
+  source:
+    http:
+      url: "http://example.com/path/to/your/image.qcow2" # <--- IMPORTANT: Replace with the actual URL of your disk image
+      # certConfig: # <--- OPTIONAL: Uncomment and configure if your HTTP server uses a custom CA
+      #   caBundle: "base64encodedCABundle"
+      #   secretRef:
+      #     name: "my-http-cert-secret"
+      # cert:
+      #   secretRef:
+      #     name: "my-http-cert-secret"
+      # key:
+      #   secretRef:
+      #     name: "my-http-key-secret"
+
+```
+### VirtualMachinePool (apiVersion: `pool.kubevirt.io/v1alpha1`)
+
+1. **`API Version`**
+   - Use `apiVersion: pool.kubevirt.io/v1alpha1` for `VirtualMachinePool`.
+   - This is a different API group and version than `VirtualMachine`.
+
+2. **`spec.replicas`**
+   - Specifies how many `VirtualMachine` instances the pool should maintain.
+
+3. **`spec.selector`**
+   - Essential for the `VirtualMachinePool` controller to manage its VMs.
+   - `matchLabels` must correspond to the `metadata.labels` within `virtualMachineTemplate`.
+
+4. **`spec.virtualMachineTemplate`**
+   - This section contains the full `VirtualMachine` spec that serves as the template for each VM in the pool.
+
+5. **`dataVolumeTemplates` Naming in a Pool**
+   - `VirtualMachinePool` creates `DataVolumes` from `dataVolumeTemplates`.
+   - A unique suffix is appended to the `metadata.name` of each `DataVolume` (e.g., `my-pool-vm-data-volume-abcde`), ensuring each VM gets a distinct PVC.
+
+---
+
+### How It Works (Similar to Deployment for Pods)
+
+1. Apply the `VirtualMachinePool` manifest, making sure the `my-http-datasource` `DataSource` exists.
+2. The `VirtualMachinePool` controller creates the defined number of `VirtualMachine` replicas.
+3. Each `VirtualMachine` triggers the creation of a `DataVolume` using the specified `dataVolumeTemplate` and `my-http-datasource`.
+4. CDI (Containerized Data Importer) downloads the image into a new unique `PersistentVolumeClaim` (PVC) for each VM.
+5. Each `VirtualMachine` then starts using its dedicated PVC.
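+
+A pool can then be consumed like any other replicated workload. As a minimal illustrative sketch (the Service name is hypothetical, and it assumes the guests actually serve HTTP on port 80, which the manifest above does not set up), a standard Kubernetes Service can load-balance across all pool members by selecting the template labels, which KubeVirt propagates to each VM's `virt-launcher` pod:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-vm-http-pool-svc # hypothetical name
+spec:
+  selector:
+    app: my-vm-http-pool-member # the labels from spec.virtualMachineTemplate.metadata.labels
+  ports:
+    - protocol: TCP
+      port: 80 # port exposed by the Service
+      targetPort: 80 # port the guests are assumed to listen on; masquerade forwards it into the VM
+```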
+
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md
new file mode 100644
index 000000000..662b0d8ee
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/_index.en.md
@@ -0,0 +1,5 @@
++++
+title = "Networking"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md
new file mode 100644
index 000000000..99285fd7d
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/vms-networks-assignment/_index.en.md
@@ -0,0 +1,159 @@
++++
+title = "VMs Network Assignment"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+
+Assigning a Virtual Machine (VM) to a VPC and Subnet typically means attaching the VM's network interface, using
+Multus CNI, to a Kube-OVN network attachment definition (NAD). Putting a VM onto a given VPC and
+Subnet involves a few key steps:
+
+### 1. Define or use an existing VPC:
+
+If you require isolated network spaces for different tenants or environments, you'll first define a Vpc resource.
+This acts as a logical router for your Subnets.
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Vpc
+metadata:
+  name: my-vpc # Name of your VPC
+spec:
+  # Optional: You can specify which namespaces are allowed to use this VPC.
+  # If left empty, all namespaces can use it.
+  # namespaces:
+  #   - my-namespace
+  #   - my-namespace-1
+```
+---
+
+### 2. Define or use an existing Subnet:
+
+Next, you create a Subnet resource, associating it with your Vpc (or the default ovn-cluster VPC if you're not using a
+custom VPC). You also define the CIDR range and, crucially, the Namespaces that will use this Subnet.
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+  name: my-vm-subnet # Name of your Subnet
+spec:
+  # Associate this subnet with your VPC. If omitted, it defaults to 'ovn-cluster'.
+  vpc: my-vpc
+  cidrBlock: 10.10.0.0/24 # The IP range for this subnet
+  gateway: 10.10.0.1 # The gateway IP for this subnet (Kube-OVN often sets this automatically)
+  namespaces:
+    - vm-namespace # The Namespace where your VMs will reside
+```
+
+---
+### 3. Create a Kubernetes Namespace (if it doesn't exist):
+
+Ensure the Namespace you defined in your Subnet exists.
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: vm-namespace
+```
+
+---
+
+### 4. Define a NetworkAttachmentDefinition:
+
+While Kube-OVN can work directly by binding a Namespace to a Subnet, using a NetworkAttachmentDefinition (NAD) with
+Multus provides more explicit control, especially if your VM needs multiple network interfaces or a specific CNI configuration.
+
+```yaml
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+  name: vm-network # Name of the NAD
+  namespace: vm-namespace # Must be in the same namespace as the VMs using it
+spec:
+  config: |
+    {
+      "cniVersion": "0.3.1",
+      "name": "vm-network",
+      "type": "kube-ovn",
+      "server_socket": "/run/openvswitch/kube-ovn-daemon.sock",
+      "netAttachDefName": "vm-namespace/vm-network"
+    }
+```
+{{% notice note %}}
+Note: For a VM to automatically pick up the correct Subnet via the Namespace binding, you often don't strictly
+need a `NetworkAttachmentDefinition` for the primary interface if the Namespace is directly linked to the Subnet. However,
+it's crucial for secondary interfaces or explicit network definitions.
+{{% /notice %}}
+
+---
+
+### 5. Assign the KubeVirt Virtual Machine to the Subnet/VPC:
+
+When defining your `VirtualMachine` (or `VirtualMachinePool`), you ensure it's created in the `vm-namespace` that is
+bound to your `my-vm-subnet`.
+
+#### Option 1: Relying on Namespace-Subnet Binding (Simplest)
+
+If your `vm-namespace` is explicitly listed in the `spec.namespaces` of `my-vm-subnet`, any `VM` (or `Pod`) created in
+`vm-namespace` will automatically get an IP from `my-vm-subnet`.
+
+#### Option 2: Explicitly Specifying the Subnet/NAD via Annotations (For Multiple NICs or Specificity)
+
+If you're using a `NetworkAttachmentDefinition` (`NAD`) or need to explicitly control which subnet is used, especially
+for secondary interfaces, you'd use Multus annotations on your `VM` definition.
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: my-kubeovn-vm-multus
+  namespace: vm-namespace
+  annotations:
+    # Reference the NetworkAttachmentDefinition for the primary interface
+    # The format is <namespace>/<NAD name>
+    k8s.v1.cni.cncf.io/networks: vm-network
+    # Optional: For static IP assignment from the subnet
+    # ovn.kubernetes.io/ip_address: 10.10.0.10
+spec:
+  runStrategy: Always
+  template:
+    spec:
+      domain:
+        devices:
+          disks:
+            - name: containerdisk
+              disk:
+                bus: virtio
+            - name: cloudinitdisk
+              disk:
+                bus: virtio
+          interfaces:
+            - name: primary-nic
+              # This interface will use the network defined by the NAD
+              bridge: {} # Or masquerade: {}
+            # Example for a secondary NIC on a different Kube-OVN Subnet/NAD
+            # - name: secondary-nic
+            #   bridge: {}
+        resources:
+          requests:
+            memory: 2Gi
+      volumes:
+        - name: containerdisk
+          containerDisk:
+            image: kubevirt/fedora-cloud-container-disk-demo
+        - name: cloudinitdisk
+          cloudInitNoCloud:
+            userData: |
+              #cloud-config
+
+```
+Important Kube-OVN Annotations for VMs/Pods:
+
+- `ovn.kubernetes.io/logical_switch`: Explicitly assigns the workload to a specific Kube-OVN logical switch (which
+corresponds to a Subnet). This overrides the Namespace's default subnet.
+
+- `ovn.kubernetes.io/ip_address`: Assigns a specific static IP address from the subnet. Make sure this IP is excluded from
+the subnet's dynamic IP range (excludeIps in the Subnet definition) to avoid conflicts.
+
+- `ovn.kubernetes.io/network_attachment`: When using Multus, this annotation on the `NetworkAttachmentDefinition`'s config
+can specify the Kube-OVN provider or other details if you have multiple Kube-OVN deployments or specific requirements.
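+
+To tie these pieces together, here is a minimal sketch of a VM that pins both the logical switch and a static IP via the annotations above (the VM name and the IP are hypothetical; it assumes the `my-vm-subnet` Subnet from step 2, with `10.10.0.50` excluded from dynamic allocation via `excludeIps`):
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: my-static-ip-vm # hypothetical name
+  namespace: vm-namespace
+spec:
+  runStrategy: Always
+  template:
+    metadata:
+      annotations:
+        # Place this VM on the logical switch backing my-vm-subnet
+        ovn.kubernetes.io/logical_switch: my-vm-subnet
+        # Pin a static IP from the subnet's CIDR (10.10.0.0/24)
+        ovn.kubernetes.io/ip_address: 10.10.0.50
+    spec:
+      domain:
+        devices:
+          disks:
+            - name: containerdisk
+              disk:
+                bus: virtio
+          interfaces:
+            - name: default
+              bridge: {}
+        resources:
+          requests:
+            memory: 2Gi
+      networks:
+        - name: default
+          pod: {}
+      volumes:
+        - name: containerdisk
+          containerDisk:
+            image: kubevirt/fedora-cloud-container-disk-demo
+```
+
+KubeVirt copies these template annotations to the `virt-launcher` pod, which is where Kube-OVN reads them.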
+
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md
new file mode 100644
index 000000000..9707271e9
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/networks/vpc-subnets/_index.en.md
@@ -0,0 +1,107 @@
++++
+title = "VPCs and Subnets"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+Kubermatic-Virtualization uses KubeOVN as a software-defined network (SDN), which supercharges Kubernetes networking by
+integrating it with Open Virtual Network (OVN) and Open vSwitch (OVS). These aren't new players; OVN and OVS are long-standing,
+industry-standard technologies in the Software-Defined Networking (SDN) space, predating Kubernetes itself. By leveraging
+their robust, mature capabilities, Kube-OVN significantly expands what Kubernetes can do with its network.
+
+## VPC
+A VPC (Virtual Private Cloud) in Kube-OVN represents an isolated layer-3 network domain that contains one or more subnets.
+Each VPC provides its own routing table and default gateway, allowing you to logically separate network traffic between
+tenants or workloads.
+
+Kubermatic Virtualization simplifies network setup by providing a default Virtual Private Cloud (VPC) and a default Subnet
+right out of the box. These are pre-configured to connect directly to the underlying node network, offering a seamless link
+to your existing infrastructure. This means you don't need to attach external networks to get started.
+
+This design is a huge win for new users. It allows customers to dive into Kubermatic Virtualization and quickly establish
+network connectivity between their workloads and the hypervisor without wrestling with complex network configurations,
+external appliances, or advanced networking concepts. It's all about making the initial experience as straightforward
+and efficient as possible, letting you focus on your applications rather than network plumbing.
+
+
+Here is an example of a VPC definition:
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Vpc
+metadata:
+  name: custom-vpc
+spec:
+  cidr: 10.200.0.0/16
+  enableNAT: false
+  defaultGateway: ""
+  staticRoutes:
+    - cidr: 0.0.0.0/0
+      nextHopIP: 10.200.0.1
+```
+
+| Field | Description |
+| ---------------- | --------------------------------------------------------------------------------------- |
+| `metadata.name` | Name of the VPC. Must be unique within the cluster. |
+| `spec.cidr` | The overall IP range for the VPC. Subnets under this VPC should fall within this range. |
+| `enableNAT` | Whether to enable NAT for outbound traffic. Useful for internet access. |
+| `defaultGateway` | IP address used as the default gateway for this VPC. Usually left blank for automatic. |
+| `staticRoutes` | List of manually defined routes for the VPC. |
+
+## Subnet
+
+Subnets are the fundamental building blocks for network and IP management. They serve as the primary organizational unit
+for configuring network settings and IP addresses.
+
+- Namespace-Centric: Each Kubernetes Namespace can be assigned to a specific Subnet.
+- Automatic IP Allocation: Pods deployed within a Namespace automatically receive their IP addresses from the Subnet that
+Namespace is associated with.
+- Shared Network Configuration: All Pods within a Namespace inherit the network configuration defined by their Subnet. This includes:
+  - CIDR (Classless Inter-Domain Routing): The IP address range for the Subnet.
+  - Gateway Type: How traffic leaves the Subnet.
+  - Access Control: Network policies and security rules.
+  - NAT Control: Network Address Translation settings.
+
+Here is an example of a Subnet definition:
+```yaml
+apiVersion: kubeovn.io/v1
+kind: Subnet
+metadata:
+  name: my-custom-subnet
+  namespace: kube-system
+spec:
+  cidrBlock: 10.16.0.0/16
+  gateway: 10.16.0.1
+  gatewayType: distributed
+  excludeIps:
+    - 10.16.0.1
+    - 10.16.0.2..10.16.0.10
+  protocol: IPv4
+  natOutgoing: true
+  private: false
+  vpc: custom-vpc
+  enableDHCP: true
+  allowSubnets: []
+  vlan: ""
+  namespaces:
+    - default
+    - dev
+  subnetType: overlay
+```
+| Field | Description |
+|----------------------|---------------------------------------------------------------------------------------|
+| `apiVersion` | Must be `kubeovn.io/v1`. |
+| `kind` | Always set to `Subnet`. |
+| `metadata.name` | Unique name for the subnet resource. |
+| `metadata.namespace` | Namespace where the subnet object resides. Usually `kube-system`. |
+| `spec.cidrBlock` | The IP range (CIDR notation) assigned to this subnet. |
+| `spec.gateway` | IP address used as the gateway for this subnet. |
+| `spec.gatewayType` | `centralized` or `distributed`. `distributed` allows egress from local node gateways. |
+| `spec.excludeIps` | IPs or IP ranges excluded from dynamic allocation. |
+| `spec.protocol` | Can be `IPv4`, `IPv6`, or `Dual`. |
+| `spec.natOutgoing` | If true, pods using this subnet will have outbound NAT enabled. |
+| `spec.private` | If true, pod traffic is restricted to this subnet only. |
+| `spec.vpc` | The name of the VPC that the subnet belongs to. |
+| `spec.enableDHCP` | Enables DHCP services in the subnet. |
+| `spec.allowSubnets` | List of subnets allowed to communicate with this one (used with private=true). |
+| `spec.vlan` | Optional VLAN name (empty string means no VLAN). |
+| `spec.namespaces` | Namespaces whose pods will be assigned IPs from this subnet. |
+| `spec.subnetType` | Can be `overlay`, `underlay`, `VLAN`, or `external`. |
diff --git a/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md b/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md
new file mode 100644
index 000000000..760102919
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/concepts/storage/_index.en.md
@@ -0,0 +1,133 @@
++++
+title = "Storage"
+date = 2025-07-18T16:06:34+02:00
+weight = 15
++++
+
+At its heart, Kubermatic Virtualization uses KubeVirt, a Kubernetes add-on. KubeVirt allows you to run virtual machines
+(VMs) right alongside your containers, and it's built to heavily use Kubernetes' existing storage model. The Container
+Storage Interface (CSI) driver is a crucial component in this setup because it allows KubeVirt to leverage the vast and
+diverse storage ecosystem of Kubernetes for its VMs.
+
+The Container Storage Interface (CSI) is a standard for exposing arbitrary block and file storage systems to containerized
+workloads on Container Orchestration Systems (COs) like Kubernetes. Before CSI, storage integrations were tightly coupled
+with Kubernetes' core code. CSI revolutionized this by providing a pluggable architecture, allowing storage vendors to
+develop drivers that can integrate with Kubernetes without modifying Kubernetes itself.
+
+# KubeVirt + CSI Drivers: How It Works
+
+KubeVirt's integration with CSI (Container Storage Interface) drivers is fundamental to how it manages VM storage.
+This document explains how CSI enables dynamic volume provisioning, image importing, and advanced VM disk features in KubeVirt.
+
+---
+
+## 1. Dynamic Volume Provisioning for VM Disks
+
+### PersistentVolumeClaims (PVCs)
+KubeVirt does not directly interact with the underlying storage backend (e.g., SAN, NAS, cloud block storage). Instead, it uses Kubernetes' PVC abstraction. When a VM is defined, KubeVirt requests a PVC.
+
+### StorageClasses
+PVCs reference a `StorageClass`, which is configured to use a specific CSI driver as its "provisioner".
+
+### Driver's Role
+The CSI driver associated with the `StorageClass` handles the provisioning of persistent storage by interfacing with external systems (e.g., vCenter, Ceph, cloud providers).
+
+### VM Disk Attachment
+Once the PV is bound, KubeVirt uses the `virt-launcher` pod to attach the volume as a virtual disk to the VM.
+
+---
+
+## 2. Containerized Data Importer (CDI) Integration
+
+### Importing VM Images
+KubeVirt works with the CDI project to import disk images (e.g., `.qcow2`, `.raw`) from HTTP, S3, and other sources into PVCs.
+
+### CDI Uses CSI
+CDI relies on CSI drivers to provision the PVCs that will store the imported images. After import, KubeVirt consumes the PVC as a disk.
+
+### DataVolume Resource
+KubeVirt's `DataVolume` custom resource simplifies image importing and ties CDI with PVC creation in a declarative way.
+
+---
+
+## 3. Advanced Storage Features (via CSI Capabilities)
+
+CSI drivers allow powerful features previously complex for VM setups:
+
+- **Snapshots**: If supported, KubeVirt can create `VolumeSnapshot` objects for point-in-time backups.
+- **Cloning**: Allows fast provisioning of VM disks from existing PVCs without re-importing.
+- **Volume Expansion**: Resize VM disks dynamically with `allowVolumeExpansion`.
+- **ReadWriteMany (RWX) Mode**: Enables live migration by allowing shared access across nodes.
+- **Block vs. Filesystem Modes**: CSI supports both `Filesystem` and `Block`. Choose based on workload performance needs.
+
+---
+
+## 4. Example Scenario
+Admin creates a `StorageClass`:
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: my-fast-storage
+provisioner: csi.my-storage-vendor.com # This points to the specific CSI driver
+parameters:
+  type: "ssd"
+volumeBindingMode: WaitForFirstConsumer # Important for VM scheduling
+allowVolumeExpansion: true
+```
+User defines a `VirtualMachine` with a `DataVolume`:
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: my-vm
+spec:
+  dataVolumeTemplates:
+  - metadata:
+      name: my-vm-disk
+    spec:
+      source:
+        http:
+          url: "http://example.com/my-vm-image.qcow2"
+      pvc:
+        storageClassName: my-fast-storage # References the StorageClass
+        accessModes:
+        - ReadWriteOnce # Or ReadWriteMany for live migration
+        resources:
+          requests:
+            storage: 20Gi
+  template:
+    spec:
+      domain:
+        devices:
+          disks:
+          - name: my-vm-disk
+            disk:
+              bus: virtio
+      # ... other VM specs
+      volumes:
+      - name: my-vm-disk
+        dataVolume:
+          name: my-vm-disk
+```
+In this flow:
+
+- KubeVirt sees the DataVolumeTemplate and requests a PVC (my-vm-disk) using my-fast-storage.
+
+- The my-fast-storage StorageClass directs the request to csi.my-storage-vendor.com (the CSI driver).
+
+- The CSI driver provisions a 20Gi volume on the backend storage.
+
+- CDI then imports my-vm-image.qcow2 into this newly provisioned PVC.
+
+- Once the data import is complete, KubeVirt starts the VM, and the PVC is attached as the VM's disk.
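+
+To make the snapshot capability from section 3 concrete, here is a minimal sketch of a point-in-time backup of the disk provisioned above, assuming the CSI driver ships a snapshot-capable `VolumeSnapshotClass` (the class name `my-fast-snapclass` is hypothetical):
+
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshot
+metadata:
+  name: my-vm-disk-snap
+spec:
+  # Must reference a VolumeSnapshotClass backed by the same CSI driver
+  volumeSnapshotClassName: my-fast-snapclass # hypothetical name
+  source:
+    # The PVC that CDI provisioned for the VM above
+    persistentVolumeClaimName: my-vm-disk
+```
+
+Restoring is then a matter of creating a new PVC whose `spec.dataSource` points at this `VolumeSnapshot`.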
+
+---
+
+## Summary
+
+KubeVirt uses CSI to:
+- Abstract storage provisioning and attachment.
+- Enable features like cloning, snapshots, and expansion.
+- Import images using CDI with CSI-provisioned PVCs.
+- Support enterprise-grade live migration and scalability.
+
diff --git a/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png b/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png
new file mode 100644
index 000000000..f20425cf9
Binary files /dev/null and b/content/kubermatic-virtualization/main/architecture/kube-v-architecture.png differ
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md
new file mode 100644
index 000000000..76d2e4549
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/_index.en.md
@@ -0,0 +1,7 @@
++++
+title = "Requirements"
+date = 2025-06-28T12:07:15+02:00
+weight = 3
++++
+
+Find out about the requirements for the operation of Kubermatic Virtualization (Kube-V).
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md
new file mode 100644
index 000000000..4185212fc
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/high-availability-deployment/_index.en.md
@@ -0,0 +1,82 @@
++++
+title = "High-Availability Deployment"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+## High-Availability Deployment
+
+The hardware foundation for Kubermatic Virtualization is multi-faceted, encompassing requirements for the Kubermatic
+Virtualization (Kube-V) management layer, the KubeVirt infrastructure nodes that host virtual machines, as well as
+the various services that run as part of the ecosystem.
+
+### Control Plane Nodes
+
+* Nodes: Minimum 3 control plane nodes to ensure a quorum for etcd (Kubernetes' key-value store) and prevent a single point of failure.
+These should ideally be distributed across different failure domains (e.g., availability zones, racks).
+* CPU: At least 2 vCPUs per control plane node.
+* RAM: At least 4 GB RAM per control plane node. Recommended: 8-16 GB for robust performance.
+* Storage: Fast, persistent storage for etcd (SSD-backed recommended) with sufficient capacity.
+
+### Worker Nodes
+
+* Minimum 2 worker nodes (for KubeVirt VMs): For HA, you need more than one node to run VMs. This allows for live migration
+and VM rescheduling in case of a node failure.
+* CPU: A minimum of 8 CPU cores per node is suggested for testing environments. For production deployments, 16 CPU cores
+or more per node are recommended to accommodate multiple VMs and their workloads effectively. Each worker node must have
+Intel VT-x or AMD-V hardware virtualization extensions enabled in the BIOS/UEFI.
+This is a fundamental requirement for KubeVirt to leverage KVM (Kernel-based Virtual Machine) for efficient VM execution.
+Without this, KubeVirt can fall back to software emulation, but it's significantly slower and not suitable for production HA.
+* RAM: At least 8 GB RAM per node. Recommended: 16-32 GB, depending on the number and memory requirements of your VMs.
+* Storage: SSDs or NVMe drives are highly recommended for good VM performance, in addition to sufficient storage capacity
+for the disk images of your VMs and any data they store.
+
+### Storage
+
+* CSI Driver Capabilities (Crucial for HA/Live Migration): This is perhaps the most critical component for KubeVirt HA and live migration.
+  You need a shared storage backend that supports ReadWriteMany (RWX) access mode or Block-mode (volumeMode: Block) volumes.
+* Capacity: Sufficient storage capacity based on the disk images of your VMs and any data they store.
+* Performance: SSDs or NVMe drives are highly recommended for good VM performance, particularly for high-throughput services
+  where low-latency, high-IOPS storage (often block storage) is critical.
+* Replication and Redundancy: To achieve HA, data must be replicated across multiple nodes or availability zones.
+  If a node fails, the data should still be accessible from another.
+
+### Networking
+
+A well-planned and correctly configured network infrastructure is fundamental to the stability and performance of
+Kubermatic Virtualization. This includes considerations for IP addressing, DNS, load balancing, and inter-component communication.
+
+* High-bandwidth, low-latency connections: 1 Gbps NICs are a minimum; 10 Gbps or higher is recommended for performance-sensitive
+workloads and efficient live migration.
+* Load Balancing: External/internal load balancers for distributing traffic across control planes and worker nodes.
+* Dedicated network for live migration (recommended): While not strictly minimal, a dedicated Multus network for live
+migration can significantly reduce network saturation on tenant workloads during migrations.
+* Connectivity: Full and unrestricted network connectivity is paramount between all host nodes. Firewalls and security
+groups must be configured to permit all necessary Kubernetes control plane traffic, KubeVirt communication, and Kube-V-specific
+inter-cluster communication.
+* DNS: DNS resolution is crucial for the Kube-V environment, enabling all nodes to find each other and external services.
+A potential conflict can arise if both the KubeVirt infrastructure and guest user clusters
+use NodeLocal DNSCache with the same default IP address, leading to DNS resolution issues for guest VMs. This can be
+mitigated by adjusting the dnsConfig and dnsPolicy of the guest VMs.
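+
+As an illustration of that mitigation, the relevant fragment of a guest `VirtualMachine` spec could look like the following sketch (the nameserver IP is a placeholder for a resolver that is actually reachable from the guest):
+
+```yaml
+spec:
+  template:
+    spec:
+      # Skip the default resolver chain to avoid the NodeLocal DNSCache IP clash
+      dnsPolicy: "None"
+      dnsConfig:
+        nameservers:
+          - 10.96.0.10 # placeholder: a DNS server reachable from the guest
+        searches:
+          - cluster.local
+```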
+
+
+| Component | Port(s) | Protocol | Direction | Purpose |
+|:------------------:| :------------------: | :------: | :----------: | :-----------------------------------------------------: |
+| API Server | 6443 | TCP | Inbound | All API communication with the cluster |
+| etcd | 2379-2380 | TCP | Inbound | etcd database communication |
+| Kubelet | 10250 | TCP | Inbound | Kubelet API for control plane communication |
+| Kube-Scheduler | 10259 | TCP | Inbound | Kube-Scheduler component |
+| Controller-Manager | 10257 | TCP | Inbound | Kube-Controller-Manager component |
+| Kube-Proxy | 10256 | TCP | Inbound | Kube-Proxy health checks and service routing |
+| NodePort Services | 30000-32767 | TCP/UDP | Inbound | Default range for exposing services on node IPs |
+| KubeVirt API | 8443 | TCP | Internal | KubeVirt API communication |
+| Live Migration | 61000-61009 (approx) | TCP | Node-to-Node | For migrating VM state between nodes |
+| OVN NB DB | 6641 | TCP | Internal | OVN Northbound Database |
+| OVN SB DB | 6642 | TCP | Internal | OVN Southbound Database |
+| OVN Northd | 6643 | TCP | Internal | OVN Northd process |
+| OVN Raft | 6644 | TCP | Internal | OVN Raft consensus (for HA OVN DBs) |
+| Geneve Tunnel | 6081 | UDP | Node-to-Node | Default overlay network for pod communication (OVN) |
+| OVN Controller | 10660 | TCP | Internal | Metrics for OVN Controller |
+| OVN Daemon | 10665 | TCP | Internal | Metrics for OVN Daemon (on each node) |
+| OVN Monitor | 10661 | TCP | Internal | Metrics for OVN Monitor |
+
diff --git a/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md b/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md
new file mode 100644
index 000000000..b9786ee3a
--- /dev/null
+++ b/content/kubermatic-virtualization/main/architecture/requirements/single-node-deployment/_index.en.md
@@ -0,0 +1,64 @@
++++
+title = "Single Node Deployment"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+## Single Node Deployment
+
+The hardware foundation for Kubermatic Virtualization is multi-faceted, encompassing requirements for the Kubermatic
+Virtualization (Kube-V) management layer, the KubeVirt infrastructure node that hosts virtual machines, as well as
+the various services that run as part of the ecosystem.
+
+### Host Configuration
+
+* CPU: A minimum of 8 CPU cores is suggested for testing environments. For production deployments, 16 CPU cores
+  or more are recommended to accommodate multiple VMs and their workloads effectively. The host node must have
+  Intel VT-x or AMD-V hardware virtualization extensions enabled in the BIOS/UEFI.
+  This is a fundamental requirement for KubeVirt to leverage KVM (Kernel-based Virtual Machine) for efficient VM execution.
+  Without this, KubeVirt can fall back to software emulation, but it's significantly slower and not suitable for production use.
+* RAM: At least 8 GB RAM per node. Recommended: 16-32 GB, depending on the number and memory requirements of your VMs.
+* Storage: SSDs or NVMe drives are highly recommended for good VM performance, in addition to sufficient storage capacity
+  based on the disk images of your VMs and any data they store.
+
+### Storage
+
+* CSI Driver Capabilities (Crucial for HA/Live Migration): This is perhaps the most critical component for KubeVirt.
+  You need a shared storage backend that supports ReadWriteMany (RWX) access mode or Block-mode (volumeMode: Block) volumes.
+* Capacity: Sufficient storage capacity based on the disk images of your VMs and any data they store.
+* Performance: SSDs or NVMe drives are highly recommended for good VM performance, particularly for high-throughput services
+  where low-latency, high-IOPS storage (often block storage) is critical.
+
+### Networking
+
+A well-planned and correctly configured network infrastructure is fundamental to the stability and performance of
+Kubermatic Virtualization. This includes considerations for IP addressing, DNS, load balancing, and inter-component communication.
+
+* High-bandwidth, low-latency connections: 1 Gbps NICs are a minimum; 10 Gbps or higher is recommended for performance-sensitive
+  workloads.
+* DNS: DNS resolution is crucial for the Kube-V environment, enabling all nodes to find each other and external services.
+  A potential conflict can arise if both the KubeVirt infrastructure and guest user clusters
+  use NodeLocal DNSCache with the same default IP address, leading to DNS resolution issues for guest VMs. This can be
+  mitigated by adjusting the dnsConfig and dnsPolicy of the guest VMs.
+
+
+| Component | Port(s) | Protocol | Direction | Purpose |
+|:------------------:| :------------------: | :------: | :----------: | :-----------------------------------------------------: |
+| API Server | 6443 | TCP | Inbound | All API communication with the cluster |
+| etcd | 2379-2380 | TCP | Inbound | etcd database communication |
+| Kubelet | 10250 | TCP | Inbound | Kubelet API for control plane communication |
+| Kube-Scheduler | 10259 | TCP | Inbound | Kube-Scheduler component |
+| Controller-Manager | 10257 | TCP | Inbound | Kube-Controller-Manager component |
+| Kube-Proxy | 10256 | TCP | Inbound | Kube-Proxy health checks and service routing |
+| NodePort Services | 30000-32767 | TCP/UDP | Inbound | Default range for exposing services on node IPs |
+| KubeVirt API | 8443 | TCP | Internal | KubeVirt API communication |
+| Live Migration | 61000-61009 (approx) | TCP | Node-to-Node | For migrating VM state between nodes |
+| OVN NB DB | 6641 | TCP | Internal | OVN Northbound Database |
+| OVN SB DB | 6642 | TCP | Internal | OVN Southbound Database |
+| OVN Northd | 6643 | TCP | Internal | OVN Northd process |
+| OVN Raft | 6644 | TCP | Internal | OVN Raft consensus (for HA OVN DBs) |
+| Geneve Tunnel | 6081 | UDP | Node-to-Node | Default overlay network for pod communication (OVN) |
+| OVN Controller | 10660 | TCP | Internal | Metrics for OVN Controller |
+| OVN Daemon | 10665 | TCP | Internal | Metrics for OVN Daemon (on each node) |
+| OVN Monitor | 10661 | TCP | Internal | Metrics for OVN Monitor |
+
diff --git a/content/kubermatic-virtualization/main/installation/_index.en.md b/content/kubermatic-virtualization/main/installation/_index.en.md
new file mode 100644
index 000000000..87bf41899
--- /dev/null
+++ b/content/kubermatic-virtualization/main/installation/_index.en.md
@@ -0,0 +1,161 @@
++++
+title = "Installation"
+date = 2025-06-28T12:07:15+02:00
+weight = 15
++++
+
+This chapter offers guidance on how to install Kubermatic Virtualization.
+
+## Installing Kubermatic Virtualization with the CLI
+
+Kubermatic Virtualization comes with an interactive installer, a CLI tool that helps administrators and users provision the entire platform easily. With just a few inputs, you can deploy and configure the stack in no time.
+
+{{% notice note %}}
+To get started with the CLI, you will first need a Kubermatic Virtualization license.
This license grants you access to the necessary resources, including the CLI. Please [contact sales](mailto:sales@kubermatic.com) to obtain your license.
{{% /notice %}}

### **1. Navigating the Interactive CLI Installer**

The Kubermatic Virtualization installer is an interactive CLI that guides you through the installation process with clear instructions and prompts for user input. Each page contains important information and features a help bar at the bottom to assist with navigation.

![Welcome Page](./assets/0-welcome-page.png)

---

### **2. Configuring the Network Stack**

One of the foundational steps in setting up Kubermatic Virtualization is defining the network configuration. This step ensures that your virtual machines and containerized workloads have a dedicated IP range to operate within, similar to a default VPC. Proper network configuration is crucial for seamless communication and resource management.


#### **Key Components**
- **Network (CIDR)**: Specify the IP range where your virtual machines and containerized workloads will reside. This defines the subnet they will use by default.
- **DNS Server**: Provide the DNS server address to ensure proper name resolution for your workloads.
- **Gateway IP**: Define the gateway IP to facilitate network routing and connectivity.

{{% notice warning %}}

When setting up your network configuration, it is crucial to provide a working DNS server address so that your virtual machines and containerized workloads can resolve names properly. Failing to do so can result in issues such as unresolvable domain names, failed connections to external services, or degraded functionality due to unresolved hostnames. If you do not have an internal DNS server configured, it is recommended to use a public and trusted DNS server such as Google Public DNS (`8.8.8.8` or `8.8.4.4`) or Cloudflare DNS (`1.1.1.1` or `1.0.0.1`) to ensure smooth operation and connectivity for your workloads.

{{% /notice %}}

![Kubermatic Virtualization Network Configuration](./assets/1-network-page.png)


---

### **3. Configuring the Load Balancer Service**

In this step, you can enable Kubermatic Virtualization's default Load Balancer service, **MetalLB**, to simplify the creation of load balancers for your workloads. MetalLB is ideal for evaluation and non-production environments but should not be used in production scenarios without proper enterprise-grade solutions.

#### **Key Steps**
- **Enable MetalLB**: Toggle the checkbox to enable the Load Balancer service.
- **Define IP Range**: If MetalLB is enabled, specify the IP range that will be used by the Load Balancer.

{{% notice warning %}}

When defining the IP range for MetalLB, ensure that the range is valid and exists within your network infrastructure. This range will be reserved for load balancer IP addresses, and any misconfiguration or overlap with existing IPs can lead to network conflicts, service disruptions, or inaccessible workloads. Always verify the availability and uniqueness of the IP range before proceeding.

{{% /notice %}}

![Kubermatic Virtualization Load Balancer Configuration](./assets/2-lb-page.png)

By configuring these settings, you ensure that your workloads have access to a simple yet effective Load Balancer solution for testing and development purposes. Let's move on to the next configuration step!

---

### **4. Configuring Nodes**

In this section, you will define the number of nodes in your Kubermatic Virtualization cluster and provide detailed information about each node, including their IP addresses, usernames, and SSH key paths. Accurate configuration is crucial for ensuring smooth communication and management of your cluster.

#### **Step 1: Specify the Number of Nodes**

The first step is to determine how many nodes you want in your cluster. This number will dictate the scale of your infrastructure and influence the subsequent configuration steps.

- **Input**: Enter the total number of nodes you plan to use.

![Kubermatic Virtualization - How Many Nodes?](./assets/3-node-count.png)


#### **Step 2: Configure Each Node**

After specifying the number of nodes, you will be prompted to configure each node individually. For each node, you need to provide the following details:

1. **Node Address**: The IP address of the node.
2. **Username**: The username used to access the node via SSH.
3. **SSH Key Path**: The path to the SSH private key file used to authenticate with the node.

![Kubermatic Virtualization - Node Configuration](./assets/4-node-config-page.png)

Repeat this process for each node until all nodes are configured.

#### **Why Accurate Configuration Matters**
- **Node Addresses**: Ensure that the IP addresses are correct and reachable within your network.
- **Usernames and SSH Keys**: Provide secure access to the nodes, enabling proper communication and management.

By carefully configuring the number of nodes and providing accurate details for each node, you lay the foundation for a robust and manageable Kubermatic Virtualization environment. Let's proceed to the next configuration step!

---
### **5. Configuring the Storage CSI Driver**

In this step, you will decide whether to use the default Container Storage Interface (CSI) driver provided by Kubermatic Virtualization. The default CSI driver is designed for evaluation and staging environments and is not recommended for production use.



#### **Key Information**
- **Default CSI Driver**: The default CSI driver (e.g., Longhorn) is included for testing purposes only.
- **Purpose**: It provides baseline storage functionality during evaluation and staging phases.
- **Recommendation**: For production environments, it is strongly advised to use a fully supported and robust storage solution.

#### **Disclaimer**
Please be advised:
- The default CSI driver is **not intended or supported** for production environments.
- Its use in production is at your own risk and is not recommended.
- Kubermatic does not guarantee ongoing maintenance, reliability, or performance of the default CSI driver.

![Kubermatic Virtualization - Storage CSI Driver Configuration](./assets/5-csi-page.png)

By making an informed decision about the CSI driver, you ensure that your environment is configured appropriately for its intended purpose, whether that is testing or production. Let's proceed to the next configuration step!

---

### **6. Reviewing the Configuration**

Before proceeding with the installation, it is crucial to review the full configuration to ensure all settings are correct. This step allows you to verify details such as Control Plane nodes, Worker nodes, network configurations, and other critical parameters. Once confirmed, the installation process will begin, and you will not be able to go back.
+
+#### **Key Information**
+- **Purpose**: Confirm that all configurations are accurate before applying them.
+- **Irreversible Step**: After confirming this page, the installation process will start, and changes cannot be made without restarting the entire setup.
+
+#### **What to Review**
+- **Cluster Nodes**:
+  - Addresses
+  - Usernames
+  - SSH key file paths
+
+- **Other Configurations**:
+  - Network settings (CIDR, DNS server, Gateway IP)
+  - Load Balancer configuration (if enabled)
+  - Storage CSI driver selection
+
+![Kubermatic Virtualization - Configuration Review](./assets/6-review-page.png)
+
+
+{{% notice warning %}}
+**No Going Back**: Once you confirm this page, the installation process will begin, and you cannot modify the configuration without starting over.
+{{% /notice %}}
+
+By carefully reviewing the configuration, you ensure that your Kubermatic Virtualization environment is set up correctly from the start. Proceed with confidence when you’re ready!
+
+---
+
+### **7. Finalizing the Installation**
+
+Once you confirm the configuration, the installation process will begin, and you’ll be able to monitor its progress in real time through detailed logs displayed on the screen. These logs provide transparency into each step of the deployment, ensuring you stay informed throughout the process.
+
+#### **What Happens During Installation**
+- **Progress Monitoring**: Watch as the installer provisions the Control Plane, Worker Nodes, and other components.
+- **Health Checks**: After deployment, the installer verifies that all parts of the stack are healthy and running as expected.
+- **Completion**: Upon successful installation, the installer will generate and display the **kubeconfig** file for your Kubermatic Virtualization cluster.
+
+### **Congratulations!**
+
+Your Kubermatic Virtualization environment is now up and running. With the kubeconfig file in hand, you’re ready to start managing your cluster and deploying workloads. Enjoy the power of seamless virtualization on Kubernetes!
🚀
\ No newline at end of file
diff --git a/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png b/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png
new file mode 100644
index 000000000..4617462b6
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/0-welcome-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/1-network-page.png b/content/kubermatic-virtualization/main/installation/assets/1-network-page.png
new file mode 100644
index 000000000..4cc7ee25b
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/1-network-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png b/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png
new file mode 100644
index 000000000..9d0e0798b
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/2-lb-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/3-node-count.png b/content/kubermatic-virtualization/main/installation/assets/3-node-count.png
new file mode 100644
index 000000000..aecb6f70f
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/3-node-count.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png b/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png
new file mode 100644
index 000000000..a25486df4
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/4-node-config-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png b/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png
new file mode 100644
index 000000000..23e25fa54
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/5-csi-page.png differ
diff --git a/content/kubermatic-virtualization/main/installation/assets/6-review-page.png b/content/kubermatic-virtualization/main/installation/assets/6-review-page.png
new file mode 100644
index 000000000..2163ceb22
Binary files /dev/null and b/content/kubermatic-virtualization/main/installation/assets/6-review-page.png differ
diff --git a/content/kubermatic/main/_index.en.md b/content/kubermatic/main/_index.en.md
index 0ead10bba..b3b1dd9e3 100644
--- a/content/kubermatic/main/_index.en.md
+++ b/content/kubermatic/main/_index.en.md
@@ -9,7 +9,7 @@ date = 2019-04-27T16:06:34+02:00

Kubermatic Kubernetes Platform (KKP) is a Kubernetes management platform that helps address the operational and security challenges of enterprise customers seeking to run Kubernetes at scale. KKP automates deployment and operations of hundreds or thousands of Kubernetes clusters across hybrid-cloud, multi-cloud and edge environments while enabling DevOps teams with a self-service developer and operations portal.

-KKP is directly integrated with leading cloud providers including Amazon Web Services (AWS), Google Cloud, Azure, Openstack, VMware vSphere, Open Telekom Cloud, Digital Ocean, Hetzner, Alibaba Cloud, Equinix Metal and Nutanix. For selected providers, ARM is supported as CPU architecture.
+KKP is directly integrated with leading cloud providers including Amazon Web Services (AWS), Google Cloud, Azure, OpenStack, VMware vSphere, Open Telekom Cloud, DigitalOcean, Hetzner, Alibaba Cloud and Nutanix.
For selected providers, ARM is supported as CPU architecture. In addition to the long list of supported cloud providers, KKP allows building your own infrastructure and joining Kubernetes nodes via the popular `kubeadm` tool. @@ -17,10 +17,12 @@ KKP is the easiest and most effective software for managing cloud native IT infr ## Features -#### Powerful & Intuitive Dashboard to Visualize your Kubernetes Deployment +### Powerful & Intuitive Dashboard to Visualize your Kubernetes Deployment + Manage your [projects and clusters with the KKP dashboard]({{< ref "./tutorials-howtos/project-and-cluster-management/" >}}). Scale your cluster by adding and removing nodes in just a few clicks. As an admin, the dashboard also allows you to [customize the theme]({{< ref "./tutorials-howtos/dashboard-customization/" >}}) and disable theming options for other users. -#### Deploy, Scale & Update Multiple Kubernetes Clusters +### Deploy, Scale & Update Multiple Kubernetes Clusters + Kubernetes environments must be highly distributed to meet the performance demands of modern cloud native applications. Organizations can ensure consistent operations across all environments with effective cluster management. KKP empowers you to take advantage of all the advanced features that Kubernetes has to offer and increases the speed, flexibility and scalability of your cloud deployment workflow. At Kubermatic, we have chosen to do multi-cluster management with Kubernetes Operators. Operators (a method of packaging, deploying and managing a Kubernetes application) allow KKP to automate creation as well as the full lifecycle management of clusters. With KKP you can create a cluster for each need, fine-tune it, reuse it and continue this process hassle-free. This results in: @@ -29,15 +31,17 @@ At Kubermatic, we have chosen to do multi-cluster management with Kubernetes Ope - Smaller individual clusters being more adaptable than one big cluster. - Faster development thanks to less complex environments. -#### Kubernetes Autoscaler Integration +### Kubernetes Autoscaler Integration + Autoscaling in Kubernetes refers to the ability to increase or decrease the number of nodes as the demand for service response changes. Without autoscaling, teams would manually first provision and then scale up or down resources every time conditions change. This means, either services fail at peak demand due to the unavailability of enough resources or you pay at peak capacity to ensure availability. [The Kubernetes Autoscaler in a cluster created by KKP]({{< ref "./tutorials-howtos/kkp-autoscaler/cluster-autoscaler/" >}}) can automatically scale up/down when one of the following conditions is satisfied: 1. Some pods fail to run in the cluster due to insufficient resources. -2. There are nodes in the cluster that have been underutilized for an extended period (10 minutes by default) and pods running on those nodes can be rescheduled to other existing nodes. +1. There are nodes in the cluster that have been underutilized for an extended period (10 minutes by default) and pods running on those nodes can be rescheduled to other existing nodes. + +### Manage all KKP Users Directly from a Single Panel -#### Manage all KKP Users Directly from a Single Panel The admin panel allows KKP administrators to manage the global settings that impact all KKP users directly. As an administrator, you can do the following: - Customize the way custom links (example: Twitter, Github, Slack) are displayed in the Kubermatic dashboard. 
@@ -46,32 +50,39 @@ The admin panel allows KKP administrators to manage the global settings that imp - Define Preset types in a Kubernetes Custom Resource Definition (CRD) allowing the assignment of new credential types to supported providers. - Enable and configure etcd backups for your clusters through Backup Buckets. -#### Manage Worker Nodes via the UI or the CLI +### Manage Worker Nodes via the UI or the CLI + Worker nodes can be managed [via the KKP web dashboard]({{< ref "./tutorials-howtos/manage-workers-node/via-ui/" >}}). Once you have installed kubectl, you can also manage them [via CLI]({{< ref "./tutorials-howtos/manage-workers-node/via-command-line" >}}) to automate the creation, deletion, and upgrade of nodes. -#### Monitoring, Logging & Alerting +### Monitoring, Logging & Alerting + When it comes to monitoring, no approach fits all use cases. KKP allows you to adjust things to your needs by enabling certain customizations to enable easy and tactical monitoring. KKP provides two different levels of Monitoring, Logging, and Alerting. 1. The first targets only the management components (master, seed, CRDs) and is independent. This is the Master/Seed Cluster MLA Stack and only the KKP Admins can access this monitoring data. -2. The other component is the User Cluster MLA Stack which is a true multi-tenancy solution for all your end-users as well as a comprehensive overview for the KKP Admin. It helps to speed up individual progress but lets the Admin keep an overview of the big picture. It can be configured per seed to match the requirements of the organizational structure. All users can access monitoring data of the user clusters under the projects that they are members of. +1. The other component is the User Cluster MLA Stack which is a true multi-tenancy solution for all your end-users as well as a comprehensive overview for the KKP Admin. It helps to speed up individual progress but lets the Admin keep an overview of the big picture. It can be configured per seed to match the requirements of the organizational structure. All users can access monitoring data of the user clusters under the projects that they are members of. Integrated Monitoring, Logging and Alerting functionality for applications and services in KKP user clusters are built using Prometheus, Loki, Cortex and Grafana. Furthermore, this can be enabled with a single click on the KKP UI. -#### OIDC Provider Configuration +### OIDC Provider Configuration + Since Kubernetes does not provide an OpenID Connect (OIDC) Identity Provider, KKP allows the user to configure a custom OIDC. This way you can grant access and information to the right stakeholders and fulfill security requirements by managing user access in a central identity provider across your whole infrastructure. -#### Easily Upgrading Control Plane and Nodes +### Easily Upgrading Control Plane and Nodes + A specific version of Kubernetes’ control plane typically supports a specific range of kubelet versions connected to it. KKP enforces the rule “kubelet must not be newer than kube-apiserver, and maybe up to two minor versions older” on its own. KKP ensures this rule is followed by checking during each upgrade of the clusters’ control plane or node’s kubelet. Additionally, only compatible versions are listed in the UI as available for upgrades. -#### Open Policy Agent (OPA) +### Open Policy Agent (OPA) + To enforce policies and improve governance in Kubernetes, Open Policy Agent (OPA) can be used. 
KKP integrates it using OPA Gatekeeper as a kubernetes-native policy engine supporting OPA policies. As an admin you can enable and enforce OPA integration during cluster creation by default via the UI. -#### Cluster Templates +### Cluster Templates + Clusters can be created in a few clicks with the UI. To take the user experience one step further and make repetitive tasks redundant, cluster templates allow you to save data entered into a wizard to create multiple clusters from a single template at once. Templates can be saved to be used subsequently for new cluster creation. -#### Use Default Addons to Extend the Functionality of Kubernetes +### Use Default Addons to Extend the Functionality of Kubernetes + [Addons]({{< ref "./architecture/concept/kkp-concepts/addons/" >}}) are specific services and tools extending the functionality of Kubernetes. Default addons are installed in each user cluster in KKP. The KKP Operator comes with a tool to output full default KKP configuration, serving as a starting point for adjustments. Accessible addons can be installed in each user cluster in KKP on user demand. {{% notice tip %}} diff --git a/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md b/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md index f3483089c..aa9a64b81 100644 --- a/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md +++ b/content/kubermatic/main/architecture/compatibility/kkp-components-versioning/_index.en.md @@ -14,7 +14,7 @@ of provided software and therefore releases updates regularly that also include | ------------------------------ | ------------------------------ | | backup/velero | 1.14.0 | | cert-manager | 1.17.4 | -| dex | 2.42.0 | +| dex | 2.44.0 | | gitops/kkp-argocd-apps | 1.16.1 | | iap | 7.8.2 | | kubermatic-operator | 9.9.9-dev | @@ -38,6 +38,6 @@ of provided software and therefore releases updates regularly that also include | monitoring/kube-state-metrics | 2.15.0 | | monitoring/node-exporter | 1.9.0 | | monitoring/prometheus | 2.51.1 | -| nginx-ingress-controller | 1.12.1 | +| nginx-ingress-controller | 1.13.2 | | s3-exporter | 0.7.1 | | telemetry | 0.5.2 | diff --git a/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md b/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md index 1eab1534c..e4c3b01d9 100644 --- a/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md +++ b/content/kubermatic/main/architecture/compatibility/os-support-matrix/_index.en.md @@ -11,11 +11,12 @@ KKP supports a multitude of operating systems. 
One of the unique features of KKP The following operating systems are currently supported by Kubermatic: -* Ubuntu 20.04, 22.04 and 24.04 -* RHEL beginning with 8.0 (support is cloud provider-specific) -* Flatcar (Stable channel) -* Rocky Linux beginning with 8.0 -* Amazon Linux 2 +- Ubuntu 20.04, 22.04 and 24.04 +- RHEL beginning with 8.0 (support is cloud provider-specific) +- Flatcar (Stable channel) +- Rocky Linux beginning with 8.0 +- Amazon Linux 2 + **Note:** CentOS was removed as a supported OS in KKP 2.26.3 This table shows the combinations of operating systems and cloud providers that KKP supports: @@ -26,7 +27,6 @@ This table shows the combinations of operating systems and cloud providers that | Azure | ✓ | ✓ | ✓ | x | ✓ | | Digitalocean | ✓ | x | x | x | ✓ | | Edge | ✓ | x | x | x | x | -| Equinix Metal | ✓ | ✓ | x | x | ✓ | | Google Cloud Platform | ✓ | ✓ | x | x | x | | Hetzner | ✓ | x | x | x | ✓ | | KubeVirt | ✓ | ✓ | ✓ | x | ✓ | diff --git a/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md b/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md new file mode 100644 index 000000000..82b7053b7 --- /dev/null +++ b/content/kubermatic/main/architecture/compatibility/os-support-matrix/ubuntu-requirements/_index.en.md @@ -0,0 +1,132 @@ ++++ +title = "KKP Requirements for Ubuntu" +date = 2025-08-21T20:07:15+02:00 +weight = 15 + ++++ + +## KKP Package and Configurations for Ubuntu + +This document provides an overview of the system packages and Kubernetes-related binaries installed, along with their respective sources. + +{{% notice note %}} +This document serves as a guideline for users who want to harden their Ubuntu hosts, providing instructions for installing +and configuring the required packages and settings. By default, OSM handles these installations and configurations through +an Operating System Profile. However, users who prefer to manage them manually can follow the steps outlined below. +{{% /notice %}} + +--- + +## System Packages (via `apt`) + +The following packages are installed using the **APT package manager**: + +| Package | Source | +|-----------------------------|--------| +| curl | apt | +| jq | apt | +| ca-certificates | apt | +| ceph-common | apt | +| cifs-utils | apt | +| conntrack | apt | +| e2fsprogs | apt | +| ebtables | apt | +| ethtool | apt | +| glusterfs-client | apt | +| iptables | apt | +| kmod | apt | +| openssh-client | apt | +| nfs-common | apt | +| socat | apt | +| util-linux | apt | +| ipvsadm | apt | +| apt-transport-https | apt | +| software-properties-common | apt | +| lsb-release | apt | +| containerd.io | apt | + +--- + +## Kubernetes Dependencies (Manual Download) + +The following components are **manually downloaded** (usually from the official [Kubernetes GitHub releases](https://github.com/kubernetes/kubernetes/releases)): + +| Package | Source | +|----------------|--------------------------| +| CNI plugins | Manual Download (GitHub) | +| CRI-tools | Manual Download (GitHub) | +| kubelet | Manual Download (GitHub) | +| kubeadm | Manual Download (GitHub) | +| kubectl | Manual Download (GitHub) | + +--- + + +## Notes +- **APT packages**: Installed via the system’s package manager for base functionality (networking, file systems, utilities, etc.). +- **Manual downloads**: Required for Kubernetes setup and cluster management, ensuring version consistency across nodes. +- **containerd.io**: Installed via apt as the container runtime. 
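+
+As an illustration, the sketch below shows one way these components could be fetched manually. The version numbers and the `amd64` architecture are assumptions for this example; align them with your target Kubernetes release and verify the checksums published with the official release artifacts.
+
+```bash
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Assumed versions -- adjust to your target Kubernetes release.
+K8S_VERSION="v1.33.0"
+CNI_PLUGINS_VERSION="v1.5.1"
+CRICTL_VERSION="v1.33.0"
+ARCH="amd64"
+
+# kubelet, kubeadm and kubectl from the official Kubernetes release mirror.
+for bin in kubelet kubeadm kubectl; do
+  curl -fsSLo "/usr/local/bin/${bin}" "https://dl.k8s.io/release/$%7BK8S_VERSION%7D/bin/linux/$%7BARCH%7D/$%7Bbin%7D"
+  chmod +x "/usr/local/bin/${bin}"
+done
+
+# CNI plugins into the directory the kubelet expects by default.
+mkdir -p /opt/cni/bin
+curl -fsSL "https://github.com/containernetworking/plugins/releases/download/$%7BCNI_PLUGINS_VERSION%7D/cni-plugins-linux-$%7BARCH%7D-$%7BCNI_PLUGINS_VERSION%7D.tgz" \
+  | tar -C /opt/cni/bin -xz
+
+# crictl (CRI-tools) for interacting with the containerd runtime.
+curl -fsSL "https://github.com/kubernetes-sigs/cri-tools/releases/download/$%7BCRICTL_VERSION%7D/crictl-$%7BCRICTL_VERSION%7D-linux-$%7BARCH%7D.tar.gz" \
+  | tar -C /usr/local/bin -xz
+```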
+
+## Kubernetes Node Bootstrap Configuration
+
+The node bootstrap process consists of scripts and systemd unit files that configure a Linux host to function as a Kubernetes node. These scripts do not install Kubernetes packages directly but apply the system, kernel, and service configurations required for proper operation.
+
+---
+
+## 🔧 Configurations Applied
+
+### 1. Environment Variables
+- Adds `NO_PROXY` and `no_proxy` to `/etc/environment` to bypass proxying for:
+  - `.svc`
+  - `.cluster.local`
+  - `localhost`
+  - `127.0.0.1`
+
+- Creates an empty APT proxy configuration file at `/etc/apt/apt.conf.d/proxy.conf` (a placeholder for proxy settings, not configured by default).
+
+---
+
+### 2. Kernel Modules
+The script loads and enables essential kernel modules for networking and container orchestration:
+- `ip_vs` – IP Virtual Server (transport-layer load balancing).
+- `ip_vs_rr` – Round-robin scheduling algorithm.
+- `ip_vs_wrr` – Weighted round-robin scheduling algorithm.
+- `ip_vs_sh` – Source-hash scheduling algorithm.
+- `nf_conntrack_ipv4` or `nf_conntrack` – Connection tracking support.
+- `br_netfilter` – Enables netfilter for bridged network traffic (required by Kubernetes).
+
+---
+
+### 3. Kernel Parameters (`sysctl`)
+The following runtime kernel parameters are configured:
+
+- `net.bridge.bridge-nf-call-ip6tables = 1`
+- `net.bridge.bridge-nf-call-iptables = 1`
+- `kernel.panic_on_oops = 1`
+- `kernel.panic = 10`
+- `net.ipv4.ip_forward = 1`
+- `vm.overcommit_memory = 1`
+- `fs.inotify.max_user_watches = 1048576`
+- `fs.inotify.max_user_instances = 8192`
+
+---
+
+### 4. System Services & Management
+- **Firewall**: Disables and masks UFW to avoid interfering with Kubernetes networking.
+- **Hostname**: Overrides the hostname with the `/etc/machine-name` value if available.
+- **APT Repositories**: Adds the official Docker APT repository and imports its GPG key.
+- **Symbolic Links**: Makes the `kubelet`, `kubeadm`, `kubectl`, and `crictl` binaries available in `$PATH`.
+
+---
+
+### 5. Node IP & Hostname Configuration
+- Discovers the node IP via: `ip -o route get 1`
+- Discovers the hostname via: `hostname -f`
+
+### 6. Swap Disabling
+Kubernetes requires swap to be disabled:
+
+- Removes swap entries from `/etc/fstab`: `sed -i.orig '/.*swap.*/d' /etc/fstab`
+- Disables active swap immediately: `swapoff -a`
diff --git a/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md b/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
index bfc0b4eb0..190cf8fed 100644
--- a/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
+++ b/content/kubermatic/main/architecture/compatibility/supported-versions/_index.en.md
@@ -28,19 +28,19 @@ these migrations.

In the following table you can find the supported Kubernetes versions for the current KKP
version.

-| KKP version | 1.33 |1.32 | 1.31 | 1.30 | 1.29[^2] | 1.28[^2] |
+| KKP version | 1.34 | 1.33 | 1.32 | 1.31 | 1.30[^2] | 1.29[^2] |
| -------------------- | -----|-----|-----| ---- | ---- | ---- |
-| 2.28.x | ✓ | ✓ | ✓ | ✓ | -- | -- |
-| 2.27.x | -- | ✓ | ✓ | ✓ | ✓ | -- |
-| 2.26.x | -- | -- | ✓ | ✓ | ✓ | ✓ |
+| 2.29.x | ✓ | ✓ | ✓ | ✓ | -- | -- |
+| 2.28.x | -- | ✓ | ✓ | ✓ | ✓ | -- |
+| 2.27.x | -- | -- | ✓ | ✓ | ✓ | ✓ |

-[^2]: Kubernetes releases below version 1.27 have reached End-of-Life (EOL). We strongly
+[^2]: Kubernetes releases below version 1.31 have reached End-of-Life (EOL). We strongly
recommend upgrading to a supported Kubernetes release as soon as possible.
Refer to the [Kubernetes website](https://kubernetes.io/releases/) for more information on the supported releases. Upgrades from a previous Kubernetes version are generally supported whenever a version is -marked as supported, for example KKP 2.27 supports updating clusters from Kubernetes 1.30 to 1.31. +marked as supported, for example KKP 2.28 supports updating clusters from Kubernetes 1.32 to 1.33. ## Provider Incompatibilities diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md index 9fa9d2ac6..feb4805ea 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/_index.en.md @@ -24,22 +24,22 @@ In general, we recommend the usage of Applications for workloads running inside Default addons are installed in each user-cluster in KKP. The default addons are: -* [Canal](https://github.com/projectcalico/canal): policy based networking for cloud native applications -* [Dashboard](https://github.com/kubernetes/dashboard): General-purpose web UI for Kubernetes clusters -* [kube-proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/): Kubernetes network proxy -* [rbac](https://kubernetes.io/docs/reference/access-authn-authz/rbac/): Kubernetes Role-Based Access Control, needed for +- [Canal](https://github.com/projectcalico/canal): policy based networking for cloud native applications +- [Dashboard](https://github.com/kubernetes/dashboard): General-purpose web UI for Kubernetes clusters +- [kube-proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/): Kubernetes network proxy +- [rbac](https://kubernetes.io/docs/reference/access-authn-authz/rbac/): Kubernetes Role-Based Access Control, needed for [TLS node bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) -* [OpenVPN client](https://openvpn.net/index.php/open-source/overview.html): virtual private network (VPN). Lets the control +- [OpenVPN client](https://openvpn.net/index.php/open-source/overview.html): virtual private network (VPN). Lets the control plan access the Pod & Service network. Required for functionality like `kubectl proxy` & `kubectl port-forward`. -* pod-security-policy: Policies to configure KKP access when PSPs are enabled -* default-storage-class: A cloud provider specific StorageClass -* kubeadm-configmap & kubelet-configmap: A set of ConfigMaps used by kubeadm +- pod-security-policy: Policies to configure KKP access when PSPs are enabled +- default-storage-class: A cloud provider specific StorageClass +- kubeadm-configmap & kubelet-configmap: A set of ConfigMaps used by kubeadm Installation and configuration of these addons is done by 2 controllers which are part of the KKP seed-controller-manager: -* `addon-installer-controller`: Ensures a given set of addons will be installed in all clusters -* `addon-controller`: Templates the addons & applies the manifests in the user clusters +- `addon-installer-controller`: Ensures a given set of addons will be installed in all clusters +- `addon-controller`: Templates the addons & applies the manifests in the user clusters The KKP binaries come with a `kubermatic-installer` tool, which can output a full default `KubermaticConfiguration` (`kubermatic-installer print`). 
This will also include the default configuration for addons and can serve as @@ -86,7 +86,7 @@ regular addons, which are always installed and cannot be removed by the user). I and accessible, then it will be installed in the user-cluster, but also be visible to the user, who can manage it from the KKP dashboard like the other accessible addons. The accessible addons are: -* [node-exporter](https://github.com/prometheus/node_exporter): Exports metrics from the node +- [node-exporter](https://github.com/prometheus/node_exporter): Exports metrics from the node Accessible addons can be managed in the UI from the cluster details view: @@ -256,6 +256,7 @@ spec: ``` There is a short explanation of the single `formSpec` fields: + - `displayName` is the name that is displayed in the UI as the control label. - `internalName` is the name used internally. It can be referenced with template variables (see the description below). - `required` indicates if the control should be required in the UI. @@ -317,7 +318,7 @@ the exact templating syntax. KKP injects an instance of the `TemplateData` struct into each template. The following Go snippet shows the available information: -``` +```plaintext {{< readfile "kubermatic/main/data/addondata.go" >}} ``` diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md index c4191f0a8..e3e440b9d 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/aws-node-termination-handler/_index.en.md @@ -32,6 +32,7 @@ AWS node termination handler is deployed with any aws user cluster created by KK cluster once the spot instance is interrupted. ## AWS Spot Instances Creation + To create a user cluster which runs some spot instance machines, the user can specify the machine type whether it's a spot instance or not at the step number four (Initial Nodes). A checkbox that has the label "Spot Instance" should be checked. diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md index da43a0e4f..d50274b7f 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/addons/kubeflow/_index.en.md @@ -28,12 +28,12 @@ Before this addon can be deployed in a KKP user cluster, the KKP installation ha as an [accessible addon](../#accessible-addons). This needs to be done by the KKP installation administrator, once per KKP installation. -* Request the KKP addon Docker image with Kubeflow Addon matching your KKP version from Kubermatic +- Request the KKP addon Docker image with Kubeflow Addon matching your KKP version from Kubermatic (or [build it yourself](../#creating-a-docker-image) from the [Flowmatic repository](https://github.com/kubermatic/flowmatic)). -* Configure KKP - edit `KubermaticConfiguration` as follows: - * modify `spec.userClusters.addons.kubernetes.dockerRepository` to point to the provided addon Docker image repository, - * add `kubeflow` into `spec.api.accessibleAddons`. -* Apply the [AddonConfig from the Flowmatic repository](https://raw.githubusercontent.com/kubermatic/flowmatic/master/addon/addonconfig.yaml) in your KKP installation. 
+- Configure KKP - edit `KubermaticConfiguration` as follows: + - modify `spec.userClusters.addons.kubernetes.dockerRepository` to point to the provided addon Docker image repository, + - add `kubeflow` into `spec.api.accessibleAddons`. +- Apply the [AddonConfig from the Flowmatic repository](https://raw.githubusercontent.com/kubermatic/flowmatic/master/addon/addonconfig.yaml) in your KKP installation. ### Kubeflow prerequisites @@ -66,7 +66,8 @@ For a LoadBalancer service, an external IP address will be assigned by the cloud This address can be retrieved by reviewing the `istio-ingressgateway` Service in `istio-system` Namespace, e.g.: ```bash -$ kubectl get service istio-ingressgateway -n istio-system +kubectl get service istio-ingressgateway -n istio-system + NAME TYPE CLUSTER-IP EXTERNAL-IP istio-ingressgateway LoadBalancer 10.240.28.214 a286f5a47e9564e43ab4165039e58e5e-1598660756.eu-central-1.elb.amazonaws.com ``` @@ -162,33 +163,33 @@ This section contains a list of known issues in different Kubeflow components: **Kubermatic Kubernetes Platform** -* Not all GPU instances of various providers can be started from the KKP UI: +- Not all GPU instances of various providers can be started from the KKP UI: **Istio RBAC in Kubeflow:** -* If enabled, this issue can be hit in the pipelines: +- If enabled, this issue can be hit in the pipelines: **Kubeflow UI issues:** -* Error by adding notebook server: 500 Internal Server Error: +- Error by adding notebook server: 500 Internal Server Error: -* Experiment run status shows as unknown: +- Experiment run status shows as unknown: **Kale Pipeline:** -* "Namespace is empty" exception: +- "Namespace is empty" exception: **NVIDIA GPU Operator** -* Please see the official NVIDIA GPU documentation for known limitations: +- Please see the official NVIDIA GPU documentation for known limitations: **AMD GPU Support** -* The latest AMD GPU -enabled instances in AWS ([EC2 G4ad](https://aws.amazon.com/blogs/aws/new-amazon-ec2-g4ad-instances-featuring-amd-gpus-for-graphics-workloads/)) +- The latest AMD GPU -enabled instances in AWS ([EC2 G4ad](https://aws.amazon.com/blogs/aws/new-amazon-ec2-g4ad-instances-featuring-amd-gpus-for-graphics-workloads/)) featuring Radeon Pro V520 GPUs do not seem to be working with Kubeflow (yet). The GPUs are successfully attached to the pods but the notebook runtime does not seem to recognize them. diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md index af587dace..c41fc5d04 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/_index.en.md @@ -15,6 +15,7 @@ Currently, helm is exclusively supported as a templating method, but integration Helm Applications can both be installed from helm registries directly or from a git repository. ## Concepts + KKP manages Applications using two key mechanisms: [ApplicationDefinitions]({{< ref "./application-definition" >}}) and [ApplicationInstallations]({{< ref "./application-installation" >}}). `ApplicationDefinitions` are managed by KKP Admins and contain all the necessary information for an application's installation. 
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md index d8455e6e4..24af0167a 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-definition/_index.en.md @@ -8,8 +8,9 @@ weight = 1 An `ApplicationDefinition` represents a single Application and contains all its versions. It holds the necessary information to install an application. Two types of information are required to install an application: -* How to download the application's source (i.e Kubernetes manifest, helm chart...). We refer to this as `source`. -* How to render (i.e. templating) the application's source to install it into user-cluster. We refer to this as`templating method`. + +- How to download the application's source (i.e Kubernetes manifest, helm chart...). We refer to this as `source`. +- How to render (i.e. templating) the application's source to install it into user-cluster. We refer to this as`templating method`. Each version can have a different `source` (`.spec.version[].template.source`) but share the same `templating method` (`.spec.method`). Here is the minimal example of `ApplicationDefinition`. More advanced configurations are described in subsequent paragraphs. @@ -43,13 +44,17 @@ spec: In this example, the `ApplicationDefinition` allows the installation of two versions of apache using the [helm method](#helm-method). Notice that one source originates from a [Helm repository](#helm-source) and the other from a [git repository](#git-source) ## Templating Method + Templating Method describes how the Kubernetes manifests are being packaged and rendered. ### Helm Method + This method use [Helm](https://helm.sh/docs/) to install, upgrade and uninstall the application into the user-cluster. ## Templating Source + ### Helm Source + The Helm Source allows downloading the application's source from a Helm [HTTP repository](https://helm.sh/docs/topics/chart_repository/) or an [OCI repository](https://helm.sh/blog/storing-charts-in-oci/#helm). The following parameters are required: @@ -57,8 +62,8 @@ The following parameters are required: - `chartName` -> Name of the chart within the repository - `chartVersion` -> Version of the chart; corresponds to the chartVersion field - **Example of Helm source with HTTP repository:** + ```yaml - template: source: @@ -69,6 +74,7 @@ The following parameters are required: ``` **Example of Helm source with OCI repository:** + ```yaml - template: source: @@ -77,11 +83,12 @@ The following parameters are required: chartVersion: 1.13.0-rc5 url: oci://quay.io/kubermatic/helm-charts ``` + For private git repositories, please check the [working with private registries](#working-with-private-registries) section. Currently, the best way to obtain `chartName` and `chartVersion` for an HTTP repository is to make use of `helm search`: -```sh +```bash # initial preparation helm repo add helm repo update @@ -99,9 +106,11 @@ helm search repo prometheus-community/prometheus --versions --version ">=15" For OCI repositories, there is currently [no native helm search](https://github.com/helm/helm/issues/9983). Instead, you have to rely on the capabilities of your OCI registry. 
For example, harbor supports searching for helm-charts directly [in their UI](https://goharbor.io/docs/2.4.0/working-with-projects/working-with-images/managing-helm-charts/#list-charts). ### Git Source + The Git source allows you to download the application's source from a Git repository. **Example of Git Source:** + ```yaml - template: source: @@ -121,7 +130,6 @@ The Git source allows you to download the application's source from a Git reposi For private git repositories, please check the [working with private registries](#working-with-private-registries) section. - ## Working With Private Registries For private registries, the Applications Feature supports storing credentials in Kubernetes secrets in the KKP master and referencing the secrets in your ApplicationDefinitions. @@ -134,67 +142,68 @@ In order for the controller to sync your secrets, they must be annotated with `a ### Git Repositories KKP supports three types of authentication for git repositories: -* `password`: authenticate with a username and password. -* `Token`: authenticate with a Bearer token -* `SSH-Key`: authenticate with an ssh private key. + +- `password`: authenticate with a username and password. +- `Token`: authenticate with a Bearer token +- `SSH-Key`: authenticate with an ssh private key. Their setup is comparable: 1. Create a secret containing our credentials + ```bash + # inside KKP master + + # user-pass + kubectl create secret -n generic --from-literal=pass= --from-literal=user= + + # token + kubectl create secret -n generic --from-literal=token= + + # ssh-key + kubectl create secret -n generic --from-literal=sshKey= + + # after creation, annotate + kubectl annotate secret apps.kubermatic.k8c.io/secret-type="git" + ``` + +1. Reference the secret in the ApplicationDefinition + ```yaml + spec: + versions: + - template: + source: + git: + path: + ref: + branch: + remote: # for ssh-key, an ssh url must be chosen (e.g. git@example.com/repo.git) + credentials: + method: + # user-pass + username: + key: user + name: + password: + key: pass + name: + # token + token: + key: token + name: + # ssh-key + sshKey: + key: sshKey + name: + ``` -```sh -# inside KKP master - -# user-pass -kubectl create secret -n generic --from-literal=pass= --from-literal=user= - -# token -kubectl create secret -n generic --from-literal=token= - -# ssh-key -kubectl create secret -n generic --from-literal=sshKey= - -# after creation, annotate -kubectl annotate secret apps.kubermatic.k8c.io/secret-type="git" -``` - -2. Reference the secret in the ApplicationDefinition - -```yaml -spec: - versions: - - template: - source: - git: - path: - ref: - branch: - remote: # for ssh-key, an ssh url must be chosen (e.g. git@example.com/repo.git) - credentials: - method: - # user-pass - username: - key: user - name: - password: - key: pass - name: - # token - token: - key: token - name: - # ssh-key - sshKey: - key: sshKey - name: -``` #### Compatibility Warning Be aware that all authentication methods may be available on your git server. More and more servers disable the authentication with username and password. More over on some providers like GitHub, to authenticate with an access token, you must use `password` method instead of `token`. 
Example of secret to authenticate with [GitHub access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#using-a-token-on-the-command-line): -```sh + +```bash kubectl create secret -n generic --from-literal=pass= --from-literal=user= ``` @@ -205,73 +214,71 @@ For other providers, please refer to their official documentation. [Helm OCI registries](https://helm.sh/docs/topics/registries/#enabling-oci-support) are being accessed using a JSON configuration similar to the `~/.docker/config.json` on the local machine. It should be noted, that all OCI server urls need to be prefixed with `oci://`. 1. Create a secret containing our credentials - -```sh -# inside KKP master -kubectl create secret -n docker-registry --docker-server= --docker-username= --docker-password= -kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm" - -# example -kubectl create secret -n kubermatic docker-registry --docker-server=harbor.example.com/my-project --docker-username=someuser --docker-password=somepaswword oci-cred -kubectl annotate secret oci-cred apps.kubermatic.k8c.io/secret-type="helm" -``` - -2. Reference the secret in the ApplicationDefinition - -```yaml -spec: - versions: - - template: - source: - helm: - chartName: examplechart - chartVersion: 0.1.0 - credentials: - registryConfigFile: - key: .dockerconfigjson # `kubectl create secret docker-registry` stores by default the creds under this key - name: - url: -``` + ```bash + # inside KKP master + kubectl create secret -n docker-registry --docker-server= --docker-username= --docker-password= + kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm" + + # example + kubectl create secret -n kubermatic docker-registry --docker-server=harbor.example.com/my-project --docker-username=someuser --docker-password=somepaswword oci-cred + kubectl annotate secret oci-cred apps.kubermatic.k8c.io/secret-type="helm" + ``` + +1. Reference the secret in the ApplicationDefinition + ```yaml + spec: + versions: + - template: + source: + helm: + chartName: examplechart + chartVersion: 0.1.0 + credentials: + registryConfigFile: + key: .dockerconfigjson # `kubectl create secret docker-registry` stores by default the creds under this key + name: + url: + ``` ### Helm Userpass Registries To use KKP Applications with a helm [userpass auth](https://helm.sh/docs/topics/registries/#auth) registry, you can configure the following: 1. Create a secret containing our credentials - -```sh -# inside KKP master -kubectl create secret -n generic --from-literal=pass= --from-literal=user= -kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm" -``` - -2. Reference the secret in the ApplicationDefinition - -```yaml -spec: - versions: - - template: - source: - helm: - chartName: examplechart - chartVersion: 0.1.0 - credentials: - password: - key: pass - name: - username: - key: user - name: - url: -``` + ```bash + # inside KKP master + kubectl create secret -n generic --from-literal=pass= --from-literal=user= + kubectl annotate secret apps.kubermatic.k8c.io/secret-type="helm" + ``` + +1. Reference the secret in the ApplicationDefinition + ```yaml + spec: + versions: + - template: + source: + helm: + chartName: examplechart + chartVersion: 0.1.0 + credentials: + password: + key: pass + name: + username: + key: user + name: + url: + ``` ### Templating Credentials + There is a particular case where credentials may be needed at the templating stage to render the manifests. 
For example, if the template method is `helm` and the source is git. To install the chart into the user cluster, we have to build the chart dependencies. These dependencies may be hosted on a private registry requiring authentication. You can specify the templating credentials by settings `.spec.version[].template.templateCredentials`. It works the same way as source credentials.

**Example of template credentials:**
+
```yaml
spec:
  versions:
@@ -293,7 +300,9 @@ spec:
```

## Advanced Configuration
+
### Default Values
+
The `.spec.defaultValuesBlock` field describes overrides for manifest-rendering in UI when creating an application. For example if the method is Helm, then this field contains the Helm values.

**Example for helm values**
@@ -308,20 +317,23 @@ spec:
```

### Customize Deployment
+
You can tune how the application will be installed by setting `.spec.defaultDeployOptions`.
The options depend on the template method (i.e. `.spec.method`).

-*note: `defaultDeployOptions` can be overridden at `ApplicationInstallation` level by settings `.spec.deployOptions`*
+*Note: `defaultDeployOptions` can be overridden at `ApplicationInstallation` level by setting `.spec.deployOptions`*

#### Customize Deployment For Helm Method
+
You may tune how Helm deploys the application with the following options:

-* `atomic`: corresponds to the `--atomic` flag on Helm CLI. If set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of failed upgrade.
-* `wait`: corresponds to the `--wait` flag on Helm CLI. If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as `--timeout`
-* `timeout`: corresponds to the `--timeout` flag on Helm CLI. It's time to wait for any individual Kubernetes operation.
-* `enableDNS`: corresponds to the `-enable-dns ` flag on Helm CLI. It enables DNS lookups when rendering templates. if you enable this flag, you have to verify that helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers.(c.f. [CVE-2023-25165](https://github.com/helm/helm/security/advisories/GHSA-pwcw-6f5g-gxf8))
+- `atomic`: corresponds to the `--atomic` flag on Helm CLI. If set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of failed upgrade.
+- `wait`: corresponds to the `--wait` flag on Helm CLI. If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as `--timeout`.
+- `timeout`: corresponds to the `--timeout` flag on Helm CLI. It sets how long to wait for any individual Kubernetes operation.
+- `enableDNS`: corresponds to the `--enable-dns` flag on Helm CLI. It enables DNS lookups when rendering templates. If you enable this flag, you have to verify that the Helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers (c.f. [CVE-2023-25165](https://github.com/helm/helm/security/advisories/GHSA-pwcw-6f5g-gxf8))

Example:
+
```yaml
apiVersion: apps.kubermatic.k8c.io/v1
kind: ApplicationDefinition
@@ -335,11 +347,11 @@ spec:
    timeout: "5m"
```

-*note: if `atomic` is true, then wait must be true. 
If `wait` is true then `timeout` must be defined.* - +*Note: if `atomic` is true, then wait must be true. If `wait` is true then `timeout` must be defined.* ## ApplicationDefinition Reference -**The following is an example of ApplicationDefinition, showing all the possible options**. + +**The following is an example of ApplicationDefinition, showing all the possible options** ```yaml {{< readfile "kubermatic/main/data/applicationDefinition.yaml" >}} diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-installation/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-installation/_index.en.md index 5c31e4798..add323fe1 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-installation/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-installation/_index.en.md @@ -6,10 +6,12 @@ weight = 2 +++ -An `ApplicationInstallation` is an instance of an application to install into user-cluster. -It abstracts to the end user how to get the application deployment sources (i.e. the k8s manifests, hem chart... ) and how to install it into the cluster. So he can install and use the application with minimal knowledge of Kubernetes. +An `ApplicationInstallation` is an instance of an ApplicationDefinition to be installed into a user cluster. +It abstracts the complexities of acquiring application deployment sources (e.g., Kubernetes manifests, Helm charts) and deploying them to the cluster. +This means users can install and utilize applications without requiring deep Kubernetes expertise. ## Anatomy of an Application + ```yaml apiVersion: apps.kubermatic.k8c.io/v1 kind: ApplicationInstallation @@ -31,23 +33,27 @@ spec: ``` The `applicationRef` is a reference to the `applicationDefinition` that handles this installation. -The `.spec.namespace` defines in which namespace the application will be installed. If `.spec.namespace.create` is `true`, then it will ensure that the namespace exists and have the desired labels. -The `values` is a schemaless field that describes overrides for manifest-rendering (e.g. if the method is Helm, then this field contains the Helm values.) +The `.spec.namespace` defines in which namespace the application will be installed. If `.spec.namespace.create` is `true`, then it will ensure that the namespace exists and has the desired labels. +The `values` is a schemaless field that describes overrides for manifest-rendering (e.g., if the method is Helm, then this field contains the Helm values.) ## Application Life Cycle + It mainly composes of 2 steps: download the application's source and install or upgrade the application. You can monitor these steps thanks to the conditions in the applicationInstallation's status. -- `ManifestsRetrieved` condition indicates if application's source has been correctly downloaded. -- `Ready` condition indicates the installation / upgrade status. it can have four states: +- `ManifestsRetrieved` condition indicates if the application's source has been correctly downloaded. +- `Ready` condition indicates the installation / upgrade status. It can have four states: + - `{status: "Unknown", reason: "InstallationInProgress"}`: meaning the application installation / upgrade is in progress. - `{status: "True", reason: "InstallationSuccessful"}`: meaning the application installation / upgrade was successful. 
- `{status: "False", reason: "InstallationFailed"}`: meaning the installation / upgrade has failed. - `{status: "False", reason: "InstallationFailedRetriesExceeded"}`: meaning the max number of retries was exceeded. ### Helm additional information + If the [templating method]({{< ref "../application-definition#templating-method" >}}) is `Helm`, then additional information regarding the install or upgrade is provided under `.status.helmRelease`. Example: + ```yaml status: [...] @@ -81,13 +87,16 @@ status: ``` ## Advanced Configuration + This section is relevant to advanced users. However, configuring advanced parameters may impact performance, load, and workload stability. Consequently, it must be treated carefully. ### Periodic Reconciliation + By default, Applications are only reconciled on changes in the spec, annotations, or the parent application definition. Meaning that if the user manually deletes the workload deployed by the application, nothing will happen until the `ApplicationInstallation` CR changes. -You can periodically force the reconciliation of the application by settings `.spec.reconciliationInterval`: -- a value greater than zero force reconciliation even if no changes occurred on application CR. +You can periodically force the reconciliation of the application by setting `.spec.reconciliationInterval`: + +- a value greater than zero forces reconciliation even if no changes occurred on application CR. - a value equal to 0 disables the force reconciliation of the application (default behavior). {{% notice warning %}} @@ -97,20 +106,23 @@ Setting this too low can cause a heavy load and disrupt your application workloa The application will not be reconciled if the maximum number of retries is exceeded. ### Customize Deployment + You can tune how the application will be installed by setting `.spec.deployOptions`. -The options depends of the template method (i.e. `.spec.method`) of the `ApplicationDefinition`. +The options depend on the template method (i.e., `.spec.method`) of the `ApplicationDefinition`. -*note: if `deployOptions` is not set then it used the default defined at the `ApplicationDefinition` level (`.spec.defaultDeployOptions`)* +*Note: if `deployOptions` is not set, then it uses the default defined at the `ApplicationDefinition` level (`.spec.defaultDeployOptions`)* #### Customize Deployment for Helm Method + You may tune how Helm deploys the application with the following options: -* `atomic`: corresponds to the `--atomic` flag on Helm CLI. If set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of failed upgrade. -* `wait`: corresponds to the `--wait` flag on Helm CLI. If set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as `--timeout` -* `timeout`: corresponds to the `--timeout` flag on Helm CLI. It's time to wait for any individual Kubernetes operation. -* `enableDNS`: corresponds to the `-enable-dns ` flag on Helm CLI. It enables DNS lookups when rendering templates. if you enable this flag, you have to verify that helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers.(c.f. [CVE-2023-25165](https://github.com/helm/helm/security/advisories/GHSA-pwcw-6f5g-gxf8)) +- `atomic`: corresponds to the `--atomic` flag on Helm CLI. 
If set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of a failed upgrade.
+- `wait`: corresponds to the `--wait` flag on Helm CLI. If set, will wait until all Pods, PVCs, Services, and a minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as `--timeout`.
+- `timeout`: corresponds to the `--timeout` flag on Helm CLI. It sets how long to wait for any individual Kubernetes operation.
+- `enableDNS`: corresponds to the `--enable-dns` flag on Helm CLI. It enables DNS lookups when rendering templates. If you enable this flag, you have to verify that the Helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers. (c.f. [CVE-2023-25165](https://github.com/helm/helm/security/advisories/GHSA-pwcw-6f5g-gxf8))

Example:
+
```yaml
apiVersion: apps.kubermatic.k8c.io/v1
kind: ApplicationInstallation
@@ -124,13 +136,14 @@ spec:
    timeout: "5m"
```

-*note: if `atomic` is true, then wait must be true. If `wait` is true then `timeout` must be defined.*
+*Note: if `atomic` is true, then wait must be true. If `wait` is true, then `timeout` must be defined.*

If `.spec.deployOptions.helm.atomic` is true, then when installation or upgrade of an application fails, `ApplicationsInstallation.Status.Failures` counter is incremented.
-If it reaches the max number of retries (hardcoded to 5), then the applicationInstallation controller will stop trying to install or upgrade the application until applicationInstallation 's spec changes.
-This behavior reduces the load on the cluster and avoids an infinite loop that disrupts workload.
+If it reaches the max number of retries (hardcoded to 5), then the ApplicationInstallation controller will stop trying to install or upgrade the application until ApplicationInstallation's spec changes.
+This behavior reduces the load on the cluster and avoids an infinite loop that disrupts the workload.

## ApplicationInstallation Reference
+
**The following is an example of ApplicationInstallation, showing all the possible options**.

```yaml
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-templating/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-templating/_index.en.md
index 634bbec58..27f78546c 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-templating/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/application-templating/_index.en.md
@@ -15,7 +15,7 @@ the exact templating syntax.

KKP injects an instance of the `TemplateData` struct into each template. 
The following Go snippet shows the available information:

-```
+```text
{{< readfile "kubermatic/main/data/applicationdata.go" >}}
```
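+
+For example, a template in the application values could reference the injected data like this (a sketch that assumes the struct above exposes a `Cluster` field with a `Name` member):
+
+```yaml
+# Rendered by KKP before the values are handed to the templating method.
+commonLabels:
+  cluster-name: '{{ .Cluster.Name }}'
+```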
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/_index.en.md
index 6d51d03a6..2f836fbc4 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/_index.en.md
@@ -18,6 +18,7 @@ Here is the list of the applications that come as part of the Default Applicatio
* [K8sGPT-Operator]({{< ref "./k8sgpt-operator/" >}})
* [KubeVIP]({{< ref "./kube-vip/" >}})
* [KubeVirt]({{< ref "./kubevirt/" >}})
+* [Kueue]({{< ref "./kueue/" >}})
* [LocalAI]({{< ref "./local-ai/" >}})
* [MetalLB]({{< ref "./metallb/" >}})
* [Nginx]({{< ref "./nginx/" >}})
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/aikit/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/aikit/_index.en.md
index 0bce1a83c..46b57e768 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/aikit/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/aikit/_index.en.md
@@ -23,16 +23,16 @@ For more information on AIKit, please refer to the [official documentation](http
AIKit is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready (existing cluster) from the Applications tab via UI.

-* Select the AIKit application from the Application Catalog.
+- Select the AIKit application from the Application Catalog.

![Select AIKit Application](01-select-application-aikit-app.png)

-* Under the Settings section, select and provide appropriate details and click `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for AIKit Application](02-settings-aikit-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the AIKit application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the AIKit application to the user cluster.

![Application Values for AIKit Application](03-applicationvalues-aikit-app.png)

-To further configure the `values.yaml`, find more information on the [AIKit Helm Chart Configuration](https://github.com/sozercan/aikit/tree/v0.16.0/charts/aikit)
\ No newline at end of file
+To further configure the `values.yaml`, find more information on the [AIKit Helm Chart Configuration](https://github.com/sozercan/aikit/tree/v0.16.0/charts/aikit)
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/argocd/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/argocd/_index.en.md
index 445948042..f516e2b39 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/argocd/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/argocd/_index.en.md
@@ -7,7 +7,8 @@
weight = 1
+++

-# What is ArgoCD?
+## What is ArgoCD?
+
ArgoCD is a declarative, GitOps continuous delivery tool for Kubernetes.
ArgoCD follows the GitOps pattern of using Git repositories as the source of truth for defining the desired application state. Kubernetes manifests can be specified in several ways:
@@ -18,23 +19,22 @@ ArgoCD follows the GitOps pattern of using Git repositories as the source of tru
- Plain directory of YAML/json manifests
- Any custom config management tool configured as a config management plugin

-
For more information on the ArgoCD, please refer to the [official documentation](https://argoproj.github.io/cd/)

-# How to deploy?
+## How to deploy?

ArgoCD is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the ArgoCD application from the Application Catalog.
+- Select the ArgoCD application from the Application Catalog.

![Select ArgoCD Application](01-select-application-argocd-app.png)

-* Under the Settings section, select and provide appropriate details and click `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for ArgoCD Application](02-settings-argocd-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the ArgoCD application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the ArgoCD application to the user cluster.

![Application Values for ArgoCD Application](03-applicationvalues-argocd-app.png)
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cert-manager/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cert-manager/_index.en.md
index 3822070f7..7f797ada8 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cert-manager/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cert-manager/_index.en.md
@@ -7,7 +7,7 @@
weight = 3
+++

-# What is cert-manager?
+## What is cert-manager?
cert-manager adds certificates and certificate issuers as resource types in Kubernetes clusters. It simplifies the process of obtaining, renewing and using certificates. @@ -17,20 +17,20 @@ It will ensure certificates are valid and up to date, and attempt to renew certi For more information on the cert-manager, please refer to the [official documentation](https://cert-manager.io/) -# How to deploy? +## How to deploy? cert-manager is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the cert-manager application from the Application Catalog. +- Select the cert-manager application from the Application Catalog. ![Select cert-manager Application](01-select-application-cert-manager-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for cert-manager Application](02-settings-cert-manager-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the cert-manager application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the cert-manager application to the user cluster. ![Application Values for cert-manager Application](03-applicationvalues-cert-manager-app.png) diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cluster-autoscaler/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cluster-autoscaler/_index.en.md index 013815e66..a7c194c95 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cluster-autoscaler/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/cluster-autoscaler/_index.en.md @@ -6,24 +6,24 @@ weight = 1 +++ -# What is the Kubernetes Cluster Autoscaler? +## What is the Kubernetes Cluster Autoscaler? Kubernetes Cluster Autoscaler is a tool that automatically adjusts the size of the worker’s node up or down depending on the consumption. This means that the cluster autoscaler, for example, automatically scale up a cluster by increasing the node count when there are not enough node resources for cluster workload scheduling and scale down when the node resources have continuously staying idle, or there are more than enough node resources available for cluster workload scheduling. In a nutshell, it is a component that automatically adjusts the size of a Kubernetes cluster so that all pods have a place to run and there are no unneeded nodes. -# How to deploy? +## How to deploy? Kubernetes Cluster Autoscaler is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the Cluster Autoscaler application from the Application Catalog. +- Select the Cluster Autoscaler application from the Application Catalog. 
![Select Cluster Autoscaler Application](01-select-application-cluster-autoscaler-app.png)

-* Under the Settings section, select and provide appropriate details and click `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for Cluster Autoscaler Application](02-settings-cluster-autoscaler-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Clustet Autoscaler application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Cluster Autoscaler application to the user cluster.

![Application Values for Cluster Autoscaler Application](03-applicationvalues-cluster-autoscaler-app.png)
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/falco/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/falco/_index.en.md
index 70ad45722..ee6118a1d 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/falco/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/falco/_index.en.md
@@ -7,25 +7,25 @@
weight = 7
+++

-# What is Falco?
+## What is Falco?

Falco is a cloud-native security tool designed for Linux systems. It employs custom rules on kernel events, which are enriched with container and Kubernetes metadata, to provide real-time alerts. Falco helps you gain visibility into abnormal behavior, potential security threats, and compliance violations, contributing to comprehensive runtime security.

For more information on the Falco, please refer to the [official documentation](https://falco.org/)

-# How to deploy?
+## How to deploy?

Falco is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the Falco application from the Application Catalog.
+- Select the Falco application from the Application Catalog.

![Select Falco Application](01-select-application-falco-app.png)

-* Under the Settings section, select and provide appropriate details and clck `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for Falco Application](02-settings-falco-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Falco application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Falco application to the user cluster.

To further configure the values.yaml, find more information on the [Falco Helm chart documentation](https://github.com/falcosecurity/charts/tree/master/charts/falco).
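+
+For instance, custom detection rules can be passed through the chart's `customRules` value; the following is a sketch only (the file name, rule, and condition are illustrative, and the `customRules` key is assumed from the upstream chart):
+
+```yaml
+customRules:
+  example-rules.yaml: |-
+    - rule: Shell Spawned in Container
+      desc: Detect an interactive shell started inside a container
+      condition: container.id != host and proc.name = bash
+      output: "Shell spawned in a container (user=%user.name container=%container.name)"
+      priority: WARNING
+```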
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/flux2/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/flux2/_index.en.md index 0b06f4e86..f261968e1 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/flux2/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/flux2/_index.en.md @@ -7,7 +7,7 @@ weight = 2 +++ -# What is Flux2? +## What is Flux2? Flux is a tool for keeping Kubernetes clusters in sync with sources of configuration (like Git repositories and OCI artifacts), automating updates to configuration when there is new code to deploy. @@ -19,19 +19,19 @@ Flux is a Cloud Native Computing Foundation [CNCF](https://www.cncf.io/) project For more information on the Flux2, please refer to the [official documentation](https://github.com/fluxcd-community/helm-charts) -# How to deploy? +## How to deploy? Flux2 is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the Flux2 application from the Application Catalog. +- Select the Flux2 application from the Application Catalog. ![Select Flux2 Application](01-select-application-flux2-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for Flux2 Application](02-settings-flux2-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Flux2 application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Flux2 application to the user cluster. A full list of available Helm values is on [flux2's ArtifactHub page](https://artifacthub.io/packages/helm/fluxcd-community/flux2) diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt-operator/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt-operator/_index.en.md index 474b59f4e..f317b8b3d 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt-operator/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt-operator/_index.en.md @@ -7,7 +7,8 @@ weight = 11 +++ -# What is K8sGPT-Operator? +## What is K8sGPT-Operator? + This operator is designed to enable K8sGPT within a Kubernetes cluster. It will allow you to create a custom resource that defines the behaviour and scope of a managed K8sGPT workload. @@ -16,20 +17,20 @@ Analysis and outputs will also be configurable to enable integration into existi For more information on the K8sGPT-Operator, please refer to the [official documentation](https://docs.k8sgpt.ai/reference/operator/overview/) -# How to deploy? +## How to deploy? K8sGPT-Operator is available as part of the KKP's default application catalog. 
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the K8sGPT-Operator application from the Application Catalog. +- Select the K8sGPT-Operator application from the Application Catalog. ![Select K8sGPT-Operator Application](01-select-application-k8sgpt-operator-app.png) -* Under the Settings section, select and provide appropriate details and click `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for K8sGPT-Operator Application](02-settings-k8sgpt-operator-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the K8sGPT-Operator application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the K8sGPT-Operator application to the user cluster. ![Application Values for K8sGPT-Operator Application](03-applicationvalues-k8sgpt-operator-app.png) diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt/_index.en.md index fc7630a64..e85a05ba8 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/k8sgpt/_index.en.md @@ -7,7 +7,8 @@ weight = 11 +++ -# What is K8sGPT? +## What is K8sGPT? + K8sGPT gives Kubernetes SRE superpowers to everyone. It is a tool for scanning your Kubernetes clusters, diagnosing, and triaging issues in simple English. It has SRE experience codified into its analyzers and helps to pull out the most relevant information to enrich it with AI. @@ -16,20 +17,20 @@ Out of the box integration with OpenAI, Azure, Cohere, Amazon Bedrock and local For more information on the K8sGPT, please refer to the [official documentation](https://docs.k8sgpt.ai/) -# How to deploy? +## How to deploy? K8sGPT is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the K8sGPT application from the Application Catalog. +- Select the K8sGPT application from the Application Catalog. ![Select K8sGPT Application](01-select-application-k8sgpt-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for K8sGPT Application](02-settings-k8sgpt-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the K8sGPT application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the K8sGPT application to the user cluster. 
![Application Values for K8sGPT Application](03-applicationvalues-k8sgpt-app.png) diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kube-vip/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kube-vip/_index.en.md index e52f88ac5..fc5f18cd6 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kube-vip/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kube-vip/_index.en.md @@ -7,25 +7,25 @@ weight = 6 +++ -# What is Kube-VIP? +## What is Kube-VIP? Kube-VIP provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software. For more information on the Kube-VIP, please refer to the [official documentation](https://kube-vip.io/) -# How to deploy? +## How to deploy? Kube-VIP is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the Kube-VIP application from the Application Catalog. +- Select the Kube-VIP application from the Application Catalog. ![Select Kube-VIP Application](01-select-application-kube-vip-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for Kube-VIP Application](02-settings-kube-vip-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Kube-VIP application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Kube-VIP application to the user cluster. To further configure the values.yaml, find more information on the [Kube-vip Helm chart documentation](https://github.com/kube-vip/helm-charts). diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kubevirt/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kubevirt/_index.en.md index d621829b4..37ba16493 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kubevirt/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kubevirt/_index.en.md @@ -7,7 +7,7 @@ weight = 10 +++ -# What is KubeVirt? +## What is KubeVirt? KubeVirt is a virtual machine management add-on for Kubernetes. Its aim is to provide a common ground for virtualization solutions on top of Kubernetes. @@ -21,15 +21,15 @@ As of today KubeVirt can be used to declaratively: For more information on the KubeVirt, please refer to the [official documentation](https://kubevirt.io/) -# How to deploy? +## How to deploy? KubeVirt is available as part of the KKP's default application catalog. 
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the KubeVirt application from the Application Catalog.
+- Select the KubeVirt application from the Application Catalog.

![Select KubeVirt Application](01-select-application-kubevirt-app.png)

-* Under the Settings section, select and provide appropriate details and clck `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for KubeVirt Application](02-settings-kubevirt-app.png)
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/01-select-application-kueue-app.png b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/01-select-application-kueue-app.png
new file mode 100644
index 000000000..3b9c9444b
Binary files /dev/null and b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/01-select-application-kueue-app.png differ
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/02-settings-kueue-app.png b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/02-settings-kueue-app.png
new file mode 100644
index 000000000..d788793da
Binary files /dev/null and b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/02-settings-kueue-app.png differ
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/03-values-kueue-app.png b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/03-values-kueue-app.png
new file mode 100644
index 000000000..97c3ca2d2
Binary files /dev/null and b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/03-values-kueue-app.png differ
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/_index.en.md
new file mode 100644
index 000000000..06638a397
--- /dev/null
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/kueue/_index.en.md
@@ -0,0 +1,36 @@
++++
+title = "Kueue Application"
+linkTitle = "Kueue"
+enterprise = true
+date = 2025-09-19
+weight = 10
++++
+
+## What is Kueue?
+
+Kueue is a cloud-native job queueing system for batch, HPC, AI/ML, and similar applications in a Kubernetes cluster.
+
+For more information on the Kueue, please refer to the [official documentation](https://kueue.sigs.k8s.io/).
+
+## How to deploy?
+
+Kueue is available as part of the KKP's default application catalog.
+It can be deployed to the user cluster either during the cluster creation or after the cluster is ready (existing cluster) from the Applications tab via UI.
+
+- Select the Kueue application from the Application Catalog.
+
+![Select Kueue Application](01-select-application-kueue-app.png)
+
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.
+
+![Settings for Kueue Application](02-settings-kueue-app.png)
+
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Kueue application to the user cluster.
+
+![Application Values for Kueue Application](03-values-kueue-app.png)
+
+To further configure the `values.yaml`, find more information in the [official documentation](https://kueue.sigs.k8s.io/).
+
+Due to current limitations of the Kueue Helm Chart, the Kueue ApplicationDefinition only supports versions v0.13.4 and newer.
+Additionally, please ensure that the `nameOverride` and `fullnameOverride` configurations are specified within the `values.yaml` file.
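+
+A minimal sketch of a `values.yaml` that satisfies this requirement (the override values themselves are illustrative):
+
+```yaml
+nameOverride: "kueue"
+fullnameOverride: "kueue"
+```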
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/local-ai/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/local-ai/_index.en.md
index f69bace53..5f467e68d 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/local-ai/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/local-ai/_index.en.md
@@ -15,18 +15,18 @@ LocalAI is an open-source alternative to OpenAI’s API, designed to run AI mode
Local AI is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready (existing cluster) from the Applications tab via UI.

-* Select the Local AI application from the Application Catalog.
+- Select the Local AI application from the Application Catalog.

![Select Local AI Application](01-select-local-ai-app.png)

-* Under the Settings section, select and provide appropriate details and click `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for Local AI Application](02-settings-local-ai-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the LocalAI application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the LocalAI application to the user cluster.

![Application Values for LocalAI Application](03-applicationvalues-local-ai-app.png)

To further configure the `values.yaml`, find more information on the [LocalAI Helm Chart Configuration](https://github.com/go-skynet/helm-charts/tree/main/charts/local-ai)

-Please take care about the size of the default models which can vary from the default configuration.
\ No newline at end of file
+Please take care about the size of the default models, which can vary from the default configuration.
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/metallb/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/metallb/_index.en.md
index ec43a963c..d1f64d83f 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/metallb/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/metallb/_index.en.md
@@ -7,26 +7,25 @@
weight = 4
+++

-# What is MetalLB?
+## What is MetalLB?

MetalLB is a load-balancer implementation for bare metal Kubernetes clusters, using standard routing protocols.

-
For more information on the MetalLB, please refer to the [official documentation](https://metallb.universe.tf/)

-# How to deploy?
+## How to deploy?

MetalLB is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the MetalLB application from the Application Catalog.
+- Select the MetalLB application from the Application Catalog.

![Select MetalLB Application](01-select-application-metallb-app.png)

-* Under the Settings section, select and provide appropriate details and clck `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for MetalLB Application](02-settings-metallb-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the MetalLB application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the MetalLB application to the user cluster.

To further configure the values.yaml, find more information on the [MetalLB Helm chart documentation](https://github.com/metallb/metallb/tree/main/charts/metallb).
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nginx/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nginx/_index.en.md
index bcfe336b5..572292997 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nginx/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nginx/_index.en.md
@@ -7,25 +7,25 @@
weight = 5
+++

-# What is Nginx?
+## What is Nginx?

Nginx is an ingress-controller for Kubernetes using NGINX as a reverse proxy and load balancer.

For more information on the Nginx, please refer to the [official documentation](https://kubernetes.github.io/ingress-nginx/)

-# How to deploy?
+## How to deploy?

Nginx is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the Nginx application from the Application Catalog.
+- Select the Nginx application from the Application Catalog.
![Select Nginx Application](01-select-application-nginx-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for Nginx Application](02-settings-nginx-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Nginx application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Nginx application to the user cluster. To further configure the values.yaml, find more information on the [Nginx Helm chart documentation](https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx). diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/03-node-labels.png b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/03-node-labels.png new file mode 100644 index 000000000..43e91df50 Binary files /dev/null and b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/03-node-labels.png differ diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/_index.en.md index 471ad652d..e92c5b6bf 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/nvidia-gpu-operator/_index.en.md @@ -7,24 +7,73 @@ weight = 12 +++ -# What is Nvidia GPU Operator? +## What is Nvidia GPU Operator? + The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPU. For more information on the Nvidia GPU Operator, please refer to the [official documentation](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) -# How to deploy? +## How to deploy? Nvidia GPU Operator is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the Nvidia GPU Operator application from the Application Catalog. +- Select the Nvidia GPU Operator application from the Application Catalog. ![Select Nvidia GPU Operator Application](01-select-application-nvidia-gpu-operator-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for Nvidia GPU Operator Application](02-settings-nvidia-gpu-operator-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Nvidia GPU Operator application to the user cluster. 
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Nvidia GPU Operator application to the user cluster.

To further configure the values.yaml, find more information on the [Nvidia GPU Operator Helm chart documentation](https://github.com/NVIDIA/gpu-operator/)
+
+## DCGM metrics for NVIDIA GPUs
+
+### What are DCGM metrics?
+
+DCGM (Data Center GPU Manager) metrics are health and performance measurements exported by NVIDIA software. They include useful signals such as GPU temperature, memory usage, and utilization. These metrics are ready to be consumed by Prometheus and visualized in Grafana.
+
+The following explains how DCGM metrics are exposed when you deploy the NVIDIA GPU Operator via the KKP application catalog and how to check that everything is working.
+
+### How it works in KKP
+
+When you deploy the Nvidia GPU Operator from the Application Catalog, DCGM metrics are enabled by default. It also deploys Node Feature Discovery (NFD), which automatically labels GPU nodes. These labels help the operator deploy a small exporter (dcgm-exporter) as a DaemonSet on those GPU nodes.
+
+Key points:
+
+- DCGM exporter listens on port 9400 and exposes metrics at the `/metrics` endpoint.
+- By default, the gpu-operator Helm chart enables the `dcgmExporter` and `nfd` components.
+
+### Quick check
+
+1. Deploy the Nvidia GPU Operator from the Applications tab in KKP.
+2. Wait for the application to finish installing (status should show `deployed`).
+3. Confirm GPU nodes are labeled with the `feature.node.kubernetes.io/pci-10de.present=true` label (this is done automatically by NFD).
+4. Confirm all pods in the `nvidia-gpu-operator` namespace are in the `Running` state.
+
+### Troubleshooting
+
+- No metrics found: make sure your nodes have NVIDIA GPUs and the Nvidia GPU Operator application is deployed. Check the DaemonSet for dcgm-exporter in the cluster.
+- Exporter not running on a node: verify the node has the GPU label (NFD adds it). If not, re-check your operator deployment or the node configuration.
+
+### Want to dig deeper?
+
+If you'd like more detailed, technical steps (for example, changing scrape intervals or customizing the chart values), check the official GPU Operator Helm chart and the dcgm-exporter documentation:
+
+- [NVIDIA GPU Operator on GitHub](https://github.com/NVIDIA/gpu-operator)
+- [dcgm-exporter on GitHub](https://github.com/NVIDIA/dcgm-exporter)
+
+## AI Conformance
+
+To support AI workloads, Kubermatic Kubernetes Platform uses the NVIDIA GPU Operator to automatically expose GPU information through node labels.
+
+Once the operator is installed, it discovers the GPUs available on your cluster nodes and applies a set of descriptive labels.
+
+These labels provide useful details about the hardware, such as the GPU product name and the installed CUDA driver and runtime versions.
+
+You can view these labels on the Nodes page.
+
+![GPU Labels on Node](03-node-labels.png)
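+
+As a quick verification of the DCGM setup described above (the DaemonSet name is an assumption; adjust it to whatever `kubectl get ds -n nvidia-gpu-operator` reports):
+
+```bash
+# GPU nodes should carry the NFD label mentioned above
+kubectl get nodes -l feature.node.kubernetes.io/pci-10de.present=true
+
+# All operator pods should be in the Running state
+kubectl get pods -n nvidia-gpu-operator
+
+# dcgm-exporter serves metrics on port 9400 at /metrics
+kubectl -n nvidia-gpu-operator port-forward ds/nvidia-dcgm-exporter 9400:9400 &
+curl -s localhost:9400/metrics | head
+```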
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy-operator/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy-operator/_index.en.md
index 7cf115bfa..27723d0cb 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy-operator/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy-operator/_index.en.md
@@ -7,7 +7,7 @@
weight = 9
+++

-# What is Trivy Operator?
+## What is Trivy Operator?

The Trivy Operator leverages Trivy to continuously scan your Kubernetes cluster for security issues. The scans are summarised in security reports as Kubernetes Custom Resources, which become accessible through the Kubernetes API. The Operator does this by watching Kubernetes for state changes and automatically triggering security scans in response. For example, a vulnerability scan is initiated when a new Pod is created. This way, users can find and view the risks that relate to different resources in a Kubernetes-native way.
@@ -15,23 +15,23 @@ Trivy Operator can be deployed and used for scanning the resources deployed on t
For more information on the Trivy Operator, please refer to the [official documentation](https://aquasecurity.github.io/trivy-operator/latest/)

-# How to deploy?
+## How to deploy?

Trivy Operator is available as part of the KKP's default application catalog.
It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI.

-* Select the Trivy Operator application from the Application Catalog.
+- Select the Trivy Operator application from the Application Catalog.

![Select Trivy Operator Application](01-select-application-trivy-operator-app.png)

-* Under the Settings section, select and provide appropriate details and clck `-> Next` button.
+- Under the Settings section, select and provide appropriate details and click `-> Next` button.

![Settings for Trivy Operator Application](02-settings-trivy-operator-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Trivy Operator application to the user cluster.
+- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Trivy Operator application to the user cluster.

![Application Values for Trivy Operator Application](03-applicationvalues-trivy-operator-app.png)

-* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Trivy Operator application to the user cluster.

To further configure the values.yaml, find more information on the [Trivy Operator Helm chart documentation](https://github.com/aquasecurity/trivy-operator/tree/main/deploy/helm).
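+
+For example, vulnerabilities without an available fix can be filtered out through the chart values; this is a sketch only (the `trivy.ignoreUnfixed` key is assumed from the upstream chart):
+
+```yaml
+trivy:
+  # Report only vulnerabilities that already have a fix available.
+  ignoreUnfixed: true
+```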
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy/_index.en.md index 0638d29ed..5e052daa8 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/applications/default-applications-catalog/trivy/_index.en.md @@ -7,7 +7,7 @@ weight = 8 +++ -# What is Trivy? +## What is Trivy? Trivy is a comprehensive and versatile security scanner. Trivy has scanners that look for security issues, and targets where it can find those issues. @@ -32,19 +32,19 @@ Trivy supports most popular programming languages, operating systems, and platfo For more information on the Trivy, please refer to the [official documentation](https://aquasecurity.github.io/trivy/v0.49/docs/) -# How to deploy? +## How to deploy? Trivy is available as part of the KKP's default application catalog. It can be deployed to the user cluster either during the cluster creation or after the cluster is ready(existing cluster) from the Applications tab via UI. -* Select the Trivy application from the Application Catalog. +- Select the Trivy application from the Application Catalog. ![Select Trivy Application](01-select-application-trivy-app.png) -* Under the Settings section, select and provide appropriate details and clck `-> Next` button. +- Under the Settings section, select and provide appropriate details and click `-> Next` button. ![Settings for Trivy Application](02-settings-trivy-app.png) -* Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Trivy application to the user cluster. +- Under the Application values page section, check the default values and add values if any required to be configured explicitly. Finally click on the `+ Add Application` to deploy the Trivy application to the user cluster. To further configure the values.yaml, find more information on the [Trivy Helm chart documentation](https://github.com/aquasecurity/trivy/tree/main/helm/trivy). diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/cluster-templates/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/cluster-templates/_index.en.md index 792fb6880..a62adbfee 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/cluster-templates/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/cluster-templates/_index.en.md @@ -7,6 +7,7 @@ weight = 1 +++ ## Understanding Cluster Templates + Cluster templates are designed to standardize and simplify the creation of Kubernetes clusters. A cluster template is a reusable cluster template object. It guarantees that every cluster it provisions from the template is uniform and consistent in the way it is produced. @@ -15,22 +16,26 @@ A cluster template allows you to specify a provider, node layout, and configurat via Kubermatic API or UI. ## Scope + The cluster templates are accessible from different levels. 
- - global: (managed by admin user) visible to everyone - - project: accessible to the project users - - user: accessible to the template owner in every project, where the user is in the owner or editor group + +- global: (managed by admin user) visible to everyone +- project: accessible to the project users +- user: accessible to the template owner in every project, where the user is in the owner or editor group Template management is available from project level. The regular user with owner or editor privileges can create template in project or user scope. The admin user can create a template for every project in every scope. Template in `global` scope can be created only by admins. ## Credentials + Creating a cluster from the template requires credentials to authenticate with the cloud provider. During template creation, the credentials are stored in the secret which is assigned to the cluster template. The credential secret is independent. It's just a copy of credentials specified manually by the user or taken from the preset. Any credentials update must be processed on the cluster template. ## Creating and Using Templates + Cluster templates can be created from scratch to pre-define the cluster configuration. The whole process is done in the UI wizard for the cluster creation. During the cluster creation process, the end user can pick a template and specify the desired number of cluster instances. diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/kkp-security/pod-security-policy/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/kkp-security/pod-security-policy/_index.en.md index 30bdc650c..4b120a99e 100644 --- a/content/kubermatic/main/architecture/concept/kkp-concepts/kkp-security/pod-security-policy/_index.en.md +++ b/content/kubermatic/main/architecture/concept/kkp-concepts/kkp-security/pod-security-policy/_index.en.md @@ -6,7 +6,7 @@ weight = 130 [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/), (PSP), is a key security feature in Kubernetes. It allows cluster administrators to set [granular controls](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-reference) over security sensitive aspects of pod and container specs. -PSP is implemented using an optional admission controller that's disabled by default. It's important to have an initial authorizing policy on the cluster _before_ enabling the PSP admission controller. +PSP is implemented using an optional admission controller that's disabled by default. It's important to have an initial authorizing policy on the cluster *before* enabling the PSP admission controller. This is also true for existing clusters. Without an authorizing policy, the controller will prevent all pods from being created on the cluster. PSP objects are cluster-level objects. They define a set of conditions that a pod must pass to be accepted by the PSP admission controller. The most common way to apply this is using RBAC. For a pod to use a specific Pod Security Policy, the pod should run using a Service Account or a User that has `use` permission to that particular Pod Security policy. @@ -29,12 +29,12 @@ For existing clusters, it's also possible to enable/disable PSP: ![Edit Cluster](@/images/ui/psp-edit.png?classes=shadow,border "Edit Cluster") - {{% notice note %}} Activating Pod Security Policy will mean that a lot of Pod specifications, Operators and Helm charts will not work out of the box. KKP will apply a default authorizing policy to prevent this. 
Additionally, all KKP user-clusters are configured to be compatible with enabled PSPs.
Make sure that you know the consequences of activating this feature on your workloads.
{{% /notice %}}

### Datacenter Level Support
+
It is also possible to enforce enabling Pod Security Policies on the datacenter level. In this case, user cluster level configuration will be ignored, and PSP will be enabled for all user clusters in the datacenter.

To enable this, you will need to update your [Seed Cluster CRD]({{< ref "../../../../../tutorials-howtos/project-and-cluster-management/seed-cluster" >}}), and set `enforcePodSecurityPolicy` to `true` in the datacenter spec.
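+
+A sketch of the relevant part of the Seed spec (the datacenter name is illustrative):
+
+```yaml
+spec:
+  datacenters:
+    my-datacenter:
+      spec:
+        enforcePodSecurityPolicy: true
+```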
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/networking/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/networking/_index.en.md
index 9afdaa93d..c291cfba4 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/networking/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/networking/_index.en.md
@@ -13,7 +13,6 @@ The [expose strategy]({{< ref "../../../../tutorials-howtos/networking/expose-st
This section explains how the connection between user clusters and the control plane is established, as well as the general networking concept in KKP.

-
![KKP Network](images/network.png?classes=shadow,border "This diagram illustrates the necessary connections for KKP.")

The following diagrams illustrate all available [expose strategy]({{< ref "../../../../tutorials-howtos/networking/expose-strategies" >}}) available in KKP.
@@ -33,11 +32,11 @@ Any port numbers marked with * are overridable, so you will need to ensure any c
** Default port range for [NodePort Services](https://kubernetes.io/docs/concepts/services-networking/service/).
All ports listed are using TCP.

-#### Worker Nodes
+### Worker Nodes

Worker nodes in user clusters must have full connectivity to each other to ensure the functionality of various components, including different Container Network Interfaces (CNIs) and Container Storage Interfaces (CSIs) supported by KKP.

-#### API Server
+### API Server

For each user cluster, an API server is deployed in the Seed and exposed depending on the chosen expose strategy. Its purpose is not only to make the apiserver accessible to users, but also to ensure the proper functioning of the cluster.
@@ -46,7 +45,7 @@ In addition, the apiserver is used for [in-cluster API](https://kubernetes.io/do
In Tunneling mode, to forward traffic to the correct apiserver, an envoy proxy is deployed on each node, serving as an endpoint for the Kubernetes cluster service to proxy traffic to the apiserver.

-#### Kubernetes Konnectivity proxy
+### Kubernetes Konnectivity proxy

To enable Kubernetes to work properly, parts of the control plane need to be connected to the internal Kubernetes cluster network. This is done via the [konnectivity proxy](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/), which is deployed for each cluster.
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/resource-quotas/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/resource-quotas/_index.en.md
index 5b2ac7022..3ddf9ef74 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/resource-quotas/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/resource-quotas/_index.en.md
@@ -10,6 +10,7 @@ Resource Quotas in KKP allow administrators to set quotas on the amount of resou
subject which is supported is Project, so the resource quotas currently limit the amount of resources that can be used project-wide.

The resources in question are the resources of the user cluster:
+
- CPU - the cumulated CPU used by the nodes on all clusters.
- Memory - the cumulated RAM used by the nodes on all clusters.
- Storage - the cumulated disk size of the nodes on all clusters.
@@ -21,12 +22,12 @@ This feature is available in the EE edition only.
That one just controls the size of the machines suggested to users in the KKP Dashboard during the cluster creation.
{{% /notice %}}

-
## Setting up Resource Quotas

The resource quotas are managed by administrators either through the KKP UI/API or through the Resource Quota CRDs.

Example ResourceQuota:
+
```yaml
apiVersion: kubermatic.k8c.io/v1
kind: ResourceQuota
@@ -53,6 +54,7 @@ set in the ResourceQuota is done automatically by the API.

## Calculating Quota Usage

The ResourceQuota has 2 status fields:
+
- `globalUsage` which shows the resource usage across all seeds
- `localUsage` which shows the resource usage on the local seed

@@ -93,12 +95,10 @@ resulting K8s Node `.status.capacity`.
| Openstack | VCPUs (query to provider) | Memory (query to provider) | Disk (query to provider) |
| KubeVirt | If flavor set: calculate from the provider flavor, otherwise get from the Machine spec | If flavor set: calculate from the provider flavor, otherwise get from the Machine spec | Add up Primary and Secondary disks (from Machine spec) |
| Nutanix | CPU * CPUCores (Machine spec) | MemoryMB (from Machine spec) | DiskSize (from Machine spec) |
-| Equinox | Add up all CPUs (query to provider) | Memory.Total (query to provider) | Add up all Drives (query to provider) |
| vSphere | CPUs (set in Machine spec) | MemoryMB (from Machine spec) | DiskSizeGB (from Machine spec) |
| Anexia | CPUs (set in Machine spec) | Memory (from Machine spec) | DiskSize (from Machine spec) |
| VMWare Cloud Director | CPU * CPUCores (Machine spec) | MemoryMB (from Machine spec) | DiskSizeGB (from Machine spec) |

-
## Enforcing Quotas

The quotas are enforced through a validating webhook on Machine resources in the user clusters. This means that the quota validation
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/service-account-token-projection/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/service-account-token-projection/_index.en.md
index c381e7d3c..1c38fb091 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/service-account-token-projection/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/service-account-token-projection/_index.en.md
@@ -12,14 +12,17 @@
is used by some applications to enhance security when using service accounts
(e.g. [Istio uses it by default](https://istio.io/latest/docs/ops/best-practices/security/#configure-third-party-service-account-tokens) as of version v1.3).
As of KKP version v2.16, KKP supports Service Account Token Volume Projection as follows:
+
- in clusters with Kubernetes version v1.20+, it is enabled by default with the default configuration as described below,
- in clusters with Kubernetes below v1.20, it has to be explicitly enabled.

## Prerequisites
+
`TokenRequest` and `TokenRequestProjection` Kubernetes feature gates have to be enabled (enabled by default since Kubernetes v1.11 and v1.12 respectively).

## Configuration
+
In KKP v2.16, the Service Account Token Volume Projection feature can be configured only via KKP API.

The `Cluster` API object provides the `serviceAccount` field of the `ServiceAccountSettings` type, with the following definition:
@@ -58,8 +61,8 @@ The following table summarizes the supported properties of the `ServiceAccountSe
| `issuer` | Identifier of the service account token issuer. The issuer will assert this identifier in `iss` claim of issued tokens. | The URL of the apiserver, e.g., `https://`. |
| `apiAudiences` | Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. Multiple audiences can be separated by comma (`,`). | Equal to `issuer`. |

-
### Example: Configuration using a Request to KKP API
+
To configure the feature in an existing cluster, execute a `PATCH` request to URL:

`https:///api/v1/projects//dc//clusters/`
@@ -78,8 +81,8 @@ with the following content:

You can use the Swagger UI at `https:///rest-api` to construct and send the API request.

-
### Example: Configuration using Cluster CR
+
Alternatively, the feature can also be configured via the `Cluster` Custom Resource in the KKP seed cluster. For example, to enable the feature in an existing cluster via kubectl, edit the `Cluster` CR with `kubectl edit cluster ` and add the following configuration:
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/using-service-account/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/using-service-account/_index.en.md
index 850b8bb63..89699e749 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/using-service-account/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/service-account/using-service-account/_index.en.md
@@ -37,35 +37,39 @@ You can also change a token name. It is possible to delete a service account tok
You can see when a token was created and when it will expire.

## Using Service Accounts with KKP
+
You can control service account access in your project using the provided groups. There are three basic access level groups:
+
- viewers
- editors
- project managers

-#### Viewers
+### Viewers

**A viewer can:**
- - list projects
- - get project details
- - get project SSH keys
- - list clusters
- - get cluster details
- - get cluster resources details
+
+- list projects
+- get project details
+- get project SSH keys
+- list clusters
+- get cluster details
+- get cluster resources details

Permissions for read-only actions that do not affect state, such as viewing.
+
- viewers are not allowed to interact with service accounts (User)
- viewers are not allowed to interact with members of a project (UserProjectBinding)

-
-#### Editors
+### Editors

**All viewer permissions, plus permissions to create, edit & delete cluster**
- - editors are not allowed to delete a project
- - editors are not allowed to interact with members of a project (UserProjectBinding)
- - editors are not allowed to interact with service accounts (User)

+- editors are not allowed to delete a project
+- editors are not allowed to interact with members of a project (UserProjectBinding)
+- editors are not allowed to interact with service accounts (User)
+
-#### Project Managers
+### Project Managers

**The `project managers` group is service-account-specific. It allows**
@@ -90,6 +94,6 @@ Authorization: Bearer aaa.bbb.ccc

You can also use the `curl` command to reach the API endpoint:

-```
+```bash
curl -i -H "Accept: application/json" -H "Authorization: Bearer aaa.bbb.ccc" -X GET http://localhost:8080/api/v2/projects/jnpllgp66z/clusters
```
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/_index.en.md
index d63f1765c..8877d168d 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/_index.en.md
@@ -9,15 +9,18 @@ Get information on how to get the most out of the Kubermatic Dashboard, the offi

![Admin Panel](dashboard.png?height=400px&classes=shadow,border "Kubermatic Dashboard")

## Preparing New Themes
+
A set of [tutorials]({{< ref "./theming" >}}) that will teach you how to prepare custom themes and apply them to be used by the KKP Dashboard.

## Admin Panel
+
The Admin Panel is a place for the Kubermatic administrators where they can manage the global settings that directly impact all Kubermatic users.
Check out the [Admin Panel]({{< ref "../../../../tutorials-howtos/administration/admin-panel" >}}) section for more details.

## Theming
+
Theme and customize the KKP Dashboard according to your needs, but be aware that theming capabilities are available in the Enterprise Edition only.

Check out [Customizing the Dashboard]({{< ref "../../../../tutorials-howtos/dashboard-customization" >}}) section
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/with-src/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/with-src/_index.en.md
index 4ad79d5d1..d3f004519 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/with-src/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/with-src/_index.en.md
@@ -20,21 +20,20 @@ All available themes can be found inside `src/assets/themes` directory. Follow t

- `name` - refers to the theme file name stored inside `assets/themes` directory.
- `displayName` - will be used by the theme picker available in the `Account` view to display a new theme.
- `isDark` - defines the icon to be used by the theme picker (sun/moon).
-  ```json
-  {
-    "openstack": {
-      "wizard_use_default_user": false
-    },
-    "themes": [
-      {
-        "name": "custom",
-        "displayName": "Custom",
-        "isDark": false
-      }
-    ]
-  }
-  ```
-
+  ```json
+  {
+    "openstack": {
+      "wizard_use_default_user": false
+    },
+    "themes": [
+      {
+        "name": "custom",
+        "displayName": "Custom",
+        "isDark": false
+      }
+    ]
+  }
+  ```

- Make sure that the theme is registered in the `angular.json` file before running the application locally. It is done for the `custom` theme by default.

- Run the application using `npm start`, open the `Account` view under `User settings`, select your new theme and update `custom.scss` according to your needs. It is possible to override basically everything inside this theme file. For example, if you want to change the background color of a `mat-form-field`, do this:
diff --git a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/without-src/_index.en.md b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/without-src/_index.en.md
index cab747001..889161814 100644
--- a/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/without-src/_index.en.md
+++ b/content/kubermatic/main/architecture/concept/kkp-concepts/user-interface/theming/without-src/_index.en.md
@@ -6,42 +6,49 @@ weight = 50
+++

### Preparing a New Theme Without Access to the Sources
+
In this case, the easiest way of preparing a new theme is to download one of the existing themes (light or dark). This can be done in a few different ways. We'll describe here two possible ways of downloading enabled themes.

#### Download Theme Using the Browser
+
1. Open KKP UI
-2. Open `Developer tools` and navigate to `Sources` tab.
-3. There should be a CSS file of a currently selected theme available to be downloaded inside `assts/themes` directory.
+1. Open `Developer tools` and navigate to `Sources` tab.
+1. There should be a CSS file of the currently selected theme available to be downloaded inside the `assets/themes` directory.

![Dev tools](@/images/ui/developer-tools.png?height=300px&classes=shadow,border "Dev tools")

#### Download Themes Directly From the KKP Dashboard container
+
Assuming that you know how to exec into the container and copy resources from/to it, themes can be simply copied over to your machine from the running KKP Dashboard container. They are stored inside the container in `dist/assets/themes` directory.

##### Kubernetes
+
Assuming that the KKP Dashboard pod name is `kubermatic-dashboard-5b96d7f5df-mkmgh` you can copy themes to your `${HOME}/themes` directory using the command below:
+
```bash
kubectl -n kubermatic cp kubermatic-dashboard-5b96d7f5df-mkmgh:/dist/assets/themes ~/themes
```

##### Docker
+
Assuming that the KKP Dashboard container name is `kubermatic-dashboard` you can copy themes to your `${HOME}/themes` directory using the command below:
+
```bash
docker cp kubermatic-dashboard:/dist/assets/themes/. ~/themes
```

#### Using Compiled Theme to Prepare a New Theme
+
Once you have a base theme file ready, we can use it to prepare a new theme. To make the process easier to understand, let's assume that we have downloaded a `light.css` file and will be preparing a new theme called `solar.css`.

1. Rename `light.css` to `solar.css`.
-2. Update `solar.css` file according to your needs. Anything in the file can be changed or new rules can be added.
+1. Update the `solar.css` file according to your needs. Anything in the file can be changed or new rules can be added.
In case you are changing colors, remember to update them throughout the whole file.
-3. Mount new `solar.css` file to `dist/assets/themes` directory inside the application container. **Make sure not to override whole directory.**
-4. Update `config.json` file inside `dist/config` directory and register the new theme.
-
+1. Mount the new `solar.css` file to the `dist/assets/themes` directory inside the application container. **Make sure not to override the whole directory.**
+1. Update the `config.json` file inside the `dist/config` directory and register the new theme.

```json
{
  "openstack": {
diff --git a/content/kubermatic/main/architecture/editions/_index.md b/content/kubermatic/main/architecture/editions/_index.md
index be7bd50de..49d905144 100644
--- a/content/kubermatic/main/architecture/editions/_index.md
+++ b/content/kubermatic/main/architecture/editions/_index.md
@@ -23,7 +23,6 @@ Kubermatic Kubernetes Platform (KKP) is an open-source product, and both the Com
|      KubeVirt | ✔ | ✔ | |
|      Nutanix | ✔ | ✔ | |
|      OpenStack | ✔ | ✔ | |
-|      Equinix Metal (formerly Packet) | ✔ | ✔ | |
|      VMware Cloud Director | ✔ | ✔ | |
|      vSphere | ✔ | ✔ | |
|      Bare Metal (Tinkerbell) | ✔ | ✔ | |
diff --git a/content/kubermatic/main/architecture/feature-stages/_index.en.md b/content/kubermatic/main/architecture/feature-stages/_index.en.md
index f897eec92..ece457538 100644
--- a/content/kubermatic/main/architecture/feature-stages/_index.en.md
+++ b/content/kubermatic/main/architecture/feature-stages/_index.en.md
@@ -15,7 +15,6 @@ weight = 4
- The whole feature can be revoked immediately and without notice
- Recommended only for testing and providing feedback

-
## Beta / Technical Preview

- Targeted users: experienced KKP administrators
@@ -27,7 +26,6 @@ weight = 4
- The whole feature can still be revoked, but with prior notice and respecting a deprecation cycle
- Recommended for only non-business-critical uses, testing usability, performance, and compatibility in real-world environments

-
## General Availability (GA)

- Users: All users
diff --git a/content/kubermatic/main/architecture/iam-role-based-access-control/_index.en.md b/content/kubermatic/main/architecture/iam-role-based-access-control/_index.en.md
index 68cebea75..8719ec556 100644
--- a/content/kubermatic/main/architecture/iam-role-based-access-control/_index.en.md
+++ b/content/kubermatic/main/architecture/iam-role-based-access-control/_index.en.md
@@ -11,23 +11,26 @@ By default, KKP provides [Dex](#authentication-with-dex) as OIDC provider, but y
please refer to the [OIDC provider]({{< ref "../../tutorials-howtos/oidc-provider-configuration" >}}) chapter.

## Authentication with Dex
+
[Dex](https://dexidp.io/) is an identity service that uses OIDC to drive authentication for KKP components. It acts as a portal to other identity providers through [connectors](https://dexidp.io/docs/connectors/). This lets Dex defer authentication to these connectors. Multiple connectors may be configured at the same time.
Most popular are:
-* [GitHub](https://dexidp.io/docs/connectors/github/)
-* [Google](https://dexidp.io/docs/connectors/google/)
-* [LDAP](https://dexidp.io/docs/connectors/ldap/)
-* [Microsoft](https://dexidp.io/docs/connectors/microsoft/)
-* [OAuth 2.0](https://dexidp.io/docs/connectors/oauth/)
-* [OpenID Connect](https://dexidp.io/docs/connectors/oidc/)
-* [SAML2.0](https://dexidp.io/docs/connectors/saml/)
+
+- [GitHub](https://dexidp.io/docs/connectors/github/)
+- [Google](https://dexidp.io/docs/connectors/google/)
+- [LDAP](https://dexidp.io/docs/connectors/ldap/)
+- [Microsoft](https://dexidp.io/docs/connectors/microsoft/)
+- [OAuth 2.0](https://dexidp.io/docs/connectors/oauth/)
+- [OpenID Connect](https://dexidp.io/docs/connectors/oidc/)
+- [SAML2.0](https://dexidp.io/docs/connectors/saml/)

Check out the [Dex documentation](https://dexidp.io/docs/connectors/) for a list of available providers and how to set up their configuration.

To configure Dex connectors, edit `.dex.connectors` in the `values.yaml`.

Example to update or set up a GitHub connector:
-```
+
+```yaml
dex:
  ingress:
    [...]
@@ -50,17 +53,18 @@ And apply the changes to the cluster:
```

## Authorization
+
Authorization is managed at multiple levels to ensure users only have access to authorized resources.

KKP uses its own authorization system to control access to various resources within the platform, including projects and clusters. Administrators and project owners define and manage these policies and provide specific access control rules for users and groups.

-
The Kubernetes Role-Based Access Control (RBAC) system is also used to control access to user cluster level resources, such as namespaces, pods, and services. Please refer to [Cluster Access]({{< ref "../../tutorials-howtos/cluster-access" >}}) to configure RBAC.

### Kubermatic Kubernetes Platform (KKP) Users
+
There are two kinds of users in KKP: **admin** and **non-admin** users.

**Admin** users can manage settings that impact the whole Kubermatic installation and users. For example, they can set default
diff --git a/content/kubermatic/main/architecture/known-issues/_index.en.md b/content/kubermatic/main/architecture/known-issues/_index.en.md
index a895a4d47..a2c0f2a0c 100644
--- a/content/kubermatic/main/architecture/known-issues/_index.en.md
+++ b/content/kubermatic/main/architecture/known-issues/_index.en.md
@@ -1,20 +1,59 @@
+++
title = "Known Issues"
-date = 2022-07-22T12:22:15+02:00
+date = 2025-10-22T12:00:00+02:00
weight = 25
+++

## Overview

-This page documents the list of known issues and possible work arounds/solutions.
+This page documents the list of known issues and possible workarounds/solutions.

-## Oidc refresh tokens are invalidated when the same user/client id pair is authenticated multiple times
+## Cilium 1.18 fails installation on older Ubuntu 22.04 kernels
+
+_**Affected Components**_: Cilium 1.18.x deployed as a system application on User Clusters
+
+_**Affected OS Image**_: `Ubuntu 22.04.1 LTS (GNU/Linux 5.15.0-47-generic x86_64)`

### Problem

-For oidc authentication to user cluster there is always the same issuer used. This leads to invalidation of refresh tokens when a new authentication happens with the same user because existing refresh tokens for the same user/client pair are invalidated when a new one is requested.
+Clusters running on Ubuntu 22.04 nodes with the kernel version `5.15.0-47-generic` experience Cilium pod failures.
During initialization, the Cilium agent is unable to load certain eBPF programs (`tail_nodeport_nat_egress_ipv4`) into the kernel due to a verifier bug in older kernel versions.
+The kernel verifier will report:
+
+```bash
+error="attaching cilium_host: loading eBPF collection into the kernel:
+program tail_nodeport_nat_egress_ipv4: load program:
+permission denied: 1074: (71) r1 = *(u8 *)(r2 +23): R2 invalid mem access 'inv' (665 line(s) omitted)"
+```
+
+Because of this issue, the `cilium-agent` pods fail and the `hubble-generate-certs` jobs time out when attempting to create the CA secrets in the specified namespace.
+
+### Root Cause
+
+Ubuntu's 5.15.0-47 kernel (and older builds) lacks critical eBPF verifier precision-propagation fixes. Cilium 1.18 has datapath programs that depend on these verifier improvements.
+
+### Workarounds
+
+1. On cluster creation in KKP, enable the option to `Upgrade system on first boot`. For existing clusters, you can edit the machine deployment and enable the `Upgrade system on first boot` option.
+2. Upgrade the kernel on Ubuntu 22.04 nodes:
+
+   ```bash
+   sudo apt update && sudo apt upgrade -y && sudo reboot
+   ```
+
+   The node will boot into **5.15.0-160-generic**, and Cilium starts successfully.
+
+3. For OpenStack, switch the worker image (in your data center provider options) from kubermatic-ubuntu (22.04) to Ubuntu 24.04 LTS (6.8.x kernel).
+
+### Planned resolution
+
+Future Kubermatic images will default to Ubuntu 24.04 to ensure compatibility with newer Cilium releases.
+
+## OIDC refresh tokens are invalidated when the same user/client ID pair is authenticated multiple times
+
+### Problem
+
+For OIDC authentication to a user cluster, the same issuer is always used. This leads to invalidation of refresh tokens when a new authentication happens with the same user, because existing refresh tokens for the same user/client pair are invalidated when a new one is requested.

### Root Cause
@@ -26,36 +65,44 @@ One example would be to download a kubeconfig of one cluster and then of another

You can either change this in the Dex configuration by setting `userIDKey` to `jti` in the connector section, or you can configure another OIDC provider that supports multiple refresh tokens per user/client pair, as Keycloak does by default.

-#### dex
+#### Dex

The following YAML snippet is an example of how to configure an OIDC connector to keep the refresh tokens.

```yaml
connectors:
-  - config:
+  - id: oidc
+    name: OIDC
+    type: oidc
+    config:
      clientID:
      clientSecret:
-      orgs:
-        - name:
-      redirectURI: https://kubermatic.test/dex/callback
-    id: github
-    name: GitHub
-    type: github
-    userIDKey: jti
-    userNameKey: email
+      redirectURI: https://kkp.example.com/dex/callback
+      scopes:
+        - openid
+        - profile
+        - email
+        - offline_access
+      # Workaround to support multiple user_id/client_id pairs concurrently
+      # Configurable key for user ID look up
+      # Default: id
+      userIDKey: <>
+      # Optional: Configurable key for user name look up
+      # Default: user_name
+      userNameKey: <>
```

-#### external provider
+#### External provider

For an explanation of how to configure an OIDC provider other than Dex, take a look at [oidc-provider-configuration]({{< ref "../../tutorials-howtos/oidc-provider-configuration" >}}).

-### security implications regarding dex solution
+### Security implications of the Dex solution

For Dex this has some implications. With this configuration a token is generated for each user session.
The number of refresh token objects stored in Kubernetes is no longer limited. The principle that one refresh token belongs to one user/client pair is a security consideration that would be ignored in that case. The only way to revoke a refresh token is then via the gRPC API, which is not exposed by default, or by manually deleting the related RefreshToken resource in the Kubernetes cluster.

## API server Overload Leading to Instability in Seed due to Konnectivity

-Issue: https://github.com/kubermatic/kubermatic/issues/13321
+Issue: <https://github.com/kubermatic/kubermatic/issues/13321>

Status: Fixed
diff --git a/content/kubermatic/main/architecture/monitoring-logging-alerting/master-seed/_index.en.md b/content/kubermatic/main/architecture/monitoring-logging-alerting/master-seed/_index.en.md
index f0c3d49bf..9392508e1 100644
--- a/content/kubermatic/main/architecture/monitoring-logging-alerting/master-seed/_index.en.md
+++ b/content/kubermatic/main/architecture/monitoring-logging-alerting/master-seed/_index.en.md
@@ -11,31 +11,32 @@ It uses [Prometheus](https://prometheus.io) and its [Alertmanager](https://prome

## Overview

-There is a single Prometheus service in each seed cluster's `monitoring` namespace, which is responsible for monitoring the cluster's components (like the KKP controller manager) and serves as the main datasource for the accompanying Grafana service. Besides that there is a Prometheus inside each user cluster namespace, which in turn monitors the Kubernetes control plane (apiserver, controller manager, etcd cluster etc.) of that user cluster. The seed-level Prometheus scrapes all the user cluster Prometheus instances and combines their metrics for creating the dashboards in Grafana.
+There is a single Prometheus service in each seed cluster's `monitoring` namespace, which is responsible for monitoring the cluster's components (like the KKP controller manager) and serves as the main datasource for the accompanying Grafana service. Besides that there is a Prometheus inside each user cluster namespace, which in turn monitors the Kubernetes control plane (apiserver, controller manager, etcd cluster, etc.) of that user cluster. The seed-level Prometheus scrapes all the user cluster Prometheus instances and combines their metrics to create the dashboards in Grafana.

-Along the seed-level Prometheus, there is a single alertmanager running in the seed, which *all* Prometheus instances are using to relay their alerts (i.e. the Prometheus inside the user clusters send their alerts to the seed cluster's alertmanager).
+Alongside the seed-level Prometheus, there is a single Alertmanager running in the seed, which *all* Prometheus instances are using to relay their alerts (i.e., the Prometheus that monitors a user cluster from its namespace within the seed sends its alerts to the seed cluster's Alertmanager).

![Monitoring architecture diagram](architecture.png)

## Federation

-The seed-level Prometheus uses Prometheus' native federation mechanism to scrape the user cluster Prometheus instances. To prevent excessive amount of data in the seed, it will however only scrape a few selected metrics, namely those labelled with `kubermatic=federate`.
+The seed-level Prometheus uses Prometheus' native federation mechanism to scrape the user cluster Prometheus instances. To prevent an excessive amount of data in the seed, it will, however, only scrape a few selected metrics, namely those labelled with `kubermatic=federate`.
The last of these options is used for pre-aggregated metrics, which combine highly detailed time series (like from etcd) into smaller, easier to handle metrics that can be readily used inside Grafana.

## Grafana

In a default KKP installation, we ship Grafana as a *readonly* metrics dashboard.

-When working with Grafana please keep in mind, that **ALL CHANGES** done using the Grafana UI (like adding datasources, etc.) **WILL NOT BE PERSISTED**. Dashboards, graphs, datasources, etc. will be defined using the Helm chart.
+When working with Grafana, please keep in mind that **ALL CHANGES** done using the Grafana UI (like adding datasources, etc.) **WILL NOT BE PERSISTED**. Dashboards, graphs, datasources, etc. will be defined using the Helm chart.

## Storage Requirements

-Depending on how user clusters are used, disk usage for Prometheus can vary greatly. As the operator you should however plan for
+Depending on how user clusters are used, disk usage for Prometheus can vary greatly. As the operator, you should however plan for

-* 100 MiB used by the seed-level Prometheus for each user cluster
-* 50-300 MiB used by the user-level Prometheus, depending on its WAL size.
+- 100 MiB used by the seed-level Prometheus for each user cluster
+- 50-300 MiB used by the user-level Prometheus, depending on its WAL size.

-These values can also vary, if you tweak the retention periods.
+These values can also vary if you tweak the retention periods.

## Installation
+
Please follow the [Installation of the Master / Seed MLA Stack Guide]({{< relref "../../../tutorials-howtos/monitoring-logging-alerting/master-seed/installation/" >}}).
diff --git a/content/kubermatic/main/architecture/monitoring-logging-alerting/user-cluster/_index.en.md b/content/kubermatic/main/architecture/monitoring-logging-alerting/user-cluster/_index.en.md
index 38c5ad986..206380b0a 100644
--- a/content/kubermatic/main/architecture/monitoring-logging-alerting/user-cluster/_index.en.md
+++ b/content/kubermatic/main/architecture/monitoring-logging-alerting/user-cluster/_index.en.md
@@ -25,11 +25,13 @@ Unlike the [Master / Seed Cluster MLA stack]({{< ref "../master-seed/">}}), it i

![Monitoring architecture diagram](architecture.png)

### User Cluster Components
+
When User Cluster MLA is enabled in a KKP user cluster, it automatically deploys two components into it - Prometheus and Loki Promtail. These components are configured to stream (remote write) the logs and metrics into backends running in the Seed Cluster (Cortex for metrics and Loki-Distributed for logs). The connection between the user cluster components and Seed cluster components is secured by HTTPS with mutual TLS certificate authentication.

This makes the MLA setup in user clusters very simple and low footprint, as no MLA data is stored in the user clusters and user clusters are not involved when doing data lookups. Data of all user clusters can be accessed from a central place (Grafana UI) in the Seed Cluster.

### Seed Cluster Components
+
As mentioned above, metrics and logs data from all user clusters are streamed into their Seed Cluster, where they are processed and stored in a long term object store (Minio). Data can be looked up in a multi-tenant Grafana instance which is running in the Seed, and provides each user a view of the metrics and logs of all clusters they have privileges to access in the KKP platform.
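The remote-write connection described in the User Cluster Components section above is plain Prometheus configuration. As a rough, hypothetical sketch (the gateway URL and certificate paths are placeholders; the actual configuration is generated by KKP), the user cluster Prometheus could stream its metrics like this:

```yaml
remote_write:
  - url: https://mla-gateway.example.com/api/v1/push  # hypothetical MLA Gateway endpoint in the seed
    tls_config:
      # client certificate and key used for the mutual TLS authentication
      cert_file: /etc/ssl/mla/client.crt
      key_file: /etc/ssl/mla/client.key
```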
**MLA Gateway**:
@@ -47,4 +49,5 @@ The backend for processing, storing and retrieving metrics data from user Cluster

The backend for processing, storing and retrieving logs data from user clusters is based on the [Loki](https://grafana.com/docs/loki/latest/) - distributed deployment. It allows horizontal scalability of individual Loki components that can be fine-tuned to fit any use-case. For more details about Loki architecture, please refer to the [Loki Architecture](https://grafana.com/docs/loki/latest/architecture/) documentation.

## Installation
+
Please follow the [User Cluster MLA Stack Admin Guide]({{< relref "../../../tutorials-howtos/monitoring-logging-alerting/user-cluster/admin-guide/" >}}).
diff --git a/content/kubermatic/main/architecture/requirements/cluster-requirements/_index.en.md b/content/kubermatic/main/architecture/requirements/cluster-requirements/_index.en.md
index ed1461203..8aa7eb68e 100644
--- a/content/kubermatic/main/architecture/requirements/cluster-requirements/_index.en.md
+++ b/content/kubermatic/main/architecture/requirements/cluster-requirements/_index.en.md
@@ -6,39 +6,43 @@ weight = 15
+++

## Master Cluster
+
The Master Cluster hosts the KKP components and might also act as a seed cluster and host the master components of user clusters (see [Architecture]({{< ref "../../../architecture/">}})). Therefore, it should run in a highly-available setup with at least 3 master nodes and 3 worker nodes.

**Minimal Requirements:**
-* Six or more machines running one of:
-  * Ubuntu 20.04+
-  * Debian 10
-  * RHEL 7
-  * Flatcar
-* 4 GB or more of RAM per machine (any less will leave little room for your apps)
-* 2 CPUs or more
+
+- Six or more machines running one of:
+  - Ubuntu 20.04+
+  - Debian 10
+  - RHEL 7
+  - Flatcar
+- 4 GB or more of RAM per machine (any less will leave little room for your apps)
+- 2 CPUs or more

## User Cluster
+
The User Cluster is a Kubernetes cluster created and managed by KKP. The exact requirements may depend on the type of workloads that will be running in the user cluster.

**Minimal Requirements:**
-* One or more machines running one of:
-  * Ubuntu 20.04+
-  * Debian 10
-  * RHEL 7
-  * Flatcar
-* 2 GB or more of RAM per machine (any less will leave little room for your apps)
-* 2 CPUs or more
-* Full network connectivity between all machines in the cluster (public or private network is fine)
-* Unique hostname, MAC address, and product\_uuid for every node. See more details in the next [**topic**](#Verify-the-MAC-Address-and-product-uuid-Are-Unique-for-Every-Node).
-* Certain ports are open on your machines. See below for more details.
-* Swap disabled. You **MUST** disable swap in order for the kubelet to work properly.
+
+- One or more machines running one of:
+  - Ubuntu 20.04+
+  - Debian 10
+  - RHEL 7
+  - Flatcar
+- 2 GB or more of RAM per machine (any less will leave little room for your apps)
+- 2 CPUs or more
+- Full network connectivity between all machines in the cluster (public or private network is fine)
+- Unique hostname, MAC address, and product\_uuid for every node. See more details in the next [**topic**](#verify-node-uniqueness).
+- Certain ports are open on your machines. See below for more details.
+- Swap disabled. You **MUST** disable swap in order for the kubelet to work properly.

## Verify Node Uniqueness

You will need to verify that MAC address and `product_uuid` are unique on every node.
This should usually be the case but might not be, especially for on-premise providers. -* You can get the MAC address of the network interfaces using the command `ip link` or `ifconfig -a` -* The product\_uuid can be checked by using the command `sudo cat /sys/class/dmi/id/product_uuid` +- You can get the MAC address of the network interfaces using the command `ip link` or `ifconfig -a` +- The product\_uuid can be checked by using the command `sudo cat /sys/class/dmi/id/product_uuid` It is very likely that hardware devices will have unique addresses, although some virtual machines may have identical values. Kubernetes uses these values to uniquely identify the nodes in the cluster. If these values are not unique to each node, the installation process may [fail](https://github.com/kubernetes/kubeadm/issues/31). diff --git a/content/kubermatic/main/architecture/requirements/storage/_index.en.md b/content/kubermatic/main/architecture/requirements/storage/_index.en.md index 832624b73..8c8e90ef4 100644 --- a/content/kubermatic/main/architecture/requirements/storage/_index.en.md +++ b/content/kubermatic/main/architecture/requirements/storage/_index.en.md @@ -6,7 +6,7 @@ weight = 15 +++ -Running KKP requires at least one persistent storage layer that can be accessed via a Kubernetes [CSI driver](https://kubernetes-csi.github.io/docs/drivers.html). The Kubermatic Installer attempts to discover pre-existing CSI drivers for known cloud providers to create a suitable _kubermatic-fast_ `StorageClass`. +Running KKP requires at least one persistent storage layer that can be accessed via a Kubernetes [CSI driver](https://kubernetes-csi.github.io/docs/drivers.html). The Kubermatic Installer attempts to discover pre-existing CSI drivers for known cloud providers to create a suitable *kubermatic-fast* `StorageClass`. In particular for setups in private datacenters, setting up a dedicated storage layer might be necessary to reach adequate performance. Make sure to configure and install the corresponding CSI driver (from the list linked above) for your storage solution onto the KKP Seed clusters before installing KKP. diff --git a/content/kubermatic/main/architecture/supported-providers/_index.en.md b/content/kubermatic/main/architecture/supported-providers/_index.en.md index 8569c79f4..7a2c6941f 100644 --- a/content/kubermatic/main/architecture/supported-providers/_index.en.md +++ b/content/kubermatic/main/architecture/supported-providers/_index.en.md @@ -13,7 +13,6 @@ Kubermatic Kubernetes Platform supports a multitude of different cloud providers * AWS (excluding AWS GovCloud and China Cloud) * Azure (excluding GovCloud and China Cloud) -* Equinix Metal * GCP (excluding GovCloud and China Cloud) * vSphere beginning with v6.5 * OpenStack (Releases with maintenance or extended maintenance) diff --git a/content/kubermatic/main/architecture/supported-providers/azure/_index.en.md b/content/kubermatic/main/architecture/supported-providers/azure/_index.en.md index e432e279d..9687f2119 100644 --- a/content/kubermatic/main/architecture/supported-providers/azure/_index.en.md +++ b/content/kubermatic/main/architecture/supported-providers/azure/_index.en.md @@ -25,7 +25,7 @@ az account show --query id -o json Create a role that is used by the service account. 
-```
+```text
az role definition create --role-definition '{
  "Name": "Kubermatic",
  "Description": "Manage VM and Networks as well to manage Resource Groups and Tags",
@@ -47,7 +47,7 @@ Get your Tenant ID

az account show --query tenantId -o json
```

-create a new app with
+Create a new app with

```bash
az ad sp create-for-rbac --role="Kubermatic" --scopes="/subscriptions/********-****-****-****-************"
@@ -73,6 +73,7 @@ Enter provider credentials using the values from step "Prepare Azure Environment
- `Subscription ID`: your subscription ID

### Resources cleanup
+
During the machines cleanup, if KKP's Machine-Controller failed to delete the Cloud Provider instance and the user deleted that instance manually, Machine-Controller won't be able to delete any resources referenced by that machine, such as Public IPs, Disks and NICs. In that case, the user should clean up those resources manually, because Azure won't clean up
diff --git a/content/kubermatic/main/architecture/supported-providers/baremetal/_index.en.md b/content/kubermatic/main/architecture/supported-providers/baremetal/_index.en.md
index 71a803bdc..bcf96a864 100644
--- a/content/kubermatic/main/architecture/supported-providers/baremetal/_index.en.md
+++ b/content/kubermatic/main/architecture/supported-providers/baremetal/_index.en.md
@@ -17,12 +17,13 @@ KKP’s Baremetal provider uses Tinkerbell to automate the setup and management

With Tinkerbell, the provisioning process is driven by workflows that ensure each server is configured according to the desired specifications. Whether you are managing servers in a single location or across multiple data centers, Tinkerbell provides a reliable and automated way to manage your physical infrastructure, making it as easy to handle as cloud-based resources.

## Requirement
+
To successfully use the KKP Baremetal provider with Tinkerbell, ensure the following:

-* **Tinkerbell Cluster**: A working Tinkerbell cluster must be in place.
-* **Direct Access to Servers**: You must have access to your bare-metal servers, allowing you to provision and manage them.
-* **Network Connectivity**: Establish a network connection between the API server of Tinkerbell cluster and the KKP seed cluster. This allows the Kubermatic Machine Controller to communicate with the Tinkerbell stack.
-* **Tinkerbell Hardware Objects**: Create Hardware Objects within Tinkerbell that represent each bare-metal server you want to provision as a worker node in your Kubernetes cluster.
+- **Tinkerbell Cluster**: A working Tinkerbell cluster must be in place.
+- **Direct Access to Servers**: You must have access to your bare-metal servers, allowing you to provision and manage them.
+- **Network Connectivity**: Establish a network connection between the API server of the Tinkerbell cluster and the KKP seed cluster. This allows the Kubermatic Machine Controller to communicate with the Tinkerbell stack.
+- **Tinkerbell Hardware Objects**: Create Hardware Objects within Tinkerbell that represent each bare-metal server you want to provision as a worker node in your Kubernetes cluster.

## Usage

@@ -53,9 +54,9 @@ In Tinkerbell, Hardware Objects represent your physical bare-metal servers. To s

Before proceeding, ensure you gather the following information for each server:

-* **Disk Devices**: Specify the available disk devices, including bootable storage.
-* **Network Interfaces**: Define the network interfaces available on the server, including MAC addresses and interface names.
-* **Network Configuration**: Configure the IP addresses, gateways, and DNS settings for the server's network setup.
+- **Disk Devices**: Specify the available disk devices, including bootable storage.
+- **Network Interfaces**: Define the network interfaces available on the server, including MAC addresses and interface names.
+- **Network Configuration**: Configure the IP addresses, gateways, and DNS settings for the server's network setup.

It’s essential to allow PXE booting and workflows for the provisioning process. This is done by ensuring the following settings in the hardware spec object:

@@ -68,6 +69,7 @@ netboot:
This configuration allows Tinkerbell to initiate network booting and enables iPXE to start the provisioning workflow for your bare-metal server.

This is an example of a Hardware Object configuration
+
```yaml
apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
@@ -118,10 +120,10 @@ Once the MachineDeployment is created and reconciled, the provisioning workflow

The Machine Controller generates the necessary actions for this workflow, which are then executed on the bare-metal server by the `tink-worker` container. The key actions include:

-* **Wiping the Disk Devices**: All existing data on the disk will be erased to prepare for the new OS installation.
-* **Installing the Operating System**: The specified OS image (e.g., Ubuntu 20.04 or 22.04) will be installed on the server.
-* **Network Configuration**: The server’s network settings will be configured based on the Hardware Object and the defined network settings.
-* **Cloud-init Propagation**: The Operating System Manager (OSM) will propagate the cloud-init settings to the node to ensure proper configuration of the OS and related services.
+- **Wiping the Disk Devices**: All existing data on the disk will be erased to prepare for the new OS installation.
+- **Installing the Operating System**: The specified OS image (e.g., Ubuntu 20.04 or 22.04) will be installed on the server.
+- **Network Configuration**: The server’s network settings will be configured based on the Hardware Object and the defined network settings.
+- **Cloud-init Propagation**: The Operating System Manager (OSM) will propagate the cloud-init settings to the node to ensure proper configuration of the OS and related services.

Once the provisioning workflow is complete, the bare-metal server will be fully operational as a worker node in the Kubernetes cluster.

@@ -131,4 +133,4 @@ Currently, the baremetal provider only supports Ubuntu as an operating system. Mo

## Future Enhancements

-Currently, the Baremetal provider requires users to manually create Hardware Objects in Tinkerbell and manually boot up bare-metal servers for provisioning. However, future improvements aim to automate these steps to make the process smoother and more efficient. The goal is to eliminate the need for manual intervention by automatically detecting hardware, creating the necessary objects, and initiating the provisioning process without user input. This will make the Baremetal provider more dynamic and scalable, allowing users to manage their infrastructure with even greater ease and flexibility.
\ No newline at end of file
+Currently, the Baremetal provider requires users to manually create Hardware Objects in Tinkerbell and manually boot up bare-metal servers for provisioning. However, future improvements aim to automate these steps to make the process smoother and more efficient.
The goal is to eliminate the need for manual intervention by automatically detecting hardware, creating the necessary objects, and initiating the provisioning process without user input. This will make the Baremetal provider more dynamic and scalable, allowing users to manage their infrastructure with even greater ease and flexibility.
diff --git a/content/kubermatic/main/architecture/supported-providers/edge/_index.en.md b/content/kubermatic/main/architecture/supported-providers/edge/_index.en.md
index c37bccdad..4714e537e 100644
--- a/content/kubermatic/main/architecture/supported-providers/edge/_index.en.md
+++ b/content/kubermatic/main/architecture/supported-providers/edge/_index.en.md
@@ -13,6 +13,7 @@ staging environment for testing before.
{{% /notice %}}

## Requirement
+
To leverage KKP's edge capabilities, you'll need to:

* Provide Target Devices: Identify the edge devices you want to function as worker nodes within your Kubermatic user cluster.
diff --git a/content/kubermatic/main/architecture/supported-providers/kubevirt/_index.en.md b/content/kubermatic/main/architecture/supported-providers/kubevirt/_index.en.md
index f72b0176a..08b15e1ff 100644
--- a/content/kubermatic/main/architecture/supported-providers/kubevirt/_index.en.md
+++ b/content/kubermatic/main/architecture/supported-providers/kubevirt/_index.en.md
@@ -14,16 +14,18 @@ weight = 5

### Requirements

A Kubernetes cluster (KubeVirt infrastructure cluster), which consists of nodes that **have hardware virtualization support** with at least:
-* 3 Bare Metal Server
-* CPUs: Minimum 8-core for testing; minimum 16-core or more for production
-* Memory: Minimum 32 GB for testing; minimum 64 GB or more for production
-* Storage: Minimum 100 GB for testing; minimum 500 GB or more for production
+
+- 3 Bare Metal Servers
+- CPUs: Minimum 8-core for testing; minimum 16-core or more for production
+- Memory: Minimum 32 GB for testing; minimum 64 GB or more for production
+- Storage: Minimum 100 GB for testing; minimum 500 GB or more for production

Software requirements:
-* KubeOne = 1.7 or higher
-* KubeOVN = 1.12 or higher or Canal = 3.26 or higher
-* KubeVirt = 1.2.2
-* Containerized Data Importer (CDI) = v1.60
+
+- KubeOne = 1.7 or higher
+- KubeOVN = 1.12 or higher or Canal = 3.26 or higher
+- KubeVirt = 1.2.2
+- Containerized Data Importer (CDI) = v1.60

The cluster version must be in the scope of [supported KKP Kubernetes clusters]({{< ref "../../../tutorials-howtos/operating-system-manager/compatibility/#kubernetes-versions" >}}) and it must be in the [KubeVirt Support Matrix](https://github.com/kubevirt/sig-release/blob/main/releases/k8s-support-matrix.md).
@@ -37,6 +39,7 @@ Follow [KubeVirt](https://kubevirt.io/user-guide/operations/installation/#instal
documentation to find out how to install them.

We require the following KubeVirt configuration:
+
```yaml
apiVersion: kubevirt.io/v1
kind: KubeVirt
@@ -85,29 +88,31 @@ Currently, it is not recommended to use local or any topology constrained storag

Once you have Kubernetes with all needed components, the last thing is to configure the KubeVirt datacenter on the seed. We allow configuring:
-* `customNetworkPolicies` - Network policies that are deployed on the infrastructure cluster (where VMs run).
-  * Check [Network Policy documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/#networkpolicy-resource) to see available options in the spec.
- * Also check a [common services connectivity issue](#i-created-a-load-balancer-service-on-a-user-cluster-but-services-outside-cannot-reach-it) that can be solved by a custom network policy. -* `ccmZoneAndRegionEnabled` - Indicates if region and zone labels from the cloud provider should be fetched. This field is enabled by default and should be disabled if the infra kubeconfig that is provided for KKP has no permission to access cluster role resources such as node objects. -* `dnsConfig` and `dnsPolicy` - DNS config and policy which are set up on a guest. Defaults to `ClusterFirst`. - * You should set those fields when you suffer from DNS loop or collision issue. [Refer to this section for more details.](#i-discovered-a-dns-collision-on-my-cluster-why-does-it-happen) -* `images` - Images for Virtual Machines that are selectable from KKP dashboard. - * Set this field according to [supported operating systems]({{< ref "../../compatibility/os-support-matrix/" >}}) to make sure that users can select operating systems for their VMs. -* `infraStorageClasses` - Storage classes that are initialized on user clusters that end users can work with. - * `isDefaultClass` - If true, the created StorageClass in the tenant cluster will be annotated with. - * `labels` - Is a map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - * `regions` - Represents a larger domain, made up of one or more zones. It is uncommon for Kubernetes clusters to span multiple regions. - * `volumeBindingMode` - indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. - * `volumeProvisioner` - The field specifies whether a storage class will be utilized by the infra cluster csi driver where the Containerized Data Importer (CDI) can use to create VM disk images or by the KubeVirt CSI Driver to provision volumes in the user cluster. If not specified, the storage class can be used as a VM disk image or user clusters volumes. - * `infra-csi-driver` - When set in the infraStorageClass, the storage class can be listed in the UI while creating the machine deployments and won't be available in the user cluster. - * `kubevirt-csi-driver` - When set in the infraStorageClass, the storage class won't be listed in the UI and will be available in the user cluster. - * `zones` - Represent a logical failure domain. It is common for Kubernetes clusters to span multiple zones for increased availability. -* `namespacedMode(experimental)` - Represents the configuration for enabling the single namespace mode for all user-clusters in the KubeVirt datacenter. -* `vmEvictionStrategy` - Indicates the strategy to follow when a node drain occurs. If not set the default value is External and the VM will be protected by a PDB. Currently, we only support two strategies, `External` or `LiveMigrate`. - * `LiveMigrate`: the VirtualMachineInstance will be migrated instead of being shutdown. - * `External`: the VirtualMachineInstance will be protected by a PDB and `vmi.Status.EvacuationNodeName` will be set on eviction. This is mainly useful for machine-controller which needs a way for VMI's to be blocked from eviction, yet inform machine-controller that eviction has been called on the VMI, so it can handle tearing the VMI down. -* `csiDriverOperator` - Contains the KubeVirt CSI Driver Operator configurations, where users can override the default configurations of the csi driver. 
-  * `overwriteRegistry`: overwrite the images registry for the csi driver daemonset that runs in the user cluster.
+- `customNetworkPolicies` - Network policies that are deployed on the infrastructure cluster (where VMs run).
+  - Check [Network Policy documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/#networkpolicy-resource) to see available options in the spec.
+  - Also check a [common services connectivity issue](#i-created-a-load-balancer-service-on-a-user-cluster-but-services-outside-cannot-reach-it) that can be solved by a custom network policy.
+- `ccmZoneAndRegionEnabled` - Indicates if region and zone labels from the cloud provider should be fetched. This field is enabled by default and should be disabled if the infra kubeconfig that is provided for KKP has no permission to access cluster role resources such as node objects.
+- `dnsConfig` and `dnsPolicy` - DNS config and policy which are set up on a guest. Defaults to `ClusterFirst`.
+  - You should set those fields when you suffer from a DNS loop or collision issue. [Refer to this section for more details.](#i-discovered-a-dns-collision-on-my-cluster-why-does-it-happen)
+- `images` - Images for Virtual Machines that are selectable from the KKP dashboard.
+  - Set this field according to [supported operating systems]({{< ref "../../compatibility/os-support-matrix/" >}}) to make sure that users can select operating systems for their VMs.
+- `infraStorageClasses` - Storage classes that are initialized on user clusters that end users can work with.
+  - `isDefaultClass` - If true, the created StorageClass in the tenant cluster will be annotated as the default class.
+  - `labels` - Is a map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services.
+  - `regions` - Represents a larger domain, made up of one or more zones. It is uncommon for Kubernetes clusters to span multiple regions.
+  - `volumeBindingMode` - Indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used.
+  - `volumeProvisioner` - Specifies whether a storage class will be utilized by the infra cluster CSI driver, which the Containerized Data Importer (CDI) can use to create VM disk images, or by the KubeVirt CSI Driver to provision volumes in the user cluster. If not specified, the storage class can be used for VM disk images or for user cluster volumes.
+  - `infra-csi-driver` - When set in the infraStorageClass, the storage class can be listed in the UI while creating the machine deployments and won't be available in the user cluster.
+  - `kubevirt-csi-driver` - When set in the infraStorageClass, the storage class won't be listed in the UI and will be available in the user cluster.
+  - `zones` - Represents a logical failure domain. It is common for Kubernetes clusters to span multiple zones for increased availability.
+- `namespacedMode` (experimental) - Represents the configuration for enabling the single namespace mode for all user-clusters in the KubeVirt datacenter.
+- `vmEvictionStrategy` - Indicates the strategy to follow when a node drain occurs. If not set, the default value is `External` and the VM will be protected by a PDB. Currently, we only support two strategies, `External` or `LiveMigrate`.
+  - `LiveMigrate`: the VirtualMachineInstance will be migrated instead of being shut down.
+  - `External`: the VirtualMachineInstance will be protected by a PDB and `vmi.Status.EvacuationNodeName` will be set on eviction. This is mainly useful for the machine-controller, which needs a way for VMIs to be blocked from eviction yet be informed that eviction has been called on the VMI, so it can handle tearing the VMI down.
+- `csiDriverOperator` - Contains the KubeVirt CSI Driver Operator configurations, where users can override the default configurations of the CSI driver.
+  - `overwriteRegistry`: overwrite the images registry for the CSI driver daemonset that runs in the user cluster.
+- `enableDedicatedCPUs` (deprecated) - Represents the configuration for virtual machine CPU assignment: `domain.cpu` is used when set to `true`, and `resources.requests` and `resources.limits` are used when set to `false`, which is the default.
+- `usePodResourcesCPU` - Represents the new way of configuring virtual machine CPU assignment: `domain.cpu` is used when set to `false`, which is the default, and `resources.requests` and `resources.limits` are used when set to `true`.

{{% notice note %}}
The `infraStorageClasses` field passes names of KubeVirt storage classes that can be used from user clusters.
@@ -117,6 +122,14 @@ The `namespacedMode` feature is highly experimental and should never be used in production environments. Additionally, enabling this mode in an existing KubeVirt setup utilized by KKP can cause serious issues, such as storage and networking incompatibilities.
{{% /notice %}}

+{{% notice warning %}}
+The `enableDedicatedCPUs` setting only takes effect for newly created machines. If you want to use this feature for existing machine deployments, you need to rotate the machines after updating this value in the Seed KubeVirt provider spec.
+{{% /notice %}}
+
+{{% notice warning %}}
+The `usePodResourcesCPU` flag will replace the `enableDedicatedCPUs` flag. During the deprecation period both take effect, but the new value has higher priority. When `enableDedicatedCPUs` is set to `false`, which is also the default value, you need to set `usePodResourcesCPU` to `true` to keep the same behaviour as before for newly created machines. If `enableDedicatedCPUs` was set to `true`, nothing needs to be changed.
+{{% /notice %}}
+
Refer to this [document](https://github.com/kubermatic/kubermatic/blob/release/v2.26/docs/zz_generated.seed.ce.yaml#L115) for more details and a configuration example.

@@ -130,6 +143,7 @@ only inside the cluster. You should use `customNetworkPolicies` to customize the

Install [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) on the KubeVirt cluster.

Then update `KubeVirt` configuration with the following spec:
+
```yaml
apiVersion: kubevirt.io/v1
kind: KubeVirt
@@ -158,12 +172,14 @@ We provide a Virtual Machine templating functionality over [Instance Types and P

You can use our standard Instance Types:

-* standard-2 - 2 CPUs, 8Gi RAM
-* standard-4 - 4 CPUs, 16Gi RAM
-* standard-8 - 8 CPUs, 32Gi RAM
+
+- standard-2 - 2 CPUs, 8Gi RAM
+- standard-4 - 4 CPUs, 16Gi RAM
+- standard-8 - 8 CPUs, 32Gi RAM

and Preferences (which are optional):
-* sockets-advantage - cpu guest topology where number of cpus is equal to number of sockets
+
+- sockets-advantage - cpu guest topology where the number of cpus is equal to the number of sockets

or you can just simply adjust the amount of CPUs and RAM of our default template according to your needs.
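For the two CPU-assignment flags described above, a minimal, hypothetical Seed datacenter snippet could look like the following (the datacenter name is a placeholder, and the exact field placement should be checked against the seed example document referenced above):

```yaml
spec:
  datacenters:
    kubevirt-dc:                    # hypothetical datacenter name
      spec:
        kubevirt:
          # deprecated flag, shown here with its default value
          enableDedicatedCPUs: false
          # keep the previous resources.requests/limits behaviour
          # for newly created machines
          usePodResourcesCPU: true
```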
@@ -173,6 +189,7 @@ instance types and preferences that users can select later. [Read how to add new

### Virtual Machine Scheduling

KubeVirt can take advantage of Kubernetes inner features to provide an advanced scheduling mechanism to virtual machines (VMs):
+
- [Kubernetes topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/)
- [Kubernetes node affinity/anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)

@@ -182,6 +199,7 @@ This allows you to restrict KubeVirt VMs ([see architecture](#architecture)) to
{{% notice note %}}
Note that topology spread constraints and node affinity presets are applicable to KubeVirt infra nodes.
{{% /notice %}}
+
#### Default Scheduling Behavior

Each Virtual Machine you create has default [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) applied:
@@ -192,7 +210,7 @@ topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
```

-this allows us to spread Virtual Machine equally across a cluster.
+This allows us to spread Virtual Machines evenly across a cluster.

#### Customize Scheduling Behavior

@@ -205,6 +223,7 @@ You can do it by expanding *ADVANCED SCHEDULING SETTINGS* on the initial nodes d
- `Node Affinity Preset Values` refers to the values of KubeVirt infra node labels.

Node Affinity Preset type can be `hard` or `soft` and refers to the same notion of [Pod affinity/anti-affinity types](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity):
+
- `hard`: the scheduler can't schedule the VM unless the rule is met.
- `soft`: the scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the VM.
@@ -271,16 +290,17 @@ parameter of Machine Controller that sets the timeout for workload eviction.

Usually it happens when both the infrastructure and user clusters point to the same address of NodeLocal DNS Cache servers, even if they have separate server instances running. Let us imagine that:
-* On the infrastructure cluster there is a running NodeLocal DNS Cache under 169.254.20.10 address.
-* Then we create a new user cluster, start a few Virtual Machines that finally gives a fully functional k8s cluster that runs on another k8s cluster.
-* Next we observe that on the user cluster there is another NodeLocal DNS Cache that has the same 169.254.20.10 address.
-* Since Virtual Machine can have access to subnets on the infra and user clusters (depends on your network policy rules) having the same address of DNS cache leads to conflict.
+
+- On the infrastructure cluster there is a running NodeLocal DNS Cache under the 169.254.20.10 address.
+- Then we create a new user cluster and start a few Virtual Machines, which finally gives a fully functional k8s cluster running on another k8s cluster.
+- Next we observe that on the user cluster there is another NodeLocal DNS Cache that has the same 169.254.20.10 address.
+- Since Virtual Machines can have access to subnets on the infra and user clusters (depending on your network policy rules), having the same DNS cache address leads to a conflict.

One way to prevent that situation is to set `dnsPolicy` and `dnsConfig` rules so that Virtual Machines do not copy the DNS configuration from their pods and point to different addresses.
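As a hypothetical sketch of that prevention (the nameserver address is a placeholder; `dnsPolicy` and `dnsConfig` are the datacenter fields mentioned earlier), the guest DNS settings could be pinned like this:

```yaml
# Part of the KubeVirt datacenter configuration (illustration only)
dnsPolicy: None          # do not inherit the DNS configuration of the pod
dnsConfig:
  nameservers:
    - 8.8.8.8            # hypothetical upstream resolver, distinct from the infra NodeLocal DNS Cache IP
```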
Follow [Configure KKP With KubeVirt](#configure-kkp-with-kubevirt) to learn how to set the DNS config correctly.

-### I created a load balancer service on a user cluster but services outside cannot reach it.
+### I created a load balancer service on a user cluster but services outside cannot reach it

In most cases it is due to the `cluster-isolation` network policy that is deployed by default on each user cluster.
It only allows in-cluster communication. You should adjust network rules to your needs by adding [customNetworkPolicies configuration]({{< ref "../../../tutorials-howtos/project-and-cluster-management/seed-cluster/" >}}).
@@ -316,16 +336,18 @@ Kubermatic Virtualization graduates to GA from KKP 2.22!
On the way, we have changed many things that improved our implementation of the KubeVirt Cloud Provider.

Just to highlight the most important:

-* Safe Virtual Machine workload eviction has been implemented.
-* Virtual Machine templating is based on InstanceTypes and Preferences.
-* KubeVirt CSI controller has been moved to control plane of a user cluster.
-* Users can influence scheduling of VMs over topology spread constraints and node affinity presets.
-* KubeVirt Cloud Controller Manager has been improved and optimized.
-* Cluster admin can define the list of supported OS images and initialized storage classes.
+
+- Safe Virtual Machine workload eviction has been implemented.
+- Virtual Machine templating is based on InstanceTypes and Preferences.
+- The KubeVirt CSI controller has been moved to the control plane of a user cluster.
+- Users can influence scheduling of VMs via topology spread constraints and node affinity presets.
+- The KubeVirt Cloud Controller Manager has been improved and optimized.
+- The cluster admin can define the list of supported OS images and initialized storage classes.

Additionally, we removed some features that didn't leave the technology preview stage; those are:

-* Custom Local Disks
-* Secondary Disks
+
+- Custom Local Disks
+- Secondary Disks

{{% notice warning %}}
The official upgrade procedure will not break clusters that already exist; however, **scaling cluster nodes will not lead to expected results**.
@@ -348,12 +370,12 @@ Or if you provisioned the cluster over KubeOne please follow [the update procedu
Next you can update the KubeVirt control plane and the Containerized Data Importer by executing:

-```shell
+```bash
export RELEASE=
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
```

-```shell
+```bash
export RELEASE=
kubectl apply -f https://github.com/kubevirt/containerized-data-importer/releases/download/${RELEASE}/cdi-operator.yaml
```
diff --git a/content/kubermatic/main/architecture/supported-providers/vmware-cloud-director/_index.en.md b/content/kubermatic/main/architecture/supported-providers/vmware-cloud-director/_index.en.md
index b2cf2e8ec..8b9eac150 100644
--- a/content/kubermatic/main/architecture/supported-providers/vmware-cloud-director/_index.en.md
+++ b/content/kubermatic/main/architecture/supported-providers/vmware-cloud-director/_index.en.md
@@ -10,9 +10,9 @@ weight = 7
Prerequisites for provisioning Kubernetes clusters with the KKP are as follows:

1. An Organizational Virtual Data Center (VDC).
-2. `Edge Gateway` is required for connectivity with the internet, network address translation, and network firewall.
-3. Organizational Virtual Data Center network is connected to the edge gateway.
-4. Ensure that the distributed firewalls are configured in a way that allows traffic flow within and out of the VDC.
+1. `Edge Gateway` is required for connectivity with the internet, network address translation, and network firewall.
+1. Organizational Virtual Data Center network is connected to the edge gateway.
+1. Ensure that the distributed firewalls are configured in a way that allows traffic flow within and out of the VDC.

Kubermatic Kubernetes Platform (KKP) integration has been tested with `VMware Cloud Director 10.4`.
@@ -57,7 +57,7 @@ spec:
CSI driver settings can be configured at the cluster level when creating a cluster using the UI or API. The following settings are required:

1. Storage Profile: Used for creating persistent volumes.
-2. Filesystem: Filesystem to use for named disks. Allowed values are ext4 or xfs.
+1. Filesystem: Filesystem to use for named disks. Allowed values are ext4 or xfs.

## Known Limitations
diff --git a/content/kubermatic/main/architecture/supported-providers/vsphere/_index.en.md b/content/kubermatic/main/architecture/supported-providers/vsphere/_index.en.md
index 9ae4d15cb..8b0fe217d 100644
--- a/content/kubermatic/main/architecture/supported-providers/vsphere/_index.en.md
+++ b/content/kubermatic/main/architecture/supported-providers/vsphere/_index.en.md
@@ -17,10 +17,9 @@ When creating worker nodes for a user cluster, the user can specify an existing
### Supported Operating Systems

-* Ubuntu 20.04 [ova](https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-amd64.ova)
-* Ubuntu 22.04 [ova](https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.ova)
-* Flatcar (Stable channel) [ova](https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vmware_ova.ova)
-
+- Ubuntu 20.04 [ova](https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-amd64.ova)
+- Ubuntu 22.04 [ova](https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.ova)
+- Flatcar (Stable channel) [ova](https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vmware_ova.ova)

### Importing the OVA
@@ -55,7 +54,7 @@ are needed to manage VMs, storage, networking and tags.
The vSphere provider allows splitting permissions into two sets of credentials:

1. Credentials passed to the [vSphere Cloud Controller Manager (CCM) and CSI Storage driver](#cloud-controller-manager-ccm--csi). These credentials are currently inherited into the user cluster and should therefore be individual per user cluster. This type of credentials can be passed when creating a user cluster or setting up a preset.
-2. Credentials used for [creating and managing infrastructure](#infrastructure-management) (VMs, tags, networks). This set of credentials is not shared with the user cluster and is kept on the seed cluster. This type of credentials can either be passed in the Seed configuration ([.spec.datacenters.EXAMPLEDC.vpshere.infraManagementUser]({{< ref "../../../references/crds/#datacenterspecvsphere" >}})) for all user clusters created in this datacenter or individually while creating a user cluster.
+1. Credentials used for [creating and managing infrastructure](#infrastructure-management) (VMs, tags, networks). This set of credentials is not shared with the user cluster and is kept on the seed cluster.
This type of credentials can either be passed in the Seed configuration ([.spec.datacenters.EXAMPLEDC.vsphere.infraManagementUser]({{< ref "../../../references/crds/#datacenterspecvsphere" >}})) for all user clusters created in this datacenter or individually while creating a user cluster.

If such a split is not desired, one set of credentials used for both use cases can be provided instead. Providing two sets of credentials is optional.
@@ -64,6 +63,7 @@ If such a split is not desired, one set of credentials used for both use cases c
The vSphere user has to have the following permissions on the correct resources. Note that if a shared set of credentials is used, roles for both use cases need to be assigned to the technical user that will be used for credentials.

#### Cloud Controller Manager (CCM) / CSI
+
**Note:** The roles below were updated based on [vsphere-storage-plugin-roles] for the external CCM, which is available from KKP v2.18+ and vSphere v7.0.2+

For the Cloud Controller Manager (CCM) and CSI components used to provide cloud provider and storage integration to the user cluster,
@@ -71,23 +71,25 @@ a technical user (e.g. `cust-ccm-cluster`) is needed. The user should be assigne
{{< tabs name="CCM/CSI User Roles" >}}
{{% tab name="k8c-ccm-storage-vmfolder-propagate" %}}
+
##### Role `k8c-ccm-storage-vmfolder-propagate`

-* Granted at **VM Folder** and **Template Folder**, propagated
-* Permissions
-  * Virtual machine
-    * Change Configuration
-      * Add existing disk
-      * Add new disk
-      * Add or remove device
-      * Remove disk
-  * Folder
-    * Create folder
-    * Delete dolder
+
+- Granted at **VM Folder** and **Template Folder**, propagated
+- Permissions
+  - Virtual machine
+    - Change Configuration
+      - Add existing disk
+      - Add new disk
+      - Add or remove device
+      - Remove disk
+  - Folder
+    - Create folder
+    - Delete folder

---

-```
-$ govc role.ls k8c-ccm-storage-vmfolder-propagate
+```bash
+govc role.ls k8c-ccm-storage-vmfolder-propagate
Folder.Create
Folder.Delete
VirtualMachine.Config.AddExistingDisk
@@ -95,50 +97,61 @@ VirtualMachine.Config.AddNewDisk
VirtualMachine.Config.AddRemoveDevice
VirtualMachine.Config.RemoveDisk
```
+
{{% /tab %}}
{{% tab name="k8c-ccm-storage-datastore-propagate" %}}
+
##### Role `k8c-ccm-storage-datastore-propagate`

-* Granted at **Datastore**, propagated
-* Permissions
-  * Datastore
-    * Allocate space
-    * Low level file operations
+
+- Granted at **Datastore**, propagated
+- Permissions
+  - Datastore
+    - Allocate space
+    - Low level file operations

---

-```
-$ govc role.ls k8c-ccm-storage-datastore-propagate
+```bash
+govc role.ls k8c-ccm-storage-datastore-propagate
Datastore.AllocateSpace
Datastore.FileManagement
```
+
{{% /tab %}}
{{% tab name="k8c-ccm-storage-cns" %}}
+
##### Role `k8c-ccm-storage-cns`

-* Granted at **vcenter** level, not propagated
-* Permissions
-  * CNS
-    * Searchable
+
+- Granted at **vcenter** level, not propagated
+- Permissions
+  - CNS
+    - Searchable
+
---

-```
-$ govc role.ls k8c-ccm-storage-cns
+```bash
+govc role.ls k8c-ccm-storage-cns
Cns.Searchable
```
+
{{% /tab %}}
{{% tab name="Read-only (predefined)" %}}
+
##### Role `Read-only` (predefined)

-* Granted at ..., **not** propagated
-  * Datacenter
-  * All hosts where the nodes VMs reside.
+
+- Granted at ..., **not** propagated
+  - Datacenter
+  - All hosts where the nodes VMs reside.

---

-```
-$ govc role.ls ReadOnly
+```bash
+govc role.ls ReadOnly
System.Anonymous
System.Read
System.View
```
+
{{% /tab %}}
{{< /tabs >}}
@@ -148,33 +161,36 @@ For infrastructure (e.g.
VMs, tags and networking) provisioning actions of KKP i {{< tabs name="Infrastructure Management" >}} {{% tab name="k8c-user-vcenter" %}} + ##### Role `k8c-user-vcenter` -* Granted at **vcenter** level, **not** propagated -* Needed to customize VM during provisioning -* Permissions - * CNS - * Searchable - * Profile-driven storage - * Profile-driven storage view - * VirtualMachine - * Provisioning - * Modify customization specification - * Read customization specifications - * vSphere Tagging - * Assign or Unassign vSphere Tag - * Assign or Unassign vSphere Tag on Object - * Create vSphere Tag - * Create vSphere Tag Category - * Delete vSphere Tag - * Delete vSphere Tag Category - * Edit vSphere Tag - * Edit vSphere Tag Category - * Modify UsedBy Field For Category - * Modify UsedBy Field For Tag + +- Granted at **vcenter** level, **not** propagated +- Needed to customize VM during provisioning +- Permissions + - CNS + - Searchable + - Profile-driven storage + - Profile-driven storage view + - VirtualMachine + - Provisioning + - Modify customization specification + - Read customization specifications + - vSphere Tagging + - Assign or Unassign vSphere Tag + - Assign or Unassign vSphere Tag on Object + - Create vSphere Tag + - Create vSphere Tag Category + - Delete vSphere Tag + - Delete vSphere Tag Category + - Edit vSphere Tag + - Edit vSphere Tag Category + - Modify UsedBy Field For Category + - Modify UsedBy Field For Tag + --- -``` -$ govc role.ls k8c-user-vcenter +```bash +govc role.ls k8c-user-vcenter Cns.Searchable InventoryService.Tagging.AttachTag InventoryService.Tagging.CreateCategory @@ -193,34 +209,37 @@ System.View VirtualMachine.Provisioning.ModifyCustSpecs VirtualMachine.Provisioning.ReadCustSpecs ``` + {{% /tab %}} {{% tab name="k8c-user-datacenter" %}} + ##### Role `k8c-user-datacenter` -* Granted at **datacenter** level, **not** propagated -* Needed for cloning the template VM (obviously this is not done in a folder at this time) -* Permissions - * Datastore - * Allocate space - * Browse datastore - * Low level file operations - * Remove file - * vApp - * vApp application configuration - * vApp instance configuration - * Virtual Machine - * Change Configuration - * Change CPU count - * Change Memory - * Change Settings - * Edit Inventory - * Create from existing - * vSphere Tagging - * Assign or Unassign vSphere Tag on Object + +- Granted at **datacenter** level, **not** propagated +- Needed for cloning the template VM (obviously this is not done in a folder at this time) +- Permissions + - Datastore + - Allocate space + - Browse datastore + - Low level file operations + - Remove file + - vApp + - vApp application configuration + - vApp instance configuration + - Virtual Machine + - Change Configuration + - Change CPU count + - Change Memory + - Change Settings + - Edit Inventory + - Create from existing + - vSphere Tagging + - Assign or Unassign vSphere Tag on Object --- -``` -$ govc role.ls k8c-user-datacenter +```bash +govc role.ls k8c-user-datacenter Datastore.AllocateSpace Datastore.Browse Datastore.DeleteFile @@ -236,40 +255,44 @@ VirtualMachine.Config.Memory VirtualMachine.Config.Settings VirtualMachine.Inventory.CreateFromExisting ``` + {{% /tab %}} {{% tab name="k8c-user-cluster-propagate" %}} -* Role `k8c-user-cluster-propagate` - * Granted at **cluster** level, propagated - * Needed for upload of `cloud-init.iso` (Ubuntu) or defining the Ignition config into Guestinfo (CoreOS) - * Permissions - * AutoDeploy - * Rule - * Create - * Delete - * Edit - * Folder - * 
Create folder
-  * Host
-    * Configuration
-      * Storage partition configuration
-    * System Management
-    * Local operations
-    * Reconfigure virtual machine
-  * Inventory
-    * Modify cluster
-  * Resource
-    * Assign virtual machine to resource pool
-    * Migrate powered off virtual machine
-    * Migrate powered on virtual machine
-  * vApp
-    * vApp application configuration
-    * vApp instance configuration
-  * vSphere Tagging
-    * Assign or Unassign vSphere Tag on Object
+
+##### Role `k8c-user-cluster-propagate`
+
+- Granted at **cluster** level, propagated
+- Needed for upload of `cloud-init.iso` (Ubuntu) or defining the Ignition config into Guestinfo (CoreOS)
+- Permissions
+  - AutoDeploy
+    - Rule
+      - Create
+      - Delete
+      - Edit
+  - Folder
+    - Create folder
+  - Host
+    - Configuration
+      - Storage partition configuration
+    - System Management
+    - Local operations
+    - Reconfigure virtual machine
+  - Inventory
+    - Modify cluster
+  - Resource
+    - Assign virtual machine to resource pool
+    - Migrate powered off virtual machine
+    - Migrate powered on virtual machine
+  - vApp
+    - vApp application configuration
+    - vApp instance configuration
+  - vSphere Tagging
+    - Assign or Unassign vSphere Tag on Object
+
---

-```
-$ govc role.ls k8c-user-cluster-propagate
+```bash
+govc role.ls k8c-user-cluster-propagate
AutoDeploy.Rule.Create
AutoDeploy.Rule.Delete
AutoDeploy.Rule.Edit
@@ -285,19 +308,23 @@ Resource.HotMigrate
VApp.ApplicationConfig
VApp.InstanceConfig
```
+
{{% /tab %}}
{{% tab name="k8c-network-attach" %}}
+
##### Role `k8c-network-attach`

-* Granted for each network that should be used (distributed switch + network)
-* Permissions
-  * Network
-    * Assign network
-  * vSphere Tagging
-    * Assign or Unassign vSphere Tag on Object
+
+- Granted for each network that should be used (distributed switch + network)
+- Permissions
+  - Network
+    - Assign network
+  - vSphere Tagging
+    - Assign or Unassign vSphere Tag on Object
+
---

-```
-$ govc role.ls k8c-network-attach
+```bash
+govc role.ls k8c-network-attach
InventoryService.Tagging.ObjectAttachable
Network.Assign
System.Anonymous
@@ -307,27 +334,30 @@ System.View
{{% /tab %}}

{{% tab name="k8c-user-datastore-propagate" %}}
+
##### Role `k8c-user-datastore-propagate`

-* Granted at **datastore / datastore cluster** level, propagated
-* Also provides permission to create vSphere tags for a dedicated category, which are required by KKP seed controller manager
-* Please note below points about tagging.
+
+- Granted at **datastore / datastore cluster** level, propagated
+- Also provides permission to create vSphere tags for a dedicated category, which are required by the KKP seed controller manager
+- Please note the points below about tagging.

**Note**: If a category ID is assigned to a user cluster, KKP will claim ownership of any tags it creates and will try to delete the tags assigned to the cluster upon cluster deletion. Thus, make sure that the assigned category isn't shared with other lingering resources.

**Note**: Tags can be attached to machine deployments regardless of whether the tags were created via KKP or not. If a tag was not attached to the user cluster, the machine controller will only detach it.
-* Permissions
-  * Datastore
-    * Allocate space
-    * Browse datastore
-    * Low level file operations
-  * vSphere Tagging
-    * Assign or Unassign vSphere Tag on an Object
+
+- Permissions
+  - Datastore
+    - Allocate space
+    - Browse datastore
+    - Low level file operations
+  - vSphere Tagging
+    - Assign or Unassign vSphere Tag on an Object

---

-```
-$ govc role.ls k8c-user-datastore-propagate
+```bash
+govc role.ls k8c-user-datastore-propagate
Datastore.AllocateSpace
Datastore.Browse
Datastore.FileManagement
@@ -336,34 +366,37 @@ System.Anonymous
System.Read
System.View
```
+
{{% /tab %}}

{{% tab name="k8c-user-folder-propagate" %}}
+
##### Role `k8c-user-folder-propagate`

-* Granted at **VM Folder** and **Template Folder** level, propagated
-* Needed for managing the node VMs
-* Permissions
-  * Folder
-    * Create folder
-    * Delete folder
-  * Global
-    * Set custom attribute
-  * Virtual machine
-    * Change Configuration
-    * Edit Inventory
-    * Guest operations
-    * Interaction
-    * Provisioning
-    * Snapshot management
-  * vSphere Tagging
-    * Assign or Unassign vSphere Tag
-    * Assign or Unassign vSphere Tag on an Object
-    * Create vSphere Tag
-    * Delete vSphere Tag
+
+- Granted at **VM Folder** and **Template Folder** level, propagated
+- Needed for managing the node VMs
+- Permissions
+  - Folder
+    - Create folder
+    - Delete folder
+  - Global
+    - Set custom attribute
+  - Virtual machine
+    - Change Configuration
+    - Edit Inventory
+    - Guest operations
+    - Interaction
+    - Provisioning
+    - Snapshot management
+  - vSphere Tagging
+    - Assign or Unassign vSphere Tag
+    - Assign or Unassign vSphere Tag on an Object
+    - Create vSphere Tag
+    - Delete vSphere Tag

---

-```
-$ govc role.ls k8c-user-folder-propagate
+```bash
+govc role.ls k8c-user-folder-propagate
Folder.Create
Folder.Delete
Global.SetCustomField
@@ -459,20 +492,15 @@ VirtualMachine.State.RenameSnapshot
VirtualMachine.State.RevertToSnapshot
```
+
{{% /tab %}}
{{< /tabs >}}

-
-
-
-
-
-
The described permissions have been tested with vSphere 8.0.2 and might be different for other vSphere versions.

## Datastores and Datastore Clusters
@@ -483,8 +511,8 @@ shared management interface.

In KKP *Datastores* are used for two purposes:

-* Storing the VMs files for the worker nodes of vSphere user clusters.
-* Generating the vSphere cloud provider storage configuration for user clusters.
+- Storing the VM files for the worker nodes of vSphere user clusters.
+- Generating the vSphere cloud provider storage configuration for user clusters.

In particular, to provide the `default-datastore` value, which is the default datastore for dynamic volume provisioning.
@@ -494,24 +522,20 @@ specified directly in [vSphere cloud configuration][vsphere-cloud-config].

There are three places where Datastores and Datastore Clusters can be configured in KKP:

-* At datacenter level (configured in the [Seed CRD]({{< ref "../../../tutorials-howtos/project-and-cluster-management/seed-cluster" >}})))
+- At datacenter level (configured in the [Seed CRD]({{< ref "../../../tutorials-howtos/project-and-cluster-management/seed-cluster" >}}))
it is possible to specify the default *Datastore* that will be used for user clusters'
dynamic volume provisioning and worker VMs placement in case no *Datastore* or
*Datastore Cluster* is specified at cluster level.
-* At *Cluster* level it is possible to provide either a *Datastore* or a
+- At *Cluster* level it is possible to provide either a *Datastore* or a
*Datastore Cluster* respectively with `spec.cloud.vsphere.datastore` and
`spec.cloud.vsphere.datastoreCluster` fields.
-* It is possible to specify *Datastore* or *Datastore Clusters* in a preset
+- It is possible to specify *Datastore* or *Datastore Clusters* in a preset
that is later used to create a user cluster from it.

These settings can also be configured as part of the "Advanced Settings" step
when creating a user cluster from the [KKP dashboard]({{< ref "../../../tutorials-howtos/project-and-cluster-management/#create-cluster" >}}).

-[vsphere-cloud-config]: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-BFF39F1D-F70A-4360-ABC9-85BDAFBE8864.html?hWord=N4IghgNiBcIMYQK4GcAuBTATgWgJYBMACAYQGUBJEAXyA
-[vsphere-storage-plugin-roles]: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-0AB6E692-AA47-4B6A-8CEA-38B754E16567.html#GUID-043ACF65-9E0B-475C-A507-BBBE2579AA58__GUID-E51466CB-F1EA-4AD7-A541-F22CDC6DE881
-
-
## Known Issues

### Volume Detach Bug

@@ -520,24 +544,24 @@ After a node is powered-off, the Kubernetes vSphere driver doesn't detach disks
Upstream Kubernetes has been working on the issue for a long time now and
tracking it under the following tickets:

-*
-*
-*
-*
-*
+-
+-
+-
+-
+-

## Internal Kubernetes endpoints unreachable

### Symptoms

-* Unable to perform CRUD operations on resources governed by webhooks (e.g. ValidatingWebhookConfiguration, MutatingWebhookConfiguration, etc.). The following error is observed:
+- Unable to perform CRUD operations on resources governed by webhooks (e.g. ValidatingWebhookConfiguration, MutatingWebhookConfiguration, etc.). The following error is observed:

-```sh
+```bash
Internal error occurred: failed calling webhook "webhook-name": failed to call webhook: Post "https://webhook-service-name.namespace.svc/webhook-endpoint": context deadline exceeded
```

-* Unable to reach internal Kubernetes endpoints from pods/nodes.
-* ICMP is working but TCP/UDP is not.
+- Unable to reach internal Kubernetes endpoints from pods/nodes.
+- ICMP is working but TCP/UDP is not.

### Cause

@@ -545,7 +569,7 @@ On recent enough VMware hardware compatibility version (i.e >=15 or maybe >=14),

### Solution

-```sh
+```bash
sudo ethtool -K ens192 tx-udp_tnl-segmentation off
sudo ethtool -K ens192 tx-udp_tnl-csum-segmentation off
```

@@ -554,10 +578,13 @@ These flags are related to the hardware segmentation offload done by the vSphere
We have two options to configure these flags for KKP installations:

-* When configuring the VM template, set these flags as well.
-* Create a [custom Operating System Profile]({{< ref "../../../tutorials-howtos/operating-system-manager/usage#custom-operatingsystemprofiles" >}}) and configure the flags there.
+- When configuring the VM template, set these flags as well.
+- Create a [custom Operating System Profile]({{< ref "../../../tutorials-howtos/operating-system-manager/usage#custom-operatingsystemprofiles" >}}) and configure the flags there.
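For the second option, a minimal cloud-init sketch that runs the same two commands from above during provisioning (the interface name `ens192` is an assumption; adjust it to your template):

```yaml
#cloud-config
runcmd:
  # Disable UDP tunnel segmentation offload on the primary interface.
  - ethtool -K ens192 tx-udp_tnl-segmentation off
  - ethtool -K ens192 tx-udp_tnl-csum-segmentation off
```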
### References -* -* +- +- + +[vsphere-cloud-config]: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-BFF39F1D-F70A-4360-ABC9-85BDAFBE8864.html?hWord=N4IghgNiBcIMYQK4GcAuBTATgWgJYBMACAYQGUBJEAXyA +[vsphere-storage-plugin-roles]: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-0AB6E692-AA47-4B6A-8CEA-38B754E16567.html#GUID-043ACF65-9E0B-475C-A507-BBBE2579AA58__GUID-E51466CB-F1EA-4AD7-A541-F22CDC6DE881 diff --git a/content/kubermatic/main/cheat-sheets/etcd/etcd-launcher/_index.en.md b/content/kubermatic/main/cheat-sheets/etcd/etcd-launcher/_index.en.md index a01669d2a..2102b076b 100644 --- a/content/kubermatic/main/cheat-sheets/etcd/etcd-launcher/_index.en.md +++ b/content/kubermatic/main/cheat-sheets/etcd/etcd-launcher/_index.en.md @@ -12,12 +12,12 @@ API and flexibly control how the user cluster etcd ring is started. - **v2.19.0**: Peer TLS connections have been added to etcd-launcher. - **v2.22.0**: `EtcdLauncher` feature gate is enabled by default in `KubermaticConfiguration`. - ## Comparison to static etcd Prior to v2.15.0, user cluster etcd ring was based on a static StatefulSet with 3 pods running the etcd ring nodes. With `etcd-launcher`, the etcd `StatefulSet` is updated to include: + - An init container that is responsible for copying the etcd-launcher into the main etcd pod. - Additional environment variables used by the etcd-launcher and etcdctl binary for simpler operations. - A liveness probe to improve stability. @@ -58,6 +58,7 @@ spec: If the feature gate was disabled explicitly, etcd Launcher can still be configured for individual user clusters. ### Enabling etcd Launcher + In this mode, the feature is only enabled for a specific user cluster. This can be done by editing the object cluster and enabling the feature gate for `etcdLauncher`: diff --git a/content/kubermatic/main/cheat-sheets/kubelogin-plugin/_index.en.md b/content/kubermatic/main/cheat-sheets/kubelogin-plugin/_index.en.md deleted file mode 100644 index 65a4c6769..000000000 --- a/content/kubermatic/main/cheat-sheets/kubelogin-plugin/_index.en.md +++ /dev/null @@ -1,125 +0,0 @@ -+++ -title = "Kubelogin Plugin Usage (kubectl oidc-login)" -date = 2018-08-17T12:07:15+02:00 -weight = 30 -+++ - -In this document, we will describe the using [kubelogin plugin](https://github.com/int128/kubelogin) to access the KKP user clusters. - -`kubelogin` is a kubectl plugin for Kubernetes OpenID Connect (OIDC) authentication, also known as `kubectl oidc-login`. - -## Installation - -Install the latest release from Homebrew, Krew, Chocolatey or GitHub Releases. - -```bash -# Homebrew (macOS and Linux) -brew install kubelogin - -# Krew (macOS, Linux, Windows and ARM) -kubectl krew install oidc-login - -# Chocolatey (Windows) -choco install kubelogin -``` - -## Update KKP Settings - -When the plugin is executed, it starts the local server at port 8000 or 18000 by default. 
You need to register the following redirect URIs to the provider: - -```text -http://localhost:8000 -http://localhost:18000 (used if port 8000 is already in use) -``` - -To achieve this, below lines need to be added to the issuer configuration (most likely `kubermaticIssuer`): - -```yaml -## kubermatic values.yaml - - id: kubermaticIssuer - name: KubermaticIssuer - secret: xxx - RedirectURIs: - - https://kkp.example.com/api/v1/kubeconfig - - https://kkp.example.com/api/v2/dashboard/login - - https://kkp.example.com/api/v2/kubeconfig/secret - - http://localhost:8000 # -> add this line - - http://localhost:18000 # -> add this line -``` - -You need to add the last 2 lines, and run the `kubermatic-installer`. - -## Usage with KKP - -Currently, KKP allows you to download a kubeconfig file proxied by the OIDC provider, when the [OIDC Kubeconfig](https://docs.kubermatic.com/kubermatic/v2.27/tutorials-howtos/administration/admin-panel/interface/#enable-oidc-kubeconfig) is enabled. - -In order to use kubeconfig plugin, you can download that file and update it to use `kubectl oidc-login`. - -The downloaded file would look like this: - -```yaml -apiVersion: v1 -kind: Config -... -users: -- name: user@example.com - user: - auth-provider: - config: - client-id: kubermaticIssuer - client-secret: xxx - id-token: xxx - idp-issuer-url: https://kkp.example.com/dex - refresh-token: xxx - name: oidc -``` - -It needs to be converted this way: - -```yaml -apiVersion: v1 -kind: Config -... -users: - - name: user@example.com - user: - exec: - apiVersion: client.authentication.k8s.io/v1 - args: - - oidc-login - - get-token - - --oidc-issuer-url=https://kkp.example.com/dex - - --oidc-client-id=kubermaticIssuer - - --oidc-client-secret=xxx - - --oidc-extra-scope=email - command: kubectl - env: null - interactiveMode: Never - provideClusterInfo: false -``` - -This can be achieved by running [yq](https://github.com/mikefarah/yq): - -```bash -cat downloaded_kubeconfig | yq ' - .users[0].user as $old | - .users[0].user = { - "exec": { - "apiVersion": "client.authentication.k8s.io/v1", - "args": [ - "oidc-login", - "get-token", - "--oidc-issuer-url=\($old[\"auth-provider\"].config[\"idp-issuer-url\"])", - "--oidc-client-id=\($old[\"auth-provider\"].config[\"client-id\"])", - "--oidc-client-secret=\($old[\"auth-provider\"].config[\"client-secret\"])", - "--oidc-extra-scope=email" - ], - "command": "kubectl", - "env": null, - "interactiveMode": "Never", - "provideClusterInfo": false - } - }' > kubelogin_enabled_kubeconfig -``` - -After this step, you can export `KUBECONFIG` variable, and continue with the `kubectl` commands. For the first command, a browser window will be opened to authenticate on KKP. The OIDC token will be stored under the `~/.kube/cache/oidc-login` directory. When the token is expired, same authentication process will be executed again. diff --git a/content/kubermatic/main/cheat-sheets/vsphere-cluster-id/_index.en.md b/content/kubermatic/main/cheat-sheets/vsphere-cluster-id/_index.en.md index a1051f238..88a0411ab 100644 --- a/content/kubermatic/main/cheat-sheets/vsphere-cluster-id/_index.en.md +++ b/content/kubermatic/main/cheat-sheets/vsphere-cluster-id/_index.en.md @@ -43,15 +43,16 @@ The following steps should be done in the **seed cluster** for each vSphere user cluster. 
+
First, get all user clusters and filter vSphere user clusters using `grep`:

-```shell
+```bash
kubectl --kubeconfig= get clusters | grep vsphere
```

You should get output similar to the following:

-```
+```bash
NAME         HUMANREADABLENAME   OWNER                 VERSION   PROVIDER   DATACENTER   PHASE     PAUSED   AGE
s8kkpcccfq   focused-spence      test@kubermatic.com   1.23.8    vsphere    your-dc      Running   false    16h
```

@@ -60,14 +61,14 @@ s8kkpcccfq focused-spence test@kubermatic.com 1.23.8 vspher
`s8kkpcccfq`) and inspect the vSphere CSI cloud-config to check the value of the
`cluster-id` field.

-```shell
+```bash
kubectl --kubeconfig= get configmap -n cluster- cloud-config-csi -o yaml
```

The following excerpt shows the most important part of the output. You need to
locate the `cluster-id` field under the `[Global]` group.

-```
+```yaml
apiVersion: v1
data:
  config: |+
@@ -102,8 +103,9 @@ The second approach assumes changing `cluster-id` without stopping the CSI
driver. This approach is **not documented** by VMware; however, it worked in
our environment. In this case, there's no significant downtime.

Since this approach is not documented by VMware, we **heavily advise** that you:
-  - follow the first approach
-  - if you decide to follow this approach, make sure to extensively test it in
+
+ * follow the first approach
+ * if you decide to follow this approach, make sure to extensively test it in
  a staging/testing environment before applying it in production

### Approach 1 (recommended)
@@ -141,7 +143,7 @@ user cluster.
First, pause affected user clusters by running the following command in the
**seed cluster** for **each affected** user cluster:

-```shell
+```bash
clusterPatch='{"spec":{"pause":true,"features":{"vsphereCSIClusterID":true}}}'
kubectl --kubeconfig= patch cluster  --type=merge -p $clusterPatch
...

Once done, scale down the vSphere CSI driver deployment in **each affected user
cluster**:

-```shell
+```bash
kubectl --kubeconfig= scale deployment -n kube-system vsphere-csi-controller --replicas=0
...
kubectl --kubeconfig= scale deployment -n kube-system vsphere-csi-controller --replicas=0
@@ -190,13 +192,13 @@ config and update the Secret.
The following command reads the config stored in the Secret, decodes it and
then saves it to a file called `cloud-config-csi`:

-```shell
+```bash
kubectl --kubeconfig= get secret -n kube-system cloud-config-csi -o=jsonpath='{.data.config}' | base64 -d > cloud-config-csi
```

Open the `cloud-config-csi` file in some text editor:

-```shell
+```bash
vi cloud-config-csi
```

@@ -205,7 +207,7 @@
locate the `cluster-id` field under the `[Global]` group, and replace ``
with the name of your user cluster (e.g. `s8kkpcccfq`).

-```
+```yaml
[Global]
user = "username"
password = "password"
@@ -218,13 +220,13 @@ cluster-id = ""
Save the file, exit your editor, and then encode the file:

-```shell
+```bash
cat cloud-config-csi | base64 -w0
```

Copy the encoded output and run the following `kubectl edit` command:

-```shell
+```bash
kubectl --kubeconfig= edit secret -n kube-system cloud-config-csi
```

@@ -268,7 +270,7 @@ the `cluster-id` value to the name of the user cluster.
Run the following `kubectl edit` command. Replace `` in the command with
the name of the user cluster (e.g. `s8kkpcccfq`).

-```shell
+```bash
kubectl --kubeconfig= edit configmap -n cluster- cloud-config-csi
```

@@ -303,7 +305,7 @@ to vSphere to de-register all volumes.
cluster.
The `vsphereCSIClusterID` feature flag, enabled at the beginning, ensures that
your `cluster-id` changes are persisted once the clusters are unpaused.

-```shell
+```bash
clusterPatch='{"spec":{"pause":false}}'
kubectl patch cluster  --type=merge -p $clusterPatch
...
@@ -351,7 +353,7 @@ Start with patching the Cluster object for **each affected** user clusters
to enable the `vsphereCSIClusterID` feature flag. Enabling this feature flag
automatically changes the `cluster-id` value to the cluster name.

-```shell
+```bash
clusterPatch='{"spec":{"features":{"vsphereCSIClusterID":true}}}'
kubectl patch cluster  --type=merge -p $clusterPatch
...
@@ -375,7 +377,7 @@ the seed cluster **AND** the `cloud-config-csi` Secret in the user cluster
the ConfigMap in the user cluster namespace in the seed cluster, and the second
command reads the config from the Secret in the user cluster.

-```shell
+```bash
kubectl --kubeconfig= get configmap -n cluster- cloud-config-csi
kubectl --kubeconfig= get secret -n kube-system cloud-config-csi -o jsonpath='{.data.config}' | base64 -d
```

@@ -383,7 +385,7 @@ kubectl --kubeconfig= get secret -n kube-system cloud-c
Both the Secret and the ConfigMap should have config with `cluster-id` set to
the user cluster name (e.g. `s8kkpcccfq`).

-```
+```yaml
[Global]
user = "username"
password = "password"
@@ -402,7 +404,7 @@ to the next section.
Finally, restart the vSphere CSI controller pods in **each affected user
cluster** to put those changes into effect:

-```shell
+```bash
kubectl --kubeconfig= delete pods -n kube-system -l app=vsphere-csi-controller
...
kubectl --kubeconfig= delete pods -n kube-system -l app=vsphere-csi-controller
diff --git a/content/kubermatic/main/data/addondata.go b/content/kubermatic/main/data/addondata.go
index 228e25110..c4ee3ebac 100644
--- a/content/kubermatic/main/data/addondata.go
+++ b/content/kubermatic/main/data/addondata.go
@@ -38,7 +38,7 @@ type ClusterData struct {
	// CloudProviderName is the name of the cloud provider used, one of
	// "alibaba", "aws", "azure", "bringyourown", "digitalocean", "gcp",
-	// "hetzner", "kubevirt", "openstack", "packet", "vsphere" depending on
+	// "hetzner", "kubevirt", "openstack", "vsphere" depending on
	// the configured datacenters.
	CloudProviderName string
	// Version is the exact current cluster version.
@@ -151,7 +151,6 @@ type Credentials struct {
	GCP GCPCredentials
	Hetzner HetznerCredentials
	Openstack OpenstackCredentials
-	Packet PacketCredentials
	Kubevirt KubevirtCredentials
	VSphere VSphereCredentials
	Alibaba AlibabaCredentials
@@ -206,11 +205,6 @@ type OpenstackCredentials struct {
	Token string
}

-type PacketCredentials struct {
-	APIKey string
-	ProjectID string
-}
-
type KubevirtCredentials struct {
	// Admin kubeconfig for KubeVirt cluster
	KubeConfig string
diff --git a/content/kubermatic/main/data/applicationdata.go b/content/kubermatic/main/data/applicationdata.go
index a709dd6ce..2ebbd7d42 100644
--- a/content/kubermatic/main/data/applicationdata.go
+++ b/content/kubermatic/main/data/applicationdata.go
@@ -20,6 +20,12 @@ type ClusterData struct {
	MajorMinorVersion string
	// AutoscalerVersion is the tag which should be used for the cluster autoscaler
	AutoscalerVersion string
+	// Annotations holds arbitrary non-identifying metadata attached to the cluster.
+	// Transferred from the Kubermatic cluster object.
+ Labels map[string]string } // ClusterAddress stores access and address information of a cluster. diff --git a/content/kubermatic/main/data/kubermaticConfiguration.ce.yaml b/content/kubermatic/main/data/kubermaticConfiguration.ce.yaml index 46e5b3fa6..059be54ee 100644 --- a/content/kubermatic/main/data/kubermaticConfiguration.ce.yaml +++ b/content/kubermatic/main/data/kubermaticConfiguration.ce.yaml @@ -30,7 +30,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -49,6 +49,72 @@ spec: memory: 150Mi # Applications contains configuration for Application settings. applications: + # CatalogManager configures the Application Catalog CatalogManager, which is responsible for managing ApplicationDefinitions + # in the cluster from specified OCI registries. + # Note: The Application Catalog CatalogManager requires its feature flag to be enabled as it is currently in beta. + catalogManager: + # Image configures the container image for the application-catalog manager. + image: + # Repository is used to override the application-catalog manager image repository. + # The default value is "quay.io/kubermatic/application-catalog-manager" + repository: quay.io/kubermatic/application-catalog-manager + # Tag is used to override the application-catalog manager image tag. + tag: v0.1.0 + # Limit defines filtering criteria for ApplicationDefinitions to be reconciled from the OCI registry. + # When undefined, all ApplicationDefinitions from the registry are pulled and reconciled. + # When defined, only ApplicationDefinitions matching the specified criteria are processed. + limit: + # MetadataSelector defines criteria for selecting ApplicationDefinitions based on their metadata attributes. + # For example, to select ApplicationDefinitions with a specific support tier (e.g., 'gold'), + # specify that tier here. + # When multiple tiers are specified, the Application Catalog Manager uses additive logic + # to determine which ApplicationDefinitions to retrieve from the OCI registry. + metadataSelector: + # Tiers specifies the support tiers to filter ApplicationDefinitions. + # ApplicationDefinitions matching any of the specified tiers will be selected. + tiers: null + # NameSelector defines criteria for selecting ApplicationDefinitions by name. + # Each name must correspond to an ApplicationDefinition's `metadata.name` field. + # When multiple names are specified, the Application Catalog Manager uses additive logic + # to retrieve all matching ApplicationDefinitions from the OCI registry. + # Example: Specifying ['nginx', 'cert-manager'] will retrieve only those specific ApplicationDefinitions. + nameSelector: null + # LogLevel specifies the logging verbosity level for the Application Catalog Manager. + logLevel: "" + # ReconciliationInterval is the interval at which application-catalog manager reconcile ApplicationDefinitions. + # By default, ApplicationsDefinitions are reconciled at every 10 minutes. + # Setting a value equal to 0 disables the force reconciliation of the default Application Catalog. + reconciliationInterval: 0s + # RegistrySettings configures the OCI registry from which the Application Catalog Manager + # retrieves ApplicationDefinition manifests. 
+ registrySettings: + # Credentials optionally references a secret containing Helm registry authentication credentials. + # Either username/password or registryConfigFile can be specified, but not both. + credentials: null + # RegistryURL specifies the OCI registry URL where ApplicationDefinitions are stored. + # Example: oci://localhost:5000/myrepo + registryURL: quay.io/kubermatic/applications + # Tag specifies the version tag for ApplicationDefinitions in the OCI registry. + # Example: v1.0.0 + tag: 7fd8340dc8f0b3f6aae519301a1c9f8aff34d939 + # Resources describes the requested and maximum allowed CPU/memory usage. + resources: + # Claims lists the names of resources, defined in spec.resourceClaims, + # that are used by this container. + + # This field depends on the + # DynamicResourceAllocation feature gate. + + # This field is immutable. It can only be set for containers. + claims: null + # Limits describes the maximum amount of compute resources allowed. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + limits: null + # Requests describes the minimum amount of compute resources required. + # If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + # otherwise to an implementation-defined value. Requests cannot exceed Limits. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + requests: null # DefaultApplicationCatalog contains configuration for the default application catalog. defaultApplicationCatalog: # Applications is a list of application definition names that should be installed in the master cluster. @@ -152,7 +218,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -279,7 +345,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -317,7 +383,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -496,7 +562,7 @@ spec: # Versions configures the available and default Kubernetes versions and updates. versions: # Default is the default version to offer users. - default: v1.32.7 + default: v1.33.5 # ExternalClusters contains the available and default Kubernetes versions and updates for ExternalClusters. externalClusters: aks: @@ -507,9 +573,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 eks: # Default is the default version to offer users. default: v1.31 @@ -518,9 +581,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 # ProviderIncompatibilities lists all the Kubernetes version incompatibilities providerIncompatibilities: - # Condition is the cluster or datacenter condition that must be met to block a specific version @@ -549,16 +609,10 @@ spec: # for the worker nodes of all matching user clusters. 
automaticNodeUpdate: false # From is the version from which an update is allowed. Wildcards are allowed, e.g. "1.18.*". - from: 1.29.* + from: 1.31.* # To is the version to which an update is allowed. # Must be a valid version if `automatic` is set to true, e.g. "1.20.13". # Can be a wildcard otherwise, e.g. "1.20.*". - to: 1.30.* - - from: 1.30.* - to: 1.30.* - - from: 1.30.* - to: 1.31.* - - from: 1.31.* to: 1.31.* - from: 1.31.* to: 1.32.* @@ -568,27 +622,30 @@ spec: to: 1.33.* - from: 1.33.* to: 1.33.* + - from: 1.33.* + to: 1.34.* + - from: 1.34.* + to: 1.34.* # Versions lists the available versions. versions: - - v1.30.5 - - v1.30.9 - - v1.30.11 - - v1.30.12 - - v1.30.14 - v1.31.1 - v1.31.5 - v1.31.7 - v1.31.8 - v1.31.10 - v1.31.11 + - v1.31.13 - v1.32.1 - v1.32.3 - v1.32.4 - v1.32.6 - v1.32.7 + - v1.32.9 - v1.33.0 - v1.33.2 - v1.33.3 + - v1.33.5 + - v1.34.1 # VerticalPodAutoscaler configures the Kubernetes VPA integration. verticalPodAutoscaler: admissionController: @@ -599,7 +656,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -624,7 +681,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -649,7 +706,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -682,7 +739,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. diff --git a/content/kubermatic/main/data/kubermaticConfiguration.ee.yaml b/content/kubermatic/main/data/kubermaticConfiguration.ee.yaml index 4789626a9..3e85ad880 100644 --- a/content/kubermatic/main/data/kubermaticConfiguration.ee.yaml +++ b/content/kubermatic/main/data/kubermaticConfiguration.ee.yaml @@ -30,7 +30,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -49,6 +49,72 @@ spec: memory: 150Mi # Applications contains configuration for Application settings. applications: + # CatalogManager configures the Application Catalog CatalogManager, which is responsible for managing ApplicationDefinitions + # in the cluster from specified OCI registries. + # Note: The Application Catalog CatalogManager requires its feature flag to be enabled as it is currently in beta. + catalogManager: + # Image configures the container image for the application-catalog manager. + image: + # Repository is used to override the application-catalog manager image repository. 
+ # The default value is "quay.io/kubermatic/application-catalog-manager" + repository: quay.io/kubermatic/application-catalog-manager + # Tag is used to override the application-catalog manager image tag. + tag: v0.1.0 + # Limit defines filtering criteria for ApplicationDefinitions to be reconciled from the OCI registry. + # When undefined, all ApplicationDefinitions from the registry are pulled and reconciled. + # When defined, only ApplicationDefinitions matching the specified criteria are processed. + limit: + # MetadataSelector defines criteria for selecting ApplicationDefinitions based on their metadata attributes. + # For example, to select ApplicationDefinitions with a specific support tier (e.g., 'gold'), + # specify that tier here. + # When multiple tiers are specified, the Application Catalog Manager uses additive logic + # to determine which ApplicationDefinitions to retrieve from the OCI registry. + metadataSelector: + # Tiers specifies the support tiers to filter ApplicationDefinitions. + # ApplicationDefinitions matching any of the specified tiers will be selected. + tiers: null + # NameSelector defines criteria for selecting ApplicationDefinitions by name. + # Each name must correspond to an ApplicationDefinition's `metadata.name` field. + # When multiple names are specified, the Application Catalog Manager uses additive logic + # to retrieve all matching ApplicationDefinitions from the OCI registry. + # Example: Specifying ['nginx', 'cert-manager'] will retrieve only those specific ApplicationDefinitions. + nameSelector: null + # LogLevel specifies the logging verbosity level for the Application Catalog Manager. + logLevel: "" + # ReconciliationInterval is the interval at which application-catalog manager reconcile ApplicationDefinitions. + # By default, ApplicationsDefinitions are reconciled at every 10 minutes. + # Setting a value equal to 0 disables the force reconciliation of the default Application Catalog. + reconciliationInterval: 0s + # RegistrySettings configures the OCI registry from which the Application Catalog Manager + # retrieves ApplicationDefinition manifests. + registrySettings: + # Credentials optionally references a secret containing Helm registry authentication credentials. + # Either username/password or registryConfigFile can be specified, but not both. + credentials: null + # RegistryURL specifies the OCI registry URL where ApplicationDefinitions are stored. + # Example: oci://localhost:5000/myrepo + registryURL: quay.io/kubermatic/applications + # Tag specifies the version tag for ApplicationDefinitions in the OCI registry. + # Example: v1.0.0 + tag: 7fd8340dc8f0b3f6aae519301a1c9f8aff34d939 + # Resources describes the requested and maximum allowed CPU/memory usage. + resources: + # Claims lists the names of resources, defined in spec.resourceClaims, + # that are used by this container. + + # This field depends on the + # DynamicResourceAllocation feature gate. + + # This field is immutable. It can only be set for containers. + claims: null + # Limits describes the maximum amount of compute resources allowed. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + limits: null + # Requests describes the minimum amount of compute resources required. + # If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + # otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + requests: null # DefaultApplicationCatalog contains configuration for the default application catalog. defaultApplicationCatalog: # Applications is a list of application definition names that should be installed in the master cluster. @@ -152,7 +218,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -279,7 +345,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -317,7 +383,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -496,7 +562,7 @@ spec: # Versions configures the available and default Kubernetes versions and updates. versions: # Default is the default version to offer users. - default: v1.32.7 + default: v1.33.5 # ExternalClusters contains the available and default Kubernetes versions and updates for ExternalClusters. externalClusters: aks: @@ -507,9 +573,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 eks: # Default is the default version to offer users. default: v1.31 @@ -518,9 +581,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 # ProviderIncompatibilities lists all the Kubernetes version incompatibilities providerIncompatibilities: - # Condition is the cluster or datacenter condition that must be met to block a specific version @@ -549,16 +609,10 @@ spec: # for the worker nodes of all matching user clusters. automaticNodeUpdate: false # From is the version from which an update is allowed. Wildcards are allowed, e.g. "1.18.*". - from: 1.29.* + from: 1.31.* # To is the version to which an update is allowed. # Must be a valid version if `automatic` is set to true, e.g. "1.20.13". # Can be a wildcard otherwise, e.g. "1.20.*". - to: 1.30.* - - from: 1.30.* - to: 1.30.* - - from: 1.30.* - to: 1.31.* - - from: 1.31.* to: 1.31.* - from: 1.31.* to: 1.32.* @@ -568,27 +622,30 @@ spec: to: 1.33.* - from: 1.33.* to: 1.33.* + - from: 1.33.* + to: 1.34.* + - from: 1.34.* + to: 1.34.* # Versions lists the available versions. versions: - - v1.30.5 - - v1.30.9 - - v1.30.11 - - v1.30.12 - - v1.30.14 - v1.31.1 - v1.31.5 - v1.31.7 - v1.31.8 - v1.31.10 - v1.31.11 + - v1.31.13 - v1.32.1 - v1.32.3 - v1.32.4 - v1.32.6 - v1.32.7 + - v1.32.9 - v1.33.0 - v1.33.2 - v1.33.3 + - v1.33.5 + - v1.34.1 # VerticalPodAutoscaler configures the Kubernetes VPA integration. verticalPodAutoscaler: admissionController: @@ -599,7 +656,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. 
@@ -624,7 +681,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -649,7 +706,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -682,7 +739,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. diff --git a/content/kubermatic/main/data/kubermaticConfiguration.yaml b/content/kubermatic/main/data/kubermaticConfiguration.yaml index 46e5b3fa6..059be54ee 100644 --- a/content/kubermatic/main/data/kubermaticConfiguration.yaml +++ b/content/kubermatic/main/data/kubermaticConfiguration.yaml @@ -30,7 +30,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -49,6 +49,72 @@ spec: memory: 150Mi # Applications contains configuration for Application settings. applications: + # CatalogManager configures the Application Catalog CatalogManager, which is responsible for managing ApplicationDefinitions + # in the cluster from specified OCI registries. + # Note: The Application Catalog CatalogManager requires its feature flag to be enabled as it is currently in beta. + catalogManager: + # Image configures the container image for the application-catalog manager. + image: + # Repository is used to override the application-catalog manager image repository. + # The default value is "quay.io/kubermatic/application-catalog-manager" + repository: quay.io/kubermatic/application-catalog-manager + # Tag is used to override the application-catalog manager image tag. + tag: v0.1.0 + # Limit defines filtering criteria for ApplicationDefinitions to be reconciled from the OCI registry. + # When undefined, all ApplicationDefinitions from the registry are pulled and reconciled. + # When defined, only ApplicationDefinitions matching the specified criteria are processed. + limit: + # MetadataSelector defines criteria for selecting ApplicationDefinitions based on their metadata attributes. + # For example, to select ApplicationDefinitions with a specific support tier (e.g., 'gold'), + # specify that tier here. + # When multiple tiers are specified, the Application Catalog Manager uses additive logic + # to determine which ApplicationDefinitions to retrieve from the OCI registry. + metadataSelector: + # Tiers specifies the support tiers to filter ApplicationDefinitions. + # ApplicationDefinitions matching any of the specified tiers will be selected. + tiers: null + # NameSelector defines criteria for selecting ApplicationDefinitions by name. + # Each name must correspond to an ApplicationDefinition's `metadata.name` field. 
+        # When multiple names are specified, the Application Catalog Manager uses additive logic
+        # to retrieve all matching ApplicationDefinitions from the OCI registry.
+        # Example: Specifying ['nginx', 'cert-manager'] will retrieve only those specific ApplicationDefinitions.
+        nameSelector: null
+      # LogLevel specifies the logging verbosity level for the Application Catalog Manager.
+      logLevel: ""
+      # ReconciliationInterval is the interval at which the application-catalog manager reconciles ApplicationDefinitions.
+      # By default, ApplicationDefinitions are reconciled every 10 minutes.
+      # Setting a value equal to 0 disables the forced reconciliation of the default Application Catalog.
+      reconciliationInterval: 0s
+      # RegistrySettings configures the OCI registry from which the Application Catalog Manager
+      # retrieves ApplicationDefinition manifests.
+      registrySettings:
+        # Credentials optionally references a secret containing Helm registry authentication credentials.
+        # Either username/password or registryConfigFile can be specified, but not both.
+        credentials: null
+        # RegistryURL specifies the OCI registry URL where ApplicationDefinitions are stored.
+        # Example: oci://localhost:5000/myrepo
+        registryURL: quay.io/kubermatic/applications
+        # Tag specifies the version tag for ApplicationDefinitions in the OCI registry.
+        # Example: v1.0.0
+        tag: 7fd8340dc8f0b3f6aae519301a1c9f8aff34d939
+      # Resources describes the requested and maximum allowed CPU/memory usage.
+      resources:
+        # Claims lists the names of resources, defined in spec.resourceClaims,
+        # that are used by this container.
+
+        # This field depends on the
+        # DynamicResourceAllocation feature gate.
+
+        # This field is immutable. It can only be set for containers.
+        claims: null
+        # Limits describes the maximum amount of compute resources allowed.
+        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+        limits: null
+        # Requests describes the minimum amount of compute resources required.
+        # If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+        # otherwise to an implementation-defined value. Requests cannot exceed Limits.
+        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+        requests: null
   # DefaultApplicationCatalog contains configuration for the default application catalog.
   defaultApplicationCatalog:
     # Applications is a list of application definition names that should be installed in the master cluster.
@@ -152,7 +218,7 @@ spec:
       # Claims lists the names of resources, defined in spec.resourceClaims,
       # that are used by this container.

-      # This is an alpha field and requires enabling the
+      # This field depends on the
       # DynamicResourceAllocation feature gate.

       # This field is immutable. It can only be set for containers.
@@ -279,7 +345,7 @@ spec:
       # Claims lists the names of resources, defined in spec.resourceClaims,
       # that are used by this container.

-      # This is an alpha field and requires enabling the
+      # This field depends on the
       # DynamicResourceAllocation feature gate.

       # This field is immutable. It can only be set for containers.
@@ -317,7 +383,7 @@ spec:
       # Claims lists the names of resources, defined in spec.resourceClaims,
       # that are used by this container.

-      # This is an alpha field and requires enabling the
+      # This field depends on the
       # DynamicResourceAllocation feature gate.

       # This field is immutable. It can only be set for containers.
@@ -496,7 +562,7 @@ spec: # Versions configures the available and default Kubernetes versions and updates. versions: # Default is the default version to offer users. - default: v1.32.7 + default: v1.33.5 # ExternalClusters contains the available and default Kubernetes versions and updates for ExternalClusters. externalClusters: aks: @@ -507,9 +573,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 eks: # Default is the default version to offer users. default: v1.31 @@ -518,9 +581,6 @@ spec: # Versions lists the available versions. versions: - v1.31 - - v1.30 - - v1.29 - - v1.28 # ProviderIncompatibilities lists all the Kubernetes version incompatibilities providerIncompatibilities: - # Condition is the cluster or datacenter condition that must be met to block a specific version @@ -549,16 +609,10 @@ spec: # for the worker nodes of all matching user clusters. automaticNodeUpdate: false # From is the version from which an update is allowed. Wildcards are allowed, e.g. "1.18.*". - from: 1.29.* + from: 1.31.* # To is the version to which an update is allowed. # Must be a valid version if `automatic` is set to true, e.g. "1.20.13". # Can be a wildcard otherwise, e.g. "1.20.*". - to: 1.30.* - - from: 1.30.* - to: 1.30.* - - from: 1.30.* - to: 1.31.* - - from: 1.31.* to: 1.31.* - from: 1.31.* to: 1.32.* @@ -568,27 +622,30 @@ spec: to: 1.33.* - from: 1.33.* to: 1.33.* + - from: 1.33.* + to: 1.34.* + - from: 1.34.* + to: 1.34.* # Versions lists the available versions. versions: - - v1.30.5 - - v1.30.9 - - v1.30.11 - - v1.30.12 - - v1.30.14 - v1.31.1 - v1.31.5 - v1.31.7 - v1.31.8 - v1.31.10 - v1.31.11 + - v1.31.13 - v1.32.1 - v1.32.3 - v1.32.4 - v1.32.6 - v1.32.7 + - v1.32.9 - v1.33.0 - v1.33.2 - v1.33.3 + - v1.33.5 + - v1.34.1 # VerticalPodAutoscaler configures the Kubernetes VPA integration. verticalPodAutoscaler: admissionController: @@ -599,7 +656,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -624,7 +681,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -649,7 +706,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -682,7 +739,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. 
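For orientation, the new `applications.catalogManager` block in the kubermaticConfiguration.yaml diff above could be exercised with a minimal sketch like the following. It assumes the (beta) Application Catalog Manager feature flag is enabled, which is not part of this diff; the selector names and the 30-minute interval are illustrative values, not defaults:

apiVersion: kubermatic.k8c.io/v1
kind: KubermaticConfiguration
metadata:
  name: kubermatic
  namespace: kubermatic
spec:
  applications:
    catalogManager:
      limit:
        # Only pull ApplicationDefinitions whose metadata.name matches.
        nameSelector:
          - nginx
          - cert-manager
      # Re-sync from the OCI registry every 30 minutes; 0s disables the
      # forced reconciliation.
      reconciliationInterval: 30m
      registrySettings:
        registryURL: quay.io/kubermatic/applications
        tag: v1.0.0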
diff --git a/content/kubermatic/main/data/seed.ce.yaml b/content/kubermatic/main/data/seed.ce.yaml index 901903f38..593a9ea25 100644 --- a/content/kubermatic/main/data/seed.ce.yaml +++ b/content/kubermatic/main/data/seed.ce.yaml @@ -33,6 +33,8 @@ spec: # List of registry mirrors to use mirrors: - mirror.gcr.io + # Optional: EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. + enableNonRootDeviceOwnership: false # Optional: If set, this proxy will be configured for both HTTP and HTTPS. httpProxy: "" # Optional: These image registries will be configured as insecure @@ -355,16 +357,6 @@ spec: rhel: "" rockylinux: "" ubuntu: "" - # Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. - # This provider is no longer supported. Migrate your configurations away from "packet" immediately. - # Packet configures an Equinix Metal datacenter. - packet: - # The list of enabled facilities, for example "ams1", for a full list of available - # facilities see https://metal.equinix.com/developers/docs/locations/facilities/ - facilities: [] - # Metros are facilities that are grouped together geographically and share capacity - # and networking features, see https://metal.equinix.com/developers/docs/locations/metros/ - metro: "" # Optional: ProviderReconciliationInterval is the time that must have passed since a # Cluster's status.lastProviderReconciliation to make the cluster controller # perform an in-depth provider reconciliation, where for example missing security @@ -445,7 +437,6 @@ spec: # to default all new created clusters defaultClusterTemplate: "" # DefaultComponentSettings are default values to set for newly created clusters. - # Deprecated: Use DefaultClusterTemplate instead. defaultComponentSettings: # Apiserver configures kube-apiserver settings. apiserver: @@ -526,7 +517,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -669,7 +660,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -695,7 +686,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -726,7 +717,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. 
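Similarly, the `versions.updates` rules in the kubermaticConfiguration.yaml diff above follow a from/to pattern in which wildcards are allowed, but `to` must be a concrete version when an update is automatic. A short sketch of the rule shapes, using versions that appear in this diff (`automatic: true` is shown purely for illustration):

spec:
  versions:
    updates:
      # Manual patch upgrades within the 1.33 series.
      - from: 1.33.*
        to: 1.33.*
      # Manual upgrades from any 1.33 patch into the 1.34 series.
      - from: 1.33.*
        to: 1.34.*
      # Automatic upgrades must target a concrete version, not a wildcard.
      - from: 1.33.*
        to: 1.34.1
        automatic: true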
diff --git a/content/kubermatic/main/data/seed.ee.yaml b/content/kubermatic/main/data/seed.ee.yaml index 82f6eddd0..be8614a62 100644 --- a/content/kubermatic/main/data/seed.ee.yaml +++ b/content/kubermatic/main/data/seed.ee.yaml @@ -33,6 +33,8 @@ spec: # List of registry mirrors to use mirrors: - mirror.gcr.io + # Optional: EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. + enableNonRootDeviceOwnership: false # Optional: If set, this proxy will be configured for both HTTP and HTTPS. httpProxy: "" # Optional: These image registries will be configured as insecure @@ -156,8 +158,7 @@ spec: kubelb: # DisableIngressClass is used to disable the ingress class `kubelb` filter for kubeLB. disableIngressClass: false - # EnableGatewayAPI is used to configure the use of gateway API for kubeLB. - # When this option is enabled for the user cluster, KKP installs the Gateway API CRDs for the user cluster. + # EnableGatewayAPI is used to configure the use of gateway API for kubeLB. Once enabled, Gateway API CRDs are installed for the user cluster. enableGatewayAPI: false # EnableSecretSynchronizer is used to configure the use of secret synchronizer for kubeLB. enableSecretSynchronizer: true @@ -404,16 +405,6 @@ spec: rhel: "" rockylinux: "" ubuntu: "" - # Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. - # This provider is no longer supported. Migrate your configurations away from "packet" immediately. - # Packet configures an Equinix Metal datacenter. - packet: - # The list of enabled facilities, for example "ams1", for a full list of available - # facilities see https://metal.equinix.com/developers/docs/locations/facilities/ - facilities: [] - # Metros are facilities that are grouped together geographically and share capacity - # and networking features, see https://metal.equinix.com/developers/docs/locations/metros/ - metro: "" # Optional: ProviderReconciliationInterval is the time that must have passed since a # Cluster's status.lastProviderReconciliation to make the cluster controller # perform an in-depth provider reconciliation, where for example missing security @@ -494,7 +485,6 @@ spec: # to default all new created clusters defaultClusterTemplate: "" # DefaultComponentSettings are default values to set for newly created clusters. - # Deprecated: Use DefaultClusterTemplate instead. defaultComponentSettings: # Apiserver configures kube-apiserver settings. apiserver: @@ -575,7 +565,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -751,7 +741,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -777,7 +767,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. 
@@ -808,7 +798,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. diff --git a/content/kubermatic/main/data/seed.yaml b/content/kubermatic/main/data/seed.yaml index 901903f38..593a9ea25 100644 --- a/content/kubermatic/main/data/seed.yaml +++ b/content/kubermatic/main/data/seed.yaml @@ -33,6 +33,8 @@ spec: # List of registry mirrors to use mirrors: - mirror.gcr.io + # Optional: EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime. + enableNonRootDeviceOwnership: false # Optional: If set, this proxy will be configured for both HTTP and HTTPS. httpProxy: "" # Optional: These image registries will be configured as insecure @@ -355,16 +357,6 @@ spec: rhel: "" rockylinux: "" ubuntu: "" - # Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. - # This provider is no longer supported. Migrate your configurations away from "packet" immediately. - # Packet configures an Equinix Metal datacenter. - packet: - # The list of enabled facilities, for example "ams1", for a full list of available - # facilities see https://metal.equinix.com/developers/docs/locations/facilities/ - facilities: [] - # Metros are facilities that are grouped together geographically and share capacity - # and networking features, see https://metal.equinix.com/developers/docs/locations/metros/ - metro: "" # Optional: ProviderReconciliationInterval is the time that must have passed since a # Cluster's status.lastProviderReconciliation to make the cluster controller # perform an in-depth provider reconciliation, where for example missing security @@ -445,7 +437,6 @@ spec: # to default all new created clusters defaultClusterTemplate: "" # DefaultComponentSettings are default values to set for newly created clusters. - # Deprecated: Use DefaultClusterTemplate instead. defaultComponentSettings: # Apiserver configures kube-apiserver settings. apiserver: @@ -526,7 +517,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -669,7 +660,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -695,7 +686,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. @@ -726,7 +717,7 @@ spec: # Claims lists the names of resources, defined in spec.resourceClaims, # that are used by this container. - # This is an alpha field and requires enabling the + # This field depends on the # DynamicResourceAllocation feature gate. # This field is immutable. It can only be set for containers. 
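One practical consequence of the reworded `enableGatewayAPI` comment in the seed diffs above: enabling the flag makes KKP install the Gateway API CRDs for the affected user clusters. A hedged sketch of turning it on for a single datacenter follows; the Seed and datacenter names are illustrative, and the `datacenters.<name>.spec.kubelb` placement is an assumption based on where the field sits in the Seed data files:

apiVersion: kubermatic.k8c.io/v1
kind: Seed
metadata:
  name: example-seed        # illustrative
  namespace: kubermatic
spec:
  datacenters:
    example-dc:             # illustrative
      spec:
        kubelb:
          # Installs Gateway API CRDs into user clusters of this datacenter.
          enableGatewayAPI: true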
diff --git a/content/kubermatic/main/data/swagger.json b/content/kubermatic/main/data/swagger.json deleted file mode 100644 index 3c7892ce2..000000000 --- a/content/kubermatic/main/data/swagger.json +++ /dev/null @@ -1,42389 +0,0 @@ -{ - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "schemes": [ - "https" - ], - "swagger": "2.0", - "info": { - "description": "This OpenAPI 2.0 specification describes the REST APIs used by the Kubermatic Kubernetes Platform Dashboard.", - "title": "Kubermatic Kubernetes Platform API", - "version": "2.28" - }, - "paths": { - "/api/projects/{project_id}/clusters/{cluster_id}/sshkeys/{key_id}": { - "delete": { - "description": "Unassignes an ssh key from the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "detachSSHKeyFromClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "KeyID", - "name": "key_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/addonconfigs": { - "get": { - "produces": [ - "application/json" - ], - "summary": "Returns all available addon configs.", - "operationId": "listAddonConfigs", - "responses": { - "200": { - "description": "AddonConfig", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/AddonConfig" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/addonconfigs/{addon_id}": { - "get": { - "produces": [ - "application/json" - ], - "summary": "Returns specified addon config.", - "operationId": "getAddonConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AddonConfig", - "schema": { - "$ref": "#/definitions/AddonConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/addons": { - "post": { - "description": "Lists names of addons that can be configured inside the user clusters", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "listAccessibleAddons", - "responses": { - "200": { - "description": "AccessibleAddons", - "schema": { - "$ref": "#/definitions/AccessibleAddons" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Returns list of admin users.", - "operationId": "getAdmins", - "responses": { - "200": { - "description": "Admin", - "schema": { - "type": "array", - "items": { - 
"$ref": "#/definitions/Admin" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Allows setting and clearing admin role for users.", - "operationId": "setAdmin", - "parameters": [ - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Admin" - } - } - ], - "responses": { - "200": { - "description": "Admin", - "schema": { - "$ref": "#/definitions/Admin" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/admission/plugins": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Returns all admission plugins from the CRDs.", - "operationId": "listAdmissionPlugins", - "responses": { - "200": { - "description": "AdmissionPlugin", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/AdmissionPlugin" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/admission/plugins/{name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Gets the admission plugin.", - "operationId": "getAdmissionPlugin", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AdmissionPlugin", - "schema": { - "$ref": "#/definitions/AdmissionPlugin" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Deletes the admission plugin.", - "operationId": "deleteAdmissionPlugin", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Updates the admission plugin.", - "operationId": "updateAdmissionPlugin", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/AdmissionPlugin" - } - } - ], - "responses": { - "200": { - "description": "AdmissionPlugin", - "schema": { - "$ref": "#/definitions/AdmissionPlugin" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v1/admin/metering/configurations": { - "put": { - "description": "Configures KKP metering tool. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "createOrUpdateMeteringConfigurations", - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/metering/configurations/reports": { - "get": { - "description": "Lists report configurations for KKP metering tool. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "listMeteringReportConfigurations", - "responses": { - "200": { - "description": "MeteringReportConfiguration", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MeteringReportConfiguration" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/metering/configurations/reports/{name}": { - "get": { - "description": "Gets report configuration for KKP metering tool. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "getMeteringReportConfiguration", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MeteringReportConfiguration", - "schema": { - "$ref": "#/definitions/MeteringReportConfiguration" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates existing report configuration for KKP metering tool. Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "updateMeteringReportConfiguration", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "interval": { - "type": "integer", - "format": "int32", - "x-go-name": "Interval" - }, - "retention": { - "type": "integer", - "format": "int32", - "x-go-name": "Retention" - }, - "schedule": { - "type": "string", - "x-go-name": "Schedule" - }, - "types": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Types" - } - } - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates report configuration for KKP metering tool. 
Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "createMeteringReportConfiguration", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "interval": { - "type": "integer", - "format": "int32", - "x-go-name": "Interval" - }, - "retention": { - "type": "integer", - "format": "int32", - "x-go-name": "Retention" - }, - "schedule": { - "type": "string", - "x-go-name": "Schedule" - }, - "types": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Types" - } - } - } - } - ], - "responses": { - "201": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Removes report configuration for KKP metering tool. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "deleteMeteringReportConfiguration", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/metering/credentials": { - "put": { - "description": "Creates or updates the metering tool credentials. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "createOrUpdateMeteringCredentials", - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/metering/reports": { - "get": { - "description": "List metering reports. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "metering", - "reports" - ], - "operationId": "listMeteringReports", - "parameters": [ - { - "type": "string", - "x-go-name": "StartAfter", - "name": "start_after", - "in": "query" - }, - { - "type": "integer", - "format": "int64", - "x-go-name": "MaxKeys", - "name": "max_keys", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ConfigurationName", - "name": "configuration_name", - "in": "query" - } - ], - "responses": { - "200": { - "description": "MeteringReport", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MeteringReport" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/metering/reports/{report_name}": { - "get": { - "description": "Download a specific metering report. Provides an S3 pre signed URL valid for 1 hour. 
Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "metering", - "report" - ], - "operationId": "getMeteringReport", - "parameters": [ - { - "type": "string", - "x-go-name": "ReportName", - "name": "report_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ConfigurationName", - "name": "configuration_name", - "in": "query" - } - ], - "responses": { - "200": { - "description": "MeteringReportURL", - "schema": { - "$ref": "#/definitions/MeteringReportURL" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Removes a specific metering report. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "metering", - "report" - ], - "operationId": "deleteMeteringReport", - "parameters": [ - { - "type": "string", - "x-go-name": "ReportName", - "name": "report_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ConfigurationName", - "name": "configuration_name", - "in": "query" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/seeds": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Returns all seeds from the CRDs.", - "operationId": "listSeeds", - "responses": { - "200": { - "description": "Seed", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Seed" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Creates a new seed object.", - "operationId": "createSeed", - "parameters": [ - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/CreateSeedSpec" - } - } - } - } - ], - "responses": { - "200": { - "description": "Seed", - "schema": { - "$ref": "#/definitions/Seed" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/seeds/{seed_name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Returns the seed object.", - "operationId": "getSeed", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Seed", - "schema": { - "$ref": "#/definitions/Seed" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Deletes the 
seed CRD object from the Kubermatic.", - "operationId": "deleteSeed", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Updates the seed.", - "operationId": "updateSeed", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "raw_kubeconfig": { - "description": "RawKubeconfig raw kubeconfig decoded to base64", - "type": "string", - "x-go-name": "RawKubeconfig" - }, - "spec": { - "$ref": "#/definitions/SeedSpec" - } - } - } - } - ], - "responses": { - "200": { - "description": "Seed", - "schema": { - "$ref": "#/definitions/Seed" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/seeds/{seed_name}/backupdestinations/{backup_destination}": { - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Deletes a backup destination from the Seed.", - "operationId": "deleteBackupDestination", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BackupDestination", - "name": "backup_destination", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/settings": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Gets the global settings.", - "operationId": "getKubermaticSettings", - "responses": { - "200": { - "description": "GlobalSettings", - "schema": { - "$ref": "#/definitions/GlobalSettings" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "summary": "Patches the global settings.", - "operationId": "patchKubermaticSettings", - "parameters": [ - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "GlobalSettings", - "schema": { - "$ref": "#/definitions/GlobalSettings" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admin/settings/customlinks": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "admin" 
- ], - "summary": "Gets the custom links.", - "operationId": "getKubermaticCustomLinks", - "responses": { - "200": { - "description": "GlobalCustomLinks", - "schema": { - "$ref": "#/definitions/GlobalCustomLinks" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/admission/plugins/{version}": { - "get": { - "produces": [ - "application/json" - ], - "summary": "Returns specified addon config.", - "operationId": "getAdmissionPlugins", - "parameters": [ - { - "type": "string", - "x-go-name": "Version", - "name": "version", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AdmissionPluginList", - "schema": { - "$ref": "#/definitions/AdmissionPluginList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/dc": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "operationId": "listDatacenters", - "responses": { - "200": { - "description": "DatacenterList", - "schema": { - "$ref": "#/definitions/DatacenterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/dc/{dc}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "operationId": "getDatacenter", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/kubeconfig": { - "get": { - "description": "Starts OIDC flow and generates kubeconfig, the generated config\ncontains OIDC provider authentication info", - "produces": [ - "application/json" - ], - "operationId": "createOIDCKubeconfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "query" - }, - { - "type": "string", - "x-go-name": "UserID", - "name": "user_id", - "in": "query" - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/labels/system": { - "get": { - "description": "List restricted system labels", - "produces": [ - "application/json" - ], - "operationId": "listSystemLabels", - "responses": { - "200": { - "description": "ResourceLabelMap", - "schema": { - "$ref": "#/definitions/ResourceLabelMap" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/me": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "summary": "Returns information about the current user.", - "operationId": "getCurrentUser", - "responses": { - "200": { - "description": "User", - "schema": { - "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": 
"#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/me/logout": { - "post": { - "description": "Enforces user to login again with the new token.", - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "summary": "Adds current authorization bearer token to the blacklist.", - "operationId": "logoutCurrentUser", - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/me/readannouncements": { - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "summary": "Updates read announcements of the current user.", - "operationId": "patchCurrentUserReadAnnouncements", - "responses": { - "200": { - "description": "User", - "schema": { - "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/me/settings": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "settings" - ], - "summary": "Returns settings of the current user.", - "operationId": "getCurrentUserSettings", - "responses": { - "200": { - "description": "UserSettings", - "schema": { - "$ref": "#/definitions/UserSettings" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "settings" - ], - "summary": "Updates settings of the current user.", - "operationId": "patchCurrentUserSettings", - "parameters": [ - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "User", - "schema": { - "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists projects that an authenticated user is a member of.", - "operationId": "listProjects", - "parameters": [ - { - "type": "boolean", - "x-go-name": "DisplayAll", - "name": "displayAll", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Project", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Project" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "409": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Note that this endpoint can be consumed by every authenticated user.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a brand new project.", - "operationId": "createProject", - "parameters": [ - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "Spec": { - "$ref": "#/definitions/ProjectSpec" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": 
"string" - }, - "x-go-name": "Labels" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "users": { - "description": "human user email list for the service account in projectmanagers group", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Users" - } - } - } - } - ], - "responses": { - "201": { - "description": "Project", - "schema": { - "$ref": "#/definitions/Project" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "409": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}": { - "get": { - "description": "Gets the project with the given ID", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Project", - "schema": { - "$ref": "#/definitions/Project" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "409": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates the given project", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "updateProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Project" - } - } - ], - "responses": { - "200": { - "description": "Project", - "schema": { - "$ref": "#/definitions/Project" - } - }, - "400": { - "$ref": "#/responses/empty" - }, - "404": { - "$ref": "#/responses/empty" - }, - "500": { - "$ref": "#/responses/empty" - }, - "501": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes the project with the given ID.", - "operationId": "deleteProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists clusters for the specified project.", - "operationId": "listClustersForProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterList", - "schema": { - "$ref": "#/definitions/ClusterList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - 
"summary": "Lists clusters for the specified project and data center.", - "operationId": "listClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterList", - "schema": { - "$ref": "#/definitions/ClusterList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a cluster for the given project.", - "operationId": "createCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/CreateClusterSpec" - } - } - ], - "responses": { - "201": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}": { - "get": { - "description": "Gets the cluster with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the specified cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "deleteCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "name": "DeleteVolumes", - "in": "header" - }, - { - "type": "boolean", - "name": "DeleteLoadBalancers", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patches the given 
cluster using JSON Merge Patch method (https://tools.ietf.org/html/rfc7396).", - "operationId": "patchCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/addons": { - "get": { - "description": "Lists addons that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "listAddons", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Addon" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates an addon that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "createAddon", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Addon" - } - } - ], - "responses": { - "201": { - "description": "Addon", - "schema": { - "$ref": "#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/addons/{addon_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Gets an addon that is assigned to the given cluster.", - "operationId": "getAddon", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - 
"required": true - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "$ref": "#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Deletes the given addon that belongs to the cluster.", - "operationId": "deleteAddon", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Patches an addon that is assigned to the given cluster.", - "operationId": "patchAddon", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Addon" - } - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "$ref": "#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/bindings": { - "get": { - "description": "List role binding", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRoleBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RoleBinding" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/clusterrolenames": { - "get": { - "description": "Lists all ClusterRoles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], 
- "operationId": "listClusterRoleNames", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRoleName", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterRoleName" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/clusterroles": { - "get": { - "description": "Lists all ClusterRoles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRole", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterRole" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates cluster role", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ClusterRole" - } - } - ], - "responses": { - "201": { - "description": "ClusterRole", - "schema": { - "$ref": "#/definitions/ClusterRole" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/clusterroles/{role_id}": { - "delete": { - "description": "Delete the cluster role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "deleteClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - 
"$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch the cluster role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ClusterRole", - "schema": { - "$ref": "#/definitions/ClusterRole" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/clusterroles/{role_id}/clusterbindings": { - "post": { - "description": "Binds user to cluster role", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "bindUserToClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ClusterRoleUser" - } - } - ], - "responses": { - "200": { - "description": "ClusterRoleBinding", - "schema": { - "$ref": "#/definitions/ClusterRoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/events": { - "get": { - "produces": [ - "application/yaml" - ], - "tags": [ - "project" - ], - "summary": "Gets the events related to the specified cluster.", - "operationId": "getClusterEvents", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - 
} - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/health": { - "get": { - "description": "Returns the cluster's component health status", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterHealth", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterHealth", - "schema": { - "$ref": "#/definitions/ClusterHealth" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/installableaddons": { - "get": { - "description": "Lists names of addons that can be installed inside the user cluster", - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "listInstallableAddons", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AccessibleAddons", - "schema": { - "$ref": "#/definitions/AccessibleAddons" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/kubeconfig": { - "get": { - "produces": [ - "application/octet-stream" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified cluster.", - "operationId": "getClusterKubeconfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/metrics": { - "get": { - "description": "Gets cluster metrics", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterMetrics", - "schema": { - "$ref": 
"#/definitions/ClusterMetrics" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/namespaces": { - "get": { - "description": "Lists all namespaces in the cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listNamespace", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Namespace", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Namespace" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodedeployments": { - "get": { - "description": "Lists node deployments that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listNodeDeployments", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeDeployment" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a node deployment that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createNodeDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - } - ], - "responses": { - "201": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodedeployments/{nodedeployment_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets a node deployment that is assigned to the given cluster.", 
- "operationId": "getNodeDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes the given node deployment that belongs to the cluster.", - "operationId": "deleteNodeDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches a node deployment that is assigned to the given cluster. 
Please note that at the moment only\nnode deployment's spec can be updated by a patch, no other fields can be changed using this endpoint.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchNodeDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodedeployments/{nodedeployment_id}/nodes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists nodes that belong to the given node deployment.", - "operationId": "listNodeDeploymentNodes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "HideInitialConditions", - "name": "hideInitialConditions", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Node", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Node" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodedeployments/{nodedeployment_id}/nodes/events": { - "get": { - "description": "If the value is 'normal' then normal events are returned. If the query parameter is missing method returns all events.", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists node deployment events. 
If query parameter `type` is set to `warning` then only warning events are retrieved.", - "operationId": "listNodeDeploymentNodesEvents", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodedeployments/{nodedeployment_id}/nodes/metrics": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "metric" - ], - "summary": "Lists metrics that belong to the given node deployment.", - "operationId": "listNodeDeploymentMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeDeploymentID", - "name": "nodedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeMetric", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeMetric" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/nodes/upgrades": { - "put": { - "description": "Upgrades node deployments in a cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "upgradeClusterNodeDeployments", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/MasterVersion" - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/oidckubeconfig": { - "get": { - "produces": [ - "application/yaml" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified cluster with oidc authentication.", - "operationId": "getOidcClusterKubeconfig", - 
"parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/rolenames": { - "get": { - "description": "Lists all Role names with namespaces", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRoleNames", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RoleName", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RoleName" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/roles": { - "get": { - "description": "Lists all Roles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Role", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Role" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates cluster role", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Role" - } - } - ], - "responses": { - "201": { - "description": "Role", - "schema": { - "$ref": "#/definitions/Role" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/roles/{namespace}/{role_id}": { - "get": { - "description": "Gets the role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Role", - "schema": { - "$ref": "#/definitions/Role" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Delete the cluster role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "deleteRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch the role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "Role", - "schema": { - "$ref": "#/definitions/Role" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/roles/{namespace}/{role_id}/bindings": { - "post": { - "description": "Binds user to the role", - "consumes": [ - "application/json" - ], - 
"produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "bindUserToRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/RoleUser" - } - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "$ref": "#/definitions/RoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Unbinds user from the role binding", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "unbindUserFromRoleBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/RoleUser" - } - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "$ref": "#/definitions/RoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/roles/{role_id}": { - "get": { - "description": "Gets the cluster role with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterRole", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRole", - "schema": { - "$ref": "#/definitions/ClusterRole" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/sshkeys": { - "get": { - "description": "Lists ssh keys that are assigned 
to the cluster\nThe returned collection is sorted by creation timestamp.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listSSHKeysAssignedToCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "SSHKey", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/SSHKey" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/sshkeys/{key_id}": { - "put": { - "description": "Assigns an existing ssh key to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "assignSSHKeyToCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "KeyID", - "name": "key_id", - "in": "path", - "required": true - } - ], - "responses": { - "201": { - "description": "SSHKey", - "schema": { - "$ref": "#/definitions/SSHKey" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Unassigns an ssh key from the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "detachSSHKeyFromCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "KeyID", - "name": "key_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/token": { - "put": { - "description": "Revokes the current admin token", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "revokeClusterAdminToken", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", -
"x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/upgrades": { - "get": { - "description": "Gets possible cluster upgrades", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterUpgrades", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/dc/{dc}/clusters/{cluster_id}/viewertoken": { - "put": { - "description": "Revokes the current viewer token", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "revokeClusterViewerToken", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/serviceaccounts": { - "get": { - "description": "List Service Accounts for the given project", - "produces": [ - "application/json" - ], - "tags": [ - "serviceaccounts" - ], - "operationId": "listServiceAccounts", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ServiceAccount", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ServiceAccount" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Adds the given service account to the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "serviceaccounts" - ], - "operationId": "addServiceAccountToProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ServiceAccount" - } - } - ], - "responses": { - "201": { - "description": 
"ServiceAccount", - "schema": { - "$ref": "#/definitions/ServiceAccount" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/serviceaccounts/{serviceaccount_id}": { - "put": { - "description": "Updates service account for the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "serviceaccounts" - ], - "operationId": "updateServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ServiceAccount" - } - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ServiceAccount", - "schema": { - "$ref": "#/definitions/ServiceAccount" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes service account for the given project", - "produces": [ - "application/json" - ], - "tags": [ - "serviceaccounts" - ], - "operationId": "deleteServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/serviceaccounts/{serviceaccount_id}/tokens": { - "get": { - "description": "List tokens for the given service account", - "produces": [ - "application/json" - ], - "tags": [ - "tokens" - ], - "operationId": "listServiceAccountTokens", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PublicServiceAccountToken", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PublicServiceAccountToken" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Generates a token for the given service account", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "tokens" - ], - "operationId": "addTokenToServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - 
"$ref": "#/definitions/ServiceAccountToken" - } - } - ], - "responses": { - "201": { - "description": "ServiceAccountToken", - "schema": { - "$ref": "#/definitions/ServiceAccountToken" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/serviceaccounts/{serviceaccount_id}/tokens/{token_id}": { - "put": { - "description": "Updates and regenerates the token", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "tokens" - ], - "operationId": "updateServiceAccountToken", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "TokenID", - "name": "token_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/PublicServiceAccountToken" - } - } - ], - "responses": { - "200": { - "description": "ServiceAccountToken", - "schema": { - "$ref": "#/definitions/ServiceAccountToken" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the token", - "produces": [ - "application/json" - ], - "tags": [ - "tokens" - ], - "operationId": "deleteServiceAccountToken", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "TokenID", - "name": "token_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches the token name", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "tokens" - ], - "operationId": "patchServiceAccountToken", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "serviceaccount_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "TokenID", - "name": "token_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8" - } - } - } - ], - "responses": { - "200": { - "description": "PublicServiceAccountToken", - "schema": { - "$ref": "#/definitions/PublicServiceAccountToken" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/sshkeys": { - "get": { - 
"description": "The returned collection is sorted by creation timestamp.", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists SSH Keys that belong to the given project.", - "operationId": "listSSHKeys", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "SSHKey", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/SSHKey" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Adds the given SSH key to the specified project.", - "operationId": "createSSHKey", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Key", - "in": "body", - "schema": { - "$ref": "#/definitions/SSHKey" - } - } - ], - "responses": { - "201": { - "description": "SSHKey", - "schema": { - "$ref": "#/definitions/SSHKey" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/sshkeys/{key_id}": { - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Removes the given SSH Key from the system.", - "operationId": "deleteSSHKey", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "SSHKeyID", - "name": "key_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/users": { - "get": { - "description": "Get list of users for the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "operationId": "getUsersForProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "User", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/User" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Adds the given user to the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "operationId": "addUserToProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/User" - } - } - ], - "responses": { - "201": { - "description": "User", - "schema": { 
- "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/projects/{project_id}/users/{user_id}": { - "put": { - "description": "Changes membership of the given user for the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "operationId": "editUserInProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/User" - } - }, - { - "type": "string", - "x-go-name": "UserID", - "name": "user_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "User", - "schema": { - "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Removes the given member from the project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "users" - ], - "operationId": "deleteUserFromProject", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "UserID", - "name": "user_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "User", - "schema": { - "$ref": "#/definitions/User" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/providers/{provider_name}/dc": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Returns all datacenters for the specified provider.", - "operationId": "listDCForProvider", - "parameters": [ - { - "type": "string", - "x-go-name": "Provider", - "name": "provider_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Datacenter" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/providers/{provider_name}/dc/{dc}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Get the datacenter for the specified provider.", - "operationId": "getDCForProvider", - "parameters": [ - { - "type": "string", - "x-go-name": "Provider", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Datacenter", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } 
- } - } - } - }, - "/api/v1/providers/{provider_name}/presets/credentials": { - "get": { - "description": "Lists credential names for the provider", - "produces": [ - "application/json" - ], - "tags": [ - "credentials" - ], - "operationId": "listCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Datacenter", - "name": "datacenter", - "in": "query" - } - ], - "responses": { - "200": { - "description": "CredentialList", - "schema": { - "$ref": "#/definitions/CredentialList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/seed": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "seed" - ], - "operationId": "listSeedNames", - "responses": { - "200": { - "description": "SeedNamesList", - "schema": { - "$ref": "#/definitions/SeedNamesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/seed/{seed_name}/dc": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Returns all datacenters for the specified seed.", - "operationId": "listDCForSeed", - "parameters": [ - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Datacenter" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Create the datacenter for a specified seed.", - "operationId": "createDC", - "parameters": [ - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/DatacenterSpec" - } - } - } - } - ], - "responses": { - "201": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/seed/{seed_name}/dc/{dc}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Returns the specified datacenter for the specified seed.", - "operationId": "getDCForSeed", - "parameters": [ - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - 
"put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Update the datacenter. The datacenter spec will be overwritten with the one provided in the request.", - "operationId": "updateDC", - "parameters": [ - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/DatacenterSpec" - } - } - } - }, - { - "type": "string", - "x-go-name": "DCToUpdate", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Delete the datacenter.", - "operationId": "deleteDC", - "parameters": [ - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "datacenter" - ], - "summary": "Patch the datacenter.", - "operationId": "patchDC", - "parameters": [ - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - }, - { - "type": "string", - "x-go-name": "DCToPatch", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Seed", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Datacenter", - "schema": { - "$ref": "#/definitions/Datacenter" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/upgrades/cluster": { - "get": { - "description": "Lists all versions which don't result in automatic updates", - "produces": [ - "application/json" - ], - "tags": [ - "versions" - ], - "operationId": "getMasterVersions", - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/upgrades/node": { - "get": { - "description": "Gets possible node upgrades for a specific control plane version", - "produces": [ - "application/json" - ], - "tags": [ - "versions" - ], - "operationId": "getNodeUpgrades", - "parameters": [ - { - "type": "string", - "x-go-name": "Type", - "description": "Type is deprecated and not used anymore.", - "name": "type", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ControlPlaneVersion", 
- "name": "control_plane_version", - "in": "query" - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v1/version": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "versions" - ], - "summary": "Get versions of running Kubermatic components.", - "operationId": "getKubermaticVersion", - "responses": { - "200": { - "description": "KubermaticVersions", - "schema": { - "$ref": "#/definitions/KubermaticVersions" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/allowedregistries": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "allowedregistry" - ], - "summary": "List allowed registries.", - "operationId": "listAllowedRegistries", - "responses": { - "200": { - "description": "AllowedRegistry", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/AllowedRegistry" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a allowed registry", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "allowedregistry" - ], - "operationId": "createAllowedRegistry", - "parameters": [ - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/wrBody" - } - } - ], - "responses": { - "201": { - "description": "AllowedRegistry", - "schema": { - "$ref": "#/definitions/AllowedRegistry" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/allowedregistries/{allowed_registry}": { - "get": { - "description": "Get allowed registries specified by name", - "produces": [ - "application/json" - ], - "tags": [ - "allowedregistries" - ], - "operationId": "getAllowedRegistry", - "parameters": [ - { - "type": "string", - "x-go-name": "AllowedRegistryName", - "name": "allowed_registry", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AllowedRegistry", - "schema": { - "$ref": "#/definitions/AllowedRegistry" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "allowedregistries" - ], - "summary": "Deletes the given allowed registry.", - "operationId": "deleteAllowedRegistry", - "parameters": [ - { - "type": "string", - "x-go-name": "AllowedRegistryName", - "name": "allowed_registry", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - 
"description": "Patch a specified allowed registry", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "allowedregistries" - ], - "operationId": "patchAllowedRegistry", - "parameters": [ - { - "type": "string", - "x-go-name": "AllowedRegistryName", - "name": "allowed_registry", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ConstraintTemplate", - "schema": { - "$ref": "#/definitions/ConstraintTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/applicationdefinitions": { - "get": { - "description": "List ApplicationDefinitions which are available in the KKP installation", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "listApplicationDefinitions", - "responses": { - "200": { - "description": "ApplicationDefinitionListItem", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ApplicationDefinitionListItem" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates ApplicationDefinition into the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "createApplicationDefinition", - "parameters": [ - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/ApplicationDefinitionBody" - } - } - ], - "responses": { - "201": { - "description": "ApplicationDefinition", - "schema": { - "$ref": "#/definitions/ApplicationDefinition" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/applicationdefinitions/{appdef_name}": { - "get": { - "description": "Gets the given ApplicationDefinition", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "getApplicationDefinition", - "parameters": [ - { - "type": "string", - "x-go-name": "AppDefName", - "name": "appdef_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ApplicationDefinition", - "schema": { - "$ref": "#/definitions/ApplicationDefinition" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates the given ApplicationDefinition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "updateApplicationDefinition", - "parameters": [ - { - "type": "string", - "x-go-name": "AppDefName", - "name": "appdef_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/ApplicationDefinitionBody" - } - } - ], - "responses": { - "200": { - "description": "ApplicationDefinition", - 
"schema": { - "$ref": "#/definitions/ApplicationDefinition" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the given ApplicationDefinition", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "deleteApplicationDefinition", - "parameters": [ - { - "type": "string", - "x-go-name": "AppDefName", - "name": "appdef_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch the given ApplicationDefinition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "patchApplicationDefinition", - "parameters": [ - { - "type": "string", - "x-go-name": "AppDefName", - "name": "appdef_name", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ApplicationDefinition", - "schema": { - "$ref": "#/definitions/ApplicationDefinition" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/applicationsettings": { - "get": { - "description": "Get application settings", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "getApplicationSettings", - "responses": { - "200": { - "description": "ApplicationSettings", - "schema": { - "$ref": "#/definitions/ApplicationSettings" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/cni/{cni_plugin_type}/versions": { - "get": { - "description": "Lists all CNI Plugin versions that are supported for a given CNI plugin type", - "produces": [ - "application/json" - ], - "tags": [ - "cniversion" - ], - "operationId": "listVersionsByCNIPlugin", - "parameters": [ - { - "type": "string", - "x-go-name": "CNIPluginType", - "name": "cni_plugin_type", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "CNIVersions", - "schema": { - "$ref": "#/definitions/CNIVersions" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/constraints": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "constraint" - ], - "summary": "List default constraint.", - "operationId": "listDefaultConstraint", - "responses": { - "200": { - "description": "Constraint", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Constraint" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": 
"#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates default constraint", - "produces": [ - "application/json" - ], - "tags": [ - "constraint" - ], - "operationId": "createDefaultConstraint", - "parameters": [ - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/constraintBody" - } - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/constraints/{constraint_name}": { - "get": { - "description": "Gets an specified default constraint", - "produces": [ - "application/json" - ], - "tags": [ - "constraint" - ], - "operationId": "getDefaultConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "constraints" - ], - "summary": "Deletes a specified default constraint.", - "operationId": "deleteDefaultConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch a specified default constraint", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "constraint" - ], - "operationId": "patchDefaultConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/constrainttemplates": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "constrainttemplates" - ], - "summary": "List constraint templates.", - "operationId": "listConstraintTemplates", - "responses": { - "200": { - "description": "ConstraintTemplate", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ConstraintTemplate" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Create constraint template", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - 
"constrainttemplates" - ], - "operationId": "createConstraintTemplate", - "parameters": [ - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ctBody" - } - } - ], - "responses": { - "200": { - "description": "ConstraintTemplate", - "schema": { - "$ref": "#/definitions/ConstraintTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/constrainttemplates/{ct_name}": { - "get": { - "description": "Get constraint templates specified by name", - "produces": [ - "application/json" - ], - "tags": [ - "constrainttemplates" - ], - "operationId": "getConstraintTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "ct_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ConstraintTemplate", - "schema": { - "$ref": "#/definitions/ConstraintTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the specified cluster", - "produces": [ - "application/json" - ], - "tags": [ - "constrainttemplates" - ], - "operationId": "deleteConstraintTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "ct_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch a specified constraint template", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "constrainttemplates" - ], - "operationId": "patchConstraintTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "ct_name", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ConstraintTemplate", - "schema": { - "$ref": "#/definitions/ConstraintTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/eks/amitypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS AMI types for node group.", - "operationId": "listEKSAMITypes", - "responses": { - "200": { - "description": "EKSAMITypeList", - "schema": { - "$ref": "#/definitions/EKSAMITypeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/eks/capacitytypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS Capacity types for node group.", - "operationId": "listEKSCapacityTypes", - "responses": { - "200": { - "description": "EKSCapacityTypeList", - "schema": { - "$ref": "#/definitions/EKSCapacityTypeList" - } - }, - 
"401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/featuregates": { - "get": { - "description": "Status of feature gates", - "produces": [ - "application/json" - ], - "tags": [ - "get", - "status", - "of", - "feature" - ], - "operationId": "gates", - "responses": { - "200": { - "description": "FeatureGates", - "schema": { - "$ref": "#/definitions/FeatureGates" - } - }, - "401": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - }, - "403": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/kubeconfig/secret": { - "get": { - "description": "Starts OIDC flow and generates kubeconfig, the generated config\ncontains OIDC provider authentication info. The kubeconfig is stored in the secret.", - "produces": [ - "application/json" - ], - "operationId": "createOIDCKubeconfigSecret", - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "201": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/policytemplates": { - "get": { - "description": "List all policy templates, If query parameter `project_id` is set then the endpoint will return only the policy templates that are associated with the project. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "listPolicyTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PolicyTemplate", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PolicyTemplate" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Create policy template. Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "createPolicyTemplate", - "parameters": [ - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/PolicyTemplate" - } - } - ], - "responses": { - "200": { - "description": "PolicyTemplate", - "schema": { - "$ref": "#/definitions/PolicyTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/policytemplates/{template_name}": { - "get": { - "description": "Get policy template, If the query parameter project_id is set, the endpoint will return the policy template if it is associated with the specified project_id. 
Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "getPolicyTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "PolicyTemplateName", - "name": "template_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PolicyTemplate", - "schema": { - "$ref": "#/definitions/PolicyTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Delete policy template, If the query parameter project_id is set, the endpoint will delete the policy template if it is associated with the specified project_id. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "deletePolicyTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "PolicyTemplateName", - "name": "template_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "query" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch policy template. Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "admin" - ], - "operationId": "patchpolicyTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "PolicyTemplateName", - "name": "template_name", - "in": "path", - "required": true - }, - { - "x-go-name": "Spec", - "name": "spec", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/PolicyTemplateSpec" - } - } - ], - "responses": { - "200": { - "description": "PolicyTemplate", - "schema": { - "$ref": "#/definitions/PolicyTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets": { - "get": { - "description": "Lists presets", - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "listPresets", - "parameters": [ - { - "type": "boolean", - "x-go-name": "Disabled", - "name": "disabled", - "in": "query" - }, - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PresetList", - "schema": { - "$ref": "#/definitions/PresetList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets/{preset_name}": { - "delete": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "summary": "Removes preset.", - "operationId": "deletePreset", - "parameters": [ - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - 
"in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "404": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets/{preset_name}/linkages": { - "get": { - "description": "Gets preset linkages information for UI display", - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "getPresetLinkages", - "parameters": [ - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PresetLinkages", - "schema": { - "$ref": "#/definitions/PresetLinkages" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "404": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets/{preset_name}/provider/{provider_name}": { - "delete": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "summary": "Removes selected preset's provider.", - "operationId": "deletePresetProvider", - "parameters": [ - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "404": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets/{preset_name}/stats": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "summary": "Gets presets stats.", - "operationId": "getPresetStats", - "parameters": [ - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PresetStats", - "schema": { - "$ref": "#/definitions/PresetStats" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "404": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/presets/{preset_name}/status": { - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "summary": "Updates the status of a preset. 
It can enable or disable it, so that it won't be listed by the list endpoints.", - "operationId": "updatePresetStatus", - "parameters": [ - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Provider", - "name": "provider", - "in": "query" - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - } - } - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusterbackupstoragelocation": { - "get": { - "description": "List cluster backup storage location for a given project", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterBackupStorageLocation", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterBackupStorageLocation" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a cluster backup storage location that will belong to the given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createClusterBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/CbslBody" - } - } - ], - "responses": { - "201": { - "description": "ClusterBackupStorageLocation", - "schema": { - "$ref": "#/definitions/ClusterBackupStorageLocation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusterbackupstoragelocation/{cbsl_name}": { - "get": { - "description": "Gets a cluster backup storage location for a given project based on its name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackupStorageLocationName", - "name": "cbsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterBackupStorageLocation", - "schema": { - "$ref": "#/definitions/ClusterBackupStorageLocation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes a cluster backup 
storage location for a given project based on its name", - "tags": [ - "project" - ], - "operationId": "deleteClusterBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackupStorageLocationName", - "name": "cbsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches a cluster backup storage location for a given project based on its name", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchClusterBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackupStorageLocationName", - "name": "cbsl_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/CbslBody" - } - } - ], - "responses": { - "200": { - "description": "ClusterBackupStorageLocation", - "schema": { - "$ref": "#/definitions/ClusterBackupStorageLocation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusterbackupstoragelocation/{cbsl_name}/bucketobjects": { - "get": { - "description": "List objects from bucket of a cluster backup storage location for a given project based on its name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterBackupStorageLocationBucketObjects", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackupStorageLocationName", - "name": "cbsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "BackupStorageLocationBucketObjectList", - "schema": { - "$ref": "#/definitions/BackupStorageLocationBucketObjectList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusterbackupstoragelocation/{cbsl_name}/credentials": { - "get": { - "description": "Get credentials of a cluster backup storage location for a given project based on its name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterBackupStorageLocationCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackupStorageLocationName", - "name": "cbsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "S3BackupCredentials", - "schema": { - "$ref": "#/definitions/S3BackupCredentials" - } - }, - "401": { - "$ref": 
"#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists clusters for the specified project. If query parameter `show_dm_count` is set to `true` then the endpoint will also return the number of machine deployments of each cluster.", - "operationId": "listClustersV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "ShowDeploymentMachineCount", - "name": "show_dm_count", - "in": "query" - } - ], - "responses": { - "200": { - "description": "ProjectClusterList", - "schema": { - "$ref": "#/definitions/ProjectClusterList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a cluster for the given project.", - "operationId": "createClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/CreateClusterSpec" - } - } - ], - "responses": { - "201": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}": { - "get": { - "description": "Gets the cluster with the given name", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the specified cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "deleteClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "name": "DeleteVolumes", - "in": "header" - }, - { - "type": "boolean", - "name": "DeleteLoadBalancers", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { 
- "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patches the given cluster using JSON Merge Patch method (https://tools.ietf.org/html/rfc7396).", - "operationId": "patchClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - }, - { - "type": "boolean", - "x-go-name": "SkipKubeletVersionValidation", - "name": "skip_kubelet_version_validation", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/addons": { - "get": { - "description": "Lists addons that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "listAddonsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Addon" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates an addon that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "createAddonV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Addon" - } - } - ], - "responses": { - "201": { - "description": "Addon", - "schema": { - "$ref": "#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/addons/{addon_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Gets an addon that is assigned to the given cluster.", - "operationId": "getAddonV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "$ref": 
"#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Deletes the given addon that belongs to the cluster.", - "operationId": "deleteAddonV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "summary": "Patches an addon that is assigned to the given cluster.", - "operationId": "patchAddonV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "AddonID", - "name": "addon_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/Addon" - } - } - ], - "responses": { - "200": { - "description": "Addon", - "schema": { - "$ref": "#/definitions/Addon" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/alertmanager/config": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets the alertmanager configuration for the specified cluster.", - "operationId": "getAlertmanager", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Alertmanager", - "schema": { - "$ref": "#/definitions/Alertmanager" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates an alertmanager configuration for the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "updateAlertmanager", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/Alertmanager" - 
} - } - ], - "responses": { - "200": { - "description": "Alertmanager", - "schema": { - "$ref": "#/definitions/Alertmanager" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Resets the alertmanager configuration to default for the specified cluster.", - "operationId": "resetAlertmanager", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/applicationinstallations": { - "get": { - "description": "List ApplicationInstallations which belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "listApplicationInstallations", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ApplicationInstallationListItem", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ApplicationInstallationListItem" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates ApplicationInstallation into the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "createApplicationInstallation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/ApplicationInstallationBody" - } - } - ], - "responses": { - "201": { - "description": "ApplicationInstallation", - "schema": { - "$ref": "#/definitions/ApplicationInstallation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/applicationinstallations/{namespace}/{appinstall_name}": { - "get": { - "description": "Gets the given ApplicationInstallation", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "getApplicationInstallation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": 
"ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ApplicationInstallationName", - "name": "appinstall_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ApplicationInstallation", - "schema": { - "$ref": "#/definitions/ApplicationInstallation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates the given ApplicationInstallation", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "updateApplicationInstallation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ApplicationInstallationName", - "name": "appinstall_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/ApplicationInstallationBody" - } - } - ], - "responses": { - "200": { - "description": "ApplicationInstallation", - "schema": { - "$ref": "#/definitions/ApplicationInstallation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the given ApplicationInstallation", - "produces": [ - "application/json" - ], - "tags": [ - "applications" - ], - "operationId": "deleteApplicationInstallation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ApplicationInstallationName", - "name": "appinstall_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/backupdestinations": { - "get": { - "description": "Gets possible backup destination names for a cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getBackupDestinationNames", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "BackupDestinationNames", - "schema": { - "$ref": 
"#/definitions/BackupDestinationNames" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/backupstoragelocation": { - "get": { - "description": "List all the backup storage location objects present in the cluster. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "backupstoragelocation" - ], - "operationId": "listBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "BackupStorageLocationList", - "schema": { - "$ref": "#/definitions/BackupStorageLocationList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a backup storage location which would be used for restoring backup of different clusters. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "backupstoragelocation" - ], - "operationId": "createBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/BSLBody" - } - } - ], - "responses": { - "200": { - "description": "BackupStorageLocation", - "schema": { - "$ref": "#/definitions/BackupStorageLocation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/backupstoragelocation/{bsl_name}": { - "get": { - "description": "Get a backup storage location object present in the cluster specified by bsl_name. Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "backupstoragelocation" - ], - "operationId": "getBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BSLName", - "name": "bsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "BackupStorageLocation", - "schema": { - "$ref": "#/definitions/BackupStorageLocation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Delete a backup storage location object present in the cluster specified by bsl_name. 
Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "backupstoragelocation" - ], - "operationId": "deleteBackupStorageLocation", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BSLName", - "name": "bsl_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/bindings": { - "get": { - "description": "List role binding", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRoleBindingV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RoleBinding" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/clusterbackup/{cluster_backup}/downloadurl": { - "post": { - "description": "Creates and get download url for a backup that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "postBackupDownloadUrl", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterBackup", - "name": "cluster_backup", - "in": "path", - "required": true - } - ], - "responses": { - "201": { - "description": "BackupDownloadUrl", - "schema": { - "$ref": "#/definitions/BackupDownloadUrl" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/clusterbindings": { - "get": { - "description": "List cluster role binding", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterRoleBindingV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRoleBinding", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterRoleBinding" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - 
"schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/clusterrolenames": { - "get": { - "description": "Lists all ClusterRoles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterRoleNamesV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRoleName", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterRoleName" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/clusterroles": { - "get": { - "description": "Lists all ClusterRoles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listClusterRoleV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterRole", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterRole" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/clusterroles/{role_id}/clusterbindings": { - "post": { - "description": "Binds user to cluster role", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "bindUserToClusterRoleV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ClusterRoleUser" - } - } - ], - "responses": { - "200": { - "description": "ClusterRoleBinding", - "schema": { - "$ref": "#/definitions/ClusterRoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Unbinds user from cluster role binding", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "unbindUserFromClusterRoleBindingV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - 
"name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ClusterRoleUser" - } - } - ], - "responses": { - "200": { - "description": "ClusterRoleBinding", - "schema": { - "$ref": "#/definitions/ClusterRoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/cniversions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists CNI plugin versions for a given cluster.", - "operationId": "listCNIPluginVersionsForCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "CNIVersions", - "schema": { - "$ref": "#/definitions/CNIVersions" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/constraints": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists constraints for the specified cluster.", - "operationId": "listConstraints", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Constraint" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a given constraint for the specified cluster.", - "operationId": "createConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/constraintBody" - } - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/constraints/{constraint_name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an specified constraint for the given cluster.", - "operationId": "getConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": 
"cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes a specified constraint for the given cluster.", - "operationId": "deleteConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patches a given constraint for the specified cluster.", - "operationId": "patchConstraint", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Name", - "name": "constraint_name", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "Constraint", - "schema": { - "$ref": "#/definitions/Constraint" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/etcdbackupconfigs": { - "get": { - "description": "List etcd backup configs for a given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "etcdbackupconfig" - ], - "operationId": "listEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EtcdBackupConfig", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EtcdBackupConfig" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a etcd backup config that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "etcdbackupconfig" - ], - "operationId": "createEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": 
"ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ebcBody" - } - } - ], - "responses": { - "201": { - "description": "EtcdBackupConfig", - "schema": { - "$ref": "#/definitions/EtcdBackupConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/etcdbackupconfigs/{ebc_id}": { - "get": { - "description": "Gets a etcd backup config for a given cluster based on its id", - "produces": [ - "application/json" - ], - "tags": [ - "etcdbackupconfig" - ], - "operationId": "getEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "EtcdBackupConfigID", - "name": "ebc_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EtcdBackupConfig", - "schema": { - "$ref": "#/definitions/EtcdBackupConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes a etcd backup config for a given cluster based on its id", - "tags": [ - "etcdbackupconfig" - ], - "operationId": "deleteEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "EtcdBackupConfigID", - "name": "ebc_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches a etcd backup config for a given cluster based on its id", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "etcdbackupconfig" - ], - "operationId": "patchEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "EtcdBackupConfigID", - "name": "ebc_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/EtcdBackupConfigSpec" - } - } - ], - "responses": { - "200": { - "description": "EtcdBackupConfig", - "schema": { - "$ref": "#/definitions/EtcdBackupConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": 
"#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/etcdrestores": { - "get": { - "description": "List etcd backup restores for a given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "etcdrestore" - ], - "operationId": "listEtcdRestore", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EtcdRestore", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EtcdRestore" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a etcd backup restore for a given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "etcdrestore" - ], - "operationId": "createEtcdRestore", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/erBody" - } - } - ], - "responses": { - "201": { - "description": "EtcdBackupConfig", - "schema": { - "$ref": "#/definitions/EtcdBackupConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/etcdrestores/{er_name}": { - "get": { - "description": "Gets a etcd backup restore for a given cluster based on its name", - "produces": [ - "application/json" - ], - "tags": [ - "etcdrestore" - ], - "operationId": "getEtcdRestore", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "EtcdRestoreName", - "name": "er_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EtcdRestore", - "schema": { - "$ref": "#/definitions/EtcdRestore" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes a etcd restore config for a given cluster based on its name", - "tags": [ - "etcdrestore" - ], - "operationId": "deleteEtcdRestore", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "EtcdRestoreName", - "name": "er_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": 
"#/responses/empty" - }, - "409": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/events": { - "get": { - "produces": [ - "application/yaml" - ], - "tags": [ - "project" - ], - "summary": "Gets the events related to the specified cluster.", - "operationId": "getClusterEventsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/externalccmmigration": { - "post": { - "description": "Enable the migration to the external CCM for the given cluster", - "produces": [ - "application/json" - ], - "operationId": "migrateClusterToExternalCCM", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/gatekeeper/config": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets the gatekeeper sync config for the specified cluster.", - "operationId": "getGatekeeperConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GatekeeperConfig", - "schema": { - "$ref": "#/definitions/GatekeeperConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a gatekeeper config for the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createGatekeeperConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/GatekeeperConfig" - } - } - ], - "responses": { - "201": { - "description": 
"GatekeeperConfig", - "schema": { - "$ref": "#/definitions/GatekeeperConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes the gatekeeper sync config for the specified cluster.", - "operationId": "deleteGatekeeperConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patches the gatekeeper config for the specified cluster.", - "operationId": "patchGatekeeperConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "GatekeeperConfig", - "schema": { - "$ref": "#/definitions/GatekeeperConfig" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/health": { - "get": { - "description": "Returns the cluster's component health status", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterHealthV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterHealth", - "schema": { - "$ref": "#/definitions/ClusterHealth" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/installableaddons": { - "get": { - "description": "Lists names of addons that can be installed inside the user cluster", - "produces": [ - "application/json" - ], - "tags": [ - "addon" - ], - "operationId": "listInstallableAddonsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AccessibleAddons", - "schema": { - "$ref": "#/definitions/AccessibleAddons" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - 
"description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/kubeconfig": { - "get": { - "produces": [ - "application/octet-stream" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified cluster.", - "operationId": "getClusterKubeconfigV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments": { - "get": { - "description": "Lists machine deployments that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listMachineDeployments", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeDeployment" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a machine deployment that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - } - ], - "responses": { - "201": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/nodes/{node_id}": { - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes the given node that belongs to the machine deployment.", - "operationId": "deleteMachineDeploymentNode", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeID", - "name": "node_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - 
}, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets a machine deployment that is assigned to the given cluster.", - "operationId": "getMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Schedules rolling restart of a machine deployment that is assigned to the given cluster.", - "operationId": "restartMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Deletes the given machine deployment that belongs to the cluster.", - "operationId": "deleteMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches a machine deployment that is assigned to the given cluster. 
Please note that at the moment only\nnode deployment's spec can be updated by a patch, no other fields can be changed using this endpoint.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "NodeDeployment", - "schema": { - "$ref": "#/definitions/NodeDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/joiningscript": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets a machine deployment joining script for the edge provider.", - "operationId": "getMachineDeploymentJoinScript", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "JoiningScript", - "schema": { - "$ref": "#/definitions/JoiningScript" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists nodes that belong to the given machine deployment.", - "operationId": "listMachineDeploymentNodes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "HideInitialConditions", - "name": "hideInitialConditions", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Node", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Node" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes/events": { - "get": { - "description": "If the value is 'normal' then normal events are returned. 
If the query parameter is missing method returns all events.", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists machine deployment events. If query parameter `type` is set to `warning` then only warning events are retrieved.", - "operationId": "listMachineDeploymentNodesEvents", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes/metrics": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "metric" - ], - "summary": "Lists metrics that belong to the given machine deployment.", - "operationId": "listMachineDeploymentMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeMetric", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeMetric" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/metrics": { - "get": { - "description": "Gets cluster metrics", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterMetricsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterMetrics", - "schema": { - "$ref": "#/definitions/ClusterMetrics" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/mlaadminsetting": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "mlaadminsetting" - ], - "summary": "Gets MLA Admin settings for the given cluster.", - "operationId": "getMLAAdminSetting", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": 
"path", - "required": true - } - ], - "responses": { - "200": { - "description": "MLAAdminSetting", - "schema": { - "$ref": "#/definitions/MLAAdminSetting" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "mlaadminsetting" - ], - "summary": "Updates the MLA admin setting for the given cluster.", - "operationId": "updateMLAAdminSetting", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/MLAAdminSetting" - } - } - ], - "responses": { - "200": { - "description": "MLAAdminSetting", - "schema": { - "$ref": "#/definitions/MLAAdminSetting" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates MLA admin setting that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "mlaadminsetting" - ], - "operationId": "createMLAAdminSetting", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/MLAAdminSetting" - } - } - ], - "responses": { - "201": { - "description": "MLAAdminSetting", - "schema": { - "$ref": "#/definitions/MLAAdminSetting" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "mlaadminsetting" - ], - "summary": "Deletes the MLA admin setting that belongs to the cluster.", - "operationId": "deleteMLAAdminSetting", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/namespaces": { - "get": { - "description": "Lists all namespaces in the cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listNamespaceV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - 
], - "responses": { - "200": { - "description": "Namespace", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Namespace" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/nodes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "This endpoint is used for kubeadm cluster.", - "operationId": "listNodesForCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "HideInitialConditions", - "name": "hideInitialConditions", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Node", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Node" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/nodes/upgrades": { - "put": { - "description": "Upgrades node deployments in a cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "upgradeClusterNodeDeploymentsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/MasterVersion" - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/oidc": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets the OIDC params for the specified cluster with OIDC authentication.", - "operationId": "getClusterOidc", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OIDCSpec", - "schema": { - "$ref": "#/definitions/OIDCSpec" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/oidckubeconfig": { - "get": { - "produces": [ - "application/octet-stream" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified cluster with oidc authentication.", - "operationId": "getOidcClusterKubeconfigV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": 
"string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/operatingsystemprofiles": { - "get": { - "description": "Lists all available Operating System Profiles for a cluster", - "produces": [ - "application/json" - ], - "tags": [ - "operatingsystemprofile" - ], - "operationId": "listOperatingSystemProfilesForCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OperatingSystemProfile", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OperatingSystemProfile" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/policybindings": { - "get": { - "description": "List all policy bindings, Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listPolicyBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PolicyBinding", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PolicyBinding" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Create policy binding, Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "createPolicyBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/createPolicyBindingBody" - } - } - ], - "responses": { - "200": { - "description": "PolicyBinding", - "schema": { - "$ref": "#/definitions/PolicyBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/policybindings/{binding_name}": { - "get": { - "description": "Get policy binding, Only available in Kubermatic Enterprise Edition", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getPolicyBinding", - 
"parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "PolicyBindingName", - "name": "binding_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PolicyBinding", - "schema": { - "$ref": "#/definitions/PolicyBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Delete policy binding, Only available in Kubermatic Enterprise Edition", - "tags": [ - "project" - ], - "operationId": "deletePolicyBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "PolicyBindingName", - "name": "binding_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patch policy binding. Only available in Kubermatic Enterprise Edition", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "patchPolicyBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "PolicyBindingName", - "name": "binding_name", - "in": "path", - "required": true - }, - { - "x-go-name": "Body", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/patchPolicyBindingBody" - } - } - ], - "responses": { - "200": { - "description": "PolicyBinding", - "schema": { - "$ref": "#/definitions/PolicyBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/alibaba/instancetypes": { - "get": { - "description": "Lists available Alibaba Instance Types", - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "operationId": "listAlibabaInstanceTypesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaInstanceTypeList", - "schema": { - "$ref": "#/definitions/AlibabaInstanceTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, 
- "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/alibaba/vswitches": { - "get": { - "description": "Lists available Alibaba vSwitches", - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "operationId": "listAlibabaVSwitchesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaVSwitchList", - "schema": { - "$ref": "#/definitions/AlibabaVSwitchList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/alibaba/zones": { - "get": { - "description": "Lists available Alibaba Instance Types", - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "operationId": "listAlibabaZonesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaZoneList", - "schema": { - "$ref": "#/definitions/AlibabaZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/anexia/disk-types": { - "get": { - "description": "Lists disk-types from Anexia", - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "operationId": "listAnexiaDiskTypesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AnexiaDiskTypeList", - "schema": { - "$ref": "#/definitions/AnexiaDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/anexia/templates": { - "get": { - "description": "Lists templates from Anexia", - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "operationId": "listAnexiaTemplatesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AnexiaTemplateList", - "schema": { - "$ref": "#/definitions/AnexiaTemplateList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/anexia/vlans": { - "get": { - "description": "Lists vlans from Anexia", - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "operationId": "listAnexiaVlansNoCredentialsV2", - "parameters": [ - { 
- "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AnexiaVlanList", - "schema": { - "$ref": "#/definitions/AnexiaVlanList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/aws/sizes": { - "get": { - "description": "Lists available AWS sizes", - "produces": [ - "application/json" - ], - "tags": [ - "aws" - ], - "operationId": "listAWSSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Architecture", - "description": "architecture query parameter. Supports: arm64 and x64 types.", - "name": "architecture", - "in": "query" - } - ], - "responses": { - "200": { - "description": "AWSSizeList", - "schema": { - "$ref": "#/definitions/AWSSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/aws/subnets": { - "get": { - "description": "Lists available AWS subnets", - "produces": [ - "application/json" - ], - "tags": [ - "aws" - ], - "operationId": "listAWSSubnetsNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AWSSubnetList", - "schema": { - "$ref": "#/definitions/AWSSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/azure/availabilityzones": { - "get": { - "description": "Lists available VM availability zones in an Azure region", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureAvailabilityZonesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SKUName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureAvailabilityZonesList", - "schema": { - "$ref": "#/definitions/AzureAvailabilityZonesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/azure/sizes": { - "get": { - "description": "Lists available VM sizes in an Azure region", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": 
"cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AzureSizeList", - "schema": { - "$ref": "#/definitions/AzureSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/digitalocean/sizes": { - "get": { - "description": "Lists sizes from digitalocean", - "produces": [ - "application/json" - ], - "tags": [ - "digitalocean" - ], - "operationId": "listDigitaloceanSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "DigitaloceanSizeList", - "schema": { - "$ref": "#/definitions/DigitaloceanSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/equinixmetal/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "equinixmetal" - ], - "summary": "Lists sizes from Equinix Metal.", - "operationId": "listEquinixMetalSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PacketSizeList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PacketSizeList" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/gcp/disktypes": { - "get": { - "description": "Lists disk types from GCP", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listGCPDiskTypesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GCPDiskTypeList", - "schema": { - "$ref": "#/definitions/GCPDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/gcp/networks": { - "get": { - "description": "Lists available GCP networks", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listGCPNetworksNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPNetworkList", - "schema": { - "$ref": "#/definitions/GCPNetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/gcp/sizes": { - "get": { - "description": "Lists machine sizes from GCP", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listGCPSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GCPMachineSizeList", - "schema": { - "$ref": "#/definitions/GCPMachineSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/gcp/subnetworks": { - "get": { - "description": "Lists available GCP subnetworks", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listGCPSubnetworksNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Network", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GCPSubnetworkList", - "schema": { - "$ref": "#/definitions/GCPSubnetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/gcp/zones": { - "get": { - "description": "Lists available GCP zones", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listGCPZonesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPZoneList", - "schema": { - "$ref": "#/definitions/GCPZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/hetzner/sizes": { - "get": { - "description": "Lists sizes from hetzner", - "produces": [ - "application/json" - ], - "tags": [ - "hetzner" - ], - "operationId": "listHetznerSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "HetznerSizeList", - "schema": { - "$ref": "#/definitions/HetznerSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/kubevirt/instancetypes": { - "get": { - "description": "Lists available VirtualMachineInstancetype", - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "operationId": "listKubeVirtInstancetypesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": 
"ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VirtualMachineInstancetypeList", - "schema": { - "$ref": "#/definitions/VirtualMachineInstancetypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/kubevirt/preferences": { - "get": { - "description": "Lists available VirtualMachinePreference", - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "operationId": "listKubeVirtPreferencesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VirtualMachinePreferenceList", - "schema": { - "$ref": "#/definitions/VirtualMachinePreferenceList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/kubevirt/storageclasses": { - "get": { - "description": "List Storage Classes", - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "operationId": "listKubevirtStorageClassesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "StorageClassList", - "schema": { - "$ref": "#/definitions/StorageClassList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/kubevirt/subnets": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "List Subnets for a VPC associated with a cluster.", - "operationId": "listKubeVirtSubnetsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "StorageClassName", - "name": "storageClassName", - "in": "query" - } - ], - "responses": { - "200": { - "description": "KubeVirtSubnetList", - "schema": { - "$ref": "#/definitions/KubeVirtSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/kubevirt/vpcs": { - "get": { - "description": "List VPCs for a cluster", - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "operationId": "listKubeVirtVPCsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - 
"description": "KubeVirtVPCList", - "schema": { - "$ref": "#/definitions/KubeVirtVPCList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/nutanix/categories": { - "get": { - "description": "Lists available Nutanix categories", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixCategoriesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NutanixCategoryList", - "schema": { - "$ref": "#/definitions/NutanixCategoryList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/nutanix/categories/{category}/values": { - "get": { - "description": "Lists available Nutanix category values for a specific category", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixCategoryValuesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Category", - "description": "Category to query the available values for", - "name": "category", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NutanixCategoryValueList", - "schema": { - "$ref": "#/definitions/NutanixCategoryValueList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/nutanix/subnets": { - "get": { - "description": "Lists available Nutanix Subnets", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixSubnetsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NutanixSubnetList", - "schema": { - "$ref": "#/definitions/NutanixSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/availabilityzones": { - "get": { - "description": "Lists availability zones from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackAvailabilityZonesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackAvailabilityZone", - "schema": { - "type": "array", - "items": { - "$ref": 
"#/definitions/OpenstackAvailabilityZone" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/networks": { - "get": { - "description": "Lists networks from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackNetworksNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackNetwork", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackNetwork" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/securitygroups": { - "get": { - "description": "Lists security groups from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackSecurityGroupsNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSecurityGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSecurityGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/servergroups": { - "get": { - "description": "Lists server groups from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackServerGroupsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackServerGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackServerGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/sizes": { - "get": { - "description": "Lists sizes from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSize", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSize" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/subnets": { - "get": { - "description": "Lists subnets from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackSubnetsNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NetworkID", - "name": "network_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OpenstackSubnet", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSubnet" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/openstack/tenants": { - "get": { - "description": "Lists tenants from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackTenantsNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackTenant", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackTenant" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/packet/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "packet" - ], - "summary": "Lists sizes from packet (use Equinix Metal API endpoints instead).", - "operationId": "listPacketSizesNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "PacketSizeList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PacketSizeList" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vmwareclouddirector/catalogs": { - "get": { - "description": "List VMware Cloud Director Catalogs", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorCatalogsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorCatalogList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorCatalogList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vmwareclouddirector/computepolicies": { - "get": { - "description": 
"List VMware Cloud Director Compute Policies", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorComputePoliciesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorComputePolicyList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorComputePolicyList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vmwareclouddirector/networks": { - "get": { - "description": "List VMware Cloud Director OVDC Networks", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorNetworksNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorNetworkList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorNetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vmwareclouddirector/storageprofiles": { - "get": { - "description": "List VMware Cloud Director Storage Profiles", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorStorageProfilesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorStorageProfileList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorStorageProfileList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vmwareclouddirector/templates/{catalog_name}": { - "get": { - "description": "List VMware Cloud Director Templates", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorTemplatesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "CatalogName", - "description": "Catalog name to fetch the templates from", - "name": "catalog_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorTemplateList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorTemplateList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vsphere/folders": { - "get": { - "description": "Lists folders from vsphere datacenter", - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "operationId": "listVSphereFoldersNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereFolder", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereFolder" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vsphere/networks": { - "get": { - "description": "Lists networks from vsphere datacenter", - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "operationId": "listVSphereNetworksNoCredentialsV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereNetwork", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereNetwork" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vsphere/tagcategories": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists tag categories from vSphere datacenter.", - "operationId": "listVSphereTagCategoriesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereTagCategory", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereTagCategory" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vsphere/tagcategories/{tag_category}/tags": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists tags for a tag category from vSphere datacenter.", - "operationId": "listVSphereTagsForTagCategoryNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "TagCategory", - "name": "tag_category", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereTag", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereTag" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/providers/vsphere/vmgroups": { - 
"get": { - "description": "Lists VM groups from vsphere datacenter", - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "operationId": "listVSphereVMGroupsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereVMGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereVMGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/rolenames": { - "get": { - "description": "Lists all Role names with namespaces", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRoleNamesV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RoleName", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RoleName" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/roles": { - "get": { - "description": "Lists all Roles", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listRoleV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Role", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Role" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/roles/{namespace}/{role_id}/bindings": { - "post": { - "description": "Binds user to the role", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "bindUserToRoleV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/RoleUser" - } - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "$ref": "#/definitions/RoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": 
"errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Unbinds user from the role binding", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "unbindUserFromRoleBindingV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RoleID", - "name": "role_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/RoleUser" - } - } - ], - "responses": { - "200": { - "description": "RoleBinding", - "schema": { - "$ref": "#/definitions/RoleBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/rulegroups": { - "get": { - "description": "Lists rule groups that belong to the given cluster", - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "operationId": "listRuleGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "RuleGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RuleGroup" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a rule group that will belong to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "operationId": "createRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/RuleGroup" - } - } - ], - "responses": { - "201": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/rulegroups/{rulegroup_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Gets a specified rule group for the given cluster.", - "operationId": "getRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - 
"type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Updates the specified rule group for the given cluster.", - "operationId": "updateRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/RuleGroup" - } - } - ], - "responses": { - "200": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Deletes the given rule group that belongs to the cluster.", - "operationId": "deleteRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/serviceaccount": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "List service accounts in cluster.", - "operationId": "listClusterServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterServiceAccount", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterServiceAccount" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a service account in cluster.", - 
"operationId": "createClusterServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ClusterServiceAccount" - } - } - ], - "responses": { - "201": { - "description": "ClusterServiceAccount", - "schema": { - "$ref": "#/definitions/ClusterServiceAccount" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/serviceaccount/{namespace}/{service_account_id}": { - "delete": { - "tags": [ - "project" - ], - "summary": "Deletes service account in cluster.", - "operationId": "deleteClusterServiceAccount", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "service_account_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/serviceaccount/{namespace}/{service_account_id}/kubeconfig": { - "get": { - "produces": [ - "application/octet-stream" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified service account in cluster.", - "operationId": "getClusterServiceAccountKubeconfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "service_account_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/serviceaccount/{namespace}/{service_account_id}/permissions": { - "get": { - "description": "get Service Account permissions", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getClusterServiceAccountPermissions", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Namespace", - "name": 
"namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ServiceAccountID", - "name": "service_account_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Permission", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Permission" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/sshkeys": { - "get": { - "description": "Lists ssh keys that are assigned to the cluster\nThe returned collection is sorted by creation timestamp.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listSSHKeysAssignedToClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "SSHKey", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/SSHKey" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/sshkeys/{key_id}": { - "put": { - "description": "Assigns an existing ssh key to the given cluster", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "assignSSHKeyToClusterV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "KeyID", - "name": "key_id", - "in": "path", - "required": true - } - ], - "responses": { - "201": { - "description": "SSHKey", - "schema": { - "$ref": "#/definitions/SSHKey" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/token": { - "put": { - "description": "Revokes the current admin token", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "revokeClusterAdminTokenV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/upgrades": { - "get": { - "description": "Gets possible cluster upgrades", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": 
"getClusterUpgradesV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clusters/{cluster_id}/viewertoken": { - "put": { - "description": "Revokes the current viewer token", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "revokeClusterViewerTokenV2", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clustertemplates": { - "get": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "List cluster templates for the given project.", - "operationId": "listClusterTemplates", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterTemplateList", - "schema": { - "$ref": "#/definitions/ClusterTemplateList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Creates a cluster templates for the given project.", - "operationId": "createClusterTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "applications": { - "type": "array", - "items": { - "$ref": "#/definitions/Application" - }, - "x-go-name": "Applications" - }, - "cluster": { - "$ref": "#/definitions/Cluster" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "nodeDeployment": { - "$ref": "#/definitions/NodeDeployment" - }, - "scope": { - "type": "string", - "x-go-name": "Scope" - }, - "userSshKeys": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterTemplateSSHKey" - }, - "x-go-name": "UserSSHKeys" - } - } - } - } - ], - "responses": { - "201": { - "description": "ClusterTemplate", - "schema": { - "$ref": "#/definitions/ClusterTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/clustertemplates/import": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Import a cluster templates for the given project.", - "operationId": "importClusterTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "applications": { - "type": "array", - "items": { - "$ref": "#/definitions/Application" - }, - "x-go-name": "Applications" - }, - "cluster": { - "$ref": "#/definitions/ClusterTemplateInfo" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "nodeDeployment": { - "$ref": "#/definitions/ClusterTemplateNodeDeployment" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - }, - "scope": { - "type": "string", - "x-go-name": "Scope" - }, - "user": { - "type": "string", - "x-go-name": "User" - }, - "userSshKeys": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterTemplateSSHKey" - }, - "x-go-name": "UserSSHKeys" - } - } - } - } - ], - "responses": { - "201": { - "description": "ClusterTemplate", - "schema": { - "$ref": "#/definitions/ClusterTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clustertemplates/{template_id}": { - "get": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Get cluster template.", - "operationId": "getClusterTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterTemplateID", - "name": "template_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterTemplate", - "schema": { - "$ref": "#/definitions/ClusterTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Update a specified cluster templates for the given project.", - "operationId": "updateClusterTemplate", - "parameters": [ - { - "type": 
"string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "applications": { - "type": "array", - "items": { - "$ref": "#/definitions/Application" - }, - "x-go-name": "Applications" - }, - "cluster": { - "$ref": "#/definitions/Cluster" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "nodeDeployment": { - "$ref": "#/definitions/NodeDeployment" - }, - "scope": { - "type": "string", - "x-go-name": "Scope" - }, - "userSshKeys": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterTemplateSSHKey" - }, - "x-go-name": "UserSSHKeys" - } - } - } - }, - { - "type": "string", - "x-go-name": "ClusterTemplateID", - "name": "template_id", - "in": "path", - "required": true - } - ], - "responses": { - "201": { - "description": "ClusterTemplate", - "schema": { - "$ref": "#/definitions/ClusterTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Delete cluster template.", - "operationId": "deleteClusterTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterTemplateID", - "name": "template_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clustertemplates/{template_id}/export": { - "get": { - "produces": [ - "application/octet-stream" - ], - "tags": [ - "project" - ], - "summary": "Export cluster template to file.", - "operationId": "exportClusterTemplate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterTemplateID", - "name": "template_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Format", - "name": "format", - "in": "query" - } - ], - "responses": { - "200": { - "description": "ClusterTemplate", - "schema": { - "$ref": "#/definitions/ClusterTemplate" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/clustertemplates/{template_id}/instances": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Create cluster template instance.", - "operationId": "createClusterTemplateInstance", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterTemplateID", - "name": "template_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - 
"replicas": { - "type": "integer", - "format": "int64", - "x-go-name": "Replicas" - } - } - } - } - ], - "responses": { - "201": { - "description": "ClusterTemplateInstance", - "schema": { - "$ref": "#/definitions/ClusterTemplateInstance" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/etcdbackupconfigs": { - "get": { - "description": "List etcd backup configs for a given project", - "produces": [ - "application/json" - ], - "tags": [ - "etcdbackupconfig" - ], - "operationId": "listProjectEtcdBackupConfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "EtcdBackupConfig", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EtcdBackupConfig" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/etcdrestores": { - "get": { - "description": "List etcd backup restores for a given project", - "produces": [ - "application/json" - ], - "tags": [ - "etcdrestore" - ], - "operationId": "listProjectEtcdRestore", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EtcdRestore", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EtcdRestore" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/groupbindings": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists project's group bindings.", - "operationId": "listGroupProjectBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GroupProjectBinding", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/GroupProjectBinding" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Create project group binding.", - "operationId": "createGroupProjectBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/groupProjectBindingBody" - } - } - ], - "responses": { - "201": { - "description": "GroupProjectBinding", - "schema": { - "$ref": "#/definitions/GroupProjectBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": 
"errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/groupbindings/{binding_name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Get project group binding.", - "operationId": "getGroupProjectBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BindingName", - "name": "binding_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GroupProjectBinding", - "schema": { - "$ref": "#/definitions/GroupProjectBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Delete project group binding.", - "operationId": "deleteGroupProjectBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BindingName", - "name": "binding_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patch project group binding.", - "operationId": "patchGroupProjectBinding", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "BindingName", - "name": "binding_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "role": { - "type": "string", - "x-go-name": "Role" - } - } - } - } - ], - "responses": { - "200": { - "description": "GroupProjectBinding", - "schema": { - "$ref": "#/definitions/GroupProjectBinding" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists external clusters for the specified project.", - "operationId": "listExternalClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalCluster", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ExternalCluster" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - 
], - "summary": "Creates an external cluster for the given project.", - "operationId": "createExternalCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The credential name used in the preset for the provider", - "name": "Credential", - "in": "header" - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/body" - } - } - ], - "responses": { - "201": { - "description": "ExternalCluster", - "schema": { - "$ref": "#/definitions/ExternalCluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster for the given project.", - "operationId": "getExternalCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalCluster", - "schema": { - "$ref": "#/definitions/ExternalCluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Updates an external cluster for the given project.", - "operationId": "updateExternalCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "kubeconfig": { - "description": "Kubeconfig Base64 encoded kubeconfig", - "type": "string", - "x-go-name": "Kubeconfig" - }, - "name": { - "description": "Name is human readable name for the external cluster", - "type": "string", - "x-go-name": "Name" - } - } - } - } - ], - "responses": { - "200": { - "description": "ExternalCluster", - "schema": { - "$ref": "#/definitions/ExternalCluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "description": "Deletes the specified external cluster", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "deleteExternalCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Action", - "description": "The Action is used to check if to `Delete` the cluster:\nboth the actual cluster from the provider\nand the respective KKP cluster object\nBy default the cluster will `Disconnect` which means the KKP cluster object will 
be deleted,\ncluster still exists on the provider, but is no longer connected/imported in KKP", - "name": "action", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Patches the given cluster using JSON Merge Patch method (https://tools.ietf.org/html/rfc7396).", - "operationId": "patchExternalCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ExternalCluster", - "schema": { - "$ref": "#/definitions/ExternalCluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/events": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster events.", - "operationId": "listExternalClusterEvents", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/kubeconfig": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets the kubeconfig for the specified external cluster.", - "operationId": "getExternalClusterKubeconfig", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/Kubeconfig" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster machine deployments.", - "operationId": "listExternalClusterMachineDeployments", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": 
"string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalClusterMachineDeployment", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ExternalClusterMachineDeployment" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Create an external cluster machine deployments.", - "operationId": "createExternalClusterMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/ExternalClusterMachineDeployment" - } - } - ], - "responses": { - "200": { - "description": "ExternalClusterMachineDeployment", - "schema": { - "$ref": "#/definitions/ExternalClusterMachineDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster machine deployments.", - "operationId": "getExternalClusterMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalClusterMachineDeployment", - "schema": { - "$ref": "#/definitions/ExternalClusterMachineDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Delete an external cluster machine deployment.", - "operationId": "deleteExternalClusterMachineDeployment", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "description": "Patches the given cluster using JSON Merge Patch method", - "produces": [ - "application/json" - 
], - "tags": [ - "project" - ], - "operationId": "patchExternalClusterMachineDeployments", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - }, - { - "name": "Patch", - "in": "body", - "schema": { - "type": "object" - } - } - ], - "responses": { - "200": { - "description": "ExternalClusterMachineDeployment", - "schema": { - "$ref": "#/definitions/ExternalClusterMachineDeployment" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster machine deployment nodes.", - "operationId": "listExternalClusterMachineDeploymentNodes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalClusterNode", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ExternalClusterNode" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes/events": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "List an external cluster machine deployment events.", - "operationId": "listExternalClusterMachineDeploymentEvents", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Event", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Event" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/nodes/metrics": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "List an external cluster machine deployment metrics.", - "operationId": 
"listExternalClusterMachineDeploymentMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeMetric", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeMetric" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/machinedeployments/{machinedeployment_id}/upgrades": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster machine deployments upgrade versions.", - "operationId": "getExternalClusterMachineDeploymentUpgrades", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "MachineDeploymentID", - "name": "machinedeployment_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/metrics": { - "get": { - "description": "Gets cluster metrics", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "getExternalClusterMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ClusterMetrics", - "schema": { - "$ref": "#/definitions/ClusterMetrics" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/nodes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster nodes.", - "operationId": "listExternalClusterNodes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalClusterNode", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ExternalClusterNode" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - 
"description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/nodes/{node_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster node.", - "operationId": "getExternalClusterNode", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "NodeID", - "name": "node_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ExternalClusterNode", - "schema": { - "$ref": "#/definitions/ExternalClusterNode" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/nodesmetrics": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster nodes metrics.", - "operationId": "listExternalClusterNodesMetrics", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NodeMetric", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/NodeMetric" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/aks/versions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "Gets AKS nodepool available versions.", - "operationId": "listAKSNodeVersionsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/aks/vmsizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "Gets AKS available VM sizes in an Azure region.", - "operationId": "listAKSVMSizesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Location - Resource location", - "name": "Location", - "in": "header" - } - ], - 
"responses": { - "200": { - "description": "AKSVMSizeList", - "schema": { - "$ref": "#/definitions/AKSVMSizeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/eks/instancetypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS Instance types for node group based on architecture.", - "operationId": "listEKSInstanceTypesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Architecture", - "description": "architecture query parameter. Supports: arm64 and x86_64 types.", - "name": "architecture", - "in": "query" - } - ], - "responses": { - "200": { - "description": "EKSInstanceTypeList", - "schema": { - "$ref": "#/definitions/EKSInstanceTypeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/eks/noderoles": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "List EKS Node IAM Roles.", - "operationId": "listEKSNodeRolesNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EKSNodeRoleList", - "schema": { - "$ref": "#/definitions/EKSNodeRoleList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/eks/subnets": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS Subnets for node group.", - "operationId": "listEKSSubnetsNoCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "VpcId", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSSubnetList", - "schema": { - "$ref": "#/definitions/EKSSubnetList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/eks/vpcs": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS vpc's for node group.", - "operationId": "listEKSVPCsNoCredentials", - "parameters": [ - { - "type": "string", - 
"x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EKSVPCList", - "schema": { - "$ref": "#/definitions/EKSVPCList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/gke/disktypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Gets GKE cluster machine disk types.", - "operationId": "listGKEClusterDiskTypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPDiskTypeList", - "schema": { - "$ref": "#/definitions/GCPDiskTypeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/gke/images": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Gets GKE cluster images.", - "operationId": "listGKEClusterImages", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GKEImageList", - "schema": { - "$ref": "#/definitions/GKEImageList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/gke/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Gets GKE cluster machine sizes.", - "operationId": "listGKEClusterSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPMachineSizeList", - "schema": { - "$ref": "#/definitions/GCPMachineSizeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/providers/gke/zones": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Gets GKE cluster zones.", - "operationId": "listGKEClusterZones", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - 
"name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GKEZoneList", - "schema": { - "$ref": "#/definitions/GKEZoneList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/kubernetes/clusters/{cluster_id}/upgrades": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Gets an external cluster upgrades.", - "operationId": "getExternalClusterUpgrades", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "ClusterID", - "name": "cluster_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/presets": { - "get": { - "description": "Lists presets in a specific project", - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "listProjectPresets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "Disabled", - "name": "disabled", - "in": "query" - }, - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PresetList", - "schema": { - "$ref": "#/definitions/PresetList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/clusters": { - "get": { - "description": "Lists AKS clusters", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listAKSClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AKSClusterList", - "schema": { - "$ref": "#/definitions/AKSClusterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/locations": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List AKS recommended Locations.", - "operationId": "listProjectAKSLocations", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "TenantID", - "in": "header" 
- }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AKSLocationList", - "schema": { - "$ref": "#/definitions/AKSLocationList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/resourcegroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List resource groups in an Azure subscription.", - "operationId": "listProjectAKSResourceGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureResourceGroupList", - "schema": { - "$ref": "#/definitions/AzureResourceGroupList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/validatecredentials": { - "get": { - "description": "Validates AKS credentials", - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "operationId": "validateProjectAKSCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/versions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "Lists AKS versions.", - "operationId": "listProjectAKSVersions", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aks/vmsizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List AKS available VM sizes in an Azure region.", - "operationId": "listProjectAKSVMSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "TenantID", - "in": 
"header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "Location - Resource location", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AKSVMSizeList", - "schema": { - "$ref": "#/definitions/AKSVMSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/alibaba/instancetypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "summary": "Lists available Alibaba instance types.", - "operationId": "listProjectAlibabaInstanceTypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "AccessKeySecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaInstanceTypeList", - "schema": { - "$ref": "#/definitions/AlibabaInstanceTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/alibaba/vswitches": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "summary": "Lists available Alibaba vSwitches.", - "operationId": "listProjectAlibabaVSwitches", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "AccessKeySecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaVSwitchList", - "schema": { - "$ref": "#/definitions/AlibabaVSwitchList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/alibaba/zones": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "alibaba" - ], - "summary": "Lists available Alibaba zones.", - "operationId": "listProjectAlibabaZones", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "AccessKeySecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AlibabaZoneList", - "schema": { - "$ref": "#/definitions/AlibabaZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/providers/anexia/disk-types": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "summary": "Lists disk-types from Anexia.", - "operationId": "listProjectAnexiaDiskTypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Token", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AnexiaDiskTypeList", - "schema": { - "$ref": "#/definitions/AnexiaDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/anexia/templates": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "summary": "Lists templates from Anexia.", - "operationId": "listProjectAnexiaTemplates", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Token", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AnexiaTemplateList", - "schema": { - "$ref": "#/definitions/AnexiaTemplateList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/anexia/vlans": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "anexia" - ], - "summary": "Lists VLANs from Anexia.", - "operationId": "listProjectAnexiaVlans", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Token", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AnexiaVlanList", - "schema": { - "$ref": "#/definitions/AnexiaVlanList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aws/sizes": { - "get": { - "description": "Lists available AWS sizes", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listProjectAWSSizes", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "VPC", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "x-go-name": "Architecture", - "description": "architecture query parameter. 
Supports: arm64 and x64 types.", - "name": "architecture", - "in": "query" - } - ], - "responses": { - "200": { - "description": "AWSSizeList", - "schema": { - "$ref": "#/definitions/AWSSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aws/{dc}/securitygroups": { - "get": { - "description": "Lists available AWS security groups", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listProjectAWSSecurityGroups", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "VPC", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AWSSecurityGroupList", - "schema": { - "$ref": "#/definitions/AWSSecurityGroupList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aws/{dc}/subnets": { - "get": { - "description": "Lists available AWS subnets", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listProjectAWSSubnets", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "VPC", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "AWSSubnetList", - "schema": { - "$ref": "#/definitions/AWSSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/aws/{dc}/vpcs": { - "get": { - "description": "Lists available AWS VPCs", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listProjectAWSVPCs", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "VPC", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - 
"required": true - } - ], - "responses": { - "200": { - "description": "AWSVPCList", - "schema": { - "$ref": "#/definitions/AWSVPCList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/availabilityzones": { - "get": { - "description": "Lists VM availability zones in an Azure region", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureAvailabilityZones", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - }, - { - "type": "string", - "name": "SKUName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureAvailabilityZonesList", - "schema": { - "$ref": "#/definitions/AzureAvailabilityZonesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/resourcegroups": { - "get": { - "description": "Lists available VM resource groups", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureResourceGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureResourceGroupsList", - "schema": { - "$ref": "#/definitions/AzureResourceGroupsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/routetables": { - "get": { - "description": "Lists available VM route tables", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureRouteTables", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureRouteTablesList", - "schema": { - "$ref": "#/definitions/AzureRouteTablesList" - } - }, - "default": { - 
"description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/securitygroups": { - "get": { - "description": "Lists available VM security groups", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureSecurityGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureSecurityGroupsList", - "schema": { - "$ref": "#/definitions/AzureSecurityGroupsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/sizes": { - "get": { - "description": "Lists available VM sizes in an Azure region", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureSizeList", - "schema": { - "$ref": "#/definitions/AzureSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/azure/subnets": { - "get": { - "description": "Lists available VM subnets", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureSubnets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "VirtualNetwork", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureSubnetsList", - "schema": { - "$ref": "#/definitions/AzureSubnetsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/providers/azure/vnets": { - "get": { - "description": "Lists available VM virtual networks", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listProjectAzureVnets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureVirtualNetworksList", - "schema": { - "$ref": "#/definitions/AzureVirtualNetworksList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/digitalocean/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "digitalocean" - ], - "summary": "Lists sizes from digitalocean.", - "operationId": "listProjectDigitaloceanSizes", - "parameters": [ - { - "type": "string", - "name": "DoToken", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "DigitaloceanSizeList", - "schema": { - "$ref": "#/definitions/DigitaloceanSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/amitypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS AMI types for node groups.", - "operationId": "listProjectEKSAMITypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EKSAMITypeList", - "schema": { - "$ref": "#/definitions/EKSAMITypeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/capacitytypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Gets the EKS Capacity types for node groups.", - "operationId": "listProjectEKSCapacityTypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "EKSCapacityTypeList", - "schema": { - "$ref": "#/definitions/EKSCapacityTypeList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/clusterroles": { - "get": { - "produces": [ - 
"application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS cluster service roles.", - "operationId": "listProjectEKSClusterRoles", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSClusterRoleList", - "schema": { - "$ref": "#/definitions/EKSClusterRoleList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/clusters": { - "get": { - "description": "Lists EKS clusters", - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listEKSClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSClusterList", - "schema": { - "$ref": "#/definitions/EKSClusterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/regions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS regions.", - "operationId": "listProjectEKSRegions", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSRegionList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EKSRegionList" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/securitygroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS security groups.", - "operationId": "listProjectEKSSecurityGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", 
- "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "VpcId", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSSecurityGroupList", - "schema": { - "$ref": "#/definitions/EKSSecurityGroupList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/subnets": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS subnets.", - "operationId": "listProjectEKSSubnets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "VpcId", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSSubnetList", - "schema": { - "$ref": "#/definitions/EKSSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/validatecredentials": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Validates EKS credentials.", - "operationId": "validateProjectEKSCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/versions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS versions.", - "operationId": "listProjectEKSVersions", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - 
"schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/eks/vpcs": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "List EKS VPCs.", - "operationId": "listProjectEKSVPCs", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSVPCList", - "schema": { - "$ref": "#/definitions/EKSVPCList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/equinixmetal/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "equinixmetal" - ], - "summary": "Lists sizes from Equinix Metal.", - "operationId": "listProjectEquinixMetalSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "APIKey", - "in": "header" - }, - { - "type": "string", - "name": "EquinixProjectID", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "PacketSizeList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/PacketSizeList" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gcp/disktypes": { - "get": { - "description": "List disktypes for a given project", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "operationId": "listProjectGCPDiskTypes", - "parameters": [ - { - "type": "string", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Zone", - "in": "query" - } - ], - "responses": { - "200": { - "description": "GCPDiskTypeList", - "schema": { - "$ref": "#/definitions/GCPDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gcp/networks": { - "get": { - "description": "Lists available GCP networks", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listProjectGCPNetworks", - "parameters": [ - { - "type": "string", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPNetworkList", - "schema": { - "$ref": 
"#/definitions/GCPNetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gcp/vmsizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GCP VM sizes.", - "operationId": "listProjectGCPVMSizes", - "parameters": [ - { - "type": "string", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Zone", - "in": "query" - }, - { - "type": "string", - "name": "DC", - "in": "query" - } - ], - "responses": { - "200": { - "description": "GCPMachineSizeList", - "schema": { - "$ref": "#/definitions/GCPMachineSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gcp/{dc}/subnetworks": { - "get": { - "description": "Lists available GCP subnetworks", - "produces": [ - "application/json" - ], - "tags": [ - "gcp" - ], - "operationId": "listProjectGCPSubnetworks", - "parameters": [ - { - "type": "string", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Network", - "in": "query" - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPSubnetworkList", - "schema": { - "$ref": "#/definitions/GCPSubnetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gcp/{dc}/zones": { - "get": { - "description": "Produces\napplication/json", - "tags": [ - "gke" - ], - "summary": "Lists GCP zones.", - "operationId": "listProjectGCPZones", - "parameters": [ - { - "type": "string", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "GCPZoneList", - "schema": { - "$ref": "#/definitions/GCPZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Lists GKE clusters.", - "operationId": "listGKEClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - 
"in": "header" - } - ], - "responses": { - "200": { - "description": "GKEClusterList", - "schema": { - "$ref": "#/definitions/GKEClusterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/disktypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GKE machine disk types.", - "operationId": "listProjectGKEDiskTypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GKEDiskTypeList", - "schema": { - "$ref": "#/definitions/GKEDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/images": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GKE image types.", - "operationId": "listProjectGKEImages", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GKEImageList", - "schema": { - "$ref": "#/definitions/GKEImageList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/validatecredentials": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Validates GKE credentials.", - "operationId": "validateProjectGKECredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/versions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GKE versions.", - "operationId": "listProjectGKEVersions", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - 
}, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - }, - { - "type": "string", - "description": "The Mode is how you want GKE Control plane version to be managed.\nManual: Manually manage the version upgrades.\nAuto: automatically manage the cluster's control plane version.", - "name": "Mode", - "in": "header" - }, - { - "type": "string", - "description": "The ReleaseChannel", - "name": "ReleaseChannel", - "in": "header" - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/vmsizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GKE VM sizes.", - "operationId": "listProjectGKEVMSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GCPMachineSizeList", - "schema": { - "$ref": "#/definitions/GCPMachineSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/gke/zones": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Lists GKE zones.", - "operationId": "listProjectGKEZones", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GKEZoneList", - "schema": { - "$ref": "#/definitions/GKEZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/hetzner/sizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "hetzner" - ], - "summary": "Lists sizes from Hetzner.", - "operationId": "listProjectHetznerSizes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "HetznerToken", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "HetznerSizeList", - "schema": { - "$ref": "#/definitions/HetznerSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": 
"#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/kubevirt/instancetypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available KubeVirt VirtualMachineInstancetype.", - "operationId": "listProjectKubeVirtInstancetypes", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VirtualMachineInstancetypeList", - "schema": { - "$ref": "#/definitions/VirtualMachineInstancetypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/kubevirt/preferences": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available KubeVirt VirtualMachinePreference.", - "operationId": "listProjectKubeVirtPreferences", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VirtualMachinePreferenceList", - "schema": { - "$ref": "#/definitions/VirtualMachinePreferenceList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/kubevirt/storageclasses": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available K8s StorageClasses in the Kubevirt cluster.", - "operationId": "listProjectKubeVirtStorageClasses", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "StorageClassList", - "schema": { - "$ref": "#/definitions/StorageClassList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/kubevirt/subnets": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available subnets in the KubeVirt cluster.", - "operationId": "listProjectKubevirtSubnets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "VPCName", - "in": "header" - }, - { - "type": "string", - "name": "StorageClassName", - "in": "header" - } - ], - "responses": { - "200": { - "description": 
"KubeVirtSubnetList", - "schema": { - "$ref": "#/definitions/KubeVirtSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/kubevirt/vpcs": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available VPCs in the Kubevirt cluster.", - "operationId": "listProjectKubevirtVPCs", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "KubeVirtVPCList", - "schema": { - "$ref": "#/definitions/KubeVirtVPCList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/nutanix/{dc}/categories": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "summary": "List category keys from Nutanix.", - "operationId": "listProjectNutanixCategories", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixCategoryList", - "schema": { - "$ref": "#/definitions/NutanixCategoryList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/nutanix/{dc}/categories/{category}/values": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "summary": "List available category values for a specific category from Nutanix.", - "operationId": "listProjectNutanixCategoryValues", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "Category", - "description": "Category to query the available values for", - "name": "category", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NutanixCategoryValueList", - "schema": { - "$ref": "#/definitions/NutanixCategoryValueList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - 
"/api/v2/projects/{project_id}/providers/nutanix/{dc}/clusters": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "summary": "List clusters from Nutanix.", - "operationId": "listProjectNutanixClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixClusterList", - "schema": { - "$ref": "#/definitions/NutanixClusterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/nutanix/{dc}/projects": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "summary": "List projects from Nutanix.", - "operationId": "listProjectNutanixProjects", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixProjectList", - "schema": { - "$ref": "#/definitions/NutanixProjectList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/nutanix/{dc}/subnets": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "summary": "List subnets from Nutanix.", - "operationId": "listProjectNutanixSubnets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "NutanixCluster", - "in": "header", - "required": true - }, - { - "type": "string", - "description": "Project query parameter. 
Can be omitted to query subnets without project scope", - "name": "NutanixProject", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixSubnetList", - "schema": { - "$ref": "#/definitions/NutanixSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/availabilityzones": { - "get": { - "description": "Lists availability zones from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackAvailabilityZones", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackAvailabilityZone", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackAvailabilityZone" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/networks": { - "get": { - "description": "Lists networks from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackNetworks", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackNetwork", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackNetwork" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } 
- } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/securitygroups": { - "get": { - "description": "Lists security groups from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackSecurityGroups", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSecurityGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSecurityGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/servergroups": { - "get": { - "description": "Lists server groups from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackServerGroups", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackServerGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackServerGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/sizes": { - "get": { - "description": "Lists sizes from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackSizes", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - 
{ - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSize", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSize" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/subnetpools": { - "get": { - "description": "Lists subnet pools from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackSubnetPools", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "integer", - "format": "int64", - "x-go-name": "IPVersion", - "name": "ip_version", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSubnetPool", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSubnetPool" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/subnets": { - "get": { - "description": "Lists subnets from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackSubnets", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": 
"header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "NetworkID", - "name": "network_id", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackSubnet", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSubnet" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/openstack/tenants": { - "get": { - "description": "Lists tenants from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listProjectOpenstackTenants", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OpenstackTenant", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackTenant" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vmwareclouddirector/{dc}/catalogs": { - "get": { - "description": "List VMware Cloud Director Catalogs", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listProjectVMwareCloudDirectorCatalogs", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorCatalogList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorCatalogList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - 
"$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vmwareclouddirector/{dc}/computepolicies": { - "get": { - "description": "List VMware Cloud Director Compute Policies", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listProjectVMwareCloudDirectorComputePolicies", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorComputePolicyList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorComputePolicyList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vmwareclouddirector/{dc}/networks": { - "get": { - "description": "List VMware Cloud Director OVDC Networks", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listProjectVMwareCloudDirectorNetworks", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorNetworkList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorNetworkList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vmwareclouddirector/{dc}/storageprofiles": { - "get": { - "description": "List VMware Cloud Director Storage Profiles", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listProjectVMwareCloudDirectorStorageProfiles", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - 
"name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorStorageProfileList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorStorageProfileList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vmwareclouddirector/{dc}/templates/{catalog_name}": { - "get": { - "description": "List VMware Cloud Director Templates", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listProjectVMwareCloudDirectorTemplates", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "CatalogName", - "description": "Catalog name to fetch the templates from", - "name": "catalog_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorTemplateList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorTemplateList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/datastores": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists datastores from vSphere datacenter.", - "operationId": "listProjectVSphereDatastores", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereDatastoreList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereDatastoreList" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/folders": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists folders from vSphere datacenter.", - "operationId": "listProjectVSphereFolders", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - 
"description": "VSphereFolder", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereFolder" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/networks": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists networks from vSphere datacenter.", - "operationId": "listProjectVSphereNetworks", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereNetwork", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereNetwork" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/tagcategories": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists tag categories from vSphere datacenter.", - "operationId": "listProjectVSphereTagCategories", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereTagCategory", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereTagCategory" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/tagcategories/{tag_category}/tags": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists tags for a tag category from vSphere datacenter.", - "operationId": "listProjectVSphereTagsForTagCategories", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "TagCategory", - "name": "tag_category", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VSphereTag", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereTag" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/vsphere/vmgroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "summary": "Lists VM Groups from vSphere datacenter.", - "operationId": "listProjectVSphereVMGroups", 
- "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereVMGroupList", - "schema": { - "$ref": "#/definitions/VSphereVMGroupList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/providers/{provider_name}/presets": { - "get": { - "description": "Lists presets for the provider in a specific project", - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "listProjectProviderPresets", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "type": "boolean", - "x-go-name": "Disabled", - "name": "disabled", - "in": "query" - }, - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Datacenter", - "name": "datacenter", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PresetList", - "schema": { - "$ref": "#/definitions/PresetList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/quota": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Returns Resource Quota for a given project.", - "operationId": "getProjectQuota", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ResourceQuota", - "schema": { - "$ref": "#/definitions/ResourceQuota" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/projects/{project_id}/quotacalculation": { - "post": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "project" - ], - "summary": "Calculates the projects resource quota updated by the given resources.", - "operationId": "calculateProjectResourceQuotaUpdate", - "parameters": [ - { - "type": "string", - "x-go-name": "ProjectID", - "name": "project_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "type": "object", - "properties": { - "alibabaInstanceType": { - "$ref": "#/definitions/AlibabaInstanceType" - }, - "anexiaNodeSpec": { - "$ref": "#/definitions/AnexiaNodeSpec" - }, - "awsSize": { - "$ref": "#/definitions/AWSSize" - }, - "azureSize": { - "$ref": "#/definitions/AzureSize" - }, - "diskSizeGB": { - "description": "DiskSizeGB will be processed only for those providers which don't have the disk size in their API objects, like AWS, Alibabla and GCP.", - "type": "integer", - "format": "int64", - 
"x-go-name": "DiskSizeGB" - }, - "doSize": { - "$ref": "#/definitions/DigitaloceanSize" - }, - "equinixSize": { - "$ref": "#/definitions/PacketSize" - }, - "gcpSize": { - "$ref": "#/definitions/GCPMachineSize" - }, - "hetznerSize": { - "$ref": "#/definitions/HetznerSize" - }, - "kubevirtNodeSize": { - "$ref": "#/definitions/KubevirtNodeSize" - }, - "nutanixNodeSpec": { - "$ref": "#/definitions/NutanixNodeSpec" - }, - "openstackSize": { - "$ref": "#/definitions/OpenstackSize" - }, - "replacedResources": { - "$ref": "#/definitions/ReplacedResources" - }, - "replicas": { - "type": "integer", - "format": "int64", - "x-go-name": "Replicas" - }, - "vSphereNodeSpec": { - "$ref": "#/definitions/VSphereNodeSpec" - }, - "vmDirectorNodeSpec": { - "$ref": "#/definitions/VMwareCloudDirectorNodeSpec" - } - } - } - } - ], - "responses": { - "200": { - "description": "ResourceQuotaUpdateCalculation", - "schema": { - "$ref": "#/definitions/ResourceQuotaUpdateCalculation" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/locations": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List AKS recommended Locations.", - "operationId": "listAKSLocations", - "parameters": [ - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AKSLocationList", - "schema": { - "$ref": "#/definitions/AKSLocationList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/modes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "Gets the AKS node pool modes.", - "operationId": "listAKSNodePoolModes", - "responses": { - "200": { - "description": "AKSNodePoolModes", - "schema": { - "$ref": "#/definitions/AKSNodePoolModes" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/resourcegroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List resource groups in an Azure subscription.", - "operationId": "listAKSResourceGroups", - "parameters": [ - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureResourceGroupList", - "schema": { - "$ref": "#/definitions/AzureResourceGroupList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/validatecredentials": { - "get": { - "description": "Validates AKS credentials", - 
"produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "operationId": "validateAKSCredentials", - "parameters": [ - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/versions": { - "get": { - "description": "Lists AKS versions", - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "operationId": "listAKSVersions", - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/aks/vmsizes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "aks" - ], - "summary": "List AKS available VM sizes in an Azure region.", - "operationId": "listAKSVMSizes", - "parameters": [ - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "Location - Resource location", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AKSVMSizeList", - "schema": { - "$ref": "#/definitions/AKSVMSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/azure/resourcegroups": { - "get": { - "description": "Lists available VM resource groups", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureResourceGroups", - "parameters": [ - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureResourceGroupsList", - "schema": { - "$ref": "#/definitions/AzureResourceGroupsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/azure/routetables": { - "get": { - "description": "Lists available VM route tables", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureRouteTables", - "parameters": [ - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { 
- "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureRouteTablesList", - "schema": { - "$ref": "#/definitions/AzureRouteTablesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/azure/securitygroups": { - "get": { - "description": "Lists available VM security groups", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureSecurityGroups", - "parameters": [ - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureSecurityGroupsList", - "schema": { - "$ref": "#/definitions/AzureSecurityGroupsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/azure/subnets": { - "get": { - "description": "Lists available VM subnets", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureSubnets", - "parameters": [ - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "VirtualNetwork", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureSubnetsList", - "schema": { - "$ref": "#/definitions/AzureSubnetsList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/azure/vnets": { - "get": { - "description": "Lists available VM virtual networks", - "produces": [ - "application/json" - ], - "tags": [ - "azure" - ], - "operationId": "listAzureVnets", - "parameters": [ - { - "type": "string", - "name": "SubscriptionID", - "in": "header" - }, - { - "type": "string", - "name": "TenantID", - "in": "header" - }, - { - "type": "string", - "name": "ClientID", - "in": "header" - }, - { - "type": "string", - "name": "ClientSecret", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "ResourceGroup", - "in": "header" - }, - { - "type": "string", - "name": "Location", - "in": "header" - } - ], - "responses": { - "200": { - "description": "AzureVirtualNetworksList", - "schema": { - "$ref": "#/definitions/AzureVirtualNetworksList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/baremetal/tinkerbell/dc/{dc}/images": { - "get": { - "description": "List 
Tinkerbell images", - "produces": [ - "application/json" - ], - "tags": [ - "tinkerbell" - ], - "operationId": "listTinkerbellImages", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "TinkerbellImagesList", - "schema": { - "$ref": "#/definitions/TinkerbellImagesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/clusterroles": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "List EKS Cluster Service Roles.", - "operationId": "listEKSClusterRoles", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSClusterRoleList", - "schema": { - "$ref": "#/definitions/EKSClusterRoleList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/regions": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "List EKS regions.", - "operationId": "listEKSRegions", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSRegionList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/EKSRegionList" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/securitygroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "List EKS securitygroup list.", - "operationId": "listEKSSecurityGroups", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "VpcId", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSSecurityGroupList", - "schema": { - "$ref": "#/definitions/EKSSecurityGroupList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/subnets": { - "get": 
{ - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "summary": "Lists EKS subnet list.", - "operationId": "listEKSSubnets", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - }, - { - "type": "string", - "name": "VpcId", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSSubnetList", - "schema": { - "$ref": "#/definitions/EKSSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/validatecredentials": { - "get": { - "description": "Validates EKS credentials", - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "operationId": "validateEKSCredentials", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/versions": { - "get": { - "description": "Lists EKS versions", - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "operationId": "listEKSVersions", - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/eks/vpcs": { - "get": { - "description": "Lists EKS vpc's", - "produces": [ - "application/json" - ], - "tags": [ - "eks" - ], - "operationId": "listEKSVPCS", - "parameters": [ - { - "type": "string", - "name": "AccessKeyID", - "in": "header" - }, - { - "type": "string", - "name": "SecretAccessKey", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleARN", - "in": "header" - }, - { - "type": "string", - "name": "AssumeRoleExternalID", - "in": "header" - }, - { - "type": "string", - "name": "Region", - "in": "header" - } - ], - "responses": { - "200": { - "description": "EKSVPCList", - "schema": { - "$ref": "#/definitions/EKSVPCList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/disktypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "summary": "Gets GKE machine disk types.", - "operationId": "listGKEDiskTypes", - "parameters": [ - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset 
for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GKEDiskTypeList", - "schema": { - "$ref": "#/definitions/GKEDiskTypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/images": { - "get": { - "description": "Lists GKE image types", - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "operationId": "listGKEImages", - "parameters": [ - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GKEImageList", - "schema": { - "$ref": "#/definitions/GKEImageList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/validatecredentials": { - "get": { - "description": "Validates GKE credentials", - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "operationId": "validateGKECredentials", - "parameters": [ - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/versions": { - "get": { - "description": "Lists GKE versions", - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "operationId": "listGKEVersions", - "parameters": [ - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - "description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - }, - { - "type": "string", - "description": "The Mode is how you want GKE Control plane version to be managed.\nManual: Manually manage the version upgrades.\nAuto: automatically manage the cluster's control plane version.", - "name": "Mode", - "in": "header" - }, - { - "type": "string", - "description": "The ReleaseChannel", - "name": "ReleaseChannel", - "in": "header" - } - ], - "responses": { - "200": { - "description": "MasterVersion", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/vmsizes": { - "get": { - "description": "Lists GKE vmsizes", - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "operationId": "listGKEVMSizes", - "parameters": [ - { - "type": "string", - "description": "The plain GCP service account", - "name": "ServiceAccount", - "in": "header" - }, - { - "type": "string", - 
"description": "The credential name used in the preset for the GCP provider", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "description": "The zone name", - "name": "Zone", - "in": "header" - } - ], - "responses": { - "200": { - "description": "GCPMachineSizeList", - "schema": { - "$ref": "#/definitions/GCPMachineSizeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/gke/zones": { - "get": { - "description": "Lists GKE zones", - "produces": [ - "application/json" - ], - "tags": [ - "gke" - ], - "operationId": "listGKEZones", - "responses": { - "200": { - "description": "GKEZoneList", - "schema": { - "$ref": "#/definitions/GKEZoneList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/kubevirt/dc/{dc}/images": { - "get": { - "description": "List KubeVirt images", - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "operationId": "listKubevirtImages", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "KubeVirtImagesList", - "schema": { - "$ref": "#/definitions/KubeVirtImagesList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/kubevirt/instancetypes": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available KubeVirt VirtualMachineInstancetype.", - "operationId": "listKubeVirtInstancetypes", - "parameters": [ - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VirtualMachineInstancetypeList", - "schema": { - "$ref": "#/definitions/VirtualMachineInstancetypeList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/kubevirt/preferences": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available KubeVirt VirtualMachinePreference.", - "operationId": "listKubeVirtPreferences", - "parameters": [ - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VirtualMachinePreferenceList", - "schema": { - "$ref": "#/definitions/VirtualMachinePreferenceList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/kubevirt/storageclasses": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "kubevirt" - ], - "summary": "Lists available K8s StorageClasses in the Kubevirt cluster.", - "operationId": "listKubeVirtStorageClasses", - "parameters": [ - { - "type": "string", - "name": "Kubeconfig", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - } - ], - "responses": { - "200": { - 
"description": "StorageClassList", - "schema": { - "$ref": "#/definitions/StorageClassList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/nutanix/{dc}/categories": { - "get": { - "description": "List category keys from Nutanix", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixCategories", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixCategoryList", - "schema": { - "$ref": "#/definitions/NutanixCategoryList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/nutanix/{dc}/categories/{category}/values": { - "get": { - "description": "List available category values for a specific category from Nutanix", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixCategoryValues", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "Category", - "description": "Category to query the available values for", - "name": "category", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NutanixCategoryValueList", - "schema": { - "$ref": "#/definitions/NutanixCategoryValueList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/nutanix/{dc}/clusters": { - "get": { - "description": "List clusters from Nutanix", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixClusters", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixClusterList", - "schema": { - "$ref": "#/definitions/NutanixClusterList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/nutanix/{dc}/projects": { - "get": { - "description": "List projects from Nutanix", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixProjects", - 
"parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixProjectList", - "schema": { - "$ref": "#/definitions/NutanixProjectList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/nutanix/{dc}/subnets": { - "get": { - "description": "List subnets from Nutanix", - "produces": [ - "application/json" - ], - "tags": [ - "nutanix" - ], - "operationId": "listNutanixSubnets", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "NutanixUsername", - "in": "header" - }, - { - "type": "string", - "name": "NutanixPassword", - "in": "header" - }, - { - "type": "string", - "name": "NutanixProxyURL", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "name": "NutanixCluster", - "in": "header", - "required": true - }, - { - "type": "string", - "description": "Project query parameter. Can be omitted to query subnets without project scope", - "name": "NutanixProject", - "in": "header" - } - ], - "responses": { - "200": { - "description": "NutanixSubnetList", - "schema": { - "$ref": "#/definitions/NutanixSubnetList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/openstack/servergroups": { - "get": { - "description": "Lists server groups from openstack", - "produces": [ - "application/json" - ], - "tags": [ - "openstack" - ], - "operationId": "listOpenstackServerGroups", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "OpenstackServerGroup", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackServerGroup" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/openstack/subnetpools": { - "get": { - "description": "Lists subnet pools from openstack", - "produces": [ - "application/json" - ], - 
"tags": [ - "openstack" - ], - "operationId": "listOpenstackSubnetPools", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "Domain", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenant", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackTenantID", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProject", - "in": "header" - }, - { - "type": "string", - "name": "OpenstackProjectID", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialID", - "in": "header" - }, - { - "type": "string", - "name": "ApplicationCredentialSecret", - "in": "header" - }, - { - "type": "boolean", - "name": "OIDCAuthentication", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "integer", - "format": "int64", - "x-go-name": "IPVersion", - "name": "ip_version", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OpenstackSubnetPool", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OpenstackSubnetPool" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vmwareclouddirector/{dc}/catalogs": { - "get": { - "description": "List VMware Cloud Director Catalogs", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorCatalogs", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorCatalogList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorCatalogList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vmwareclouddirector/{dc}/networks": { - "get": { - "description": "List VMware Cloud Director OVDC Networks", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorNetworks", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorNetworkList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorNetworkList" - } - 
}, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vmwareclouddirector/{dc}/storageprofiles": { - "get": { - "description": "List VMware Cloud Director Storage Profiles", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorStorageProfiles", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorStorageProfileList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorStorageProfileList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vmwareclouddirector/{dc}/templates/{catalog_name}": { - "get": { - "description": "List VMware Cloud Director Templates", - "produces": [ - "application/json" - ], - "tags": [ - "vmwareclouddirector" - ], - "operationId": "listVMwareCloudDirectorTemplates", - "parameters": [ - { - "type": "string", - "x-go-name": "DC", - "description": "KKP Datacenter to use for endpoint", - "name": "dc", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "APIToken", - "in": "header" - }, - { - "type": "string", - "name": "Organization", - "in": "header" - }, - { - "type": "string", - "name": "VDC", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - }, - { - "type": "string", - "x-go-name": "CatalogName", - "description": "Catalog name to fetch the templates from", - "name": "catalog_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "VMwareCloudDirectorTemplateList", - "schema": { - "$ref": "#/definitions/VMwareCloudDirectorTemplateList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vsphere/datastores": { - "get": { - "description": "Lists datastores from vsphere datacenter", - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "operationId": "listVSphereDatastores", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereDatastoreList", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/VSphereDatastoreList" - } - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/vsphere/vmgroups": { - "get": { - "description": "Lists VM Groups from 
vsphere datacenter", - "produces": [ - "application/json" - ], - "tags": [ - "vsphere" - ], - "operationId": "listVSphereVMGroups", - "parameters": [ - { - "type": "string", - "name": "Username", - "in": "header" - }, - { - "type": "string", - "name": "Password", - "in": "header" - }, - { - "type": "string", - "name": "DatacenterName", - "in": "header" - }, - { - "type": "string", - "name": "Credential", - "in": "header" - } - ], - "responses": { - "200": { - "description": "VSphereVMGroupList", - "schema": { - "$ref": "#/definitions/VSphereVMGroupList" - } - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/{provider_name}/dc/{dc}/defaultcluster": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "defaultCluster" - ], - "summary": "Retrieves the default cluster spec for the given provider and datacenter.", - "operationId": "getDefaultCluster", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "Cluster", - "schema": { - "$ref": "#/definitions/Cluster" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/{provider_name}/dc/{dc}/networkdefaults": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "networkdefaults" - ], - "summary": "Retrieves the cluster networking defaults for the given provider and datacenter.", - "operationId": "getNetworkDefaults", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "DC", - "name": "dc", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "NetworkDefaults", - "schema": { - "$ref": "#/definitions/NetworkDefaults" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/{provider_name}/presets": { - "get": { - "description": "Lists presets for the provider", - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "listProviderPresets", - "parameters": [ - { - "type": "boolean", - "x-go-name": "Disabled", - "name": "disabled", - "in": "query" - }, - { - "type": "string", - "x-go-name": "Name", - "name": "name", - "in": "query" - }, - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Datacenter", - "name": "datacenter", - "in": "query" - } - ], - "responses": { - "200": { - "description": "PresetList", - "schema": { - "$ref": "#/definitions/PresetList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "description": "Updates provider preset", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ 
- "preset" - ], - "operationId": "updatePreset", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/PresetBody" - } - } - ], - "responses": { - "200": { - "description": "Preset", - "schema": { - "$ref": "#/definitions/Preset" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates the preset", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "operationId": "createPreset", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/PresetBody" - } - } - ], - "responses": { - "200": { - "description": "Preset", - "schema": { - "$ref": "#/definitions/Preset" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/{provider_name}/presets/{preset_name}": { - "delete": { - "description": "This endpoint has been depreciated in favour of /presets/{presets_name} and /presets/{preset_name}/providers/{provider_name}.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "preset" - ], - "summary": "Deletes provider preset.", - "operationId": "deleteProviderPreset", - "deprecated": true, - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "PresetName", - "name": "preset_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/providers/{provider_name}/versions": { - "get": { - "description": "Lists all versions which don't result in automatic updates for a given provider", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "version" - ], - "operationId": "listVersionsByProvider", - "parameters": [ - { - "type": "string", - "x-go-name": "ProviderName", - "name": "provider_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "description": "Type is deprecated and not used anymore.", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "VersionList", - "schema": { - "$ref": "#/definitions/VersionList" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/quotas": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "resourceQuota", - "admin" - ], - "summary": "Gets a Resource Quota list. 
If `accumulate` is set to `true` then all the quota's quotas and global usage are accumulated and a `totalquota` is returned.", - "operationId": "listResourceQuotas", - "parameters": [ - { - "type": "string", - "x-go-name": "SubjectName", - "name": "subject_name", - "in": "query" - }, - { - "type": "string", - "x-go-name": "SubjectKind", - "name": "subject_kind", - "in": "query" - }, - { - "type": "boolean", - "x-go-name": "Accumulate", - "name": "accumulate", - "in": "query" - } - ], - "responses": { - "200": { - "description": "ResourceQuota", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/ResourceQuota" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "produces": [ - "application/json" - ], - "tags": [ - "resourceQuota", - "admin" - ], - "summary": "Creates a new Resource Quota.", - "operationId": "createResourceQuota", - "parameters": [ - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "type": "object", - "properties": { - "quota": { - "$ref": "#/definitions/Quota" - }, - "subjectKind": { - "type": "string", - "x-go-name": "SubjectKind" - }, - "subjectName": { - "type": "string", - "x-go-name": "SubjectName" - } - } - } - } - ], - "responses": { - "201": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/quotas/{quota_name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "resourceQuota", - "admin" - ], - "summary": "Gets a specific Resource Quota.", - "operationId": "getResourceQuota", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "quota_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "ResourceQuota", - "schema": { - "$ref": "#/definitions/ResourceQuota" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "produces": [ - "application/json" - ], - "tags": [ - "resourceQuota", - "admin" - ], - "summary": "Updates an existing Resource Quota.", - "operationId": "putResourceQuota", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "quota_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/Quota" - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "resourceQuota", - "admin" - ], - "summary": "Removes an existing Resource Quota.", - "operationId": "deleteResourceQuota", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "quota_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - 
"default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/status": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "seed" - ], - "summary": "Lists Seeds and their status.", - "operationId": "listSeedStatus", - "responses": { - "200": { - "description": "SeedStatus", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/SeedStatus" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/backupcredentials": { - "put": { - "description": "Creates or updates backup credentials for a given seed", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "backupcredentials" - ], - "operationId": "createOrUpdateBackupCredentials", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "schema": { - "$ref": "#/definitions/bcBody" - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/ipampools": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "ipampool" - ], - "summary": "Lists IPAM pools.", - "operationId": "listIPAMPools", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "IPAMPool", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/IPAMPool" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "consumes": [ - "application/json" - ], - "tags": [ - "ipampool" - ], - "summary": "Creates a IPAM pool.", - "operationId": "createIPAMPool", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/IPAMPool" - } - } - ], - "responses": { - "201": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/ipampools/{ipampool_name}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "ipampool" - ], - "summary": "Gets a specific IPAM pool.", - "operationId": "getIPAMPool", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "IPAMPoolName", - "name": "ipampool_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "IPAMPool", - "schema": { - "$ref": "#/definitions/IPAMPool" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": 
"#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "tags": [ - "ipampool" - ], - "summary": "Removes an existing IPAM pool.", - "operationId": "deleteIPAMPool", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "IPAMPoolName", - "name": "ipampool_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "patch": { - "consumes": [ - "application/json" - ], - "tags": [ - "ipampool" - ], - "summary": "Patches a IPAM pool.", - "operationId": "patchIPAMPool", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "IPAMPoolName", - "name": "ipampool_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/IPAMPool" - } - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/operatingsystemprofiles": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "operatingsystemprofile" - ], - "summary": "Lists Operating System Profiles.", - "operationId": "listOperatingSystemProfiles", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OperatingSystemProfile", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/OperatingSystemProfile" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/overview": { - "get": { - "tags": [ - "seed", - "admin" - ], - "summary": "Returns seed's overview.", - "operationId": "getSeedOverview", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "SeedOverview", - "schema": { - "$ref": "#/definitions/SeedOverview" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/rulegroups": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Lists rule groups that belong to a given Seed.", - "operationId": "listAdminRuleGroups", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "Type", - "name": "type", - "in": "query" - } - ], - "responses": { - "200": { - "description": "RuleGroup", - 
"schema": { - "type": "array", - "items": { - "$ref": "#/definitions/RuleGroup" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "post": { - "description": "Creates a rule group that will belong to the given Seed", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "operationId": "createAdminRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/RuleGroup" - } - } - ], - "responses": { - "201": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/rulegroups/{rulegroup_id}": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Gets a specified rule group for a given Seed.", - "operationId": "getAdminRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "put": { - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Updates the specified rule group for the given Seed.", - "operationId": "updateAdminRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - }, - { - "name": "Body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/RuleGroup" - } - } - ], - "responses": { - "200": { - "description": "RuleGroup", - "schema": { - "$ref": "#/definitions/RuleGroup" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - }, - "delete": { - "produces": [ - "application/json" - ], - "tags": [ - "rulegroup" - ], - "summary": "Deletes the given rule group that belongs to the Seed.", - "operationId": "deleteAdminRuleGroup", - "parameters": [ - { - "type": "string", - "x-go-name": "SeedName", - "name": "seed_name", - "in": "path", - "required": true - }, - { - "type": "string", - "x-go-name": "RuleGroupID", - "name": "rulegroup_id", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "$ref": "#/responses/empty" - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": 
"errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/seeds/{seed_name}/settings": { - "get": { - "produces": [ - "application/json" - ], - "tags": [ - "seed" - ], - "summary": "Gets the seed settings.", - "operationId": "getSeedSettings", - "parameters": [ - { - "type": "string", - "x-go-name": "Name", - "name": "seed_name", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "SeedSettings", - "schema": { - "$ref": "#/definitions/SeedSettings" - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - }, - "/api/v2/users": { - "get": { - "description": "List users", - "produces": [ - "application/json" - ], - "tags": [ - "user" - ], - "operationId": "listUser", - "responses": { - "200": { - "description": "User", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/User" - } - } - }, - "401": { - "$ref": "#/responses/empty" - }, - "403": { - "$ref": "#/responses/empty" - }, - "default": { - "description": "errorResponse", - "schema": { - "$ref": "#/definitions/errorResponse" - } - } - } - } - } - }, - "definitions": { - "AKS": { - "type": "object", - "properties": { - "clientID": { - "description": "The service principal used to access Azure.", - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "description": "The client secret corresponding to the given service principal.", - "type": "string", - "x-go-name": "ClientSecret" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. 
Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "subscriptionID": { - "description": "The Azure Subscription used for the user cluster.", - "type": "string", - "x-go-name": "SubscriptionID" - }, - "tenantID": { - "description": "The Azure Active Directory Tenant used for the user cluster.", - "type": "string", - "x-go-name": "TenantID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AKSCloudSpec": { - "type": "object", - "properties": { - "clientID": { - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "type": "string", - "x-go-name": "ClientSecret" - }, - "location": { - "type": "string", - "x-go-name": "Location" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "resourceGroup": { - "type": "string", - "x-go-name": "ResourceGroup" - }, - "subscriptionID": { - "type": "string", - "x-go-name": "SubscriptionID" - }, - "tenantID": { - "type": "string", - "x-go-name": "TenantID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSCluster": { - "type": "object", - "title": "AKSCluster represents an object of AKS cluster.", - "properties": { - "imported": { - "type": "boolean", - "x-go-name": "IsImported" - }, - "location": { - "type": "string", - "x-go-name": "Location" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "resourceGroup": { - "type": "string", - "x-go-name": "ResourceGroup" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSClusterList": { - "type": "array", - "title": "AKSClusterList represents an list of AKS clusters.", - "items": { - "$ref": "#/definitions/AKSCluster" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSClusterSpec": { - "type": "object", - "title": "AKSClusterSpec Azure Kubernetes Service cluster.", - "properties": { - "createdAt": { - "description": "The timestamp of resource creation (UTC).", - "type": "string", - "format": "date-time", - "x-go-name": "CreatedAt" - }, - "createdBy": { - "description": "The identity that created the resource.", - "type": "string", - "x-go-name": "CreatedBy" - }, - "dnsPrefix": { - "description": "DNSPrefix - This cannot be updated once the Managed Cluster has been created.", - "type": "string", - "x-go-name": "DNSPrefix" - }, - "enableRBAC": { - "description": "EnableRBAC - Whether Kubernetes Role-Based Access Control Enabled.", - "type": "boolean", - "x-go-name": "EnableRBAC" - }, - "fqdn": { - "description": "Fqdn - READ-ONLY; The FQDN of the master pool.", - "type": "string", - "x-go-name": "Fqdn" - }, - "fqdnSubdomain": { - "description": "FqdnSubdomain - This cannot be updated once the Managed Cluster has been created.", - "type": "string", - "x-go-name": "FqdnSubdomain" - }, - "kubernetesVersion": { - "description": "KubernetesVersion - When you upgrade a supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be performed sequentially by major version number. For example, upgrades between 1.14.x -\u003e 1.15.x or 1.15.x -\u003e 1.16.x are allowed, however 1.14.x -\u003e 1.16.x is not allowed. 
See [upgrading an AKS cluster](https://docs.microsoft.com/azure/aks/upgrade-cluster) for more details.", - "type": "string", - "x-go-name": "KubernetesVersion" - }, - "machineDeploymentSpec": { - "$ref": "#/definitions/AKSMachineDeploymentCloudSpec" - }, - "networkProfile": { - "$ref": "#/definitions/AKSNetworkProfile" - }, - "privateFQDN": { - "description": "PrivateFQDN - READ-ONLY; The FQDN of private cluster.", - "type": "string", - "x-go-name": "PrivateFQDN" - }, - "tags": { - "description": "Resource tags.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSClusterStatus": { - "type": "object", - "properties": { - "powerState": { - "$ref": "#/definitions/AKSPowerState" - }, - "provisioningState": { - "$ref": "#/definitions/AKSProvisioningState" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSLocation": { - "type": "object", - "title": "AKSLocation represents an object of Azure Location.", - "properties": { - "name": { - "description": "The location name.", - "type": "string", - "x-go-name": "Name" - }, - "regionCategory": { - "description": "READ-ONLY; The category of the region.", - "type": "string", - "x-go-name": "RegionCategory" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSLocationList": { - "type": "array", - "title": "AKSLocationList represents a list of AKS Locations.", - "items": { - "$ref": "#/definitions/AKSLocation" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSMDPhase": { - "type": "object", - "properties": { - "powerState": { - "$ref": "#/definitions/AKSPowerState" - }, - "provisioningState": { - "$ref": "#/definitions/AKSProvisioningState" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSMachineDeploymentCloudSpec": { - "type": "object", - "properties": { - "basicSettings": { - "$ref": "#/definitions/AgentPoolBasics" - }, - "configuration": { - "$ref": "#/definitions/AgentPoolConfig" - }, - "name": { - "description": "Name - Node pool name must contain only lowercase letters and numbers. For Linux node pools must be 12 or fewer characters.", - "type": "string", - "x-go-name": "Name" - }, - "optionalSettings": { - "$ref": "#/definitions/AgentPoolOptionalSettings" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSNetworkProfile": { - "type": "object", - "title": "AKS NetworkProfile profile of network configuration.", - "properties": { - "dnsServiceIP": { - "description": "DNSServiceIP - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.", - "type": "string", - "x-go-name": "DNSServiceIP" - }, - "dockerBridgeCidr": { - "description": "DockerBridgeCidr - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.", - "type": "string", - "x-go-name": "DockerBridgeCidr" - }, - "loadBalancerSku": { - "description": "LoadBalancerSku - The default is 'standard'. See [Azure Load Balancer SKUs](https://docs.microsoft.com/azure/load-balancer/skus) for more information about the differences between load balancer SKUs. Possible values include: 'LoadBalancerSkuStandard', 'LoadBalancerSkuBasic'", - "type": "string", - "x-go-name": "LoadBalancerSku" - }, - "networkMode": { - "description": "NetworkMode - This cannot be specified if networkPlugin is anything other than 'azure'. 
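The `kubernetesVersion` description above spells out the AKS upgrade constraint: minor versions cannot be skipped (1.14.x → 1.15.x is fine, 1.14.x → 1.16.x is not). A toy checker for that rule, simplified to same-major upgrades and assuming well-formed `major.minor.patch` strings:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseMajorMinor(v string) (int, int, error) {
	parts := strings.SplitN(v, ".", 3)
	if len(parts) < 2 {
		return 0, 0, fmt.Errorf("malformed version %q", v)
	}
	maj, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, 0, err
	}
	min, err := strconv.Atoi(parts[1])
	if err != nil {
		return 0, 0, err
	}
	return maj, min, nil
}

// upgradeAllowed reports whether from -> to respects "no skipped minors".
func upgradeAllowed(from, to string) bool {
	fMaj, fMin, err1 := parseMajorMinor(from)
	tMaj, tMin, err2 := parseMajorMinor(to)
	if err1 != nil || err2 != nil {
		return false
	}
	return fMaj == tMaj && (tMin == fMin || tMin == fMin+1)
}

func main() {
	fmt.Println(upgradeAllowed("1.14.8", "1.15.0")) // true
	fmt.Println(upgradeAllowed("1.14.8", "1.16.0")) // false: skips 1.15
}
```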
Possible values include: 'Transparent', 'Bridge'", - "type": "string", - "x-go-name": "NetworkMode" - }, - "networkPlugin": { - "description": "NetworkPlugin - Network plugin used for building the Kubernetes network. Possible values include: 'Azure', 'Kubenet'", - "type": "string", - "x-go-name": "NetworkPlugin" - }, - "networkPolicy": { - "description": "NetworkPolicy - Network policy used for building the Kubernetes network. Possible values include: 'Calico', 'Azure'", - "type": "string", - "x-go-name": "NetworkPolicy" - }, - "outboundType": { - "description": "OutboundType - This can only be set at cluster creation time and cannot be changed later. For more information see [egress outbound type](https://docs.microsoft.com/azure/aks/egress-outboundtype). Possible values include: 'OutboundTypeLoadBalancer', 'OutboundTypeUserDefinedRouting', 'OutboundTypeManagedNATGateway', 'OutboundTypeUserAssignedNATGateway'", - "type": "string", - "x-go-name": "OutboundType" - }, - "podCidr": { - "description": "PodCidr - A CIDR notation IP range from which to assign pod IPs when kubenet is used.", - "type": "string", - "x-go-name": "PodCidr" - }, - "serviceCidr": { - "description": "ServiceCidr - A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.", - "type": "string", - "x-go-name": "ServiceCidr" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSNodePoolModes": { - "type": "array", - "title": "AKSNodePoolModes represents nodepool modes.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSNodegroupScalingConfig": { - "type": "object", - "properties": { - "maxCount": { - "description": "MaxCount - The maximum number of nodes for auto-scaling", - "type": "integer", - "format": "int32", - "x-go-name": "MaxCount" - }, - "minCount": { - "description": "MinCount - The minimum number of nodes for auto-scaling", - "type": "integer", - "format": "int32", - "x-go-name": "MinCount" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSPowerState": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSProvisioningState": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSVMSize": { - "type": "object", - "title": "AKSVMSize is the object representing Azure VM sizes.", - "properties": { - "maxDataDiskCount": { - "type": "integer", - "format": "int32", - "x-go-name": "MaxDataDiskCount" - }, - "memoryInMB": { - "type": "integer", - "format": "int32", - "x-go-name": "MemoryInMB" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "numberOfCores": { - "type": "integer", - "format": "int32", - "x-go-name": "NumberOfCores" - }, - "numberOfGPUs": { - "type": "integer", - "format": "int32", - "x-go-name": "NumberOfGPUs" - }, - "osDiskSizeInMB": { - "type": "integer", - "format": "int32", - "x-go-name": "OsDiskSizeInMB" - }, - "resourceDiskSizeInMB": { - "type": "integer", - "format": "int32", - "x-go-name": "ResourceDiskSizeInMB" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AKSVMSizeList": { - "type": "array", - "title": "AKSVMSizeList represents an array of AKS VM sizes.", - "items": { - "$ref": "#/definitions/AKSVMSize" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AWS": { - "type": "object", - "properties": { - "accessKeyID": { - "description": "The Access key ID used to authenticate against AWS.", - "type": "string", - "x-go-name": "AccessKeyID" - }, - 
"assumeRoleARN": { - "description": "Defines the ARN for an IAM role that should be assumed when handling resources on AWS. It will be used\nto acquire temporary security credentials using an STS AssumeRole API operation whenever creating an AWS session.\n+optional", - "type": "string", - "x-go-name": "AssumeRoleARN" - }, - "assumeRoleExternalID": { - "description": "An arbitrary string that may be needed when calling the STS AssumeRole API operation.\nUsing an external ID can help to prevent the \"confused deputy problem\".\n+optional", - "type": "string", - "x-go-name": "AssumeRoleExternalID" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "instanceProfileName": { - "description": "Instance profile to use. This can be configured, but if left empty will be\nautomatically filled in during reconciliation.", - "type": "string", - "x-go-name": "InstanceProfileName" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "roleARN": { - "description": "ARN to use. This can be configured, but if left empty will be\nautomatically filled in during reconciliation.", - "type": "string", - "x-go-name": "ControlPlaneRoleARN" - }, - "routeTableID": { - "description": "Route table to use. This can be configured, but if left empty will be\nautomatically filled in during reconciliation.", - "type": "string", - "x-go-name": "RouteTableID" - }, - "secretAccessKey": { - "description": "The Secret Access Key used to authenticate against AWS.", - "type": "string", - "x-go-name": "SecretAccessKey" - }, - "securityGroupID": { - "description": "Security group to use. This can be configured, but if left empty will be\nautomatically filled in during reconciliation.", - "type": "string", - "x-go-name": "SecurityGroupID" - }, - "vpcID": { - "description": "AWS VPC to use. Must be configured.", - "type": "string", - "x-go-name": "VPCID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AWSCloudSpec": { - "type": "object", - "title": "AWSCloudSpec specifies access data to Amazon Web Services.", - "properties": { - "accessKeyID": { - "description": "The Access key ID used to authenticate against AWS.", - "type": "string", - "x-go-name": "AccessKeyID" - }, - "assumeRoleARN": { - "description": "Defines the ARN for an IAM role that should be assumed when handling resources on AWS. 
It will be used\nto acquire temporary security credentials using an STS AssumeRole API operation whenever creating an AWS session.\n+optional", - "type": "string", - "x-go-name": "AssumeRoleARN" - }, - "assumeRoleExternalID": { - "description": "An arbitrary string that may be needed when calling the STS AssumeRole API operation.\nUsing an external ID can help to prevent the \"confused deputy problem\".\n+optional", - "type": "string", - "x-go-name": "AssumeRoleExternalID" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "disableIAMReconciling": { - "description": "DisableIAMReconciling is used to disable reconciliation for IAM related configuration. This is useful in air-gapped\nsetups where access to IAM service is not possible.", - "type": "boolean", - "x-go-name": "DisableIAMReconciling" - }, - "instanceProfileName": { - "type": "string", - "x-go-name": "InstanceProfileName" - }, - "nodePortsAllowedIPRange": { - "description": "A CIDR range that will be used to allow access to the node port range in the security group to. Only applies if\nthe security group is generated by KKP and not preexisting.\nIf NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.", - "type": "string", - "x-go-name": "NodePortsAllowedIPRange" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "roleARN": { - "description": "The IAM role, the control plane will use. The control plane will perform an assume-role", - "type": "string", - "x-go-name": "ControlPlaneRoleARN" - }, - "routeTableID": { - "type": "string", - "x-go-name": "RouteTableID" - }, - "secretAccessKey": { - "description": "The Secret Access Key used to authenticate against AWS.", - "type": "string", - "x-go-name": "SecretAccessKey" - }, - "securityGroupID": { - "type": "string", - "x-go-name": "SecurityGroupID" - }, - "vpcID": { - "type": "string", - "x-go-name": "VPCID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AWSNodeSpec": { - "description": "AWSNodeSpec aws specific node settings", - "type": "object", - "required": [ - "instanceType", - "diskSize", - "volumeType" - ], - "properties": { - "ami": { - "description": "ami to use. Will be defaulted to a ami for your selected operating system and region. Only set this when you know what you do.", - "type": "string", - "x-go-name": "AMI" - }, - "assignPublicIP": { - "description": "This flag controls a property of the AWS instance. When set the AWS instance will get a public IP address\nassigned during launch overriding a possible setting in the used AWS subnet.", - "type": "boolean", - "x-go-name": "AssignPublicIP" - }, - "assumeRoleARN": { - "description": "AssumeRoleARN defines the ARN for an IAM role that should be assumed when handling resources on AWS. It will be used\nto acquire temporary security credentials using an STS AssumeRole API operation whenever creating an AWS session.", - "type": "string", - "x-go-name": "AssumeRoleARN" - }, - "assumeRoleExternalID": { - "description": "AssumeRoleExternalID is an arbitrary string that may be needed when calling the STS AssumeRole API operation.\nUsing an external ID can help to prevent the \"confused deputy problem\".", - "type": "string", - "x-go-name": "AssumeRoleExternalID" - }, - "availabilityZone": { - "description": "Availability zone in which to place the node. 
It is coupled with the subnet to which the node will belong.", - "type": "string", - "x-go-name": "AvailabilityZone" - }, - "diskSize": { - "description": "size of the volume in gb. Only one volume will be created", - "type": "integer", - "format": "int32", - "x-go-name": "VolumeSize" - }, - "ebsVolumeEncrypted": { - "description": "EBSVolumeEncrypted indicates whether EBS volume encryption is enabled.", - "type": "boolean", - "x-go-name": "EBSVolumeEncrypted" - }, - "instanceType": { - "type": "string", - "x-go-name": "InstanceType", - "example": "t2.micro" - }, - "isSpotInstance": { - "description": "IsSpotInstance indicates whether the created machine is an aws ec2 spot instance or on-demand ec2 instance.", - "type": "boolean", - "x-go-name": "IsSpotInstance" - }, - "spotInstanceInterruptionBehavior": { - "description": "SpotInstanceInterruptionBehavior sets the interruption behavior for the spot instance when capacity is no longer\navailable at the price you specified, if there is no capacity, or if a constraint cannot be met. Charges for EBS\nvolume storage apply when an instance is stopped.", - "type": "string", - "x-go-name": "SpotInstanceInterruptionBehavior" - }, - "spotInstanceMaxPrice": { - "description": "SpotInstanceMaxPrice is the maximum price you are willing to pay per instance hour. Your instance runs when\nyour maximum price is greater than the Spot Price.", - "type": "string", - "x-go-name": "SpotInstanceMaxPrice" - }, - "spotInstancePersistentRequest": { - "description": "SpotInstancePersistentRequest ensures that your request will be submitted every time your Spot Instance is terminated.", - "type": "boolean", - "x-go-name": "SpotInstancePersistentRequest" - }, - "subnetID": { - "description": "The VPC subnet to which the node shall be connected.", - "type": "string", - "x-go-name": "SubnetID" - }, - "tags": { - "description": "additional instance tags", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "volumeType": { - "type": "string", - "x-go-name": "VolumeType", - "example": "gp2, io1, st1, sc1, standard" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSSecurityGroupList": { - "type": "object", - "title": "AWSSecurityGroupList represents an array of AWS Security Group.", - "properties": { - "ids": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "IDs" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSSize": { - "type": "object", - "title": "AWSSize represents a object of AWS size.", - "properties": { - "architecture": { - "type": "string", - "x-go-name": "Architecture" - }, - "gpus": { - "type": "integer", - "format": "int64", - "x-go-name": "GPUs" - }, - "memory": { - "type": "number", - "format": "float", - "x-go-name": "Memory" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "pretty_name": { - "type": "string", - "x-go-name": "PrettyName" - }, - "price": { - "type": "number", - "format": "double", - "x-go-name": "Price" - }, - "vcpus": { - "type": "integer", - "format": "int64", - "x-go-name": "VCPUs" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSSizeList": { - "type": "array", - "title": "AWSSizeList represents an array of AWS sizes.", - "items": { - "$ref": "#/definitions/AWSSize" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSSubnet": { - "type": "object", - "title": "AWSSubnet represents a object of AWS availability subnet.", - "properties": { - "availability_zone": { - "type": 
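`AWSNodeSpec` above marks `instanceType`, `diskSize` and `volumeType` as required. A minimal payload sketch using the spec's own example values (`t2.micro`, `gp2`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// AWSNodeSpec covers just the required fields plus two common optionals.
type AWSNodeSpec struct {
	InstanceType   string `json:"instanceType"`
	DiskSize       int32  `json:"diskSize"` // volume size in GB; only one volume is created
	VolumeType     string `json:"volumeType"`
	AssignPublicIP bool   `json:"assignPublicIP,omitempty"`
	IsSpotInstance bool   `json:"isSpotInstance,omitempty"`
}

func main() {
	spec := AWSNodeSpec{InstanceType: "t2.micro", DiskSize: 25, VolumeType: "gp2"}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```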
"string", - "x-go-name": "AvailabilityZone" - }, - "availability_zone_id": { - "type": "string", - "x-go-name": "AvailabilityZoneID" - }, - "available_ip_address_count": { - "type": "integer", - "format": "int64", - "x-go-name": "AvailableIPAddressCount" - }, - "default": { - "type": "boolean", - "x-go-name": "DefaultForAz" - }, - "id": { - "type": "string", - "x-go-name": "ID" - }, - "ipv4cidr": { - "type": "string", - "x-go-name": "IPv4CIDR" - }, - "ipv6cidr": { - "type": "string", - "x-go-name": "IPv6CIDR" - }, - "isDefaultSubnet": { - "type": "boolean", - "x-go-name": "IsDefaultSubnet" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "state": { - "type": "string", - "x-go-name": "State" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/AWSTag" - }, - "x-go-name": "Tags" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSSubnetList": { - "type": "array", - "title": "AWSSubnetList represents an array of AWS availability subnets.", - "items": { - "$ref": "#/definitions/AWSSubnet" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSTag": { - "type": "object", - "title": "AWSTag represents a object of AWS tags.", - "properties": { - "key": { - "type": "string", - "x-go-name": "Key" - }, - "value": { - "type": "string", - "x-go-name": "Value" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSVPC": { - "type": "object", - "title": "AWSVPC represents a object of AWS VPC.", - "properties": { - "cidrBlock": { - "description": "The primary IPv4 CIDR block for the VPC.", - "type": "string", - "x-go-name": "CidrBlock" - }, - "cidrBlockAssociationSet": { - "description": "Information about the IPv4 CIDR blocks associated with the VPC.", - "type": "array", - "items": { - "$ref": "#/definitions/AWSVpcCidrBlockAssociation" - }, - "x-go-name": "CidrBlockAssociationSet" - }, - "dhcpOptionsId": { - "description": "The ID of the set of DHCP options you've associated with the VPC (or default\nif the default options are associated with the VPC).", - "type": "string", - "x-go-name": "DhcpOptionsID" - }, - "instanceTenancy": { - "description": "The allowed tenancy of instances launched into the VPC.", - "type": "string", - "x-go-name": "InstanceTenancy" - }, - "ipv6CidrBlockAssociationSet": { - "description": "Information about the IPv6 CIDR blocks associated with the VPC.", - "type": "array", - "items": { - "$ref": "#/definitions/AWSVpcIpv6CidrBlockAssociation" - }, - "x-go-name": "Ipv6CidrBlockAssociationSet" - }, - "isDefault": { - "description": "Indicates whether the VPC is the default VPC.", - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "ownerId": { - "description": "The ID of the AWS account that owns the VPC.", - "type": "string", - "x-go-name": "OwnerID" - }, - "state": { - "description": "The current state of the VPC.", - "type": "string", - "x-go-name": "State" - }, - "tags": { - "description": "Any tags assigned to the VPC.", - "type": "array", - "items": { - "$ref": "#/definitions/AWSTag" - }, - "x-go-name": "Tags" - }, - "vpcId": { - "description": "The ID of the VPC.", - "type": "string", - "x-go-name": "VpcID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSVPCList": { - "type": "array", - "title": "AWSVPCList represents an array of AWS VPC's.", - "items": { - "$ref": "#/definitions/AWSVPC" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSVpcCidrBlockAssociation": { - "type": "object", - 
"title": "AWSVpcCidrBlockAssociation describes an IPv4 CIDR block associated with a VPC.", - "properties": { - "associationId": { - "description": "The association ID for the IPv4 CIDR block.", - "type": "string", - "x-go-name": "AssociationID" - }, - "cidrBlock": { - "description": "The IPv4 CIDR block.", - "type": "string", - "x-go-name": "CidrBlock" - }, - "state": { - "description": "The state of the CIDR block.", - "type": "string", - "x-go-name": "State" - }, - "statusMessage": { - "description": "A message about the status of the CIDR block, if applicable.", - "type": "string", - "x-go-name": "StatusMessage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AWSVpcIpv6CidrBlockAssociation": { - "type": "object", - "title": "AWSVpcIpv6CidrBlockAssociation describes an IPv6 CIDR block associated with a VPC.", - "properties": { - "associationId": { - "description": "The association ID for the IPv4 CIDR block.", - "type": "string", - "x-go-name": "AssociationID" - }, - "cidrBlock": { - "description": "The IPv4 CIDR block.", - "type": "string", - "x-go-name": "CidrBlock" - }, - "state": { - "description": "The state of the CIDR block.", - "type": "string", - "x-go-name": "State" - }, - "statusMessage": { - "description": "A message about the status of the CIDR block, if applicable.", - "type": "string", - "x-go-name": "StatusMessage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AccessibleAddons": { - "type": "array", - "title": "AccessibleAddons represents an array of addons that can be configured in the user clusters.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Addon": { - "description": "Addon represents a predefined addon that users may install into their cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/AddonSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AddonConfig": { - "description": "AddonConfig represents a addon configuration", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/AddonConfigSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AddonConfigSpec": { - "type": "object", - "title": "AddonConfigSpec specifies configuration of addon.", - "properties": { - "description": { - "description": "Description of the configured addon, it will be displayed in the addon overview in the UI", - "type": "string", - "x-go-name": "Description" - }, - "formSpec": { - "description": "Controls that can be set for configured addon", - "type": "array", - "items": { - "$ref": "#/definitions/AddonFormControl" - }, - "x-go-name": "Controls" - }, - "logo": { - "description": "Logo of the configured addon, encoded in base64", - "type": "string", - "x-go-name": "Logo" - }, - "logoFormat": { - "description": "LogoFormat contains logo format of the configured addon, i.e. 
svg+xml", - "type": "string", - "x-go-name": "LogoFormat" - }, - "shortDescription": { - "description": "ShortDescription of the configured addon that contains more detailed information about the addon,\nit will be displayed in the addon details view in the UI", - "type": "string", - "x-go-name": "ShortDescription" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AddonFormControl": { - "type": "object", - "title": "AddonFormControl specifies addon form control.", - "properties": { - "displayName": { - "description": "DisplayName is visible in the UI", - "type": "string", - "x-go-name": "DisplayName" - }, - "helpText": { - "description": "HelpText is visible in the UI next to the control", - "type": "string", - "x-go-name": "HelpText" - }, - "internalName": { - "description": "InternalName is used internally to save in the addon object", - "type": "string", - "x-go-name": "InternalName" - }, - "required": { - "description": "Required indicates if the control has to be set", - "type": "boolean", - "x-go-name": "Required" - }, - "type": { - "description": "Type of displayed control", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AddonSpec": { - "description": "AddonSpec addon specification", - "type": "object", - "properties": { - "continuouslyReconcile": { - "description": "ContinuouslyReconcile indicates that the addon cannot be deleted or modified outside of the UI after installation", - "type": "boolean", - "x-go-name": "ContinuouslyReconcile" - }, - "isDefault": { - "description": "IsDefault indicates whether the addon is default", - "type": "boolean", - "x-go-name": "IsDefault" - }, - "variables": { - "description": "Variables is free form data to use for parsing the manifest templates", - "type": "object", - "additionalProperties": {}, - "x-go-name": "Variables" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Admin": { - "description": "Admin represents admin user", - "type": "object", - "properties": { - "email": { - "description": "Email address of the admin user", - "type": "string", - "x-go-name": "Email" - }, - "isAdmin": { - "description": "IsAdmin indicates admin role", - "type": "boolean", - "x-go-name": "IsAdmin" - }, - "isGlobalViewer": { - "description": "IsGlobalViewer indicates GlobalViewer role", - "type": "boolean", - "x-go-name": "IsGlobalViewer" - }, - "name": { - "description": "Name of the admin user", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AdmissionPlugin": { - "description": "AdmissionPlugin represents an admission plugin", - "type": "object", - "properties": { - "fromVersion": { - "$ref": "#/definitions/Semver" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "plugin": { - "type": "string", - "x-go-name": "Plugin" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AdmissionPluginList": { - "description": "AdmissionPluginList represents a list of admission plugins", - "type": "array", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AgentPoolBasics": { - "type": "object", - "properties": { - "availabilityZones": { - "description": "AvailabilityZones - The list of Availability zones to use for nodes. 
This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "AvailabilityZones" - }, - "count": { - "description": "Required: Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.", - "type": "integer", - "format": "int32", - "x-go-name": "Count" - }, - "enableAutoScaling": { - "description": "EnableAutoScaling - Whether to enable auto-scaler", - "type": "boolean", - "x-go-name": "EnableAutoScaling" - }, - "mode": { - "description": "Mode - Possible values include: 'System', 'User'.", - "type": "string", - "x-go-name": "Mode" - }, - "orchestratorVersion": { - "description": "OrchestratorVersion - As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see [upgrading a node pool](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool).", - "type": "string", - "x-go-name": "OrchestratorVersion" - }, - "osDiskSizeGB": { - "description": "The OSDiskSize for Agent agentpool cannot be less than 30GB or larger than 2048GB.", - "type": "integer", - "format": "int32", - "x-go-name": "OsDiskSizeGB" - }, - "scalingConfig": { - "$ref": "#/definitions/AKSNodegroupScalingConfig" - }, - "vmSize": { - "description": "Required: VMSize - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions", - "type": "string", - "x-go-name": "VMSize" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AgentPoolConfig": { - "type": "object", - "properties": { - "enableNodePublicIP": { - "description": "EnableNodePublicIP - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see [assigning a public IP per node](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools). The default is false.", - "type": "boolean", - "x-go-name": "EnableNodePublicIP" - }, - "maxPods": { - "description": "MaxPods - The maximum number of pods that can run on a node.", - "type": "integer", - "format": "int32", - "x-go-name": "MaxPods" - }, - "maxSurge": { - "description": "MaxSurgeUpgradeSetting - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. 
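`AgentPoolBasics.count` above is bounded differently per pool mode: 0–1000 (inclusive) for user pools, 1–1000 for system pools. The constraint as a small validation helper:

```go
package main

import "fmt"

// validCount applies the count bounds quoted in the AgentPoolBasics definition.
func validCount(mode string, count int32) bool {
	switch mode {
	case "System":
		return count >= 1 && count <= 1000
	case "User":
		return count >= 0 && count <= 1000
	default:
		return false // the spec only names 'System' and 'User' modes
	}
}

func main() {
	fmt.Println(validCount("System", 0)) // false: system pools need at least 1 node
	fmt.Println(validCount("User", 0))   // true: user pools may scale to zero
}
```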
For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade", - "type": "string", - "x-go-name": "MaxSurgeUpgradeSetting" - }, - "osDiskType": { - "description": "OsDiskType - Possible values include: 'Managed', 'Ephemeral'", - "type": "string", - "x-go-name": "OsDiskType" - }, - "osType": { - "description": "OsType - Possible values include: 'Linux', 'Windows'. The default value is 'Linux'.\nWindows node pools are not supported on kubenet clusters", - "type": "string", - "x-go-name": "OsType" - }, - "podSubnetID": { - "description": "PodSubnetID - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", - "type": "string", - "x-go-name": "PodSubnetID" - }, - "vnetSubnetID": { - "description": "VnetSubnetID - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", - "type": "string", - "x-go-name": "VnetSubnetID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AgentPoolOptionalSettings": { - "type": "object", - "properties": { - "nodeLabels": { - "description": "NodeLabels - The node labels to be persisted across all nodes in agent pool.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "NodeLabels" - }, - "nodeTaints": { - "description": "NodeTaints - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.\nPlacing custom taints on system pool is not supported(except 'CriticalAddonsOnly' taint or taint effect is 'PreferNoSchedule'). 
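`maxSurge` above accepts either an integer (`"5"`) or a percentage (`"50%"`), where percentages are taken from the pool size at upgrade time and fractional nodes round up, defaulting to 1. A sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// surgeNodes resolves a maxSurge setting to a node count for a given pool size.
func surgeNodes(maxSurge string, poolSize int) (int, error) {
	if maxSurge == "" {
		return 1, nil // the documented default
	}
	if pct, ok := strings.CutSuffix(maxSurge, "%"); ok {
		p, err := strconv.Atoi(pct)
		if err != nil {
			return 0, err
		}
		return (poolSize*p + 99) / 100, nil // ceil(poolSize * p / 100)
	}
	return strconv.Atoi(maxSurge)
}

func main() {
	n, _ := surgeNodes("50%", 5)
	fmt.Println(n) // 3: 2.5 nodes rounds up
}
```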
Please refer to https://aka.ms/aks/system-taints for detail", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "NodeTaints" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Alertmanager": { - "description": "Alertmanager represents an Alertmanager Configuration", - "type": "object", - "properties": { - "spec": { - "$ref": "#/definitions/AlertmanagerSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AlertmanagerSpec": { - "type": "object", - "properties": { - "config": { - "description": "Config contains the alertmanager configuration in YAML", - "type": "array", - "items": { - "type": "integer", - "format": "uint8" - }, - "x-go-name": "Config" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Alibaba": { - "type": "object", - "properties": { - "accessKeyID": { - "description": "The Access Key ID used to authenticate against Alibaba.", - "type": "string", - "x-go-name": "AccessKeyID" - }, - "accessKeySecret": { - "description": "The Access Key Secret used to authenticate against Alibaba.", - "type": "string", - "x-go-name": "AccessKeySecret" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. 
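`AlertmanagerSpec.config` above is typed as a raw byte array (`uint8` items) carrying the Alertmanager YAML. One plausible way such a payload is produced, shown as a Go sketch (not the KKP client itself): `encoding/json` represents a `[]byte` field as a base64 string.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// AlertmanagerSpec mirrors the definition above: config holds raw YAML bytes.
type AlertmanagerSpec struct {
	Config []byte `json:"config"`
}

func main() {
	yaml := []byte("route:\n  receiver: default\nreceivers:\n- name: default\n")
	out, _ := json.Marshal(AlertmanagerSpec{Config: yaml})
	fmt.Println(string(out)) // {"config":"cm91dGU6..."} - YAML carried as base64
}
```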
Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AlibabaCloudSpec": { - "type": "object", - "title": "AlibabaCloudSpec specifies the access data to Alibaba.", - "properties": { - "accessKeyID": { - "description": "The Access Key ID used to authenticate against Alibaba.", - "type": "string", - "x-go-name": "AccessKeyID" - }, - "accessKeySecret": { - "description": "The Access Key Secret used to authenticate against Alibaba.", - "type": "string", - "x-go-name": "AccessKeySecret" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AlibabaInstanceType": { - "type": "object", - "title": "AlibabaInstanceType represents a object of Alibaba instance type.", - "properties": { - "cpuCoreCount": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUCoreCount" - }, - "gpuCoreCount": { - "type": "integer", - "format": "int64", - "x-go-name": "GPUCoreCount" - }, - "id": { - "type": "string", - "x-go-name": "ID" - }, - "memorySize": { - "type": "number", - "format": "double", - "x-go-name": "MemorySize" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaInstanceTypeList": { - "type": "array", - "title": "AlibabaInstanceTypeList represents an array of Alibaba instance types.", - "items": { - "$ref": "#/definitions/AlibabaInstanceType" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaNodeSpec": { - "description": "AlibabaNodeSpec alibaba specific node settings", - "type": "object", - "properties": { - "diskSize": { - "type": "string", - "x-go-name": "DiskSize" - }, - "diskType": { - "type": "string", - "x-go-name": "DiskType" - }, - "instanceType": { - "type": "string", - "x-go-name": "InstanceType" - }, - "internetMaxBandwidthOut": { - "type": "string", - "x-go-name": "InternetMaxBandwidthOut" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "vSwitchID": { - "type": "string", - "x-go-name": "VSwitchID" - }, - "zoneID": { - "type": "string", - "x-go-name": "ZoneID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaVSwitch": { - "type": "object", - "title": "AlibabaVSwitch represents a object of Alibaba vSwitch.", - "properties": { - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaVSwitchList": { - "type": "array", - "title": "AlibabaVSwitchList represents an array of Alibaba vSwitches.", - "items": { - "$ref": "#/definitions/AlibabaVSwitch" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaZone": { - "type": "object", - "title": "AlibabaZone represents a object of Alibaba zone.", - "properties": { - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AlibabaZoneList": { - "type": "array", - "title": "AlibabaZoneList represents an array of Alibaba zones.", - "items": { - "$ref": "#/definitions/AlibabaZone" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AllowedRegistry": { - "description": "AllowedRegistry represents a object containing a allowed image registry prefix", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/AllowedRegistrySpec" - } - }, - 
"x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AllowedRegistrySpec": { - "type": "object", - "title": "AllowedRegistrySpec specifies the data for allowed registry spec.", - "properties": { - "registryPrefix": { - "description": "RegistryPrefix contains the prefix of the registry which will be allowed. User clusters will be able to deploy\nonly images which are prefixed with one of the allowed image registry prefixes.", - "type": "string", - "x-go-name": "RegistryPrefix" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AmazonLinuxSpec": { - "description": "AmazonLinuxSpec amazon linux specific settings", - "type": "object", - "properties": { - "distUpgradeOnBoot": { - "description": "do a dist-upgrade on boot and reboot it required afterwards", - "type": "boolean", - "x-go-name": "DistUpgradeOnBoot" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Anexia": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "token": { - "description": "Token is used to authenticate with the Anexia API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AnexiaCloudSpec": { - "type": "object", - "title": "AnexiaCloudSpec specifies the access data to Anexia.", - "properties": { - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "token": { - "description": "Token is used to authenticate with the Anexia API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AnexiaDiskConfig": { - "description": "AnexiaDiskConfig defines a single disk for a node at anexia", - "type": "object", - "required": [ - "size" - ], - "properties": { - "performanceType": { - "description": "PerformanceType configures the performance type this disks of each node will have.\nKnown values are something like \"ENT3\" or \"HPC2\".", - "type": "string", - "x-go-name": "PerformanceType" - }, - "size": { - "description": "Disks configures this disk of each node will have.", - "type": "integer", - "format": "int64", - "x-go-name": "Size" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaDiskType": { - "type": "object", - "title": "AnexiaDiskType represents a object of Anexia Disk Type.", - "properties": { - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaDiskTypeList": { - "type": "array", - "title": "AnexiaDiskTypeList represents an array of Anexia Disk Types.", - "items": { - "$ref": "#/definitions/AnexiaDiskType" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaNodeSpec": { - "description": 
"AnexiaNodeSpec anexia specific node settings", - "type": "object", - "required": [ - "vlanID", - "cpus", - "memory" - ], - "properties": { - "cpus": { - "description": "CPUs states how many cpus the node will have.", - "type": "integer", - "format": "int64", - "x-go-name": "CPUs" - }, - "diskSize": { - "description": "DiskSize states the disk size that node will have.\nDeprecated: please use the new Disks attribute instead.", - "type": "integer", - "format": "int64", - "x-go-name": "DiskSize" - }, - "disks": { - "description": "Disks configures the disks each node will have.", - "type": "array", - "items": { - "$ref": "#/definitions/AnexiaDiskConfig" - }, - "x-go-name": "Disks" - }, - "memory": { - "description": "Memory states the memory that node will have.", - "type": "integer", - "format": "int64", - "x-go-name": "Memory" - }, - "template": { - "description": "Template instance template", - "type": "string", - "x-go-name": "Template" - }, - "templateBuild": { - "description": "TemplateBuild instance template", - "type": "string", - "x-go-name": "TemplateBuild" - }, - "templateID": { - "description": "TemplateID instance template", - "type": "string", - "x-go-name": "TemplateID" - }, - "vlanID": { - "description": "VlanID Instance vlanID", - "type": "string", - "x-go-name": "VlanID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaTemplate": { - "type": "object", - "title": "AnexiaTemplate represents a object of Anexia template.", - "properties": { - "build": { - "type": "string", - "x-go-name": "Build" - }, - "id": { - "type": "string", - "x-go-name": "ID" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaTemplateList": { - "type": "array", - "title": "AnexiaTemplateList represents an array of Anexia templates.", - "items": { - "$ref": "#/definitions/AnexiaTemplate" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaVlan": { - "type": "object", - "title": "AnexiaVlan represents a object of Anexia Vlan.", - "properties": { - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnexiaVlanList": { - "type": "array", - "title": "AnexiaVlanList represents an array of Anexia Vlans.", - "items": { - "$ref": "#/definitions/AnexiaVlan" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AnnotationSettings": { - "type": "object", - "title": "AnnotationSettings is the settings for the annotations.", - "properties": { - "hiddenAnnotations": { - "description": "HiddenAnnotations are the annotations that are hidden from the user in the UI.\n+optional", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "HiddenAnnotations" - }, - "protectedAnnotations": { - "description": "ProtectedAnnotations are the annotations that are visible in the UI but cannot be added or modified by the user.\n+optional", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ProtectedAnnotations" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Announcement": { - "type": "object", - "title": "The announcement feature allows administrators to broadcast important messages to all users.", - "properties": { - "createdAt": { - "description": "Timestamp when the announcement was created.", - "type": "string", - "x-go-name": "CreatedAt" - }, - "expires": { - "description": "Expiration date for the announcement.\n+optional", - "type": "string", - "x-go-name": 
"Expires" - }, - "isActive": { - "description": "Indicates whether the announcement is active.", - "type": "boolean", - "x-go-name": "IsActive" - }, - "message": { - "description": "The message content of the announcement.", - "type": "string", - "x-go-name": "Message" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Anything": { - "description": "Anything is a struct wrapper around a field of type `interface{}`\nthat plays nicely with controller-gen\n+kubebuilder:object:generate=false\n+kubebuilder:validation:Type=\"\"", - "type": "object", - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/core/templates" - }, - "AppNamespaceSpec": { - "type": "object", - "title": "AppNamespaceSpec describe the desired state of the namespace where application will be created.", - "properties": { - "annotations": { - "description": "Annotations of the namespace\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "create": { - "description": "Create defines whether the namespace should be created if it does not exist. Defaults to true", - "type": "boolean", - "x-go-name": "Create" - }, - "labels": { - "description": "Labels of the namespace\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name is the namespace to deploy the Application into.\nShould be a valid lowercase RFC1123 domain name", - "type": "string", - "pattern": "=`^(|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)`", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "Application": { - "description": "Application represents a set of applications that are to be installed for the cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ApplicationSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ApplicationDefinition": { - "type": "object", - "title": "ApplicationDefinition is the object representing an ApplicationDefinition.", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ApplicationDefinitionSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationDefinitionBody": { - "description": "ApplicationDefinitionBody is the object representing the POST/PUT payload of an ApplicationDefinition", - "type": "object", - "properties": { - "Spec": { - "$ref": "#/definitions/ApplicationDefinitionSpec" - }, - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationDefinitionListItem": { - "type": "object", - "title": "ApplicationDefinitionListItem is the object representing an ApplicationDefinitionListItem.", - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ApplicationDefinitionListItemSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationDefinitionListItemSpec": { - "type": "object", - "title": "ApplicationDefinitionListItemSpec defines the desired state of ApplicationDefinitionListItemSpec.", - "properties": { - "default": { - "description": "Default specifies if the application should be installed by default when a new user cluster is created. Default applications are\nnot enforced and users can update/delete them. KKP will only install them during cluster creation if the user didn't explicitly\nopt out from installing default applications.", - "type": "boolean", - "x-go-name": "Default" - }, - "defaultVersion": { - "description": "DefaultVersion of the application to use, if not specified the latest available version will be used.", - "type": "string", - "x-go-name": "DefaultVersion" - }, - "description": { - "description": "Description of the application. what is its purpose", - "type": "string", - "x-go-name": "Description" - }, - "displayName": { - "description": "DisplayName is the name for the application that will be displayed in the UI.", - "type": "string", - "x-go-name": "DisplayName" - }, - "documentationURL": { - "description": "DocumentationURL holds a link to official documentation of the Application\nAlternatively this can be a link to the Readme of a chart in a git repository", - "type": "string", - "x-go-name": "DocumentationURL" - }, - "enforced": { - "description": "Enforced specifies if the application is enforced to be installed on the user clusters. Enforced applications are\ninstalled/updated by KKP for the user clusters. Users are not allowed to update/delete them. KKP will revert the changes\ndone by the application to the desired state specified in the ApplicationDefinition.", - "type": "boolean", - "x-go-name": "Enforced" - }, - "logo": { - "description": "Logo of the Application as a base64 encoded svg", - "type": "string", - "x-go-name": "Logo" - }, - "logoFormat": { - "description": "LogoFormat contains logo format of the configured Application. 
Options are \"svg+xml\" and \"png\"\n+kubebuilder:validation:Enum=svg+xml;png", - "type": "string", - "x-go-name": "LogoFormat" - }, - "selector": { - "$ref": "#/definitions/DefaultingSelector" - }, - "sourceURL": { - "description": "SourceURL holds a link to the official source code mirror or git repository of the application", - "type": "string", - "x-go-name": "SourceURL" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationDefinitionSpec": { - "type": "object", - "title": "ApplicationDefinitionSpec defines the desired state of ApplicationDefinition.", - "properties": { - "default": { - "description": "Default specifies if the application should be installed by default when a new user cluster is created. Default applications are\nnot enforced and users can update/delete them. KKP will only install them during cluster creation if the user didn't explicitly\nopt out from installing default applications.\n+optional", - "type": "boolean", - "x-go-name": "Default" - }, - "defaultDeployOptions": { - "$ref": "#/definitions/DeployOptions" - }, - "defaultNamespace": { - "$ref": "#/definitions/AppNamespaceSpec" - }, - "defaultValues": { - "$ref": "#/definitions/RawExtension" - }, - "defaultValuesBlock": { - "description": "DefaultValuesBlock specifies default values for the UI which are passed to helm templating when creating an application. Comments are preserved.", - "type": "string", - "x-go-name": "DefaultValuesBlock" - }, - "defaultVersion": { - "description": "DefaultVersion of the application to use, if not specified the latest available version will be used.\n+optional", - "type": "string", - "x-go-name": "DefaultVersion" - }, - "description": { - "description": "Description of the application. what is its purpose", - "type": "string", - "x-go-name": "Description" - }, - "displayName": { - "description": "DisplayName is the name for the application that will be displayed in the UI.", - "type": "string", - "x-go-name": "DisplayName" - }, - "documentationURL": { - "description": "DocumentationURL holds a link to official documentation of the Application\nAlternatively this can be a link to the Readme of a chart in a git repository", - "type": "string", - "x-go-name": "DocumentationURL" - }, - "enforced": { - "description": "Enforced specifies if the application is enforced to be installed on the user clusters. Enforced applications are\ninstalled/updated by KKP for the user clusters. Users are not allowed to update/delete them. KKP will revert the changes\ndone by the application to the desired state specified in the ApplicationDefinition.\n+optional", - "type": "boolean", - "x-go-name": "Enforced" - }, - "logo": { - "description": "Logo of the Application as a base64 encoded svg", - "type": "string", - "x-go-name": "Logo" - }, - "logoFormat": { - "description": "LogoFormat contains logo format of the configured Application. 
Options are \"svg+xml\" and \"png\"\n+kubebuilder:validation:Enum=svg+xml;png", - "type": "string", - "x-go-name": "LogoFormat" - }, - "method": { - "$ref": "#/definitions/TemplateMethod" - }, - "selector": { - "$ref": "#/definitions/DefaultingSelector" - }, - "sourceURL": { - "description": "SourceURL holds a link to the official source code mirror or git repository of the application", - "type": "string", - "x-go-name": "SourceURL" - }, - "versions": { - "description": "Available version for this application", - "type": "array", - "items": { - "$ref": "#/definitions/ApplicationVersion" - }, - "x-go-name": "Versions" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "ApplicationInstallation": { - "type": "object", - "title": "ApplicationInstallation is the object representing an ApplicationInstallation.", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "type": "string", - "x-go-name": "Namespace" - }, - "spec": { - "$ref": "#/definitions/ApplicationInstallationSpec" - }, - "status": { - "$ref": "#/definitions/ApplicationInstallationStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationBody": { - "description": "ApplicationInstallationBody is the object representing the POST/PUT payload of an ApplicationInstallation", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "type": "string", - "x-go-name": "Namespace" - }, - "spec": { - "$ref": "#/definitions/ApplicationInstallationSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationCondition": { - "type": "object", - "properties": { - "lastHeartbeatTime": { - "description": "Last time we got an update on a given condition.\n+optional", - "type": "string", - "format": "date-time", - "x-go-name": "LastHeartbeatTime" - }, - "lastTransitionTime": { - "description": "Last time the condition transit from one status to another.\n+optional", - "type": "string", - "format": "date-time", - "x-go-name": "LastTransitionTime" - }, - "message": { - "description": "Human readable message indicating details about last transition.", - "type": "string", - "x-go-name": "Message" - }, - "reason": { - "description": "(brief) reason for the condition's last transition.", - "type": "string", - "x-go-name": "Reason" - }, - "status": { - "$ref": "#/definitions/ConditionStatus" - }, - "type": { - "description": "Type of ApplicationInstallation condition.\nManifestsRetrieved ManifestsRetrieved ManifestsRetrieved indicates all necessary manifests have been fetched from the external source.\nReady Ready Ready describes all components have been successfully rolled out and are ready.", - "type": "string", - "enum": [ - "ManifestsRetrieved", - "Ready" - ], - "x-go-enum-desc": "ManifestsRetrieved ManifestsRetrieved ManifestsRetrieved indicates all necessary manifests have been fetched from the external source.\nReady Ready Ready describes all components have been successfully rolled out and are ready.", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationListItem": { - "type": "object", - "title": "ApplicationInstallationListItem is the object representing an ApplicationInstallationListItem.", - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "type": "string", - "x-go-name": "CreationTimestamp" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "type": "string", - "x-go-name": "Namespace" - }, - "spec": { - "$ref": "#/definitions/ApplicationInstallationListItemSpec" - }, - "status": { - "$ref": "#/definitions/ApplicationInstallationListItemStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationListItemSpec": { - "type": "object", - "title": "ApplicationInstallationListItemSpec is the object representing an ApplicationInstallationListItemSpec.", - "properties": { - "applicationRef": { - "$ref": "#/definitions/ApplicationRef" - }, - "namespace": { - "$ref": "#/definitions/NamespaceSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationListItemStatus": { - "type": "object", - "properties": { - "applicationVersion": { - "$ref": "#/definitions/ApplicationVersion" - }, - "conditions": { - "description": "Conditions contains conditions an installation is in, its primary use case 
is status signaling between controllers or between controllers and the API", - "type": "array", - "items": { - "$ref": "#/definitions/ApplicationInstallationCondition" - }, - "x-go-name": "Conditions" - }, - "method": { - "$ref": "#/definitions/TemplateMethod" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationSpec": { - "type": "object", - "properties": { - "applicationRef": { - "$ref": "#/definitions/ApplicationRef" - }, - "deployOptions": { - "$ref": "#/definitions/DeployOptions" - }, - "namespace": { - "$ref": "#/definitions/NamespaceSpec" - }, - "reconciliationInterval": { - "$ref": "#/definitions/Duration" - }, - "values": { - "$ref": "#/definitions/RawExtension" - }, - "valuesBlock": { - "description": "ValuesBlock specifies values overrides that are passed to helm templating. Comments are preserved.", - "type": "string", - "x-go-name": "ValuesBlock" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationInstallationStatus": { - "type": "object", - "title": "ApplicationInstallationStatus is the object representing the status of an Application.", - "properties": { - "applicationVersion": { - "$ref": "#/definitions/ApplicationVersion" - }, - "conditions": { - "description": "Conditions contains conditions an installation is in, its primary use case is status signaling between controllers or between controllers and the API", - "type": "array", - "items": { - "$ref": "#/definitions/ApplicationInstallationCondition" - }, - "x-go-name": "Conditions" - }, - "method": { - "$ref": "#/definitions/TemplateMethod" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationRef": { - "type": "object", - "properties": { - "name": { - "description": "Name of the Application", - "type": "string", - "x-go-name": "Name" - }, - "version": { - "description": "Version of the Application. Must be a valid SemVer version\nNOTE: We are not using Masterminds/semver here, as it keeps data in unexported fields which causes issues for\nDeepEqual used in our reconciliation packages. At the same time, we are not using sdk/semver because\nof the reasons stated in https://github.com/kubermatic/kubermatic/pull/10891.", - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ApplicationSettings": { - "description": "ApplicationSettings defines common settings for applications", - "type": "object", - "properties": { - "defaultNamespace": { - "type": "string", - "x-go-name": "DefaultNamespace" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ApplicationSource": { - "type": "object", - "properties": { - "git": { - "$ref": "#/definitions/GitSource" - }, - "helm": { - "$ref": "#/definitions/HelmSource" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "ApplicationSpec": { - "description": "ApplicationSpec represents the specification for an application", - "type": "object", - "properties": { - "applicationRef": { - "$ref": "#/definitions/ApplicationRef" - }, - "namespace": { - "$ref": "#/definitions/NamespaceSpec" - }, - "values": { - "description": "Values specify values overrides that are passed to helm templating. Comments are not preserved.\nDeprecated: Use ValuesBlock instead.", - "type": "object", - "x-go-name": "Values" - }, - "valuesBlock": { - "description": "ValuesBlock specifies values overrides that are passed to helm templating.
Comments are preserved.", - "type": "string", - "x-go-name": "ValuesBlock" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ApplicationTemplate": { - "type": "object", - "properties": { - "source": { - "$ref": "#/definitions/ApplicationSource" - }, - "templateCredentials": { - "$ref": "#/definitions/DependencyCredentials" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "ApplicationVersion": { - "type": "object", - "properties": { - "template": { - "$ref": "#/definitions/ApplicationTemplate" - }, - "version": { - "description": "Version of the application (e.g. v1.2.3)", - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "AuditLoggingSettings": { - "type": "object", - "title": "AuditLoggingSettings configures audit logging functionality.", - "properties": { - "enabled": { - "description": "Enabled will enable or disable audit logging.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "policyPreset": { - "$ref": "#/definitions/AuditPolicyPreset" - }, - "sidecar": { - "$ref": "#/definitions/AuditSidecarSettings" - }, - "webhookBackend": { - "$ref": "#/definitions/AuditWebhookBackendSettings" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AuditPolicyPreset": { - "description": "AuditPolicyPreset refers to a pre-defined set of audit policy rules. Supported values\nare `metadata`, `recommended` and `minimal`. See KKP documentation for what each policy preset includes.", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AuditSidecarConfiguration": { - "description": "Also see https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file.", - "type": "object", - "title": "AuditSidecarConfiguration defines custom configuration for the fluent-bit sidecar deployed with a kube-apiserver.", - "properties": { - "filters": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "x-go-name": "Filters" - }, - "outputs": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "x-go-name": "Outputs" - }, - "service": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Service" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AuditSidecarSettings": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/AuditSidecarConfiguration" - }, - "extraEnvs": { - "description": "ExtraEnvs are the additional environment variables that can be set for the audit logging sidecar.\nAdditional environment variables can be set and passed to the AuditSidecarConfiguration field\nto allow passing variables to the fluent-bit configuration.\nOnly, `Value` field is supported for the environment variables; `ValueFrom` field is not supported.\nBy default, `CLUSTER_ID` is set as an environment variable in the audit-logging sidecar.", - "type": "array", - "items": { - "$ref": "#/definitions/EnvVar" - }, - "x-go-name": "ExtraEnvs" - }, - "resources": { - "$ref": "#/definitions/ResourceRequirements" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AuditWebhookBackendSettings": { - "type": "object", - "title": "AuditWebhookBackendSettings configures webhook backend for audit logging functionality.", - "properties": { - "auditWebhookConfig": { - "$ref": 
"#/definitions/SecretReference" - }, - "auditWebhookInitialBackoff": { - "description": "+kubebuilder:default=\"10s\"", - "type": "string", - "x-go-name": "AuditWebhookInitialBackoff" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Azure": { - "type": "object", - "properties": { - "clientID": { - "description": "The service principal used to access Azure.", - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "description": "The client secret corresponding to the given service principal.", - "type": "string", - "x-go-name": "ClientSecret" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "loadBalancerSKU": { - "$ref": "#/definitions/LBSKU" - }, - "resourceGroup": { - "description": "The resource group that will be used to look up and create resources for the cluster in.\nIf set to empty string at cluster creation, a new resource group will be created and this field will be updated to\nthe generated resource group's name.", - "type": "string", - "x-go-name": "ResourceGroup" - }, - "routeTable": { - "description": "The name of a route table associated with the subnet referenced by `subnet`.\nIf set to empty string at cluster creation, a new route table will be created and this field will be updated to\nthe generated route table's name. If no subnet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "RouteTableName" - }, - "securityGroup": { - "description": "The name of a security group associated with the subnet referenced by `subnet`.\nIf set to empty string at cluster creation, a new security group will be created and this field will be updated to\nthe generated security group's name. If no subnet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "SecurityGroup" - }, - "subnet": { - "description": "The name of a subnet in the VNet referenced by `vnet`.\nIf set to empty string at cluster creation, a new subnet will be created and this field will be updated to\nthe generated subnet's name. 
If no VNet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "SubnetName" - }, - "subscriptionID": { - "description": "The Azure Subscription used for the user cluster.", - "type": "string", - "x-go-name": "SubscriptionID" - }, - "tenantID": { - "description": "The Azure Active Directory Tenant used for the user cluster.", - "type": "string", - "x-go-name": "TenantID" - }, - "vnet": { - "description": "The name of the VNet resource used for setting up networking in.\nIf set to empty string at cluster creation, a new VNet will be created and this field will be updated to\nthe generated VNet's name.", - "type": "string", - "x-go-name": "VNetName" - }, - "vnetResourceGroup": { - "description": "Optional: Defines a second resource group that will be used for VNet related resources instead.\nIf left empty, NO additional resource group will be created and all VNet related resources use the resource group defined by `resourceGroup`.", - "type": "string", - "x-go-name": "VNetResourceGroup" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AzureAvailabilityZonesList": { - "description": "AzureAvailabilityZonesList is the object representing the availability zones for vms in azure cloud provider", - "type": "object", - "properties": { - "zones": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Zones" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureCloudSpec": { - "type": "object", - "title": "AzureCloudSpec defines cloud resource references for Microsoft Azure.", - "properties": { - "assignAvailabilitySet": { - "description": "Optional: AssignAvailabilitySet determines whether KKP creates and assigns an AvailabilitySet to machines.\nDefaults to `true` internally if not set.", - "type": "boolean", - "x-go-name": "AssignAvailabilitySet" - }, - "availabilitySet": { - "description": "An availability set that will be associated with nodes created for this cluster. If this field is set to empty string\nat cluster creation and `AssignAvailabilitySet` is set to `true`, a new availability set will be created and this field\nwill be updated to the generated availability set's name.", - "type": "string", - "x-go-name": "AvailabilitySet" - }, - "clientID": { - "description": "The service principal used to access Azure.\nCan be read from `credentialsReference` instead.", - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "description": "The client secret corresponding to the given service principal.\nCan be read from `credentialsReference` instead.", - "type": "string", - "x-go-name": "ClientSecret" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "loadBalancerSKU": { - "$ref": "#/definitions/LBSKU" - }, - "nodePortsAllowedIPRange": { - "description": "A CIDR range that will be used to allow access to the node port range in the security group to. 
Only applies if\nthe security group is generated by KKP and not preexisting.\nIf neither NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.", - "type": "string", - "x-go-name": "NodePortsAllowedIPRange" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "resourceGroup": { - "description": "The resource group that will be used to look up and create resources for the cluster in.\nIf set to empty string at cluster creation, a new resource group will be created and this field will be updated to\nthe generated resource group's name.", - "type": "string", - "x-go-name": "ResourceGroup" - }, - "routeTable": { - "description": "The name of a route table associated with the subnet referenced by `subnet`.\nIf set to empty string at cluster creation, a new route table will be created and this field will be updated to\nthe generated route table's name. If no subnet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "RouteTableName" - }, - "securityGroup": { - "description": "The name of a security group associated with the subnet referenced by `subnet`.\nIf set to empty string at cluster creation, a new security group will be created and this field will be updated to\nthe generated security group's name. If no subnet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "SecurityGroup" - }, - "subnet": { - "description": "The name of a subnet in the VNet referenced by `vnet`.\nIf set to empty string at cluster creation, a new subnet will be created and this field will be updated to\nthe generated subnet's name. If no VNet is defined at cluster creation, this field should be empty as well.", - "type": "string", - "x-go-name": "SubnetName" - }, - "subscriptionID": { - "description": "The Azure Subscription used for this cluster.\nCan be read from `credentialsReference` instead.", - "type": "string", - "x-go-name": "SubscriptionID" - }, - "tenantID": { - "description": "The Azure Active Directory Tenant used for this cluster.\nCan be read from `credentialsReference` instead.", - "type": "string", - "x-go-name": "TenantID" - }, - "vnet": { - "description": "The name of the VNet resource used for setting up networking in.\nIf set to empty string at cluster creation, a new VNet will be created and this field will be updated to\nthe generated VNet's name.", - "type": "string", - "x-go-name": "VNetName" - }, - "vnetResourceGroup": { - "description": "Optional: Defines a second resource group that will be used for VNet related resources instead.\nIf left empty, NO additional resource group will be created and all VNet related resources use the resource group defined by `resourceGroup`.", - "type": "string", - "x-go-name": "VNetResourceGroup" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "AzureNodeSpec": { - "description": "AzureNodeSpec describes settings for an Azure node", - "type": "object", - "required": [ - "size" - ], - "properties": { - "assignAvailabilitySet": { - "description": "AssignAvailabilitySet is used to check if an availability set should be created and assigned to the cluster.", - "type": "boolean", - "x-go-name": "AssignAvailabilitySet" - }, - "assignPublicIP": { - "description": "should the machine have a publicly accessible IP address", - "type": "boolean", - "x-go-name": "AssignPublicIP" - }, - "dataDiskSize": { - "description": "Data disk size in GB", - "type":
"integer", - "format": "int32", - "x-go-name": "DataDiskSize" - }, - "enableAcceleratedNetworking": { - "description": "EnableAcceleratedNetworking is used to check if an accelerating networking should be used for azure vms.", - "type": "boolean", - "x-go-name": "EnableAcceleratedNetworking" - }, - "imageID": { - "description": "ImageID represents the ID of the image that should be used to run the node", - "type": "string", - "x-go-name": "ImageID" - }, - "osDiskSize": { - "description": "OS disk size in GB", - "type": "integer", - "format": "int32", - "x-go-name": "OSDiskSize" - }, - "size": { - "description": "VM size", - "type": "string", - "x-go-name": "Size" - }, - "tags": { - "description": "Additional metadata to set", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "zones": { - "description": "Zones represents the availability zones for azure vms", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Zones" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureResourceGroup": { - "type": "object", - "title": "AzureResourceGroup represents an object of Azure ResourceGroup information.", - "properties": { - "name": { - "description": "The name of the resource group.", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AzureResourceGroupList": { - "type": "array", - "title": "AzureResourceGroupList represents an list of AKS ResourceGroups.", - "items": { - "$ref": "#/definitions/AzureResourceGroup" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "AzureResourceGroupsList": { - "description": "AzureResourceGroupsList is the object representing the resource groups for vms in azure cloud provider", - "type": "object", - "properties": { - "resourceGroups": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ResourceGroups" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureRouteTablesList": { - "description": "AzureRouteTablesList is the object representing the route tables for vms in azure cloud provider", - "type": "object", - "properties": { - "routeTables": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "RouteTables" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureSecurityGroupsList": { - "description": "AzureSecurityGroupsList is the object representing the security groups for vms in azure cloud provider", - "type": "object", - "properties": { - "securityGroups": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "SecurityGroups" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureSize": { - "type": "object", - "title": "AzureSize is the object representing Azure VM sizes.", - "properties": { - "acceleratedNetworkingEnabled": { - "type": "boolean", - "x-go-name": "AcceleratedNetworkingEnabled" - }, - "maxDataDiskCount": { - "type": "integer", - "format": "int32", - "x-go-name": "MaxDataDiskCount" - }, - "memoryInMB": { - "type": "integer", - "format": "int32", - "x-go-name": "MemoryInMB" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "numberOfCores": { - "type": "integer", - "format": "int32", - "x-go-name": "NumberOfCores" - }, - "numberOfGPUs": { - "type": "integer", - "format": "int32", - "x-go-name": "NumberOfGPUs" - }, - "osDiskSizeInMB": { - "type": "integer", - "format": "int32", - "x-go-name": "OsDiskSizeInMB" - }, - "resourceDiskSizeInMB": { - "type": 
"integer", - "format": "int32", - "x-go-name": "ResourceDiskSizeInMB" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureSizeList": { - "type": "array", - "title": "AzureSizeList represents an array of Azure VM sizes.", - "items": { - "$ref": "#/definitions/AzureSize" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureSubnetsList": { - "description": "AzureSubnetsList is the object representing the subnets for vms in azure cloud provider", - "type": "object", - "properties": { - "subnets": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Subnets" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "AzureVirtualNetworksList": { - "description": "AzureVirtualNetworksList is the object representing the virtual network for vms in azure cloud provider", - "type": "object", - "properties": { - "virtualNetworks": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "VirtualNetworks" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "BSLBody": { - "type": "object", - "properties": { - "bslSpec": { - "$ref": "#/definitions/BackupStorageLocationSpec" - }, - "cbslName": { - "type": "string", - "x-go-name": "CBSLName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/clusterbackup/backupstoragelocation" - }, - "BackupConfig": { - "type": "object", - "properties": { - "backupStorageLocation": { - "$ref": "#/definitions/LocalObjectReference" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BackupCredentials": { - "description": "BackupCredentials contains credentials for etcd backups", - "type": "object", - "properties": { - "destination": { - "description": "Destination corresponds to the Seeds Seed.Spec.EtcdBackupRestore.Destinations, it defines for which destination\nthe backup credentials will be created. 
If set, it updates the credentials ref in the related Seed BackupDestination", - "type": "string", - "x-go-name": "Destination" - }, - "s3": { - "$ref": "#/definitions/S3BackupCredentials" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupDestination": { - "type": "object", - "title": "BackupDestination defines the bucket name and endpoint as a backup destination, and holds reference to the credentials secret.", - "properties": { - "bucketName": { - "description": "BucketName is the bucket name to use for backup and restore.", - "type": "string", - "x-go-name": "BucketName" - }, - "credentials": { - "$ref": "#/definitions/SecretReference" - }, - "endpoint": { - "description": "Endpoint is the API endpoint to use for backup and restore.", - "type": "string", - "x-go-name": "Endpoint" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BackupDestinationNames": { - "type": "array", - "title": "BackupDestinationNames represents a list of backup destination names.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupDownloadUrl": { - "type": "object", - "title": "BackupDownloadUrl is the object representing a Backup Download URL.", - "properties": { - "downloadURL": { - "type": "string", - "x-go-name": "DownloadURL" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStatus": { - "type": "object", - "properties": { - "backupFinishedTime": { - "type": "string", - "format": "date-time", - "x-go-name": "BackupFinishedTime" - }, - "backupMessage": { - "type": "string", - "x-go-name": "BackupMessage" - }, - "backupName": { - "type": "string", - "x-go-name": "BackupName" - }, - "backupPhase": { - "$ref": "#/definitions/BackupStatusPhase" - }, - "backupStartTime": { - "type": "string", - "format": "date-time", - "x-go-name": "BackupStartTime" - }, - "deleteFinishedTime": { - "type": "string", - "format": "date-time", - "x-go-name": "DeleteFinishedTime" - }, - "deleteJobName": { - "type": "string", - "x-go-name": "DeleteJobName" - }, - "deleteMessage": { - "type": "string", - "x-go-name": "DeleteMessage" - }, - "deletePhase": { - "$ref": "#/definitions/BackupStatusPhase" - }, - "deleteStartTime": { - "type": "string", - "format": "date-time", - "x-go-name": "DeleteStartTime" - }, - "jobName": { - "type": "string", - "x-go-name": "JobName" - }, - "scheduledTime": { - "description": "ScheduledTime will always be set when the BackupStatus is created, so it'll never be nil", - "type": "string", - "format": "date-time", - "x-go-name": "ScheduledTime" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStatusPhase": { - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BackupStorageLocation": { - "type": "object", - "title": "BackupStorageLocation is the object representing a Backup Storage Location.", - "properties": { - "cbslName": { - "type": "string", - "x-go-name": "CBSLName" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/BackupStorageLocationSpec" - }, - "status": { - "$ref": "#/definitions/BackupStorageLocationStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStorageLocationAccessMode": { - "description": "+kubebuilder:validation:Enum=ReadOnly;ReadWrite", - "type": "string", - "title": "BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation.", - "x-go-package":
"github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "BackupStorageLocationBucketObject": { - "type": "object", - "title": "BackupStorageLocationBucketObject represents a S3 object of Backup Storage Location Bucket.", - "properties": { - "key": { - "type": "string", - "x-go-name": "Key" - }, - "size": { - "type": "integer", - "format": "int64", - "x-go-name": "Size" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStorageLocationBucketObjectList": { - "type": "array", - "title": "BackupStorageLocationBucketObjectList represents an array of Backup Storage Location Bucket Objects.", - "items": { - "$ref": "#/definitions/BackupStorageLocationBucketObject" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStorageLocationList": { - "type": "object", - "title": "BackupStorageLocationList is the list of object representing a Backup Storage Location overview.", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/BackupStorageLocationOverview" - }, - "x-go-name": "Items" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStorageLocationOverview": { - "type": "object", - "title": "BackupStorageLocationOverview is the object representing a backup storage location with essential fields only for list views.", - "properties": { - "cbslName": { - "type": "string", - "x-go-name": "CBSLName" - }, - "creationTime": { - "type": "string", - "format": "date-time", - "x-go-name": "CreationDate" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "prefix": { - "type": "string", - "x-go-name": "Prefix" - }, - "region": { - "type": "string", - "x-go-name": "Region" - }, - "status": { - "$ref": "#/definitions/BackupStorageLocationStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "BackupStorageLocationPhase": { - "description": "+kubebuilder:validation:Enum=Available;Unavailable\n+kubebuilder:default=Unavailable", - "type": "string", - "title": "BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.", - "x-go-package": "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "BackupStorageLocationSpec": { - "description": "BackupStorageLocationSpec defines the desired state of a Velero BackupStorageLocation", - "type": "object", - "properties": { - "accessMode": { - "$ref": "#/definitions/BackupStorageLocationAccessMode" - }, - "backupSyncPeriod": { - "$ref": "#/definitions/Duration" - }, - "config": { - "description": "Config is for provider-specific configuration fields.\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Config" - }, - "credential": { - "$ref": "#/definitions/SecretKeySelector" - }, - "default": { - "description": "Default indicates this location is the default backup storage location.\n+optional", - "type": "boolean", - "x-go-name": "Default" - }, - "objectStorage": { - "$ref": "#/definitions/ObjectStorageLocation" - }, - "provider": { - "description": "Provider is the provider of the backup storage.", - "type": "string", - "x-go-name": "Provider" - }, - "validationFrequency": { - "$ref": "#/definitions/Duration" - } - }, - "x-go-package": "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "BackupStorageLocationStatus": { - "description": "BackupStorageLocationStatus defines the observed state of BackupStorageLocation", - "type": "object", - "properties": { - "accessMode": { - "$ref": "#/definitions/BackupStorageLocationAccessMode" - }, - "lastSyncedRevision": { 
- "$ref": "#/definitions/UID" - }, - "lastSyncedTime": { - "description": "LastSyncedTime is the last time the contents of the location were synced into\nthe cluster.\n+optional\n+nullable", - "type": "string", - "x-go-name": "LastSyncedTime" - }, - "lastValidationTime": { - "description": "LastValidationTime is the last time the backup store location was validated\nthe cluster.\n+optional\n+nullable", - "type": "string", - "x-go-name": "LastValidationTime" - }, - "message": { - "description": "Message is a message about the backup storage location's status.\n+optional", - "type": "string", - "x-go-name": "Message" - }, - "phase": { - "$ref": "#/definitions/BackupStorageLocationPhase" - } - }, - "x-go-package": "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "Baremetal": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "tinkerbell": { - "$ref": "#/definitions/Tinkerbell" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BaremetalCloudSpec": { - "type": "object", - "title": "BaremetalCloudSpec specifies access data for a baremetal cluster.", - "properties": { - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "tinkerbell": { - "$ref": "#/definitions/TinkerbellCloudSpec" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BaremetalNodeSpec": { - "description": "BaremetalNodeSpec baremetal specific node settings", - "type": "object", - "properties": { - "tinkerbell": { - "$ref": "#/definitions/TinkerbellNodeSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "BringYourOwnCloudSpec": { - "type": "object", - "title": "BringYourOwnCloudSpec specifies access data for a bring your own cluster.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "BringYourOwnSpec": { - "type": "object", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ByPodStatus": { - "description": "ByPodStatus defines the observed state of ConstraintTemplate as seen by\nan individual controller\n+kubebuilder:pruning:PreserveUnknownFields", - "type": "object", - "properties": { - "errors": { - "type": "array", - "items": { - "$ref": "#/definitions/CreateCRDError" - }, - "x-go-name": "Errors" - }, - "id": { - "description": "a unique identifier for the pod that wrote the status", - "type": "string", - "x-go-name": "ID" - }, - "observedGeneration": { - "type": "integer", - "format": "int64", - "x-go-name": "ObservedGeneration" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "CNIPluginSettings": { - "type": "object", - "title": "CNIPluginSettings contains the spec of the CNI plugin used by the Cluster.", - "properties": { - "type": { 
- "$ref": "#/definitions/CNIPluginType" - }, - "version": { - "description": "Version defines the CNI plugin version to be used. This varies by chosen CNI plugin type.", - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "CNIPluginType": { - "description": "Possible values are `canal`, `cilium` or `none`.", - "type": "string", - "title": "CNIPluginType defines the type of CNI plugin installed.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "CNIVersions": { - "description": "CNIVersions is a list of versions for a CNI Plugin", - "type": "object", - "properties": { - "cniDefaultVersion": { - "description": "CNIDefaultVersion represents the default CNI Plugin version", - "type": "string", - "x-go-name": "CNIDefaultVersion" - }, - "cniPluginType": { - "description": "CNIPluginType represents the type of the CNI Plugin", - "type": "string", - "x-go-name": "CNIPluginType" - }, - "versions": { - "description": "Versions represents the list of the CNI Plugin versions that are supported", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Versions" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "CRD": { - "type": "object", - "properties": { - "spec": { - "$ref": "#/definitions/CRDSpec" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "CRDSpec": { - "type": "object", - "properties": { - "names": { - "$ref": "#/definitions/Names" - }, - "validation": { - "$ref": "#/definitions/Validation" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "CbslBody": { - "type": "object", - "properties": { - "cbslSpec": { - "$ref": "#/definitions/BackupStorageLocationSpec" - }, - "credentials": { - "$ref": "#/definitions/S3BackupCredentials" - }, - "name": { - "description": "Name of the cluster backup", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/clusterbackup/storage-location" - }, - "CleanupOptions": { - "type": "object", - "properties": { - "enabled": { - "description": "Enable checkboxes that allow the user to ask for LoadBalancers and PVCs\nto be deleted in order to not leave potentially expensive resources behind.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "enforced": { - "description": "If enforced is set to true, the cleanup of LoadBalancers and PVCs is\nenforced.", - "type": "boolean", - "x-go-name": "Enforced" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "CloudSpec": { - "type": "object", - "title": "CloudSpec stores configuration options for a given cloud provider. 
Provider specs are mutually exclusive.", - "properties": { - "alibaba": { - "$ref": "#/definitions/AlibabaCloudSpec" - }, - "anexia": { - "$ref": "#/definitions/AnexiaCloudSpec" - }, - "aws": { - "$ref": "#/definitions/AWSCloudSpec" - }, - "azure": { - "$ref": "#/definitions/AzureCloudSpec" - }, - "baremetal": { - "$ref": "#/definitions/BaremetalCloudSpec" - }, - "bringyourown": { - "$ref": "#/definitions/BringYourOwnCloudSpec" - }, - "dc": { - "description": "DatacenterName states the name of a cloud provider \"datacenter\" (defined in `Seed` resources)\nthis cluster should be deployed into.", - "type": "string", - "x-go-name": "DatacenterName" - }, - "digitalocean": { - "$ref": "#/definitions/DigitaloceanCloudSpec" - }, - "edge": { - "$ref": "#/definitions/EdgeCloudSpec" - }, - "fake": { - "$ref": "#/definitions/FakeCloudSpec" - }, - "gcp": { - "$ref": "#/definitions/GCPCloudSpec" - }, - "hetzner": { - "$ref": "#/definitions/HetznerCloudSpec" - }, - "kubevirt": { - "$ref": "#/definitions/KubevirtCloudSpec" - }, - "nutanix": { - "$ref": "#/definitions/NutanixCloudSpec" - }, - "openstack": { - "$ref": "#/definitions/OpenstackCloudSpec" - }, - "packet": { - "$ref": "#/definitions/PacketCloudSpec" - }, - "providerName": { - "description": "ProviderName is the name of the cloud provider used for this cluster.\nThis must match the given provider spec (e.g. if the providerName is\n\"aws\", then the `aws` field must be set).", - "type": "string", - "x-go-name": "ProviderName" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/VMwareCloudDirectorCloudSpec" - }, - "vsphere": { - "$ref": "#/definitions/VSphereCloudSpec" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Cluster": { - "description": "Note:\nCluster has a custom MarshalJSON method defined\nand thus the output may vary", - "type": "object", - "title": "Cluster defines the cluster resource", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "credential": { - "type": "string", - "x-go-name": "Credential" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "inheritedLabels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "InheritedLabels" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "machineDeploymentCount": { - "type": "integer", - "format": "int64", - "x-go-name": "MachineDeploymentCount" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ClusterSpec" - }, - "status": { - "$ref": "#/definitions/ClusterStatus" - }, - "type": { - "description": "Type is deprecated and not used anymore.", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterAssociation": { - "description": "ClusterAssociation shows cluster details using a preset", - "type": "object", - "properties": { - "clusterId": { - "type": "string", - "x-go-name": "ClusterID" - }, - "clusterName": { - "type": "string", - "x-go-name": "ClusterName" - }, - "projectId": { - "type": "string", - "x-go-name": "ProjectID" - }, - "projectName": { - "type": "string", - "x-go-name": "ProjectName" - }, - "provider": { - "type": "string", - "x-go-name": "Provider" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ClusterBackupOptions": { - "type": "object", - "properties": { - "defaultChecksumAlgorithm": { - "description": "DefaultChecksumAlgorithm allows setting a default checksum algorithm used by Velero for uploading objects to S3.\n\nOptional", - "type": "string", - "x-go-name": "DefaultChecksumAlgorithm" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ClusterBackupStorageLocation": { - "type": "object", - "title": "ClusterBackupStorageLocation is the object representing a Cluster Backup Storage Location.", - "properties": { - "displayName": { - "type": "string", - "x-go-name": "DisplayName" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/BackupStorageLocationSpec" - }, - "status": { - "$ref": "#/definitions/BackupStorageLocationStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ClusterHealth": { - "type": "object", - "title": "ClusterHealth stores health information about the cluster's components.", - "properties": { - "alertmanagerConfig": { - "$ref": "#/definitions/HealthStatus" - }, - "apiserver": { - "$ref": "#/definitions/HealthStatus" - }, - "applicationController": { - "$ref": "#/definitions/HealthStatus" - }, - "cloudProviderInfrastructure": { - "$ref": "#/definitions/HealthStatus" - }, - "controller": { - "$ref": "#/definitions/HealthStatus" - }, - "etcd": { - "$ref": "#/definitions/HealthStatus" - }, - "gatekeeperAudit": { - "$ref": "#/definitions/HealthStatus" - }, - "gatekeeperController": { - "$ref": "#/definitions/HealthStatus" - }, - "kubelb": { - "$ref": "#/definitions/HealthStatus" - }, - "kubernetesDashboard": { - "$ref": "#/definitions/HealthStatus" - }, - "kyverno": { - "$ref": "#/definitions/HealthStatus" - }, - "logging": { - "$ref": "#/definitions/HealthStatus" - }, - "machineController": { - "$ref": "#/definitions/HealthStatus" - }, - "mlaGateway": { - "$ref": "#/definitions/HealthStatus" - }, - "monitoring": { - "$ref": "#/definitions/HealthStatus" - }, - "operatingSystemManager": { - "$ref": "#/definitions/HealthStatus" - }, - "scheduler": { - "$ref": "#/definitions/HealthStatus" - 
}, - "userClusterControllerManager": { - "$ref": "#/definitions/HealthStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterList": { - "description": "ClusterList represents a list of clusters", - "type": "array", - "items": { - "$ref": "#/definitions/Cluster" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterMetrics": { - "description": "ClusterMetrics defines a metric for the given cluster", - "type": "object", - "properties": { - "controlPlane": { - "$ref": "#/definitions/ControlPlaneMetrics" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "nodes": { - "$ref": "#/definitions/NodesMetric" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterNetworkingConfig": { - "description": "ClusterNetworkingConfig specifies the different networking\nparameters for a cluster.", - "type": "object", - "properties": { - "coreDNSReplicas": { - "description": "CoreDNSReplicas is the number of desired pods of user cluster coredns deployment.\nDeprecated: This field should not be used anymore, use cluster.componentsOverride.coreDNS.replicas\ninstead. Only one of the two fields can be set at any time.", - "type": "integer", - "format": "int32", - "x-go-name": "CoreDNSReplicas" - }, - "dnsDomain": { - "description": "Domain name for services.", - "type": "string", - "x-go-name": "DNSDomain" - }, - "ipFamily": { - "$ref": "#/definitions/IPFamily" - }, - "ipvs": { - "$ref": "#/definitions/IPVSConfiguration" - }, - "konnectivityEnabled": { - "description": "Deprecated: KonnectivityEnabled enables konnectivity for controlplane to node network communication.\nKonnectivity is the only supported choice for controlplane to node network communication. This field is\ndefaulted to true and setting it to false is rejected. It will be removed in a future release.", - "type": "boolean", - "x-go-name": "KonnectivityEnabled" - }, - "nodeCidrMaskSizeIPv4": { - "description": "NodeCIDRMaskSizeIPv4 is the mask size used to address the nodes within provided IPv4 Pods CIDR.\nIt has to be larger than the provided IPv4 Pods CIDR. Defaults to 24.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "NodeCIDRMaskSizeIPv4" - }, - "nodeCidrMaskSizeIPv6": { - "description": "NodeCIDRMaskSizeIPv6 is the mask size used to address the nodes within provided IPv6 Pods CIDR.\nIt has to be larger than the provided IPv6 Pods CIDR. Defaults to 64.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "NodeCIDRMaskSizeIPv6" - }, - "nodeLocalDNSCacheEnabled": { - "description": "NodeLocalDNSCacheEnabled controls whether the NodeLocal DNS Cache feature is enabled.\nDefaults to true.", - "type": "boolean", - "x-go-name": "NodeLocalDNSCacheEnabled" - }, - "pods": { - "$ref": "#/definitions/NetworkRanges" - }, - "proxyMode": { - "description": "ProxyMode defines the kube-proxy mode (\"ipvs\" / \"iptables\" / \"ebpf\").\nDefaults to \"ipvs\". 
\"ebpf\" disables kube-proxy and requires CNI support.", - "type": "string", - "x-go-name": "ProxyMode" - }, - "services": { - "$ref": "#/definitions/NetworkRanges" - }, - "tunnelingAgentIP": { - "description": "TunnelingAgentIP is the address used by the tunneling agents", - "type": "string", - "x-go-name": "TunnelingAgentIP" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ClusterRole": { - "description": "ClusterRole defines cluster RBAC role for the user cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "rules": { - "description": "Rules holds all the PolicyRules for this ClusterRole", - "type": "array", - "items": { - "$ref": "#/definitions/PolicyRule" - }, - "x-go-name": "Rules" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterRoleBinding": { - "type": "object", - "title": "ClusterRoleBinding references a cluster role, but does not contain it.", - "properties": { - "roleRefName": { - "type": "string", - "x-go-name": "RoleRefName" - }, - "subjects": { - "description": "Subjects holds references to the objects the role applies to.", - "type": "array", - "items": { - "$ref": "#/definitions/Subject" - }, - "x-go-name": "Subjects" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterRoleName": { - "description": "ClusterRoleName defines RBAC cluster role name object for the user cluster", - "type": "object", - "properties": { - "name": { - "description": "Name of the cluster role.", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterRoleUser": { - "description": "ClusterRoleUser defines associated user with cluster role", - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "serviceAccount": { - "type": "string", - "x-go-name": "ServiceAccount" - }, - "serviceAccountNamespace": { - "type": "string", - "x-go-name": "ServiceAccountNamespace" - }, - "userEmail": { - "type": "string", - "x-go-name": "UserEmail" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ClusterServiceAccount": { - "type": "object", - "title": "ClusterServiceAccount represent a k8s service account to access cluster.", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - 
"x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Namespace is the namespace where the service account lives.", - "type": "string", - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ClusterSpec": { - "type": "object", - "title": "ClusterSpec defines the cluster specification.", - "properties": { - "admissionPlugins": { - "description": "Additional Admission Controller plugins", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "AdmissionPlugins" - }, - "apiServerAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "auditLogging": { - "$ref": "#/definitions/AuditLoggingSettings" - }, - "backupConfig": { - "$ref": "#/definitions/BackupConfig" - }, - "cloud": { - "$ref": "#/definitions/CloudSpec" - }, - "clusterNetwork": { - "$ref": "#/definitions/ClusterNetworkingConfig" - }, - "cniPlugin": { - "$ref": "#/definitions/CNIPluginSettings" - }, - "containerRuntime": { - "description": "ContainerRuntime to use, i.e. Docker or containerd. By default containerd will be used.", - "type": "string", - "x-go-name": "ContainerRuntime" - }, - "disableCsiDriver": { - "description": "Optional: DisableCSIDriver disables the installation of CSI driver on the cluster\nIf this is true at the data center then it can't be over-written in the cluster configuration", - "type": "boolean", - "x-go-name": "DisableCSIDriver" - }, - "enableUserSSHKeyAgent": { - "description": "EnableUserSSHKeyAgent control whether the UserSSHKeyAgent will be deployed in the user cluster or not.\nIf it was enabled, the agent will be deployed and used to sync the user ssh keys, that the user attach\nto the created cluster. If the agent was disabled, it won't be deployed in the user cluster, thus after\nthe cluster creation any attached ssh keys won't be synced to the worker nodes. 
-          "type": "boolean",
-          "x-go-name": "EnableUserSSHKeyAgent"
-        },
-        "eventRateLimitConfig": {
-          "$ref": "#/definitions/EventRateLimitConfig"
-        },
-        "exposeStrategy": {
-          "$ref": "#/definitions/ExposeStrategy"
-        },
-        "kubelb": {
-          "$ref": "#/definitions/KubeLB"
-        },
-        "kubernetesDashboard": {
-          "$ref": "#/definitions/KubernetesDashboard"
-        },
-        "kyverno": {
-          "$ref": "#/definitions/KyvernoSettings"
-        },
-        "machineNetworks": {
-          "description": "MachineNetworks optionally specifies the parameters for IPAM.",
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/MachineNetworkingConfig"
-          },
-          "x-go-name": "MachineNetworks"
-        },
-        "mla": {
-          "$ref": "#/definitions/MLASettings"
-        },
-        "oidc": {
-          "$ref": "#/definitions/OIDCSettings"
-        },
-        "opaIntegration": {
-          "$ref": "#/definitions/OPAIntegrationSettings"
-        },
-        "podNodeSelectorAdmissionPluginConfig": {
-          "description": "PodNodeSelectorAdmissionPluginConfig provides the configuration for the PodNodeSelector.\nIt's used by the backend to create a configuration file for this plugin.\nThe key:value from the map is converted to the namespace:\u003cnode-selectors-labels\u003e in the file.\nThe format in a file:\npodNodeSelectorPluginConfig:\nclusterDefaultNodeSelector: \u003cnode-selectors-labels\u003e\nnamespace1: \u003cnode-selectors-labels\u003e\nnamespace2: \u003cnode-selectors-labels\u003e",
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "PodNodeSelectorAdmissionPluginConfig"
-        },
-        "serviceAccount": {
-          "$ref": "#/definitions/ServiceAccountSettings"
-        },
-        "updateWindow": {
-          "$ref": "#/definitions/UpdateWindow"
-        },
-        "useEventRateLimitAdmissionPlugin": {
-          "description": "If active the EventRateLimit admission plugin is configured at the apiserver",
-          "type": "boolean",
-          "x-go-name": "UseEventRateLimitAdmissionPlugin"
-        },
-        "usePodNodeSelectorAdmissionPlugin": {
-          "description": "If active the PodNodeSelector admission plugin is configured at the apiserver",
-          "type": "boolean",
-          "x-go-name": "UsePodNodeSelectorAdmissionPlugin"
-        },
-        "usePodSecurityPolicyAdmissionPlugin": {
-          "description": "If active the PodSecurityPolicy admission plugin is configured at the apiserver",
-          "type": "boolean",
-          "x-go-name": "UsePodSecurityPolicyAdmissionPlugin"
-        },
-        "version": {
-          "$ref": "#/definitions/Semver"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "ClusterStatus": {
-      "type": "object",
-      "title": "ClusterStatus defines the cluster status.",
-      "properties": {
-        "externalCCMMigration": {
-          "$ref": "#/definitions/ExternalCCMMigrationStatus"
-        },
-        "url": {
-          "description": "URL specifies the address at which the cluster is available",
-          "type": "string",
-          "x-go-name": "URL"
-        },
-        "version": {
-          "$ref": "#/definitions/Semver"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "ClusterTemplate": {
-      "description": "ClusterTemplate represents a ClusterTemplate object",
-      "type": "object",
-      "properties": {
-        "annotations": {
-          "description": "Annotations that can be added to the resource",
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "Annotations"
-        },
-        "applications": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/Application"
-          },
-          "x-go-name": "Applications"
-        },
-        "cluster": {
-          "$ref": "#/definitions/ClusterTemplateInfo"
-        },
-        "creationTimestamp": {
-          "description": "CreationTimestamp is a timestamp representing the server time when this object was created.",
-          "type": "string",
-          "format": "date-time",
-          "x-go-name": "CreationTimestamp"
-        },
-        "deletionTimestamp": {
-          "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.",
-          "type": "string",
-          "format": "date-time",
-          "x-go-name": "DeletionTimestamp"
-        },
-        "id": {
-          "description": "ID unique value that identifies the resource generated by the server. Read-Only.",
-          "type": "string",
-          "x-go-name": "ID"
-        },
-        "name": {
-          "description": "Name represents human readable name for the resource",
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "nodeDeployment": {
-          "$ref": "#/definitions/ClusterTemplateNodeDeployment"
-        },
-        "projectID": {
-          "type": "string",
-          "x-go-name": "ProjectID"
-        },
-        "scope": {
-          "type": "string",
-          "x-go-name": "Scope"
-        },
-        "user": {
-          "type": "string",
-          "x-go-name": "User"
-        },
-        "userSshKeys": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/ClusterTemplateSSHKey"
-          },
-          "x-go-name": "UserSSHKeys"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ClusterTemplateAssociation": {
-      "description": "ClusterTemplateAssociation shows cluster template details using a preset",
-      "type": "object",
-      "properties": {
-        "projectId": {
-          "type": "string",
-          "x-go-name": "ProjectID"
-        },
-        "projectName": {
-          "type": "string",
-          "x-go-name": "ProjectName"
-        },
-        "provider": {
-          "type": "string",
-          "x-go-name": "Provider"
-        },
-        "templateId": {
-          "type": "string",
-          "x-go-name": "TemplateID"
-        },
-        "templateName": {
-          "type": "string",
-          "x-go-name": "TemplateName"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ClusterTemplateInfo": {
-      "type": "object",
-      "title": "ClusterTemplateInfo represents a ClusterTemplateInfo object.",
-      "properties": {
-        "annotations": {
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "Annotations"
-        },
-        "credential": {
-          "description": "indicates the preset name",
-          "type": "string",
-          "x-go-name": "Credential"
-        },
-        "inheritedLabels": {
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "InheritedLabels"
-        },
-        "labels": {
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "Labels"
-        },
-        "spec": {
-          "$ref": "#/definitions/ClusterSpec"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ClusterTemplateInstance": {
-      "description": "ClusterTemplateInstance represents a ClusterTemplateInstance object",
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "spec": {
-          "$ref": "#/definitions/ClusterTemplateInstanceSpec"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ClusterTemplateInstanceSpec": {
-      "type": "object",
-      "title": "ClusterTemplateInstanceSpec specifies the data for cluster instances.",
-      "properties": {
-        "clusterTemplateID": {
-          "type": "string",
-          "x-go-name": "ClusterTemplateID"
-        },
-        "clusterTemplateName": {
-          "type": "string",
-          "x-go-name": "ClusterTemplateName"
-        },
-        "projectID": {
-          "type": "string",
-          "x-go-name": "ProjectID"
-        },
-        "replicas": {
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "Replicas"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "ClusterTemplateList": {
-      "description": "ClusterTemplateList represents a ClusterTemplate list",
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/ClusterTemplate"
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
"k8c.io/dashboard/v2/pkg/api/v2" - }, - "ClusterTemplateNodeDeployment": { - "type": "object", - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "spec": { - "$ref": "#/definitions/NodeDeploymentSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ClusterTemplateSSHKey": { - "description": "ClusterTemplateSSHKey represents SSH Key object for Cluster Template", - "type": "object", - "properties": { - "id": { - "type": "string", - "x-go-name": "ID" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Code": { - "type": "object", - "properties": { - "engine": { - "type": "string", - "x-go-name": "Engine", - "example": "\"Rego\". Required." - }, - "source": { - "$ref": "#/definitions/Anything" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "Condition": { - "description": "This struct is intended for direct use as an array at the field path .status.conditions. For example,\n\ntype FooStatus struct{\nRepresents the observations of a foo's current state.\nKnown .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n+patchMergeKey=type\n+patchStrategy=merge\n+listType=map\n+listMapKey=type\nConditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\nother fields\n}", - "type": "object", - "title": "Condition contains details for one aspect of the current state of this API Resource.", - "properties": { - "lastTransitionTime": { - "description": "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. 
-          "type": "string",
-          "x-go-name": "LastTransitionTime"
-        },
-        "message": {
-          "description": "message is a human readable message indicating details about the transition.\nThis may be an empty string.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=32768",
-          "type": "string",
-          "x-go-name": "Message"
-        },
-        "observedGeneration": {
-          "description": "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance.\n+optional\n+kubebuilder:validation:Minimum=0",
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "ObservedGeneration"
-        },
-        "reason": {
-          "description": "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty.\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:MaxLength=1024\n+kubebuilder:validation:MinLength=1\n+kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`",
-          "type": "string",
-          "x-go-name": "Reason"
-        },
-        "status": {
-          "$ref": "#/definitions/ConditionStatus"
-        },
-        "type": {
-          "description": "type of condition in CamelCase or in foo.example.com/CamelCase.\n\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+required\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316",
-          "type": "string",
-          "x-go-name": "Type"
-        }
-      },
-      "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1"
-    },
-    "ConditionStatus": {
-      "type": "string",
-      "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1"
-    },
-    "ConfigMapKeySelector": {
-      "description": "+structType=atomic",
-      "type": "object",
-      "title": "Selects a key from a ConfigMap.",
-      "properties": {
-        "key": {
-          "description": "The key to select.",
-          "type": "string",
-          "x-go-name": "Key"
-        },
-        "name": {
-          "description": "Name of the referent.\nThis field is effectively required, but due to backwards compatibility is\nallowed to be empty. Instances of this type with an empty value here are\nalmost certainly wrong.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional\n+default=\"\"\n+kubebuilder:default=\"\"\nTODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.",
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "optional": {
-          "description": "Specify whether the ConfigMap or its key must be defined\n+optional",
-          "type": "boolean",
-          "x-go-name": "Optional"
-        }
-      },
-      "x-go-package": "k8s.io/api/core/v1"
-    },
-    "Constraint": {
-      "description": "Constraint represents a gatekeeper Constraint",
-      "type": "object",
-      "properties": {
-        "labels": {
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          },
-          "x-go-name": "Labels"
-        },
-        "name": {
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "spec": {
-          "$ref": "#/definitions/ConstraintSpec"
-        },
-        "status": {
-          "$ref": "#/definitions/ConstraintStatus"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ConstraintSelector": {
-      "type": "object",
-      "title": "ConstraintSelector is the object holding the cluster selection filters.",
-      "properties": {
-        "labelSelector": {
-          "$ref": "#/definitions/LabelSelector"
-        },
-        "providers": {
-          "description": "Providers is a list of cloud providers to which the Constraint applies to. Empty means all providers are selected.",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Providers"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "ConstraintSpec": {
-      "type": "object",
-      "title": "ConstraintSpec specifies the data for the constraint.",
-      "properties": {
-        "constraintType": {
-          "description": "ConstraintType specifies the type of gatekeeper constraint that the constraint applies to",
-          "type": "string",
-          "x-go-name": "ConstraintType"
-        },
-        "disabled": {
-          "description": "Disabled is the flag for disabling OPA constraints",
-          "type": "boolean",
-          "x-go-name": "Disabled"
-        },
-        "enforcementAction": {
-          "description": "EnforcementAction defines the action to take in response to a constraint being violated.\nBy default, EnforcementAction is set to deny as the default behavior is to deny admission requests with any violation.",
-          "type": "string",
-          "x-go-name": "EnforcementAction"
-        },
-        "match": {
-          "$ref": "#/definitions/Match"
-        },
-        "parameters": {
-          "$ref": "#/definitions/Parameters"
-        },
-        "selector": {
-          "$ref": "#/definitions/ConstraintSelector"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "ConstraintStatus": {
-      "type": "object",
-      "title": "ConstraintStatus represents a constraint status which holds audit info.",
-      "properties": {
-        "auditTimestamp": {
-          "type": "string",
-          "x-go-name": "AuditTimestamp"
-        },
-        "enforcement": {
-          "type": "string",
-          "x-go-name": "Enforcement"
-        },
-        "synced": {
-          "type": "boolean",
-          "x-go-name": "Synced"
-        },
-        "violations": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/Violation"
-          },
-          "x-go-name": "Violations"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
-    },
-    "ConstraintTemplate": {
-      "description": "ConstraintTemplate represents a gatekeeper ConstraintTemplate",
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "spec": {
-          "$ref": "#/definitions/ConstraintTemplateSpec"
-        },
-        "status": {
-          "$ref": "#/definitions/ConstraintTemplateStatus"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2"
"k8c.io/dashboard/v2/pkg/api/v2" - }, - "ConstraintTemplateSelector": { - "type": "object", - "title": "ConstraintTemplateSelector is the object holding the cluster selection filters.", - "properties": { - "labelSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "providers": { - "description": "Providers is a list of cloud providers to which the Constraint Template applies to. Empty means all providers are selected.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Providers" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ConstraintTemplateSpec": { - "type": "object", - "title": "ConstraintTemplateSpec is the object representing the gatekeeper constraint template spec and kubermatic related spec.", - "properties": { - "crd": { - "$ref": "#/definitions/CRD" - }, - "selector": { - "$ref": "#/definitions/ConstraintTemplateSelector" - }, - "targets": { - "type": "array", - "items": { - "$ref": "#/definitions/Target" - }, - "x-go-name": "Targets" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ConstraintTemplateStatus": { - "type": "object", - "title": "ConstraintTemplateStatus defines the observed state of ConstraintTemplate.", - "properties": { - "byPod": { - "type": "array", - "items": { - "$ref": "#/definitions/ByPodStatus" - }, - "x-go-name": "ByPod" - }, - "created": { - "type": "boolean", - "x-go-name": "Created" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "ContainerRuntimeContainerd": { - "type": "object", - "title": "ContainerRuntimeContainerd defines containerd container runtime registries configs.", - "properties": { - "registries": { - "description": "A map of registries to use to render configs and mirrors for containerd registries", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ContainerdRegistry" - }, - "x-go-name": "Registries" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ContainerRuntimeOpts": { - "type": "object", - "title": "ContainerRuntimeOpts represents a set of options to configure container-runtime binary used in nodes.", - "properties": { - "containerdRegistryMirrors": { - "$ref": "#/definitions/ContainerRuntimeContainerd" - }, - "insecureRegistries": { - "description": "Optional: These image registries will be configured as insecure\non the container runtime.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "InsecureRegistries" - }, - "pauseImage": { - "description": "Optional: Translates to --pod-infra-container-image on the kubelet.\nIf not set, the kubelet will default it.", - "type": "string", - "x-go-name": "PauseImage" - }, - "registryMirrors": { - "description": "Optional: These image registries will be configured as registry mirrors\non the container runtime.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "RegistryMirrors" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ContainerdRegistry": { - "type": "object", - "title": "ContainerdRegistry defines endpoints and security for given container registry.", - "properties": { - "mirrors": { - "description": "List of registry mirrors to use", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Mirrors" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ControlPlaneMetrics": { - "description": "ControlPlaneMetrics defines a metric for the user cluster control 
-      "type": "object",
-      "properties": {
-        "cpuTotalMillicores": {
-          "description": "CPUTotalMillicores in m cores",
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "CPUTotalMillicores"
-        },
-        "memoryTotalBytes": {
-          "description": "MemoryTotalBytes in bytes",
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "MemoryTotalBytes"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CreateCRDError": {
-      "type": "object",
-      "title": "CreateCRDError represents a single error caught during parsing, compiling, etc.",
-      "properties": {
-        "code": {
-          "type": "string",
-          "x-go-name": "Code"
-        },
-        "location": {
-          "type": "string",
-          "x-go-name": "Location"
-        },
-        "message": {
-          "type": "string",
-          "x-go-name": "Message"
-        }
-      },
-      "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1"
-    },
-    "CreateClusterSpec": {
-      "description": "CreateClusterSpec is the structure that is used to create cluster with its initial node deployment",
-      "type": "object",
-      "properties": {
-        "applications": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/Application"
-          },
-          "x-go-name": "Applications"
-        },
-        "cluster": {
-          "$ref": "#/definitions/Cluster"
-        },
-        "nodeDeployment": {
-          "$ref": "#/definitions/NodeDeployment"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CreateSeedMLASettings": {
-      "type": "object",
-      "properties": {
-        "userClusterMLAEnabled": {
-          "description": "Optional: UserClusterMLAEnabled controls whether the user cluster MLA (Monitoring, Logging \u0026 Alerting) stack is enabled in the seed.",
-          "type": "boolean",
-          "x-go-name": "UserClusterMLAEnabled"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CreateSeedProxySettings": {
-      "description": "CreateSeedProxySettings allow configuring a HTTP proxy for the controlplanes\nand nodes.",
-      "type": "object",
-      "properties": {
-        "httpProxy": {
-          "description": "Optional: If set, this proxy will be configured for both HTTP and HTTPS.",
-          "type": "string",
-          "x-go-name": "HTTPProxy"
-        },
-        "noProxy": {
-          "description": "Optional: If set this will be set as NO_PROXY environment variable on the node;\nThe value must be a comma-separated list of domains for which no proxy\nshould be used, e.g. \"*.example.com,internal.dev\".\nNote that the in-cluster apiserver URL will be automatically prepended\nto this value.",
-          "type": "string",
-          "x-go-name": "NoProxy"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CreateSeedSpec": {
-      "type": "object",
-      "title": "CreateSeedSpec is the structure that is used to create seed.",
-      "properties": {
-        "country": {
-          "description": "Optional: Country of the seed as ISO-3166 two-letter code, e.g. DE or UK.\nFor informational purposes in the Kubermatic dashboard only.",
-          "type": "string",
-          "x-go-name": "Country"
-        },
-        "defaultClusterTemplate": {
-          "description": "DefaultClusterTemplate is the name of a cluster template of scope \"seed\" that is used\nto default all new created clusters",
-          "type": "string",
-          "x-go-name": "DefaultClusterTemplate"
-        },
-        "expose_strategy": {
-          "$ref": "#/definitions/ExposeStrategy"
-        },
-        "kubeconfig": {
-          "description": "The raw Kubeconfig encoded to base64. This field is used for cluster creation or update.",
-          "type": "string",
-          "x-go-name": "Kubeconfig"
-        },
-        "location": {
-          "description": "Optional: Detailed location of the cluster, like \"Hamburg\" or \"Datacenter 7\".\nFor informational purposes in the Kubermatic dashboard only.",
-          "type": "string",
-          "x-go-name": "Location"
-        },
-        "mla": {
-          "$ref": "#/definitions/CreateSeedMLASettings"
-        },
-        "proxy_settings": {
-          "$ref": "#/definitions/CreateSeedProxySettings"
-        },
-        "seed_dns_overwrite": {
-          "description": "Optional: This can be used to override the DNS name used for this seed.\nBy default the seed name is used.",
-          "type": "string",
-          "x-go-name": "SeedDNSOverwrite"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CredentialList": {
-      "type": "object",
-      "title": "CredentialList represents a object for provider credential names.",
-      "properties": {
-        "names": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Names"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "CustomLink": {
-      "type": "object",
-      "properties": {
-        "icon": {
-          "type": "string",
-          "x-go-name": "Icon"
-        },
-        "label": {
-          "type": "string",
-          "x-go-name": "Label"
-        },
-        "location": {
-          "type": "string",
-          "x-go-name": "Location"
-        },
-        "url": {
-          "type": "string",
-          "x-go-name": "URL"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "CustomLinks": {
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/CustomLink"
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "CustomNetworkPolicy": {
-      "type": "object",
-      "title": "CustomNetworkPolicy contains a name and the Spec of a NetworkPolicy.",
-      "properties": {
-        "name": {
-          "description": "Name is the name of the Custom Network Policy.",
-          "type": "string",
-          "x-go-name": "Name"
-        },
-        "spec": {
-          "$ref": "#/definitions/NetworkPolicySpec"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DNSConfig": {
-      "type": "object",
-      "title": "DNSConfig contains a machine's DNS configuration.",
-      "properties": {
-        "servers": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Servers"
-        }
-      },
-      "x-go-package": "k8c.io/machine-controller/sdk/providerconfig"
-    },
-    "Datacenter": {
-      "type": "object",
-      "title": "Datacenter is the object representing a Kubernetes infra datacenter.",
-      "properties": {
-        "metadata": {
-          "$ref": "#/definitions/DatacenterMeta"
-        },
-        "spec": {
-          "$ref": "#/definitions/DatacenterSpec"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "DatacenterList": {
-      "description": "DatacenterList represents a list of datacenters",
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/Datacenter"
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "DatacenterMeta": {
-      "type": "object",
-      "title": "DatacenterMeta holds datacenter metadata information.",
-      "properties": {
-        "name": {
-          "type": "string",
-          "x-go-name": "Name"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "DatacenterSpec": {
-      "type": "object",
-      "title": "DatacenterSpec specifies the data for a datacenter.",
-      "properties": {
-        "alibaba": {
-          "$ref": "#/definitions/DatacenterSpecAlibaba"
-        },
-        "anexia": {
-          "$ref": "#/definitions/DatacenterSpecAnexia"
-        },
-        "aws": {
-          "$ref": "#/definitions/DatacenterSpecAWS"
-        },
-        "azure": {
-          "$ref": "#/definitions/DatacenterSpecAzure"
-        },
-        "baremetal": {
-          "$ref": "#/definitions/DatacenterSpecBaremetal"
-        },
-        "bringyourown": {
"$ref": "#/definitions/DatacenterSpecBringYourOwn" - }, - "country": { - "description": "Optional: Country of the seed as ISO-3166 two-letter code, e.g. DE or UK.\nIt is used for informational purposes.", - "type": "string", - "x-go-name": "Country" - }, - "digitalocean": { - "$ref": "#/definitions/DatacenterSpecDigitalocean" - }, - "disableCsiDriver": { - "description": "Optional: DisableCSIDriver disables the installation of CSI driver on every clusters within the DC\nIf true it can't be over-written in the cluster configuration", - "type": "boolean", - "x-go-name": "DisableCSIDriver" - }, - "edge": { - "$ref": "#/definitions/DatacenterSpecEdge" - }, - "enforceAuditLogging": { - "description": "EnforceAuditLogging enforces audit logging on every cluster within the DC,\nignoring cluster-specific settings.", - "type": "boolean", - "x-go-name": "EnforceAuditLogging" - }, - "enforcePodSecurityPolicy": { - "description": "EnforcePodSecurityPolicy enforces pod security policy plugin on every clusters within the DC,\nignoring cluster-specific settings", - "type": "boolean", - "x-go-name": "EnforcePodSecurityPolicy" - }, - "enforcedAuditWebhookSettings": { - "$ref": "#/definitions/AuditWebhookBackendSettings" - }, - "fake": { - "$ref": "#/definitions/DatacenterSpecFake" - }, - "gcp": { - "$ref": "#/definitions/DatacenterSpecGCP" - }, - "hetzner": { - "$ref": "#/definitions/DatacenterSpecHetzner" - }, - "ipv6Enabled": { - "description": "IPv6Enabled is a flag to indicate if the ipv6 is enabled for the datacenter.", - "type": "boolean", - "x-go-name": "IPv6Enabled" - }, - "kubelb": { - "$ref": "#/definitions/KubeLBDatacenterSettings" - }, - "kubevirt": { - "$ref": "#/definitions/DatacenterSpecKubevirt" - }, - "location": { - "description": "Optional: Detailed location of the cluster, like \"Hamburg\" or \"Datacenter 7\".\nIt is used for informational purposes.", - "type": "string", - "x-go-name": "Location" - }, - "machineFlavorFilter": { - "$ref": "#/definitions/MachineFlavorFilter" - }, - "node": { - "$ref": "#/definitions/NodeSettings" - }, - "nutanix": { - "$ref": "#/definitions/DatacenterSpecNutanix" - }, - "openstack": { - "$ref": "#/definitions/DatacenterSpecOpenstack" - }, - "operatingSystemProfiles": { - "$ref": "#/definitions/OperatingSystemProfileList" - }, - "packet": { - "$ref": "#/definitions/DatacenterSpecPacket" - }, - "provider": { - "description": "Name of the datacenter provider. Extracted based on which provider is defined in the spec.\nIt is used for informational purposes.", - "type": "string", - "x-go-name": "Provider" - }, - "requiredEmails": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "RequiredEmails" - }, - "seed": { - "description": "Name of the seed this datacenter belongs to.", - "type": "string", - "x-go-name": "Seed" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/DatacenterSpecVMwareCloudDirector" - }, - "vsphere": { - "$ref": "#/definitions/DatacenterSpecVSphere" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "DatacenterSpecAWS": { - "type": "object", - "title": "DatacenterSpecAWS describes an AWS datacenter.", - "properties": { - "images": { - "$ref": "#/definitions/ImageList" - }, - "region": { - "description": "The AWS region to use, e.g. \"us-east-1\". 
-          "type": "string",
-          "x-go-name": "Region"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecAlibaba": {
-      "type": "object",
-      "title": "DatacenterSpecAlibaba describes a alibaba datacenter.",
-      "properties": {
-        "region": {
-          "description": "Region to use, for a full list of regions see\nhttps://www.alibabacloud.com/help/doc-detail/40654.htm",
-          "type": "string",
-          "x-go-name": "Region"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecAnexia": {
-      "type": "object",
-      "title": "DatacenterSpecAnexia describes a anexia datacenter.",
-      "properties": {
-        "locationID": {
-          "description": "LocationID the location of the region",
-          "type": "string",
-          "x-go-name": "LocationID"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecAzure": {
-      "type": "object",
-      "title": "DatacenterSpecAzure describes an Azure cloud datacenter.",
-      "properties": {
-        "images": {
-          "$ref": "#/definitions/ImageList"
-        },
-        "location": {
-          "description": "Region to use, for example \"westeurope\". A list of available regions can be\nfound at https://azure.microsoft.com/en-us/global-infrastructure/locations/",
-          "type": "string",
-          "x-go-name": "Location"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecBaremetal": {
-      "type": "object",
-      "title": "DatacenterSpecBaremetal describes a datacenter of baremetal nodes.",
-      "properties": {
-        "tinkerbell": {
-          "$ref": "#/definitions/DatacenterSpecTinkerbell"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecBringYourOwn": {
-      "type": "object",
-      "title": "DatacenterSpecBringYourOwn describes a datacenter our of bring your own nodes.",
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecDigitalocean": {
-      "type": "object",
-      "title": "DatacenterSpecDigitalocean describes a DigitalOcean datacenter.",
-      "properties": {
-        "region": {
-          "description": "Datacenter location, e.g. \"ams3\". A list of existing datacenters can be found\nat https://www.digitalocean.com/docs/platform/availability-matrix/",
-          "type": "string",
-          "x-go-name": "Region"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecEdge": {
-      "type": "object",
-      "title": "DatacenterSpecEdge describes a datacenter of edge nodes.",
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecFake": {
-      "type": "object",
-      "title": "DatacenterSpecFake describes a fake datacenter.",
-      "properties": {
-        "fakeProperty": {
-          "type": "string",
-          "x-go-name": "FakeProperty"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecGCP": {
-      "type": "object",
-      "title": "DatacenterSpecGCP describes a GCP datacenter.",
-      "properties": {
-        "region": {
-          "description": "Region to use, for example \"europe-west3\", for a full list of regions see\nhttps://cloud.google.com/compute/docs/regions-zones/",
-          "type": "string",
-          "x-go-name": "Region"
-        },
-        "regional": {
-          "description": "Optional: Regional clusters spread their resources across multiple availability zones.\nRefer to the official documentation for more details on this:\nhttps://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters",
-          "type": "boolean",
-          "x-go-name": "Regional"
-        },
-        "zoneSuffixes": {
-          "description": "List of enabled zones, for example [a, c]. See the link above for the available\nzones in your chosen region.",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "ZoneSuffixes"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecHetzner": {
-      "type": "object",
-      "title": "DatacenterSpecHetzner describes a Hetzner cloud datacenter.",
-      "properties": {
-        "datacenter": {
-          "description": "Datacenter location, e.g. \"nbg1-dc3\". A list of existing datacenters can be found\nat https://docs.hetzner.com/general/others/data-centers-and-connection/",
-          "type": "string",
-          "x-go-name": "Datacenter"
-        },
-        "location": {
-          "description": "Optional: Detailed location of the datacenter, like \"Hamburg\" or \"Datacenter 7\".\nFor informational purposes only.",
-          "type": "string",
-          "x-go-name": "Location"
-        },
-        "network": {
-          "description": "Network is the pre-existing Hetzner network in which the machines are running.\nWhile machines can be in multiple networks, a single one must be chosen for the\nHCloud CCM to work.",
-          "type": "string",
-          "x-go-name": "Network"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecKubevirt": {
-      "type": "object",
-      "title": "DatacenterSpecKubevirt describes a kubevirt datacenter.",
-      "properties": {
-        "ccmLoadBalancerEnabled": {
-          "description": "Optional: indicates if the ccm should create and manage the clusters load balancers.",
-          "type": "boolean",
-          "x-go-name": "CCMLoadBalancerEnabled"
-        },
-        "ccmZoneAndRegionEnabled": {
-          "description": "Optional: indicates if region and zone labels from the cloud provider should be fetched.",
-          "type": "boolean",
-          "x-go-name": "CCMZoneAndRegionEnabled"
-        },
-        "csiDriverOperator": {
-          "$ref": "#/definitions/KubeVirtCSIDriverOperator"
-        },
-        "customNetworkPolicies": {
-          "description": "Optional: CustomNetworkPolicies allows to add some extra custom NetworkPolicies, that are deployed\nin the dedicated infra KubeVirt cluster. They are added to the defaults.",
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/CustomNetworkPolicy"
-          },
-          "x-go-name": "CustomNetworkPolicies"
-        },
-        "disableDefaultInstanceTypes": {
-          "description": "DisableDefaultInstanceTypes prevents KKP from automatically creating default instance types.\n(standard-2, standard-4, standard-8) in KubeVirt environments.",
-          "type": "boolean",
-          "x-go-name": "DisableDefaultInstanceTypes"
-        },
-        "disableDefaultPreferences": {
-          "description": "DisableKubermaticPreferences prevents KKP from setting default KubeVirt preferences.",
-          "type": "boolean",
-          "x-go-name": "DisableDefaultPreferences"
-        },
-        "dnsConfig": {
-          "$ref": "#/definitions/PodDNSConfig"
-        },
-        "dnsPolicy": {
-          "description": "DNSPolicy represents the dns policy for the pod. Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst',\n'Default' or 'None'. Defaults to \"ClusterFirst\". DNS parameters given in DNSConfig will be merged with the\npolicy selected with DNSPolicy.",
-          "type": "string",
-          "x-go-name": "DNSPolicy"
-        },
-        "enableDedicatedCpus": {
-          "description": "Optional: EnableDedicatedCPUs enables the assignment of dedicated cpus instead of resource requests and limits for a virtual machine.\nDefaults to false.\nDeprecated: Use .kubevirt.usePodResourcesCPU instead.",
-          "type": "boolean",
-          "x-go-name": "EnableDedicatedCPUs"
-        },
-        "enableDefaultNetworkPolicies": {
-          "description": "Optional: EnableDefaultNetworkPolicies enables deployment of default network policies like cluster isolation.\nDefaults to true.",
-          "type": "boolean",
-          "x-go-name": "EnableDefaultNetworkPolicies"
-        },
-        "images": {
-          "$ref": "#/definitions/KubeVirtImageSources"
-        },
-        "infraStorageClasses": {
-          "description": "Optional: InfraStorageClasses contains a list of KubeVirt infra cluster StorageClasses names\nthat will be used to initialise StorageClasses in the tenant cluster.\nIn the tenant cluster, the created StorageClass name will have as name:\nkubevirt-\u003cinfra-storageClass-name\u003e",
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/KubeVirtInfraStorageClass"
-          },
-          "x-go-name": "InfraStorageClasses"
-        },
-        "matchSubnetAndStorageLocation": {
-          "description": "Optional: MatchSubnetAndStorageLocation if set to true, the region and zone of the subnet and storage class must match. For\nexample, if the storage class has the region `eu` and zone was `central`, the subnet must be in the same region and zone.\notherwise KKP will reject the creation of the machine deployment and eventually the cluster.",
-          "type": "boolean",
-          "x-go-name": "MatchSubnetAndStorageLocation"
-        },
-        "namespacedMode": {
-          "$ref": "#/definitions/NamespacedMode"
-        },
-        "providerNetwork": {
-          "$ref": "#/definitions/ProviderNetwork"
-        },
-        "usePodResourcesCPU": {
-          "description": "Optional: UsePodResourcesCPU enables CPU assignment via Kubernetes Pod resource requests/limits.\nWhen false (default), CPUs are assigned via KubeVirt's spec.domain.cpu.",
-          "type": "boolean",
-          "x-go-name": "UsePodResourcesCPU"
-        },
-        "vmEvictionStrategy": {
-          "$ref": "#/definitions/EvictionStrategy"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecNutanix": {
-      "type": "object",
-      "title": "DatacenterSpecNutanix describes a Nutanix datacenter.",
-      "properties": {
-        "allowInsecure": {
-          "description": "Optional: AllowInsecure allows to disable the TLS certificate check against the endpoint (defaults to false)",
-          "type": "boolean",
-          "x-go-name": "AllowInsecure"
-        },
-        "endpoint": {
-          "description": "Endpoint to use for accessing Nutanix Prism Central. No protocol or port should be passed,\nfor example \"nutanix.example.com\" or \"10.0.0.1\"",
-          "type": "string",
-          "x-go-name": "Endpoint"
-        },
-        "images": {
-          "$ref": "#/definitions/ImageList"
-        },
-        "port": {
-          "description": "Optional: Port to use when connecting to the Nutanix Prism Central endpoint (defaults to 9440)",
-          "type": "integer",
-          "format": "int32",
-          "x-go-name": "Port"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecOpenstack": {
-      "type": "object",
-      "title": "DatacenterSpecOpenstack describes an OpenStack datacenter.",
-      "properties": {
-        "authURL": {
-          "description": "Authentication URL",
-          "type": "string",
-          "x-go-name": "AuthURL"
-        },
-        "availabilityZone": {
-          "description": "Used to configure availability zone.",
-          "type": "string",
-          "x-go-name": "AvailabilityZone"
-        },
-        "csiCinderTopologyEnabled": {
-          "description": "Optional: configures enablement of topology support for the Cinder CSI Plugin.\nThis requires Nova and Cinder to have matching availability zones configured.",
-          "type": "boolean",
-          "x-go-name": "CSICinderTopologyEnabled"
-        },
-        "dnsServers": {
-          "description": "Used for automatic network creation",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "DNSServers"
-        },
-        "enableConfigDrive": {
-          "description": "Optional: enable a configuration drive that will be attached to the instance when it boots.\nThe instance can mount this drive and read files from it to get information",
-          "type": "boolean",
-          "x-go-name": "EnableConfigDrive"
-        },
-        "enabledFlavors": {
-          "description": "Optional: List of enabled flavors for the given datacenter",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "EnabledFlavors"
-        },
-        "enforceFloatingIP": {
-          "description": "Optional",
-          "type": "boolean",
-          "x-go-name": "EnforceFloatingIP"
-        },
-        "ignoreVolumeAZ": {
-          "description": "Optional",
-          "type": "boolean",
-          "x-go-name": "IgnoreVolumeAZ"
-        },
-        "images": {
-          "$ref": "#/definitions/ImageList"
-        },
-        "ipv6Enabled": {
-          "description": "Optional: defines if the IPv6 is enabled for the datacenter",
-          "type": "boolean",
-          "x-go-name": "IPv6Enabled"
-        },
-        "loadBalancerClasses": {
-          "description": "Optional: List of LoadBalancerClass configurations to be used for the OpenStack cloud provider.",
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/LoadBalancerClass"
-          },
-          "x-go-name": "LoadBalancerClasses"
-        },
-        "loadBalancerMethod": {
-          "description": "Optional: Gets mapped to the \"lb-method\" setting in the cloud config.\ndefaults to \"ROUND_ROBIN\".",
-          "type": "string",
-          "x-go-name": "LoadBalancerMethod"
-        },
-        "loadBalancerProvider": {
-          "description": "Optional: Gets mapped to the \"lb-provider\" setting in the cloud config.\ndefaults to \"\"",
-          "type": "string",
-          "x-go-name": "LoadBalancerProvider"
-        },
-        "manageSecurityGroups": {
-          "description": "Optional: Gets mapped to the \"manage-security-groups\" setting in the cloud config.\nThis setting defaults to true.",
-          "type": "boolean",
-          "x-go-name": "ManageSecurityGroups"
-        },
-        "nodePortsAllowedIPRange": {
-          "$ref": "#/definitions/NetworkRanges"
-        },
-        "nodeSizeRequirements": {
-          "$ref": "#/definitions/OpenstackNodeSizeRequirements"
-        },
-        "region": {
-          "description": "Authentication region name",
-          "type": "string",
-          "x-go-name": "Region"
-        },
-        "trustDevicePath": {
-          "description": "Optional: Gets mapped to the \"trust-device-path\" setting in the cloud config.\nThis setting defaults to false.",
-          "type": "boolean",
-          "x-go-name": "TrustDevicePath"
-        },
-        "useOctavia": {
-          "description": "Optional: Gets mapped to the \"use-octavia\" setting in the cloud config.\nuse-octavia is enabled by default in CCM since v1.17.0, and disabled by\ndefault with the in-tree cloud provider.",
-          "type": "boolean",
-          "x-go-name": "UseOctavia"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecPacket": {
-      "description": "This provider is no longer supported. Migrate your configurations away from \"packet\" immediately.\nDatacenterSpecPacket describes a Packet datacenter.",
-      "type": "object",
-      "title": "Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.",
-      "properties": {
-        "facilities": {
-          "description": "The list of enabled facilities, for example \"ams1\", for a full list of available\nfacilities see https://metal.equinix.com/developers/docs/locations/facilities/",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Facilities"
-        },
-        "metro": {
-          "description": "Metros are facilities that are grouped together geographically and share capacity\nand networking features, see https://metal.equinix.com/developers/docs/locations/metros/",
-          "type": "string",
-          "x-go-name": "Metro"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecTinkerbell": {
-      "type": "object",
-      "title": "DatacenterSepcTinkerbell contains spec for tinkerbell provider.",
-      "properties": {
-        "images": {
-          "$ref": "#/definitions/TinkerbellImageSources"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DatacenterSpecVMwareCloudDirector": {
-      "type": "object",
-      "properties": {
-        "allowInsecure": {
-          "description": "If set to true, disables the TLS certificate check against the endpoint.",
-          "type": "boolean",
-          "x-go-name": "AllowInsecure"
-        },
-        "catalog": {
-          "description": "The default catalog which contains the VM templates.",
-          "type": "string",
-          "x-go-name": "DefaultCatalog"
-        },
-        "storageProfile": {
-          "description": "The name of the storage profile to use for disks attached to the VMs.",
-          "type": "string",
-          "x-go-name": "DefaultStorageProfile"
-        },
-        "templates": {
-          "$ref": "#/definitions/ImageList"
"#/definitions/ImageList" - }, - "url": { - "description": "Endpoint URL to use, including protocol, for example \"https://vclouddirector.example.com/".", - "type": "string", - "x-go-name": "URL" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "DatacenterSpecVSphere": { - "type": "object", - "title": "DatacenterSpecVSphere describes a vSphere datacenter.", - "properties": { - "allowInsecure": { - "description": "If set to true, disables the TLS certificate check against the endpoint.", - "type": "boolean", - "x-go-name": "AllowInsecure" - }, - "cluster": { - "description": "The name of the vSphere cluster to use. Used for out-of-tree CSI Driver.", - "type": "string", - "x-go-name": "Cluster" - }, - "datacenter": { - "description": "The name of the datacenter to use.", - "type": "string", - "x-go-name": "Datacenter" - }, - "datastore": { - "description": "The default Datastore to be used for provisioning volumes using storage\nclasses/dynamic provisioning and for storing virtual machine files in\ncase no `Datastore` or `DatastoreCluster` is provided at Cluster level.", - "type": "string", - "x-go-name": "DefaultDatastore" - }, - "defaultTagCategoryID": { - "description": "DefaultTagCategoryID is the tag category id that will be used as default, if users don't specify it on a cluster level,\nand they don't wish KKP to create default generated tag category, upon cluster creation.", - "type": "string", - "x-go-name": "DefaultTagCategoryID" - }, - "endpoint": { - "description": "Endpoint URL to use, including protocol, for example \"https://vcenter.example.com/".", - "type": "string", - "x-go-name": "Endpoint" - }, - "infraManagementUser": { - "$ref": "#/definitions/VSphereCredentials" - }, - "ipv6Enabled": { - "description": "Optional: defines if the IPv6 is enabled for the datacenter", - "type": "boolean", - "x-go-name": "IPv6Enabled" - }, - "rootPath": { - "description": "Optional: The root path for cluster specific VM folders. Each cluster gets its own\nfolder below the root folder. 
-          "type": "string",
-          "x-go-name": "RootPath"
-        },
-        "storagePolicy": {
-          "description": "The name of the storage policy to use for the storage class created in the user cluster.",
-          "type": "string",
-          "x-go-name": "DefaultStoragePolicy"
-        },
-        "templates": {
-          "$ref": "#/definitions/ImageList"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DefaultingSelector": {
-      "type": "object",
-      "title": "DefaultingSelector is used to select the targeted user clusters for defaulting and enforcing applications.",
-      "properties": {
-        "datacenters": {
-          "description": "Datacenters is a list of datacenters where the application can be installed.",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Datacenters"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1"
-    },
-    "DependencyCredentials": {
-      "type": "object",
-      "properties": {
-        "helmCredentials": {
-          "$ref": "#/definitions/HelmCredentials"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1"
-    },
-    "DeployOptions": {
-      "type": "object",
-      "title": "DeployOptions holds the settings specific to the templating method used to deploy the application.",
-      "properties": {
-        "helm": {
-          "$ref": "#/definitions/HelmDeployOptions"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1"
-    },
-    "Digitalocean": {
-      "type": "object",
-      "properties": {
-        "datacenter": {
-          "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.",
-          "type": "string",
-          "x-go-name": "Datacenter"
-        },
-        "enabled": {
-          "description": "Only enabled presets will be available in the KKP dashboard.",
-          "type": "boolean",
-          "x-go-name": "Enabled"
-        },
-        "isCustomizable": {
-          "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.",
-          "type": "boolean",
-          "x-go-name": "IsCustomizable"
-        },
-        "token": {
-          "description": "Token is used to authenticate with the DigitalOcean API.",
-          "type": "string",
-          "x-go-name": "Token"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DigitaloceanCloudSpec": {
-      "type": "object",
-      "title": "DigitaloceanCloudSpec specifies access data to DigitalOcean.",
-      "properties": {
-        "credentialsReference": {
-          "$ref": "#/definitions/GlobalSecretKeySelector"
-        },
-        "token": {
-          "description": "Token is used to authenticate with the DigitalOcean API.",
-          "type": "string",
-          "x-go-name": "Token"
-        }
-      },
-      "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1"
-    },
-    "DigitaloceanNodeSpec": {
-      "description": "DigitaloceanNodeSpec digitalocean node settings",
-      "type": "object",
-      "required": [
-        "size"
-      ],
-      "properties": {
-        "backups": {
-          "description": "enable backups for the droplet",
-          "type": "boolean",
-          "x-go-name": "Backups"
-        },
-        "ipv6": {
-          "description": "DEPRECATED\nIPv6 is enabled automatically based on IP Family of the cluster so setting this field is not needed.\nenable ipv6 for the droplet",
-          "type": "boolean",
-          "x-go-name": "IPv6"
-        },
-        "monitoring": {
-          "description": "enable monitoring for the droplet",
-          "type": "boolean",
-          "x-go-name": "Monitoring"
-        },
-        "size": {
-          "description": "droplet size slug",
-          "type": "string",
-          "x-go-name": "Size"
-        },
-        "tags": {
-          "description": "additional droplet tags",
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Tags"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "DigitaloceanSize": {
-      "type": "object",
-      "title": "DigitaloceanSize is the object representing digitalocean sizes.",
-      "properties": {
-        "available": {
-          "type": "boolean",
-          "x-go-name": "Available"
-        },
-        "disk": {
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "Disk"
-        },
-        "memory": {
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "Memory"
-        },
-        "price_hourly": {
-          "type": "number",
-          "format": "double",
-          "x-go-name": "PriceHourly"
-        },
-        "price_monthly": {
-          "type": "number",
-          "format": "double",
-          "x-go-name": "PriceMonthly"
-        },
-        "regions": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "x-go-name": "Regions"
-        },
-        "slug": {
-          "type": "string",
-          "x-go-name": "Slug"
-        },
-        "transfer": {
-          "type": "number",
-          "format": "double",
-          "x-go-name": "Transfer"
-        },
-        "vcpus": {
-          "type": "integer",
-          "format": "int64",
-          "x-go-name": "VCPUs"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "DigitaloceanSizeList": {
-      "type": "object",
-      "title": "DigitaloceanSizeList represents a object of digitalocean sizes.",
-      "properties": {
-        "optimized": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/DigitaloceanSize"
-          },
-          "x-go-name": "Optimized"
-        },
-        "standard": {
-          "type": "array",
-          "items": {
-            "$ref": "#/definitions/DigitaloceanSize"
-          },
-          "x-go-name": "Standard"
-        }
-      },
-      "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1"
-    },
-    "Duration": {
-      "description": "Duration is a wrapper around time.Duration which supports correct\nmarshaling to YAML and JSON. In particular, it marshals into strings, which\ncan be used as map keys in json.",
In particular, it marshals into strings, which\ncan be used as map keys in json.", - "type": "object", - "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" - }, - "EKS": { - "type": "object", - "properties": { - "accessKeyID": { - "description": "The Access key ID used to authenticate against AWS.", - "type": "string", - "x-go-name": "AccessKeyID" - }, - "assumeRoleARN": { - "description": "Defines the ARN for an IAM role that should be assumed when handling resources on AWS. It will be used\nto acquire temporary security credentials using an STS AssumeRole API operation whenever creating an AWS session.", - "type": "string", - "x-go-name": "AssumeRoleARN" - }, - "assumeRoleExternalID": { - "description": "An arbitrary string that may be needed when calling the STS AssumeRole API operation.\nUsing an external ID can help to prevent the \"confused deputy problem\".", - "type": "string", - "x-go-name": "AssumeRoleExternalID" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "secretAccessKey": { - "description": "The Secret Access Key used to authenticate against AWS.", - "type": "string", - "x-go-name": "SecretAccessKey" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EKSAMITypeList": { - "type": "array", - "title": "EKSAMITypeList represents a list of EKS AMI Types for node group.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSCapacityTypeList": { - "type": "array", - "title": "EKSCapacityTypeList represents a list of EKS Capacity Types for node group.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSCloudSpec": { - "type": "object", - "properties": { - "accessKeyID": { - "type": "string", - "x-go-name": "AccessKeyID" - }, - "assumeRoleARN": { - "type": "string", - "x-go-name": "AssumeRoleARN" - }, - "assumeRoleExternalID": { - "type": "string", - "x-go-name": "AssumeRoleExternalID" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "region": { - "type": "string", - "x-go-name": "Region" - }, - "secretAccessKey": { - "type": "string", - "x-go-name": "SecretAccessKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSCluster": { - "type": "object", - "title": "EKSCluster represents a object of EKS cluster.", - "properties": { - "imported": { - "type": "boolean", - "x-go-name": "IsImported" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "region": { - "type": "string", - "x-go-name": "Region" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSClusterList": { - "type": "array", - "title": "EKSClusterList represents a list of EKS clusters.", - "items": { - "$ref": "#/definitions/EKSCluster" - }, - "x-go-package": 
"k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSClusterRole": { - "type": "object", - "title": "EKSClusterRole represents a EKS Cluster Service Role.", - "properties": { - "arn": { - "description": "The Amazon Resource Name (ARN) specifying the role. For more information\nabout ARNs and how to use them in policies, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide guide.", - "type": "string", - "x-go-name": "Arn" - }, - "roleName": { - "description": "RoleName represents the friendly name that identifies the role.", - "type": "string", - "x-go-name": "RoleName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSClusterRoleList": { - "type": "array", - "title": "EKSClusterRoleList represents a list of EKS Cluster Service Roles.", - "items": { - "$ref": "#/definitions/EKSClusterRole" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSClusterSpec": { - "type": "object", - "properties": { - "createdAt": { - "description": "The Unix epoch timestamp in seconds for when the cluster was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreatedAt" - }, - "kubernetesNetworkConfig": { - "$ref": "#/definitions/EKSKubernetesNetworkConfigResponse" - }, - "roleArn": { - "description": "The Amazon Resource Name (ARN) of the IAM role that provides permissions\nfor the Kubernetes control plane to make calls to AWS API operations on your\nbehalf. For more information, see Amazon EKS Service IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html)\nin the Amazon EKS User Guide .\n\nRoleArn is a required field", - "type": "string", - "x-go-name": "RoleArn" - }, - "tags": { - "description": "The metadata that you apply to the cluster to assist with categorization\nand organization. Each tag consists of a key and an optional value. You define\nboth. Cluster tags do not propagate to any other resources associated with\nthe cluster.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "version": { - "description": "The desired Kubernetes version for your cluster. If you don't specify a value\nhere, the latest version available in Amazon EKS is used.", - "type": "string", - "x-go-name": "Version" - }, - "vpcConfigRequest": { - "$ref": "#/definitions/VpcConfigRequest" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSInstanceType": { - "type": "object", - "title": "EKSInstanceType is the object representing EKS nodegroup instancetype..", - "properties": { - "architecture": { - "type": "string", - "x-go-name": "Architecture" - }, - "gpus": { - "type": "integer", - "format": "int64", - "x-go-name": "GPUs" - }, - "memory": { - "type": "number", - "format": "float", - "x-go-name": "Memory" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "pretty_name": { - "type": "string", - "x-go-name": "PrettyName" - }, - "vcpus": { - "type": "integer", - "format": "int64", - "x-go-name": "VCPUs" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSInstanceTypeList": { - "type": "array", - "title": "EKSInstanceTypeList represents a list of EKS InstanceType object for node group.", - "items": { - "$ref": "#/definitions/EKSInstanceType" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSKubernetesNetworkConfigResponse": { - "description": "The Kubernetes network configuration for the cluster. 
The response contains\na value for serviceIpv6Cidr or serviceIpv4Cidr, but not both.", - "type": "object", - "properties": { - "ipFamily": { - "description": "The IP family used to assign Kubernetes pod and service IP addresses. The\nIP family is always ipv4, unless you have a 1.21 or later cluster running\nversion 1.10.1 or later of the Amazon VPC CNI add-on and specified ipv6 when\nyou created the cluster.", - "type": "string", - "x-go-name": "IpFamily" - }, - "serviceIpv4Cidr": { - "description": "The CIDR block that Kubernetes pod and service IP addresses are assigned\nfrom. Kubernetes assigns addresses from an IPv4 CIDR block assigned to a\nsubnet that the node is in. If you didn't specify a CIDR block when you created\nthe cluster, then Kubernetes assigns addresses from either the 10.100.0.0/16\nor 172.20.0.0/16 CIDR blocks. If this was specified, then it was specified\nwhen the cluster was created and it can't be changed.", - "type": "string", - "x-go-name": "ServiceIpv4Cidr" - }, - "serviceIpv6Cidr": { - "description": "The CIDR block that Kubernetes pod and service IP addresses are assigned\nfrom if you created a 1.21 or later cluster with version 1.10.1 or later\nof the Amazon VPC CNI add-on and specified ipv6 for ipFamily when you created\nthe cluster. Kubernetes assigns service addresses from the unique local address\nrange (fc00::/7) because you can't specify a custom IPv6 CIDR block when\nyou create the cluster.", - "type": "string", - "x-go-name": "ServiceIpv6Cidr" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSMachineDeploymentCloudSpec": { - "type": "object", - "properties": { - "amiType": { - "description": "The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU\nAMI type. Non-GPU instances should use the AL2_x86_64 AMI type. Arm instances\nshould use the AL2_ARM_64 AMI type. All types use the Amazon EKS optimized\nAmazon Linux 2 AMI. If you specify launchTemplate, and your launch template\nuses a custom AMI, then don't specify amiType, or the node group deployment\nwill fail. For more information about using launch templates with Amazon\nEKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.", - "type": "string", - "x-go-name": "AmiType" - }, - "architecture": { - "description": "The architecture of the machine image.", - "type": "string", - "x-go-name": "Architecture" - }, - "capacityType": { - "description": "The capacity type for your node group. Possible values ON_DEMAND | SPOT", - "type": "string", - "x-go-name": "CapacityType" - }, - "createdAt": { - "description": "The Unix epoch timestamp in seconds for when the managed node group was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreatedAt" - }, - "diskSize": { - "description": "The root device disk size (in GiB) for your node group instances. The default\ndisk size is 20 GiB. If you specify launchTemplate, then don't specify diskSize,\nor the node group deployment will fail. For more information about using\nlaunch templates with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.", - "type": "integer", - "format": "int32", - "x-go-name": "DiskSize" - }, - "instanceTypes": { - "description": "Specify the instance types for a node group. If you specify a GPU instance\ntype, be sure to specify AL2_x86_64_GPU with the amiType parameter. 
If you\nspecify launchTemplate, then you can specify zero or one instance type in\nyour launch template or you can specify 0-20 instance types for instanceTypes.\nIf however, you specify an instance type in your launch template and specify\nany instanceTypes, the node group deployment will fail. If you don't specify\nan instance type in a launch template or for instanceTypes, then t3.medium\nis used, by default. If you specify Spot for capacityType, then we recommend\nspecifying multiple values for instanceTypes. For more information, see Managed\nnode group capacity types (https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types)\nand Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "InstanceTypes" - }, - "labels": { - "description": "The Kubernetes labels to be applied to the nodes in the node group when they\nare created.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "nodeRole": { - "description": "The Amazon Resource Name (ARN) of the IAM role to associate with your node\ngroup. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs\non your behalf. Nodes receive permissions for these API calls through an\nIAM instance profile and associated policies. Before you can launch nodes\nand register them into a cluster, you must create an IAM role for those nodes\nto use when they are launched. For more information, see Amazon EKS node\nIAM role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html)\nin the Amazon EKS User Guide . If you specify launchTemplate, then don't\nspecify IamInstanceProfile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)\nin your launch template, or the node group deployment will fail. For more\ninformation about using launch templates with Amazon EKS, see Launch template\nsupport (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.\n\nNodeRole is a required field", - "type": "string", - "x-go-name": "NodeRole" - }, - "scalingConfig": { - "$ref": "#/definitions/EKSNodegroupScalingConfig" - }, - "subnets": { - "description": "The subnets to use for the Auto Scaling group that is created for your node\ngroup. These subnets must have the tag key kubernetes.io/cluster/CLUSTER_NAME\nwith a value of shared, where CLUSTER_NAME is replaced with the name of your\ncluster. If you specify launchTemplate, then don't specify SubnetId (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)\nin your launch template, or the node group deployment will fail. For more\ninformation about using launch templates with Amazon EKS, see Launch template\nsupport (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.\n\nSubnets is a required field", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Subnets" - }, - "tags": { - "description": "The metadata applied to the node group to assist with categorization and\norganization. Each tag consists of a key and an optional value. You define\nboth. 
Node group tags do not propagate to any other resources associated\nwith the node group, such as the Amazon EC2 instances or subnets.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "version": { - "description": "The Kubernetes version to use for your managed nodes. By default, the Kubernetes\nversion of the cluster is used, and this is the only accepted specified value.\nIf you specify launchTemplate, and your launch template uses a custom AMI,\nthen don't specify version, or the node group deployment will fail. For more\ninformation about using launch templates with Amazon EKS, see Launch template\nsupport (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html)\nin the Amazon EKS User Guide.", - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSNodeRole": { - "type": "object", - "title": "EKSNodeRole represents an EKS Node IAM Role.", - "properties": { - "arn": { - "description": "The Amazon Resource Name (ARN) specifying the role. For more information\nabout ARNs and how to use them in policies, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide.", - "type": "string", - "x-go-name": "Arn" - }, - "roleName": { - "description": "RoleName represents the friendly name that identifies the role.", - "type": "string", - "x-go-name": "RoleName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSNodeRoleList": { - "type": "array", - "title": "EKSNodeRoleList represents a list of EKS Node IAM Roles.", - "items": { - "$ref": "#/definitions/EKSNodeRole" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSNodegroupScalingConfig": { - "type": "object", - "properties": { - "desiredSize": { - "description": "The current number of nodes that the managed node group should maintain.", - "type": "integer", - "format": "int32", - "x-go-name": "DesiredSize" - }, - "maxSize": { - "description": "The maximum number of nodes that the managed node group can scale out to.\nFor information about the maximum number that you can specify, see Amazon\nEKS service quotas (https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html)\nin the Amazon EKS User Guide.", - "type": "integer", - "format": "int32", - "x-go-name": "MaxSize" - }, - "minSize": { - "description": "The minimum number of nodes that the managed node group can scale in to.\nThis number must be greater than zero.", - "type": "integer", - "format": "int32", - "x-go-name": "MinSize" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSRegionList": { - "type": "array", - "title": "EKSRegionList represents a list of EKS regions.", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSSecurityGroup": { - "type": "object", - "title": "EKSSecurityGroup represents an object of an EKS securityGroup.", - "properties": { - "groupId": { - "description": "The ID of the security group.", - "type": "string", - "x-go-name": "GroupId" - }, - "vpcId": { - "description": "[VPC only] The ID of the VPC for the security group.", - "type": "string", - "x-go-name": "VpcId" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSSecurityGroupList": { - "type": "array", - "title": "EKSSecurityGroupList represents an array of EKS securityGroups.", - "items": { - "$ref": "#/definitions/EKSSecurityGroup" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - },
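As a hedged illustration (this fragment is not part of the generated swagger.json): based on the desiredSize, maxSize and minSize fields defined in EKSNodegroupScalingConfig above, a minimal scaling configuration in a request body could look like the following sketch; all values are hypothetical.

    {
      "desiredSize": 2,
      "maxSize": 5,
      "minSize": 1
    }

Per the field descriptions above, minSize must be greater than zero, and the upper bound for maxSize is governed by the Amazon EKS service quotas linked in the schema.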
- "EKSSubnet": { - "type": "object", - "title": "EKSSubnet represents a object of EKS subnet.", - "properties": { - "availabilityZone": { - "description": "The Availability Zone of the subnet.", - "type": "string", - "x-go-name": "AvailabilityZone" - }, - "default": { - "type": "boolean", - "x-go-name": "Default" - }, - "subnetId": { - "description": "The ID of the subnet.", - "type": "string", - "x-go-name": "SubnetId" - }, - "vpcId": { - "description": "The ID of the VPC the subnet is in.", - "type": "string", - "x-go-name": "VpcId" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSSubnetList": { - "type": "array", - "title": "EKSSubnetList represents an array of EKS subnet.", - "items": { - "$ref": "#/definitions/EKSSubnet" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSVPC": { - "type": "object", - "title": "EKSVPC represents a object of EKS VpcId.", - "properties": { - "default": { - "type": "boolean", - "x-go-name": "IsDefault" - }, - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EKSVPCList": { - "type": "array", - "title": "EKSVPCList represents an array of EKS VPC.", - "items": { - "$ref": "#/definitions/EKSVPC" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EdgeCloudSpec": { - "type": "object", - "title": "EdgeCloudSpec specifies access data for an edge cluster.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EdgeNodeSpec": { - "description": "EdgeNodeSpec specifies edge specific node settings", - "type": "object", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "EnvVar": { - "type": "object", - "title": "EnvVar represents an environment variable present in a Container.", - "properties": { - "name": { - "description": "Name of the environment variable. Must be a C_IDENTIFIER.", - "type": "string", - "x-go-name": "Name" - }, - "value": { - "description": "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. 
Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional", - "type": "string", - "x-go-name": "Value" - }, - "valueFrom": { - "$ref": "#/definitions/EnvVarSource" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "EnvVarSource": { - "type": "object", - "title": "EnvVarSource represents a source for the value of an EnvVar.", - "properties": { - "configMapKeyRef": { - "$ref": "#/definitions/ConfigMapKeySelector" - }, - "fieldRef": { - "$ref": "#/definitions/ObjectFieldSelector" - }, - "resourceFieldRef": { - "$ref": "#/definitions/ResourceFieldSelector" - }, - "secretKeyRef": { - "$ref": "#/definitions/SecretKeySelector" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ErrorDetails": { - "type": "object", - "title": "ErrorDetails contains details about the error.", - "required": [ - "code", - "message" - ], - "properties": { - "code": { - "description": "The error code", - "type": "integer", - "format": "int64", - "x-go-name": "Code" - }, - "details": { - "description": "Additional error messages", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Additional" - }, - "message": { - "description": "The error message", - "type": "string", - "x-go-name": "Message" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler" - }, - "EtcdBackupConfig": { - "description": "EtcdBackupConfig represents an object holding the configuration for etcd backups", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/EtcdBackupConfigSpec" - }, - "status": { - "$ref": "#/definitions/EtcdBackupConfigStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdBackupConfigCondition": { - "type": "object", - "properties": { - "lastHeartbeatTime": { - "description": "Last time we got an update on a given condition.\n+optional", - "type": "string", - "format": "date-time", - "x-go-name": "LastHeartbeatTime" - }, - "lastTransitionTime": { - "description": "Last time the condition transit from one status to another.\n+optional", - "type": "string", - "format": "date-time", - "x-go-name": "LastTransitionTime" - }, - "message": { - "description": "Human readable message indicating details about last transition.\n+optional", - "type": "string", - "x-go-name": "Message" - }, - "reason": { - "description": "(brief) reason for the condition's last transition.\n+optional", - "type": "string", - "x-go-name": "Reason" - }, - "status": { - "$ref": "#/definitions/ConditionStatus" - }, - "type": { - "$ref": "#/definitions/EtcdBackupConfigConditionType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdBackupConfigConditionType": { - "description": "EtcdBackupConfigConditionType is used to indicate the type of a EtcdBackupConfig condition. For all condition\ntypes, the `true` value must indicate success. All condition types must be registered within\nthe `AllClusterConditionTypes` variable.", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EtcdBackupConfigSpec": { - "description": "EtcdBackupConfigSpec represents an object holding the etcd backup configuration specification", - "type": "object", - "properties": { - "clusterId": { - "description": "ClusterID is the id of the cluster which will be backed up", - "type": "string", - "x-go-name": "ClusterID" - }, - "destination": { - "description": "Destination indicates where the backup will be stored. The destination name should correspond to a destination in\nthe cluster's Seed.Spec.EtcdBackupRestore.", - "type": "string", - "x-go-name": "Destination" - }, - "keep": { - "description": "Keep is the number of backups to keep around before deleting the oldest one\nIf not set, defaults to DefaultKeptBackupsCount. Only used if Schedule is set.", - "type": "integer", - "format": "int64", - "x-go-name": "Keep" - }, - "schedule": { - "description": "Schedule is a cron expression defining when to perform\nthe backup. 
If not set, the backup is performed exactly\nonce, immediately.", - "type": "string", - "x-go-name": "Schedule" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdBackupConfigStatus": { - "type": "object", - "properties": { - "cleanupRunning": { - "description": "If the controller was configured with a cleanupContainer, CleanupRunning keeps track of the corresponding job", - "type": "boolean", - "x-go-name": "CleanupRunning" - }, - "conditions": { - "description": "Conditions contains conditions of the EtcdBackupConfig", - "type": "array", - "items": { - "$ref": "#/definitions/EtcdBackupConfigCondition" - }, - "x-go-name": "Conditions" - }, - "lastBackups": { - "description": "CurrentBackups tracks the creation and deletion progress if all backups managed by the EtcdBackupConfig", - "type": "array", - "items": { - "$ref": "#/definitions/BackupStatus" - }, - "x-go-name": "CurrentBackups" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdBackupRestore": { - "type": "object", - "title": "EtcdBackupRestore holds the configuration of the automatic backup and restores.", - "properties": { - "backupCount": { - "description": "BackupCount specifies the maximum number of backups to retain (defaults to DefaultKeptBackupsCount).\nOldest backups are automatically deleted when this limit is exceeded. Only applies when Schedule is configured.", - "type": "integer", - "format": "int64", - "x-go-name": "BackupCount" - }, - "backupInterval": { - "$ref": "#/definitions/Duration" - }, - "defaultDestination": { - "description": "DefaultDestination marks the default destination that will be used for the default etcd backup config which is\ncreated for every user cluster. Has to correspond to a destination in Destinations.\nIf removed, it removes the related default etcd backup configs.", - "type": "string", - "x-go-name": "DefaultDestination" - }, - "destinations": { - "description": "Destinations stores all the possible destinations where the backups for the Seed can be stored. If not empty,\nit enables automatic backup and restore for the seed.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/BackupDestination" - }, - "x-go-name": "Destinations" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EtcdRestore": { - "description": "EtcdRestore represents an object holding the configuration for etcd backup restore", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/EtcdRestoreSpec" - }, - "status": { - "$ref": "#/definitions/EtcdRestoreStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdRestorePhase": { - "type": "string", - "title": "EtcdRestorePhase represents the lifecycle phase of an EtcdRestore.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EtcdRestoreSpec": { - "description": "EtcdRestoreSpec represents an object holding the etcd backup restore configuration specification", - "type": "object", - "properties": { - "backupDownloadCredentialsSecret": { - "description": "BackupDownloadCredentialsSecret is the name of a secret in the cluster-xxx namespace containing\ncredentials needed to download the backup", - "type": "string", - "x-go-name": "BackupDownloadCredentialsSecret" - }, - "backupName": { - "description": "BackupName is the name of the backup to restore from", - "type": "string", - "x-go-name": "BackupName" - }, - "clusterId": { - "description": "ClusterID is the id of the cluster which will be restored from the backup", - "type": "string", - "x-go-name": "ClusterID" - }, - "destination": { - "description": "Destination indicates where the backup was stored. The destination name should correspond to a destination in\nthe cluster's Seed.Spec.EtcdBackupRestore.", - "type": "string", - "x-go-name": "Destination" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "EtcdRestoreStatus": { - "type": "object", - "properties": { - "phase": { - "$ref": "#/definitions/EtcdRestorePhase" - }, - "restoreTime": { - "type": "string", - "format": "date-time", - "x-go-name": "RestoreTime" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Event": { - "type": "object", - "title": "Event is a report of an event somewhere in the cluster.", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "count": { - "description": "The number of times this event has occurred.", - "type": "integer", - "format": "int32", - "x-go-name": "Count" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "involvedObject": { - "$ref": "#/definitions/ObjectReferenceResource" - }, - "lastTimestamp": { - "description": "The time at which the most recent occurrence of this event was recorded.", - "type": "string", - "format": "date-time", - "x-go-name": "LastTimestamp" - }, - "message": { - "description": "A human-readable description of the status of this operation.", - "type": "string", - "x-go-name": "Message" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "type": { - "description": "Type of this event (i.e. normal or warning). New types could be added in the future.", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "EventRateLimitConfig": { - "description": "More info: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit", - "type": "object", - "title": "EventRateLimitConfig configures the `EventRateLimit` admission plugin.", - "properties": { - "namespace": { - "$ref": "#/definitions/EventRateLimitConfigItem" - }, - "server": { - "$ref": "#/definitions/EventRateLimitConfigItem" - }, - "sourceAndObject": { - "$ref": "#/definitions/EventRateLimitConfigItem" - }, - "user": { - "$ref": "#/definitions/EventRateLimitConfigItem" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EventRateLimitConfigItem": { - "type": "object", - "properties": { - "burst": { - "type": "integer", - "format": "int32", - "x-go-name": "Burst" - }, - "cacheSize": { - "type": "integer", - "format": "int32", - "x-go-name": "CacheSize" - }, - "qps": { - "type": "integer", - "format": "int32", - "x-go-name": "QPS" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "EvictionStrategy": { - "type": "string", - "x-go-package": "kubevirt.io/api/core/v1" - }, - "ExposeStrategy": { - "description": "Possible values are `NodePort`, `LoadBalancer` or `Tunneling` (requires a feature gate).", - "type": "string", - "title": "ExposeStrategy is the strategy used to expose a cluster control plane.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ExternalCCMMigrationStatus": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ExternalCluster": { - "description": "ExternalCluster represents an object holding cluster details", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "cloud": { - "$ref": "#/definitions/ExternalClusterCloudSpec" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ExternalClusterSpec" - }, - "status": { - "$ref": "#/definitions/ExternalClusterStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterCloudSpec": { - "description": "ExternalClusterCloudSpec represents an object holding cluster cloud details", - "type": "object", - "properties": { - "aks": { - "$ref": "#/definitions/AKSCloudSpec" - }, - "bringYourOwn": { - "$ref": "#/definitions/BringYourOwnSpec" - }, - "eks": { - "$ref": "#/definitions/EKSCloudSpec" - }, - "gke": { - "$ref": "#/definitions/GKECloudSpec" - }, - "kubeOne": { - "$ref": "#/definitions/KubeOneSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterMDPhase": { - "type": "object", - "title": "ExternalClusterMDPhase defines the external cluster machinedeployment phase.", - "properties": { - "aks": { - "$ref": "#/definitions/AKSMDPhase" - }, - "state": { - "$ref": "#/definitions/ExternalClusterMDState" - }, - "statusMessage": { - "type": "string", - "x-go-name": "StatusMessage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterMDState": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterMachineDeployment": { - "description": "ExternalClusterMachineDeployment represents an object holding external cluster machine deployment", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "cloud": { - "$ref": "#/definitions/ExternalClusterMachineDeploymentCloudSpec" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "phase": { - "$ref": "#/definitions/ExternalClusterMDPhase" - }, - "spec": { - "$ref": "#/definitions/NodeDeploymentSpec" - }, - "status": { - "$ref": "#/definitions/MachineDeploymentStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterMachineDeploymentCloudSpec": { - "type": "object", - "title": "ExternalClusterMachineDeploymentCloudSpec represents an object holding machine deployment cloud details.", - "properties": { - "aks": { - "$ref": "#/definitions/AKSMachineDeploymentCloudSpec" - }, - "eks": { - "$ref": "#/definitions/EKSMachineDeploymentCloudSpec" - }, - "gke": { - "$ref": "#/definitions/GKEMachineDeploymentCloudSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterNode": { - "description": "ExternalClusterNode represents an object holding external cluster node", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/NodeSpec" - }, - "status": { - "$ref": "#/definitions/NodeStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterSpec": { - "type": "object", - "title": "ExternalClusterSpec defines the external cluster specification.", - "properties": { - "aksclusterSpec": { - "$ref": "#/definitions/AKSClusterSpec" - }, - "containerRuntime": { - "type": "string", - "x-go-name": "ContainerRuntime" - }, - "eksclusterSpec": { - "$ref": "#/definitions/EKSClusterSpec" - }, - "gkeclusterSpec": { - "$ref": "#/definitions/GKEClusterSpec" - }, - "version": { - "$ref": "#/definitions/Semver" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterState": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalClusterStatus": { - "type": "object", - "title": "ExternalClusterStatus defines the external cluster status.", - "properties": { - "aks": { - "$ref": "#/definitions/AKSClusterStatus" - }, - "state": { - "$ref": "#/definitions/ExternalClusterState" - }, - "statusMessage": { - "type": "string", - "x-go-name": "StatusMessage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ExternalDocumentation": { - "type": "object", - "title": "ExternalDocumentation allows referencing an external resource for extended documentation.", - "properties": { - "description": { - "type": "string", - "x-go-name": "Description" - }, - "url": { - "type": "string", - "x-go-name": "URL" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "Fake": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. 
Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "token": { - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "FakeCloudSpec": { - "type": "object", - "title": "FakeCloudSpec specifies access data for a fake cloud.", - "properties": { - "token": { - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "FeatureGates": { - "description": "FeatureGates represents an object holding feature gate settings", - "type": "object", - "properties": { - "disableUserSSHKey": { - "type": "boolean", - "x-go-name": "DisableUserSSHKey" - }, - "oidcKubeCfgEndpoint": { - "type": "boolean", - "x-go-name": "OIDCKubeCfgEndpoint" - }, - "openIDAuthPlugin": { - "type": "boolean", - "x-go-name": "OpenIDAuthPlugin" - }, - "operatingSystemManager": { - "type": "boolean", - "x-go-name": "OperatingSystemManager" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "FieldValueErrorReason": { - "description": "+enum", - "type": "string", - "title": "FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation.", - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "FlatcarSpec": { - "description": "FlatcarSpec contains Flatcar Linux specific settings", - "type": "object", - "properties": { - "disableAutoUpdate": { - "description": "disable flatcar linux auto-update feature", - "type": "boolean", - "x-go-name": "DisableAutoUpdate" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCP": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. 
Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "serviceAccount": { - "description": "ServiceAccount is the Google Service Account (JSON format), encoded with base64.", - "type": "string", - "x-go-name": "ServiceAccount" - }, - "subnetwork": { - "type": "string", - "x-go-name": "Subnetwork" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "GCPCloudSpec": { - "type": "object", - "title": "GCPCloudSpec specifies access data to GCP.", - "properties": { - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "nodePortsAllowedIPRange": { - "description": "A CIDR range that will be used to allow access to the node port range in the firewall rules.\nIf neither NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.", - "type": "string", - "x-go-name": "NodePortsAllowedIPRange" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "serviceAccount": { - "description": "The Google Service Account (JSON format), encoded with base64.", - "type": "string", - "x-go-name": "ServiceAccount" - }, - "subnetwork": { - "type": "string", - "x-go-name": "Subnetwork" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "GCPDiskType": { - "type": "object", - "title": "GCPDiskType represents an object of a GCP disk type.", - "properties": { - "description": { - "type": "string", - "x-go-name": "Description" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPDiskTypeList": { - "type": "array", - "title": "GCPDiskTypeList represents an array of GCP disk types.", - "items": { - "$ref": "#/definitions/GCPDiskType" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPMachineSize": { - "type": "object", - "title": "GCPMachineSize represents an object of a GCP machine size.", - "properties": { - "description": { - "type": "string", - "x-go-name": "Description" - }, - "memory": { - "type": "integer", - "format": "int64", - "x-go-name": "Memory" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "vcpus": { - "type": "integer", - "format": "int64", - "x-go-name": "VCPUs" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPMachineSizeList": { - "type": "array", - "title": "GCPMachineSizeList represents an array of GCP machine sizes.", - "items": { - "$ref": "#/definitions/GCPMachineSize" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPNetwork": { - "type": "object", - "title": "GCPNetwork represents an object of GCP networks.", - "properties": { - "autoCreateSubnetworks": { - "type": "boolean", - "x-go-name": "AutoCreateSubnetworks" - }, - "id": { - "type": "integer", - "format": "uint64", - "x-go-name": "ID" - }, - "kind": { - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "path": { - "type": "string", - "x-go-name": "Path" - }, - "subnetworks": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Subnetworks" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPNetworkList": { - "type": "array", - "title": "GCPNetworkList represents an array of GCP networks.", - "items": { - "$ref": "#/definitions/GCPNetwork" - }, - "x-go-package":
"k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPNodeSpec": { - "description": "GCPNodeSpec gcp specific node settings", - "type": "object", - "properties": { - "customImage": { - "type": "string", - "x-go-name": "CustomImage" - }, - "diskSize": { - "type": "integer", - "format": "int64", - "x-go-name": "DiskSize" - }, - "diskType": { - "type": "string", - "x-go-name": "DiskType" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "machineType": { - "type": "string", - "x-go-name": "MachineType" - }, - "preemptible": { - "type": "boolean", - "x-go-name": "Preemptible" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "zone": { - "type": "string", - "x-go-name": "Zone" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPSubnetwork": { - "type": "object", - "title": "GCPSubnetwork represents an object of GCP subnetworks.", - "properties": { - "gatewayAddress": { - "type": "string", - "x-go-name": "GatewayAddress" - }, - "id": { - "type": "integer", - "format": "uint64", - "x-go-name": "ID" - }, - "ipCidrRange": { - "type": "string", - "x-go-name": "IPCidrRange" - }, - "ipFamily": { - "$ref": "#/definitions/IPFamily" - }, - "kind": { - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "path": { - "type": "string", - "x-go-name": "Path" - }, - "privateIpGoogleAccess": { - "type": "boolean", - "x-go-name": "PrivateIPGoogleAccess" - }, - "region": { - "type": "string", - "x-go-name": "Region" - }, - "selfLink": { - "type": "string", - "x-go-name": "SelfLink" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPSubnetworkList": { - "type": "array", - "title": "GCPSubnetworkList represents an array of GCP subnetworks.", - "items": { - "$ref": "#/definitions/GCPSubnetwork" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPZone": { - "type": "object", - "title": "GCPZone represents an object of a GCP zone.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GCPZoneList": { - "type": "array", - "title": "GCPZoneList represents an array of GCP zones.", - "items": { - "$ref": "#/definitions/GCPZone" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GKE": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26.
Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "serviceAccount": { - "type": "string", - "x-go-name": "ServiceAccount" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "GKEAutoprovisioningNodePoolDefaults": { - "description": "GKEAutoprovisioningNodePoolDefaults\ncontains defaults for a node pool created by NAP.", - "type": "object", - "properties": { - "bootDiskKmsKey": { - "description": "BootDiskKmsKey: The Customer Managed Encryption Key used to encrypt\nthe boot disk attached to each node in the node pool. This should be\nof the form\nprojects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cr\nyptoKeys/[KEY_NAME]. For more information about protecting resources\nwith Cloud KMS Keys please see:\nhttps://cloud.google.com/compute/docs/disks/customer-managed-encryption", - "type": "string", - "x-go-name": "BootDiskKmsKey" - }, - "diskSizeGb": { - "description": "DiskSizeGb: Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB. If unspecified, the default\ndisk size is 100GB.", - "type": "integer", - "format": "int64", - "x-go-name": "DiskSizeGb" - }, - "diskType": { - "description": "DiskType: Type of the disk attached to each node (e.g. 'pd-standard',\n'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is\n'pd-standard'", - "type": "string", - "x-go-name": "DiskType" - }, - "management": { - "$ref": "#/definitions/GKENodeManagement" - }, - "minCpuPlatform": { - "description": "MinCpuPlatform: Minimum CPU platform to be used for NAP created node\npools. The instance may be scheduled on the specified or newer CPU\nplatform. Applicable values are the friendly names of CPU platforms,\nsuch as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy\nBridge. 
For more information, read how to specify min CPU platform\n(https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)\nTo unset the min cpu platform field pass \"automatic\" as field value.", - "type": "string", - "x-go-name": "MinCpuPlatform" - }, - "oauthScopes": { - "description": "OauthScopes: Scopes that are used by NAP when creating node pools.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "OauthScopes" - }, - "serviceAccount": { - "description": "ServiceAccount: The Google Cloud Platform Service Account to be used\nby the node VMs.", - "type": "string", - "x-go-name": "ServiceAccount" - }, - "shieldedInstanceConfig": { - "$ref": "#/definitions/GKEShieldedInstanceConfig" - }, - "upgradeSettings": { - "$ref": "#/definitions/GKEUpgradeSettings" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKECloudSpec": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "serviceAccount": { - "type": "string", - "x-go-name": "ServiceAccount" - }, - "zone": { - "type": "string", - "x-go-name": "Zone" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKECluster": { - "type": "object", - "title": "GKECluster represents an object of a GKE cluster.", - "properties": { - "imported": { - "type": "boolean", - "x-go-name": "IsImported" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "zone": { - "type": "string", - "x-go-name": "Zone" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEClusterAutoscaling": { - "description": "GKEClusterAutoscaling contains global, per-cluster\ninformation required by Cluster Autoscaler to automatically adjust\nthe size of the cluster and create/delete node pools based on the\ncurrent needs.", - "type": "object", - "properties": { - "autoprovisioningLocations": { - "description": "AutoprovisioningLocations: The list of Google Compute Engine zones\n(https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes can be created by NAP.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "AutoprovisioningLocations" - }, - "autoprovisioningNodePoolDefaults": { - "$ref": "#/definitions/GKEAutoprovisioningNodePoolDefaults" - }, - "enableNodeAutoprovisioning": { - "description": "EnableNodeAutoprovisioning: Enables automatic node pool creation and\ndeletion.", - "type": "boolean", - "x-go-name": "EnableNodeAutoprovisioning" - }, - "resourceLimits": { - "description": "ResourceLimits: Contains global constraints regarding minimum and\nmaximum amount of resources in the cluster.", - "type": "array", - "items": { - "$ref": "#/definitions/GKEResourceLimit" - }, - "x-go-name": "ResourceLimits" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEClusterList": { - "type": "array", - "title": "GKEClusterList represents an array of GKE clusters.", - "items": { - "$ref": "#/definitions/GKECluster" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEClusterSpec": { - "type": "object", - "title": "GKEClusterSpec A Google Kubernetes Engine cluster.", - "properties": { - "autopilot": { - "description": "Autopilot: Autopilot configuration for the cluster.", - "type": "boolean", - "x-go-name": "Autopilot" - }, - "autoscaling": { - "$ref": "#/definitions/GKEClusterAutoscaling" - }, - "clusterIpv4Cidr": { - "description": "ClusterIpv4Cidr: The IP address range of the container pods in this\ncluster, in
CIDR\n(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have one automatically\nchosen or specify a `/14` block in `10.0.0.0/8`.", - "type": "string", - "x-go-name": "ClusterIpv4Cidr" - }, - "createTime": { - "description": "CreateTime: [Output only] The time the cluster was created, in\nRFC3339 (https://www.ietf.org/rfc/rfc3339.txt) text format.", - "type": "string", - "x-go-name": "CreateTime" - }, - "defaultMaxPodsConstraint": { - "description": "DefaultMaxPodsConstraint: The default constraint on the maximum\nnumber of pods that can be run simultaneously on a node in the node\npool of this cluster. Only honored if cluster created with IP Alias\nsupport.", - "type": "integer", - "format": "int64", - "x-go-name": "DefaultMaxPodsConstraint" - }, - "enableKubernetesAlpha": { - "description": "EnableKubernetesAlpha: Kubernetes alpha features are enabled on this\ncluster. This includes alpha API groups (e.g. v1alpha1) and features\nthat may not be production ready in the kubernetes version of the\nmaster and nodes. The cluster has no SLA for uptime and master/node\nupgrades are disabled. Alpha enabled clusters are automatically\ndeleted thirty days after creation.", - "type": "boolean", - "x-go-name": "EnableKubernetesAlpha" - }, - "enableTpu": { - "description": "EnableTpu: Enable the ability to use Cloud TPUs in this cluster.", - "type": "boolean", - "x-go-name": "EnableTpu" - }, - "initialClusterVersion": { - "description": "InitialClusterVersion: The initial Kubernetes version for this\ncluster. Valid versions are those found in validMasterVersions\nreturned by getServerConfig. The version can be upgraded over time;\nsuch upgrades are reflected in currentMasterVersion and\ncurrentNodeVersion. Users may specify either explicit versions\noffered by Kubernetes Engine or version aliases, which have the\nfollowing behavior: - \"latest\": picks the highest valid Kubernetes\nversion - \"1.X\": picks the highest valid patch+gke.N patch in the 1.X\nversion - \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y\nversion - \"1.X.Y-gke.N\": picks an explicit Kubernetes version -\n\"\",\"-\": picks the default Kubernetes version", - "type": "string", - "x-go-name": "InitialClusterVersion" - }, - "initialNodeCount": { - "description": "InitialNodeCount: The number of nodes to create in this cluster. You\nmust ensure that your Compute Engine resource quota\n(https://cloud.google.com/compute/quotas) is sufficient for this\nnumber of instances. You must also have available firewall and routes\nquota. For requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same\ntime. This field is deprecated, use node_pool.initial_node_count\ninstead.", - "type": "integer", - "format": "int64", - "x-go-name": "InitialNodeCount" - }, - "locations": { - "description": "Locations: The list of Google Compute Engine zones\n(https://cloud.google.com/compute/docs/zones#available) in which the\ncluster's nodes should be located. This field provides a default\nvalue if NodePool.Locations\n(https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations)\nare not specified during node pool creation. 
Warning: changing\ncluster locations will update the NodePool.Locations\n(https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations)\nof all node pools and will result in nodes being added and/or\nremoved.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Locations" - }, - "network": { - "description": "Network: The name of the Google Compute Engine network\n(https://cloud.google.com/compute/docs/networks-and-firewalls#networks)\nto which the cluster is connected. If left unspecified, the `default`\nnetwork will be used.", - "type": "string", - "x-go-name": "Network" - }, - "nodeConfig": { - "$ref": "#/definitions/GKENodeConfig" - }, - "releaseChannel": { - "description": "ReleaseChannel: channel specifies which release channel the cluster is\nsubscribed to.\n\nPossible values:\n\"UNSPECIFIED\" - No channel specified.\n\"RAPID\" - RAPID channel is offered on an early access basis for\ncustomers who want to test new releases. WARNING: Versions available\nin the RAPID Channel may be subject to unresolved issues with no\nknown workaround and are not subject to any SLAs.\n\"REGULAR\" - Clusters subscribed to REGULAR receive versions that\nare considered GA quality. REGULAR is intended for production users\nwho want to take advantage of new features.\n\"STABLE\" - Clusters subscribed to STABLE receive versions that are\nknown to be stable and reliable in production.", - "type": "string", - "x-go-name": "ReleaseChannel" - }, - "subnetwork": { - "description": "Subnetwork: The name of the Google Compute Engine subnetwork\n(https://cloud.google.com/compute/docs/subnetworks) to which the\ncluster is connected.", - "type": "string", - "x-go-name": "Subnetwork" - }, - "tpuIpv4CidrBlock": { - "description": "TpuIpv4CidrBlock: [Output only] The IP address range of the Cloud\nTPUs in this cluster, in CIDR\n(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`).", - "type": "string", - "x-go-name": "TpuIpv4CidrBlock" - }, - "verticalPodAutoscaling": { - "description": "VerticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling\nconfiguration.", - "type": "boolean", - "x-go-name": "VerticalPodAutoscaling" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEDiskType": { - "type": "object", - "title": "GKEDiskType represents a object of GKE disk type.", - "properties": { - "defaultDiskSizeGb": { - "description": "DefaultDiskSizeGb: Server-defined default disk size in GB.", - "type": "integer", - "format": "int64", - "x-go-name": "DefaultDiskSizeGb" - }, - "description": { - "description": "Description: An optional description of this resource.", - "type": "string", - "x-go-name": "Description" - }, - "kind": { - "description": "Kind: Type of the resource. 
Always compute#diskType for\ndisk types.", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name of the resource.", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEDiskTypeList": { - "type": "array", - "title": "GKEDiskTypeList represents an array of GKE disk types.", - "items": { - "$ref": "#/definitions/GKEDiskType" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEImage": { - "type": "object", - "title": "GKEImage represents an object of GKE image.", - "properties": { - "default": { - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEImageList": { - "type": "array", - "title": "GKEImageList represents an array of GKE images.", - "items": { - "$ref": "#/definitions/GKEImage" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEMachineDeploymentCloudSpec": { - "type": "object", - "title": "GKEMachineDeploymentCloudSpec represents an object holding GKE machine deployment cloud details.", - "properties": { - "autoscaling": { - "$ref": "#/definitions/GKENodePoolAutoscaling" - }, - "config": { - "$ref": "#/definitions/GKENodeConfig" - }, - "locations": { - "description": "Locations: The list of Google Compute Engine zones\n(https://cloud.google.com/compute/docs/zones#available) in which the\nNodePool's nodes should be located. If this value is unspecified\nduring node pool creation, the Cluster.Locations\n(https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.FIELDS.locations)\nvalue will be used, instead. Warning: changing node pool locations\nwill result in nodes being added and/or removed.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Locations" - }, - "management": { - "$ref": "#/definitions/GKENodeManagement" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKENodeConfig": { - "type": "object", - "title": "GKENodeConfig Parameters that describe the nodes in a cluster.", - "properties": { - "diskSizeGb": { - "description": "DiskSizeGb: Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB. If unspecified, the default\ndisk size is 100GB.", - "type": "integer", - "format": "int64", - "x-go-name": "DiskSizeGb" - }, - "diskType": { - "description": "DiskType: Type of the disk attached to each node (e.g. 'pd-standard',\n'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is\n'pd-standard'", - "type": "string", - "x-go-name": "DiskType" - }, - "imageType": { - "description": "ImageType: The image type to use for this node. Note that for a given\nimage type, the latest version of it will be used.", - "type": "string", - "x-go-name": "ImageType" - }, - "labels": { - "description": "Labels: The map of Kubernetes labels (key/value pairs) to be applied\nto each node. These will be added in addition to any default label(s)\nthat Kubernetes may apply to the node. In case of conflict in label\nkeys, the applied set may differ depending on the Kubernetes version --\nit's best to assume the behavior is undefined and conflicts should\nbe avoided. 
For more information, including usage and the valid\nvalues, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "localSsdCount": { - "description": "LocalSsdCount: The number of local SSD disks to be attached to the\nnode. The limit for this value is dependent upon the maximum number\nof disks available on a machine per zone. See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd for more\ninformation.", - "type": "integer", - "format": "int64", - "x-go-name": "LocalSsdCount" - }, - "machineType": { - "description": "MachineType: The name of a Google Compute Engine machine type\n(https://cloud.google.com/compute/docs/machine-types) If unspecified,\nthe default machine type is `e2-medium`.", - "type": "string", - "x-go-name": "MachineType" - }, - "preemptible": { - "description": "Preemptible: Whether the nodes are created as preemptible VM\ninstances. See:\nhttps://cloud.google.com/compute/docs/instances/preemptible for more\ninformation about preemptible VM instances.", - "type": "boolean", - "x-go-name": "Preemptible" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKENodeManagement": { - "description": "GKENodeManagement defines the set of node management\nservices turned on for the node pool.", - "type": "object", - "properties": { - "autoRepair": { - "description": "AutoRepair: A flag that specifies whether the node auto-repair is\nenabled for the node pool. If enabled, the nodes in this node pool\nwill be monitored and, if they fail health checks too many times, an\nautomatic repair action will be triggered.", - "type": "boolean", - "x-go-name": "AutoRepair" - }, - "autoUpgrade": { - "description": "AutoUpgrade: A flag that specifies whether node auto-upgrade is\nenabled for the node pool. If enabled, node auto-upgrade helps keep\nthe nodes in your node pool up to date with the latest release\nversion of Kubernetes.", - "type": "boolean", - "x-go-name": "AutoUpgrade" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKENodePoolAutoscaling": { - "description": "GKENodePoolAutoscaling contains information\nrequired by cluster autoscaler to adjust the size of the node pool to\nthe current cluster usage.", - "type": "object", - "properties": { - "autoprovisioned": { - "description": "Autoprovisioned: Can this node pool be deleted automatically.", - "type": "boolean", - "x-go-name": "Autoprovisioned" - }, - "enabled": { - "description": "Enabled: Is autoscaling enabled for this node pool.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "maxNodeCount": { - "description": "MaxNodeCount: Maximum number of nodes in the NodePool. Must be \u003e=\nmin_node_count. There has to be enough quota to scale up the cluster.", - "type": "integer", - "format": "int64", - "x-go-name": "MaxNodeCount" - }, - "minNodeCount": { - "description": "MinNodeCount: Minimum number of nodes in the NodePool. Must be \u003e= 1\nand \u003c= max_node_count.", - "type": "integer", - "format": "int64", - "x-go-name": "MinNodeCount" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEResourceLimit": { - "description": "GKEResourceLimit Contains information about the amount of some resource in\nthe cluster. 
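Putting GKENodeConfig, GKENodePoolAutoscaling and GKENodeManagement together, a GKEMachineDeploymentCloudSpec payload could look roughly like this (a sketch only; all values are invented placeholders). Note that minNodeCount must be at least 1 and maxNodeCount at least minNodeCount, per the autoscaling constraints above:

```json
{
  "config": {
    "machineType": "e2-medium",
    "diskType": "pd-balanced",
    "diskSizeGb": 100,
    "labels": {"env": "dev"},
    "preemptible": false
  },
  "autoscaling": {
    "enabled": true,
    "minNodeCount": 1,
    "maxNodeCount": 5
  },
  "management": {
    "autoRepair": true,
    "autoUpgrade": true
  },
  "locations": ["europe-west3-a"]
}
```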
For memory, value should be in GB.", - "type": "object", - "properties": { - "maximum": { - "description": "Maximum: Maximum amount of the resource in the cluster.", - "type": "string", - "format": "int64", - "x-go-name": "Maximum" - }, - "minimum": { - "description": "Minimum: Minimum amount of the resource in the cluster.", - "type": "string", - "format": "int64", - "x-go-name": "Minimum" - }, - "resourceType": { - "description": "ResourceType: Resource name \"cpu\", \"memory\" or gpu-specific string.", - "type": "string", - "x-go-name": "ResourceType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEShieldedInstanceConfig": { - "type": "object", - "title": "GKEShieldedInstanceConfig a set of Shielded Instance options.", - "properties": { - "enableIntegrityMonitoring": { - "description": "EnableIntegrityMonitoring: Defines whether the instance has integrity\nmonitoring enabled. Enables monitoring and attestation of the boot\nintegrity of the instance. The attestation is performed against the\nintegrity policy baseline. This baseline is initially derived from\nthe implicitly trusted boot image when the instance is created.", - "type": "boolean", - "x-go-name": "EnableIntegrityMonitoring" - }, - "enableSecureBoot": { - "description": "EnableSecureBoot: Defines whether the instance has Secure Boot\nenabled. Secure Boot helps ensure that the system only runs authentic\nsoftware by verifying the digital signature of all boot components,\nand halting the boot process if signature verification fails.", - "type": "boolean", - "x-go-name": "EnableSecureBoot" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEUpgradeSettings": { - "description": "GKEUpgradeSettings These upgrade settings control the level of\nparallelism and the level of disruption caused by an upgrade.\nmaxUnavailable controls the number of nodes that can be\nsimultaneously unavailable. maxSurge controls the number of\nadditional nodes that can be added to the node pool temporarily for\nthe time of the upgrade to increase the number of available nodes.\n(maxUnavailable + maxSurge) determines the level of parallelism (how\nmany nodes are being upgraded at the same time). Note: upgrades\ninevitably introduce some disruption since workloads need to be moved\nfrom old nodes to new, upgraded ones. Even if maxUnavailable=0, this\nholds true. (Disruption stays within the limits of\nPodDisruptionBudget, if it is configured.) Consider a hypothetical\nnode pool with 5 nodes having maxSurge=2, maxUnavailable=1. This\nmeans the upgrade process upgrades 3 nodes simultaneously. It creates\n2 additional (upgraded) nodes, then it brings down 3 old (not yet\nupgraded) nodes at the same time. This ensures that there are always\nat least 4 nodes available.", - "type": "object", - "properties": { - "maxSurge": { - "description": "MaxSurge: The maximum number of nodes that can be created beyond the\ncurrent size of the node pool during the upgrade process.", - "type": "integer", - "format": "int64", - "x-go-name": "MaxSurge" - }, - "maxUnavailable": { - "description": "MaxUnavailable: The maximum number of nodes that can be\nsimultaneously unavailable during the upgrade process. 
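The worked example in the GKEUpgradeSettings description (a 5-node pool with maxSurge=2 and maxUnavailable=1, so 3 nodes upgrade in parallel while at least 4 stay available) corresponds to a payload as small as:

```json
{
  "maxSurge": 2,
  "maxUnavailable": 1
}
```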
A node is\nconsidered available if its status is Ready.", - "type": "integer", - "format": "int64", - "x-go-name": "MaxUnavailable" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEZone": { - "type": "object", - "title": "GKEZone represents an object of a GKE zone.", - "properties": { - "default": { - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GKEZoneList": { - "type": "array", - "title": "GKEZoneList represents an array of GKE zones.", - "items": { - "$ref": "#/definitions/GKEZone" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GVK": { - "type": "object", - "title": "GVK group version kind of a resource.", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "kind": { - "type": "string", - "x-go-name": "Kind" - }, - "version": { - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GatekeeperConfig": { - "description": "GatekeeperConfig represents a gatekeeper config", - "type": "object", - "properties": { - "spec": { - "$ref": "#/definitions/GatekeeperConfigSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GatekeeperConfigSpec": { - "type": "object", - "properties": { - "match": { - "description": "Configuration for namespace exclusion", - "type": "array", - "items": { - "$ref": "#/definitions/MatchEntry" - }, - "x-go-name": "Match" - }, - "readiness": { - "$ref": "#/definitions/ReadinessSpec" - }, - "sync": { - "$ref": "#/definitions/Sync" - }, - "validation": { - "$ref": "#/definitions/Validation" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GitAuthMethod": { - "description": "+kubebuilder:validation:Enum=password;token;ssh-key", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "GitCredentials": { - "type": "object", - "properties": { - "method": { - "$ref": "#/definitions/GitAuthMethod" - }, - "password": { - "$ref": "#/definitions/SecretKeySelector" - }, - "sshKey": { - "$ref": "#/definitions/SecretKeySelector" - }, - "token": { - "$ref": "#/definitions/SecretKeySelector" - }, - "username": { - "$ref": "#/definitions/SecretKeySelector" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "GitReference": { - "type": "object", - "properties": { - "branch": { - "description": "Branch to check out. Only the last commit of the branch will be checked out in order to reduce the amount of data to download.\n+optional", - "type": "string", - "x-go-name": "Branch" - }, - "commit": { - "description": "Commit SHA in a branch to check out.\n\nIt must be used in conjunction with the branch field.", - "type": "string", - "pattern": "=`^[a-f0-9]{40}$`", - "x-go-name": "Commit" - }, - "tag": { - "description": "Tag to check out.\nIt cannot be used in conjunction with commit or branch.\n+kubebuilder:validation:Type=string\n+optional", - "type": "string", - "x-go-name": "Tag" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "GitSource": { - "type": "object", - "properties": { - "credentials": { - "$ref": "#/definitions/GitCredentials" - }, - "path": { - "description": "Path of the \"source\" in the repository. Default is the repository root.", - "type": "string", - "x-go-name": "Path" - }, - "ref": { - "$ref": "#/definitions/GitReference" - }, - "remote": { - "description": "URL to the repository. 
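For the Git definitions above, a hypothetical GitSource payload pinning a commit might look like this (the remote and path values are invented; note the commit is given together with its branch, as GitReference requires, and matches the 40-character lowercase-hex pattern):

```json
{
  "remote": "https://example.com/myrepo",
  "path": "charts/app",
  "ref": {
    "branch": "main",
    "commit": "0123456789abcdef0123456789abcdef01234567"
  }
}
```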
Can be HTTP(s) (e.g. https://example.com/myrepo) or\nSSH (e.g. git://example.com[:port]/path/to/repo.git/).\n+kubebuilder:validation:MinLength=1", - "type": "string", - "x-go-name": "Remote" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "GlobalCustomLinks": { - "description": "GlobalCustomLinks defines custom links for global settings", - "type": "array", - "items": { - "$ref": "#/definitions/CustomLink" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "GlobalObjectKeySelector": { - "description": "GlobalObjectKeySelector is needed as we can not use v1.SecretKeySelector\nbecause it is not cross namespace.", - "type": "object", - "properties": { - "apiVersion": { - "description": "API version of the referent.\n+optional", - "type": "string", - "x-go-name": "APIVersion" - }, - "fieldPath": { - "description": "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future.\n+optional", - "type": "string", - "x-go-name": "FieldPath" - }, - "key": { - "type": "string", - "x-go-name": "Key" - }, - "kind": { - "description": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n+optional", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional", - "type": "string", - "x-go-name": "Namespace" - }, - "resourceVersion": { - "description": "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional", - "type": "string", - "x-go-name": "ResourceVersion" - }, - "uid": { - "$ref": "#/definitions/UID" - } - }, - "x-go-package": "k8c.io/machine-controller/sdk/providerconfig" - }, - "GlobalSecretKeySelector": { - "$ref": "#/definitions/GlobalObjectKeySelector" - }, - "GlobalSettings": { - "description": "GlobalSettings defines global settings", - "type": "object", - "properties": { - "allowedOperatingSystems": { - "description": "AllowedOperatingSystems shows the available operating systems to use in the machine deployment.", - "type": "object", - "additionalProperties": { - "type": "boolean" - }, - "x-go-name": "AllowedOperatingSystems" - }, - "annotations": { - "$ref": "#/definitions/AnnotationSettings" - }, - "announcements": { - "description": "The announcement feature allows administrators to broadcast important messages to all users.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/Announcement" - }, - "x-go-name": "Announcements" - }, - "cleanupOptions": { - 
"$ref": "#/definitions/CleanupOptions" - }, - "clusterBackupOptions": { - "$ref": "#/definitions/ClusterBackupOptions" - }, - "customLinks": { - "$ref": "#/definitions/CustomLinks" - }, - "defaultNodeCount": { - "description": "DefaultNodeCount is the default number of replicas for the initial MachineDeployment.", - "type": "integer", - "format": "int8", - "x-go-name": "DefaultNodeCount" - }, - "defaultQuota": { - "$ref": "#/definitions/ProjectResourceQuota" - }, - "disableAdminKubeconfig": { - "description": "DisableAdminKubeconfig disables the admin kubeconfig functionality on the dashboard.", - "type": "boolean", - "x-go-name": "DisableAdminKubeconfig" - }, - "disableChangelogPopup": { - "description": "DisableChangelogPopup disables the changelog popup in KKP dashboard.", - "type": "boolean", - "x-go-name": "DisableChangelogPopup" - }, - "displayAPIDocs": { - "description": "DisplayDemoInfo controls whether a a link to the KKP API documentation is shown in the footer.", - "type": "boolean", - "x-go-name": "DisplayAPIDocs" - }, - "displayDemoInfo": { - "description": "DisplayDemoInfo controls whether a \"Demo System\" hint is shown in the footer.", - "type": "boolean", - "x-go-name": "DisplayDemoInfo" - }, - "displayTermsOfService": { - "description": "DisplayDemoInfo controls whether a a link to TOS is shown in the footer.", - "type": "boolean", - "x-go-name": "DisplayTermsOfService" - }, - "enableClusterBackups": { - "description": "EnableClusterBackups enables the Cluster Backup feature in the dashboard.", - "type": "boolean", - "x-go-name": "EnableClusterBackups" - }, - "enableDashboard": { - "description": "EnableDashboard enables the link to the Kubernetes dashboard for a user cluster.", - "type": "boolean", - "x-go-name": "EnableDashboard" - }, - "enableEtcdBackup": { - "description": "EnableEtcdBackup enables the etcd Backup feature in the dashboard.", - "type": "boolean", - "x-go-name": "EnableEtcdBackup" - }, - "enableExternalClusterImport": { - "type": "boolean", - "x-go-name": "EnableExternalClusterImport" - }, - "enableOIDCKubeconfig": { - "type": "boolean", - "x-go-name": "EnableOIDCKubeconfig" - }, - "enableShareCluster": { - "description": "EnableShareCluster enables the Share Cluster feature for the user clusters.", - "type": "boolean", - "x-go-name": "EnableShareCluster" - }, - "enableWebTerminal": { - "description": "EnableWebTerminal enables the Web Terminal feature for the user clusters.", - "type": "boolean", - "x-go-name": "EnableWebTerminal" - }, - "machineDeploymentOptions": { - "$ref": "#/definitions/MachineDeploymentOptions" - }, - "machineDeploymentVMResourceQuota": { - "$ref": "#/definitions/MachineFlavorFilter" - }, - "mlaAlertmanagerPrefix": { - "type": "string", - "x-go-name": "MlaAlertmanagerPrefix" - }, - "mlaGrafanaPrefix": { - "type": "string", - "x-go-name": "MlaGrafanaPrefix" - }, - "mlaOptions": { - "$ref": "#/definitions/MlaOptions" - }, - "notifications": { - "$ref": "#/definitions/NotificationsOptions" - }, - "opaOptions": { - "$ref": "#/definitions/OpaOptions" - }, - "providerConfiguration": { - "$ref": "#/definitions/ProviderConfiguration" - }, - "restrictProjectCreation": { - "type": "boolean", - "x-go-name": "RestrictProjectCreation" - }, - "restrictProjectDeletion": { - "type": "boolean", - "x-go-name": "RestrictProjectDeletion" - }, - "restrictProjectModification": { - "type": "boolean", - "x-go-name": "RestrictProjectModification" - }, - "staticLabels": { - "description": "StaticLabels are a list of labels that can be used for the 
clusters.", - "type": "array", - "items": { - "$ref": "#/definitions/StaticLabel" - }, - "x-go-name": "StaticLabels" - }, - "userProjectsLimit": { - "description": "UserProjectsLimit is the maximum number of projects a user can create.", - "type": "integer", - "format": "int64", - "x-go-name": "UserProjectsLimit" - }, - "webTerminalOptions": { - "$ref": "#/definitions/WebTerminalOptions" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "GroupProjectBinding": { - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - }, - "role": { - "type": "string", - "x-go-name": "Role" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "HealthStatus": { - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "HelmCredentials": { - "type": "object", - "properties": { - "password": { - "$ref": "#/definitions/SecretKeySelector" - }, - "registryConfigFile": { - "$ref": "#/definitions/SecretKeySelector" - }, - "username": { - "$ref": "#/definitions/SecretKeySelector" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "HelmDeployOptions": { - "type": "object", - "title": "HelmDeployOptions holds the deployment settings when templating method is Helm.", - "properties": { - "atomic": { - "description": "Atomic corresponds to the --atomic flag on Helm cli.\nif set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of failed upgrade.", - "type": "boolean", - "x-go-name": "Atomic" - }, - "enableDNS": { - "description": "EnableDNS corresponds to the --enable-dns flag on Helm cli.\nenable DNS lookups when rendering templates.\nif you enable this flag, you have to verify that helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers.(c.f. CVE-2023-25165)", - "type": "boolean", - "x-go-name": "EnableDNS" - }, - "timeout": { - "$ref": "#/definitions/Duration" - }, - "wait": { - "description": "Wait corresponds to the --wait flag on Helm cli.\nif set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as timeout", - "type": "boolean", - "x-go-name": "Wait" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "HelmSource": { - "type": "object", - "properties": { - "chartName": { - "description": "Name of the Chart.\n+kubebuilder:validation:MinLength=1", - "type": "string", - "x-go-name": "ChartName" - }, - "chartVersion": { - "description": "Version of the Chart.\n+kubebuilder:validation:MinLength=1", - "type": "string", - "x-go-name": "ChartVersion" - }, - "credentials": { - "$ref": "#/definitions/HelmCredentials" - }, - "insecure": { - "description": "Insecure disables certificate validation when using an HTTPS registry. This setting has no\neffect when using a plaintext connection.", - "type": "boolean", - "x-go-name": "Insecure" - }, - "plainHTTP": { - "description": "PlainHTTP will enable HTTP-only (i.e. unencrypted) traffic for oci:// URLs. 
By default HTTPS\nis used when communicating with an oci:// URL.", - "type": "boolean", - "x-go-name": "PlainHTTP" - }, - "url": { - "description": "URL of the Helm repository. The following schemes are supported:\n\nhttp://example.com/myrepo (HTTP)\nhttps://example.com/myrepo (HTTPS)\noci://example.com:5000/myrepo (OCI, HTTPS by default, use plainHTTP to enable unencrypted HTTP)", - "type": "string", - "x-go-name": "URL" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "Hetzner": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "network": { - "description": "Network is the pre-existing Hetzner network in which the machines are running.\nWhile machines can be in multiple networks, a single one must be chosen for the\nHCloud CCM to work.\nIf this is empty, the network configured on the datacenter will be used.", - "type": "string", - "x-go-name": "Network" - }, - "token": { - "description": "Token is used to authenticate with the Hetzner API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "HetznerCloudSpec": { - "type": "object", - "title": "HetznerCloudSpec specifies access data to the Hetzner cloud.", - "properties": { - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "network": { - "description": "Network is the pre-existing Hetzner network in which the machines are running.\nWhile machines can be in multiple networks, a single one must be chosen for the\nHCloud CCM to work.\nIf this is empty, the network configured on the datacenter will be used.", - "type": "string", - "x-go-name": "Network" - }, - "token": { - "description": "Token is used to authenticate with the Hetzner cloud API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "HetznerNodeSpec": { - "description": "HetznerNodeSpec Hetzner node settings", - "type": "object", - "required": [ - "type" - ], - "properties": { - "network": { - "description": "network name", - "type": "string", - "x-go-name": "Network" - }, - "type": { - "description": "server type", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "HetznerSize": { - "type": "object", - "title": "HetznerSize is the object representing Hetzner sizes.", - "properties": { - "cores": { - "type": "integer", - "format": "int64", - "x-go-name": "Cores" - }, - "description": { - "type": "string", - "x-go-name": "Description" - }, - "disk": { - "type": "integer", - "format": "int64", - "x-go-name": "Disk" - }, - "id": { - "type": "integer", - "format": "int64", - "x-go-name": "ID" - }, - "memory": { - "type": "number", 
- "format": "float", - "x-go-name": "Memory" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "HetznerSizeList": { - "type": "object", - "title": "HetznerSizeList represents an array of Hetzner sizes.", - "properties": { - "dedicated": { - "type": "array", - "items": { - "$ref": "#/definitions/HetznerSize" - }, - "x-go-name": "Dedicated" - }, - "standard": { - "type": "array", - "items": { - "$ref": "#/definitions/HetznerSize" - }, - "x-go-name": "Standard" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "IPAMPool": { - "type": "object", - "properties": { - "datacenters": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/IPAMPoolDatacenterSettings" - }, - "x-go-name": "Datacenters" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "IPAMPoolAllocationType": { - "description": "+kubebuilder:validation:Enum=prefix;range\nIPAMPoolAllocationType defines the type of allocation to be used.\nPossible values are `prefix` and `range`.", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "IPAMPoolDatacenterSettings": { - "type": "object", - "properties": { - "allocationPrefix": { - "type": "integer", - "format": "int64", - "x-go-name": "AllocationPrefix" - }, - "allocationRange": { - "type": "integer", - "format": "int64", - "x-go-name": "AllocationRange" - }, - "poolCidr": { - "$ref": "#/definitions/SubnetCIDR" - }, - "type": { - "$ref": "#/definitions/IPAMPoolAllocationType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "IPAllocationMode": { - "type": "string", - "x-go-package": "k8c.io/machine-controller/sdk/cloudprovider/vmwareclouddirector" - }, - "IPBlock": { - "description": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed\nto the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs\nthat should not be included within this rule.", - "type": "object", - "properties": { - "cidr": { - "description": "cidr is a string representing the IPBlock\nValid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", - "type": "string", - "x-go-name": "CIDR" - }, - "except": { - "description": "except is a slice of CIDRs that should not be included within an IPBlock\nValid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"\nExcept values will be rejected if they are outside the cidr range\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Except" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "IPFamily": { - "description": "+kubebuilder:validation:Enum=\"\";IPv4;IPv4+IPv6", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "IPVSConfiguration": { - "type": "object", - "title": "IPVSConfiguration contains ipvs-related configuration details for kube-proxy.", - "properties": { - "strictArp": { - "description": "StrictArp configures arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface.\nDefaults to true.", - "type": "boolean", - "x-go-name": "StrictArp" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ImageList": { - "type": "object", - "title": "ImageList defines a map of operating system and the image to use.", - "additionalProperties": { - "type": "string" - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ImageListWithVersions": { - "type": "object", - "title": "ImageListWithVersions defines a map of operating systems with their versions to use.", - "additionalProperties": { - "$ref": "#/definitions/OSVersions" - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "InferFromVolumeFailurePolicy": { - "type": "string", - "x-go-package": "kubevirt.io/api/core/v1" - }, - "InstancetypeMatcher": { - "type": "object", - "title": "InstancetypeMatcher references an instancetype that is used to fill fields in the VMI template.", - "properties": { - "inferFromVolume": { - "description": "InferFromVolume lists the name of a volume that should be used to infer or discover the instancetype\nto be used through known annotations on the underlying resource. Once applied to the InstancetypeMatcher\nthis field is removed.\n\n+optional", - "type": "string", - "x-go-name": "InferFromVolume" - }, - "inferFromVolumeFailurePolicy": { - "$ref": "#/definitions/InferFromVolumeFailurePolicy" - }, - "kind": { - "description": "Kind specifies which instancetype resource is referenced.\nAllowed values are: \"VirtualMachineInstancetype\" and \"VirtualMachineClusterInstancetype\".\nIf not specified, \"VirtualMachineClusterInstancetype\" is used by default.\n\n+optional", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name is the name of the VirtualMachineInstancetype or VirtualMachineClusterInstancetype\n\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "revisionName": { - "description": "RevisionName specifies a ControllerRevision containing a specific copy of the\nVirtualMachineInstancetype or VirtualMachineClusterInstancetype to be used. 
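A minimal IPBlock instance, reusing the example CIDR from the description above; the `except` entry must fall inside `cidr`:

```json
{
  "cidr": "192.168.1.0/24",
  "except": ["192.168.1.128/25"]
}
```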
This is initially\ncaptured the first time the instancetype is applied to the VirtualMachineInstance.\n\n+optional", - "type": "string", - "x-go-name": "RevisionName" - } - }, - "x-go-package": "kubevirt.io/api/core/v1" - }, - "IntOrString": { - "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", - "type": "object", - "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString", - "properties": { - "IntVal": { - "type": "integer", - "format": "int32" - }, - "StrVal": { - "type": "string" - }, - "Type": { - "$ref": "#/definitions/Type" - } - }, - "x-go-package": "k8s.io/apimachinery/pkg/util/intstr" - }, - "JSON": { - "description": "These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.", - "type": "object", - "title": "JSON represents any valid JSON value.", - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaDefinitions": { - "type": "object", - "title": "JSONSchemaDefinitions contains the models explicitly defined in this spec.", - "additionalProperties": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaDependencies": { - "type": "object", - "title": "JSONSchemaDependencies represent a dependencies property.", - "additionalProperties": { - "$ref": "#/definitions/JSONSchemaPropsOrStringArray" - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaProps": { - "type": "object", - "title": "JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).", - "properties": { - "$ref": { - "type": "string", - "x-go-name": "Ref" - }, - "$schema": { - "$ref": "#/definitions/JSONSchemaURL" - }, - "additionalItems": { - "$ref": "#/definitions/JSONSchemaPropsOrBool" - }, - "additionalProperties": { - "$ref": "#/definitions/JSONSchemaPropsOrBool" - }, - "allOf": { - "description": "+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-name": "AllOf" - }, - "anyOf": { - "description": "+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-name": "AnyOf" - }, - "default": { - "$ref": "#/definitions/JSON" - }, - "definitions": { - "$ref": "#/definitions/JSONSchemaDefinitions" - }, - "dependencies": { - "$ref": "#/definitions/JSONSchemaDependencies" - }, - "description": { - "type": "string", - "x-go-name": "Description" - }, - "enum": { - "description": "+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/JSON" - }, - "x-go-name": "Enum" - }, - "example": { - "$ref": "#/definitions/JSON" - }, - "exclusiveMaximum": { - "type": "boolean", - "x-go-name": "ExclusiveMaximum" - }, - "exclusiveMinimum": { - "type": "boolean", - "x-go-name": "ExclusiveMinimum" - }, - "externalDocs": { - "$ref": "#/definitions/ExternalDocumentation" - }, - "format": { - "description": "format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:\n\nbsonobjectid: a bson object ID, i.e. 
a 24 characters hex string\nuri: an URI as parsed by Golang net/url.ParseRequestURI\nemail: an email address as parsed by Golang net/mail.ParseAddress\nhostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].\nipv4: an IPv4 IP as parsed by Golang net.ParseIP\nipv6: an IPv6 IP as parsed by Golang net.ParseIP\ncidr: a CIDR as parsed by Golang net.ParseCIDR\nmac: a MAC address as parsed by Golang net.ParseMAC\nuuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$\nuuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$\nuuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$\nuuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$\nisbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\"\nisbn10: an ISBN10 number string like \"0321751043\"\nisbn13: an ISBN13 number string like \"978-0321751041\"\ncreditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\\\d{3})\\\\d{11})$ with any non digit characters mixed in\nssn: a U.S. social security number following the regex ^\\\\d{3}[- ]?\\\\d{2}[- ]?\\\\d{4}$\nhexcolor: an hexadecimal color code like \"#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$\nrgbcolor: an RGB color code like rgb like \"rgb(255,255,2559\"\nbyte: base64 encoded binary data\npassword: any kind of string\ndate: a date string like \"2006-01-02\" as defined by full-date in RFC3339\nduration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format\ndatetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339.", - "type": "string", - "x-go-name": "Format" - }, - "id": { - "type": "string", - "x-go-name": "ID" - }, - "items": { - "$ref": "#/definitions/JSONSchemaPropsOrArray" - }, - "maxItems": { - "type": "integer", - "format": "int64", - "x-go-name": "MaxItems" - }, - "maxLength": { - "type": "integer", - "format": "int64", - "x-go-name": "MaxLength" - }, - "maxProperties": { - "type": "integer", - "format": "int64", - "x-go-name": "MaxProperties" - }, - "maximum": { - "type": "number", - "format": "double", - "x-go-name": "Maximum" - }, - "minItems": { - "type": "integer", - "format": "int64", - "x-go-name": "MinItems" - }, - "minLength": { - "type": "integer", - "format": "int64", - "x-go-name": "MinLength" - }, - "minProperties": { - "type": "integer", - "format": "int64", - "x-go-name": "MinProperties" - }, - "minimum": { - "type": "number", - "format": "double", - "x-go-name": "Minimum" - }, - "multipleOf": { - "type": "number", - "format": "double", - "x-go-name": "MultipleOf" - }, - "not": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "nullable": { - "type": "boolean", - "x-go-name": "Nullable" - }, - "oneOf": { - "description": "+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-name": "OneOf" - }, - "pattern": { - "type": "string", - "x-go-name": "Pattern" - }, - "patternProperties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-name": 
"PatternProperties" - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/JSONSchemaProps" - }, - "x-go-name": "Properties" - }, - "required": { - "description": "+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Required" - }, - "title": { - "type": "string", - "x-go-name": "Title" - }, - "type": { - "type": "string", - "x-go-name": "Type" - }, - "uniqueItems": { - "type": "boolean", - "x-go-name": "UniqueItems" - }, - "x-kubernetes-embedded-resource": { - "description": "x-kubernetes-embedded-resource defines that the value is an\nembedded Kubernetes runtime.Object, with TypeMeta and\nObjectMeta. The type must be object. It is allowed to further\nrestrict the embedded object. kind, apiVersion and metadata\nare validated automatically. x-kubernetes-preserve-unknown-fields\nis allowed to be true, but does not have to be if the object\nis fully specified (up to kind, apiVersion, metadata).", - "type": "boolean", - "x-go-name": "XEmbeddedResource" - }, - "x-kubernetes-int-or-string": { - "description": "x-kubernetes-int-or-string specifies that this value is\neither an integer or a string. If this is true, an empty\ntype is allowed and type as child of anyOf is permitted\nif following one of the following patterns:\n\n1) anyOf:\ntype: integer\ntype: string\n2) allOf:\nanyOf:\ntype: integer\ntype: string\n... zero or more", - "type": "boolean", - "x-go-name": "XIntOrString" - }, - "x-kubernetes-list-map-keys": { - "description": "x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used\nas the index of the map.\n\nThis tag MUST only be used on lists that have the \"x-kubernetes-list-type\"\nextension set to \"map\". Also, the values specified for this attribute must\nbe a scalar typed field of the child structure (no nesting is supported).\n\nThe properties specified must either be required or have a default value,\nto ensure those properties are present for all list items.\n\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "XListMapKeys" - }, - "x-kubernetes-list-type": { - "description": "x-kubernetes-list-type annotates an array to further describe its topology.\nThis extension must only be used on lists and may have 3 possible values:\n\n1) `atomic`: the list is treated as a single entity, like a scalar.\nAtomic lists will be entirely replaced when updated. This extension\nmay be used on any type of list (struct, scalar, ...).\n2) `set`:\nSets are lists that must not have multiple items with the same value. Each\nvalue must be a scalar, an object with x-kubernetes-map-type `atomic` or an\narray with x-kubernetes-list-type `atomic`.\n3) `map`:\nThese lists are like maps in that their elements have a non-index key\nused to identify them. Order is preserved upon merge. The map tag\nmust only be used on a list with elements of type object.\nDefaults to atomic for arrays.\n+optional", - "type": "string", - "x-go-name": "XListType" - }, - "x-kubernetes-map-type": { - "description": "x-kubernetes-map-type annotates an object to further describe its topology.\nThis extension must only be used when type is object and may have 2 possible values:\n\n1) `granular`:\nThese maps are actual maps (key-value pairs) and each fields are independent\nfrom each other (they can each be manipulated by separate actors). 
This is\nthe default behaviour for all maps.\n2) `atomic`: the list is treated as a single entity, like a scalar.\nAtomic maps will be entirely replaced when updated.\n+optional", - "type": "string", - "x-go-name": "XMapType" - }, - "x-kubernetes-preserve-unknown-fields": { - "description": "x-kubernetes-preserve-unknown-fields stops the API server\ndecoding step from pruning fields which are not specified\nin the validation schema. This affects fields recursively,\nbut switches back to normal pruning behaviour if nested\nproperties or additionalProperties are specified in the schema.\nThis can either be true or undefined. False is forbidden.", - "type": "boolean", - "x-go-name": "XPreserveUnknownFields" - }, - "x-kubernetes-validations": { - "$ref": "#/definitions/ValidationRules" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaPropsOrArray": { - "description": "JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps\nor an array of JSONSchemaProps. Mainly here for serialization purposes.", - "type": "object", - "properties": { - "JSONSchemas": { - "description": "+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/JSONSchemaProps" - } - }, - "Schema": { - "$ref": "#/definitions/JSONSchemaProps" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaPropsOrBool": { - "description": "Defaults to true for the boolean property.", - "type": "object", - "title": "JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value.", - "properties": { - "Allows": { - "type": "boolean" - }, - "Schema": { - "$ref": "#/definitions/JSONSchemaProps" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaPropsOrStringArray": { - "type": "object", - "title": "JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.", - "properties": { - "Property": { - "description": "+listType=atomic", - "type": "array", - "items": { - "type": "string" - } - }, - "Schema": { - "$ref": "#/definitions/JSONSchemaProps" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JSONSchemaURL": { - "type": "string", - "title": "JSONSchemaURL represents a schema url.", - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "JoiningScript": { - "description": "JoiningScript represent an encoded joining script for machines", - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Kind": { - "type": "object", - "title": "Kind specifies the resource Kind and APIGroup.", - "properties": { - "apiGroups": { - "description": "APIGroups specifies the APIGroups of the resources", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "APIGroups" - }, - "kinds": { - "description": "Kinds specifies the kinds of the resources", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Kinds" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeLB": { - "description": "Only available in Enterprise Edition.", - "type": "object", - "title": "KubeLB contains settings for the kubeLB component as part of the cluster control plane. This component is responsible for managing load balancers.", - "properties": { - "enableGatewayAPI": { - "description": "EnableGatewayAPI is used to enable Gateway API for KubeLB. 
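As a sketch of how the x-kubernetes-* extensions above combine, here is a fragment of a JSONSchemaProps describing a list-map keyed by `name` (the field names come from the definition; the schema content itself is invented). Note that x-kubernetes-list-map-keys may only be set when x-kubernetes-list-type is `map`, and the key property must be required or defaulted:

```json
{
  "type": "array",
  "x-kubernetes-list-type": "map",
  "x-kubernetes-list-map-keys": ["name"],
  "items": {
    "type": "object",
    "required": ["name"],
    "properties": {
      "name": {"type": "string"},
      "value": {"type": "string"}
    }
  }
}
```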
Once enabled, KKP installs the Gateway API CRDs for the user cluster.", - "type": "boolean", - "x-go-name": "EnableGatewayAPI" - }, - "enabled": { - "description": "Controls whether kubeLB is deployed or not.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "extraArgs": { - "description": "ExtraArgs are additional arbitrary flags to pass to the kubeLB CCM for the user cluster.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "ExtraArgs" - }, - "useLoadBalancerClass": { - "description": "UseLoadBalancerClass is used to configure the use of load balancer class `kubelb` for kubeLB. If false, kubeLB will manage all load balancers in the\nuser cluster irrespective of the load balancer class.", - "type": "boolean", - "x-go-name": "UseLoadBalancerClass" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeLBDatacenterSettings": { - "type": "object", - "properties": { - "disableIngressClass": { - "description": "DisableIngressClass is used to disable the ingress class `kubelb` filter for kubeLB.", - "type": "boolean", - "x-go-name": "DisableIngressClass" - }, - "enableGatewayAPI": { - "description": "EnableGatewayAPI is used to configure the use of gateway API for kubeLB.\nWhen this option is enabled for the user cluster, KKP installs the Gateway API CRDs for the user cluster.", - "type": "boolean", - "x-go-name": "EnableGatewayAPI" - }, - "enableSecretSynchronizer": { - "description": "EnableSecretSynchronizer is used to configure the use of secret synchronizer for kubeLB.", - "type": "boolean", - "x-go-name": "EnableSecretSynchronizer" - }, - "enabled": { - "description": "Enabled is used to enable/disable kubeLB for the datacenter. This is used to control whether installing kubeLB is allowed or not for the datacenter.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "enforced": { - "description": "Enforced is used to enforce kubeLB installation for all the user clusters belonging to this datacenter. Setting enforced to false will not uninstall kubeLB from the user clusters and it needs to be disabled manually.", - "type": "boolean", - "x-go-name": "Enforced" - }, - "extraArgs": { - "description": "ExtraArgs are additional arbitrary flags to pass to the kubeLB CCM for the user cluster. These args are propagated to all the user clusters unless overridden at a cluster level.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "ExtraArgs" - }, - "kubeconfig": { - "$ref": "#/definitions/ObjectReference" - }, - "nodeAddressType": { - "description": "NodeAddressType is used to configure the address type from node, used for load balancing.\nOptional: Defaults to ExternalIP.\n+kubebuilder:validation:Enum=InternalIP;ExternalIP\n+kubebuilder:default=ExternalIP", - "type": "string", - "x-go-name": "NodeAddressType" - }, - "useLoadBalancerClass": { - "description": "UseLoadBalancerClass is used to configure the use of load balancer class `kubelb` for kubeLB. 
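Pulling the KubeLB cluster-level fields above together, an illustrative payload (the extraArgs key and value here are purely placeholders, not documented kubeLB CCM flags):

```json
{
  "enabled": true,
  "enableGatewayAPI": false,
  "useLoadBalancerClass": true,
  "extraArgs": {"example-flag": "example-value"}
}
```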
If false, kubeLB will manage all load balancers in the\nuser cluster irrespective of the load balancer class.", - "type": "boolean", - "x-go-name": "UseLoadBalancerClass" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeLBSeedSettings": { - "type": "object", - "properties": { - "enableForAllDatacenters": { - "description": "EnableForAllDatacenters is used to enable kubeLB for all the datacenters belonging to this seed.\nThis is only used to control whether installing kubeLB is allowed or not for the datacenter.", - "type": "boolean", - "x-go-name": "EnableForAllDatacenters" - }, - "kubeconfig": { - "$ref": "#/definitions/ObjectReference" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeLBSeedSettingsAPI": { - "type": "object", - "properties": { - "enableForAllDatacenters": { - "type": "boolean", - "x-go-name": "EnableForAllDatacenters" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeLBSettings": { - "type": "object", - "properties": { - "kubeconfig": { - "$ref": "#/definitions/ObjectReference" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeOneAWSCloudSpec": { - "type": "object", - "title": "KubeOneAWSCloudSpec specifies access data to Amazon Web Services.", - "properties": { - "accessKeyID": { - "type": "string", - "x-go-name": "AccessKeyID" - }, - "secretAccessKey": { - "type": "string", - "x-go-name": "SecretAccessKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneAzureCloudSpec": { - "type": "object", - "title": "KubeOneAzureCloudSpec specifies access credentials to the Azure cloud.", - "properties": { - "clientID": { - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "type": "string", - "x-go-name": "ClientSecret" - }, - "subscriptionID": { - "type": "string", - "x-go-name": "SubscriptionID" - }, - "tenantID": { - "type": "string", - "x-go-name": "TenantID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneCloudSpec": { - "type": "object", - "properties": { - "aws": { - "$ref": "#/definitions/KubeOneAWSCloudSpec" - }, - "azure": { - "$ref": "#/definitions/KubeOneAzureCloudSpec" - }, - "digitalocean": { - "$ref": "#/definitions/KubeOneDigitalOceanCloudSpec" - }, - "equinix": { - "$ref": "#/definitions/KubeOneEquinixCloudSpec" - }, - "gcp": { - "$ref": "#/definitions/KubeOneGCPCloudSpec" - }, - "hetzner": { - "$ref": "#/definitions/KubeOneHetznerCloudSpec" - }, - "nutanix": { - "$ref": "#/definitions/KubeOneNutanixCloudSpec" - }, - "openstack": { - "$ref": "#/definitions/KubeOneOpenStackCloudSpec" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/KubeOneVMwareCloudDirectorCloudSpec" - }, - "vsphere": { - "$ref": "#/definitions/KubeOneVSphereCloudSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneDigitalOceanCloudSpec": { - "type": "object", - "title": "KubeOneDigitalOceanCloudSpec specifies access data to DigitalOcean.", - "properties": { - "token": { - "description": "Token is used to authenticate with the DigitalOcean API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneEquinixCloudSpec": { - "type": "object", - "title": "KubeOneEquinixCloudSpec specifies access data to an Equinix cloud.", - "properties": { - "apiKey": { - "type": "string", - "x-go-name": "APIKey" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - } - }, - "x-go-package": 
"k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneGCPCloudSpec": { - "type": "object", - "title": "KubeOneGCPCloudSpec specifies access data to GCP.", - "properties": { - "serviceAccount": { - "type": "string", - "x-go-name": "ServiceAccount" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneHetznerCloudSpec": { - "type": "object", - "title": "KubeOneHetznerCloudSpec specifies access data to hetzner cloud.", - "properties": { - "token": { - "description": "Token is used to authenticate with the Hetzner cloud API.", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneNutanixCloudSpec": { - "type": "object", - "title": "KubeOneNutanixCloudSpec specifies the access data to Nutanix.", - "properties": { - "allowInsecure": { - "type": "boolean", - "x-go-name": "AllowInsecure" - }, - "clusterName": { - "description": "ClusterName is the Nutanix cluster that this user cluster will be deployed to.\n+optional", - "type": "string", - "x-go-name": "ClusterName" - }, - "elementEndpoint": { - "description": "PrismElementEndpoint to access Nutanix Prism Element for the CSI driver", - "type": "string", - "x-go-name": "PrismElementEndpoint" - }, - "elementPassword": { - "description": "PrismElementPassword to be used for the CSI driver", - "type": "string", - "x-go-name": "PrismElementPassword" - }, - "elementUsername": { - "description": "PrismElementUsername to be used for the CSI driver", - "type": "string", - "x-go-name": "PrismElementUsername" - }, - "endpoint": { - "description": "Endpoint is the Nutanix API (Prism Central) endpoint", - "type": "string", - "x-go-name": "Endpoint" - }, - "password": { - "type": "string", - "x-go-name": "Password" - }, - "port": { - "description": "Port is the Nutanix API (Prism Central) port", - "type": "string", - "x-go-name": "Port" - }, - "proxyURL": { - "type": "string", - "x-go-name": "ProxyURL" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneOpenStackCloudSpec": { - "type": "object", - "title": "KubeOneOpenStackCloudSpec specifies access data to an OpenStack cloud.", - "properties": { - "authURL": { - "type": "string", - "x-go-name": "AuthURL" - }, - "domain": { - "type": "string", - "x-go-name": "Domain" - }, - "password": { - "type": "string", - "x-go-name": "Password" - }, - "project": { - "description": "Project, formally known as tenant.", - "type": "string", - "x-go-name": "Project" - }, - "projectID": { - "description": "ProjectID, formally known as tenantID.", - "type": "string", - "x-go-name": "ProjectID" - }, - "region": { - "type": "string", - "x-go-name": "Region" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneSSHKey": { - "type": "object", - "title": "SSHKeySpec represents the details of a ssh key.", - "properties": { - "passphrase": { - "type": "string", - "x-go-name": "Passphrase" - }, - "privateKey": { - "description": "PrivateKey Base64 encoded privateKey", - "type": "string", - "x-go-name": "PrivateKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneSpec": { - "type": "object", - "properties": { - "cloudSpec": { - "$ref": "#/definitions/KubeOneCloudSpec" - }, - "manifest": { - "description": "Manifest Base64 encoded manifest", - "type": "string", - "x-go-name": "Manifest" - }, - "providerName": { - "description": "ProviderName is the name of 
the cloud provider used, one of\n\"aws\", \"azure\", \"digitalocean\", \"gcp\",\n\"hetzner\", \"nutanix\", \"openstack\", \"packet\", \"vsphere\" KubeOne natively-supported providers.\n+ readOnly", - "type": "string", - "x-go-name": "ProviderName" - }, - "region": { - "description": "Region is the kubernetes control plane region.\n+ readOnly", - "type": "string", - "x-go-name": "Region" - }, - "sshKey": { - "$ref": "#/definitions/KubeOneSSHKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneVMwareCloudDirectorCloudSpec": { - "type": "object", - "title": "KubeOneVMwareCloudDirectorCloudSpec represents credentials for accessing VMWare Cloud Director.", - "properties": { - "organization": { - "type": "string", - "x-go-name": "Organization" - }, - "password": { - "type": "string", - "x-go-name": "Password" - }, - "url": { - "type": "string", - "x-go-name": "URL" - }, - "username": { - "type": "string", - "x-go-name": "Username" - }, - "vdc": { - "type": "string", - "x-go-name": "VDC" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeOneVSphereCloudSpec": { - "type": "object", - "title": "KubeOneVSphereCloudSpec credentials represents a credential for accessing vSphere.", - "properties": { - "password": { - "type": "string", - "x-go-name": "Password" - }, - "server": { - "type": "string", - "x-go-name": "Server" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtCSIDriverOperator": { - "type": "object", - "title": "KubeVirtCSIDriverOperator contains the different configurations for the kubevirt csi driver operator in the user cluster.", - "properties": { - "overwriteRegistry": { - "description": "OverwriteRegistry overwrite the images registry that the operator pulls.", - "type": "string", - "x-go-name": "OverwriteRegistry" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeVirtHTTPSource": { - "type": "object", - "title": "KubeVirtHTTPSource represents list of images and their versions that can be downloaded over HTTP.", - "properties": { - "operatingSystems": { - "description": "OperatingSystems represents list of supported operating-systems with their URLs.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/OSVersions" - }, - "x-go-name": "OperatingSystems" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeVirtImageSourceType": { - "type": "string", - "title": "KubeVirtImageSourceType represents a KubeVirt image source type.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtImageSources": { - "type": "object", - "title": "KubeVirtImageSources represents KubeVirt image sources.", - "properties": { - "http": { - "$ref": "#/definitions/KubeVirtHTTPSource" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeVirtImages": { - "type": "object", - "title": "KubeVirtImages represents images with versions and their source.", - "properties": { - "operatingSystems": { - "$ref": "#/definitions/ImageListWithVersions" - }, - "source": { - "$ref": "#/definitions/KubeVirtImageSourceType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtImagesList": { - "type": "object", - "title": "KubeVirtImagesList represents list of available KubeVirt images with their categories.", - "properties": { - "standard": { - "$ref": "#/definitions/KubeVirtImages" - } - }, - "x-go-package": 
"k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtInfraStorageClass": { - "type": "object", - "properties": { - "isDefaultClass": { - "description": "Optional: IsDefaultClass. If true, the created StorageClass in the tenant cluster will be annotated with:\nstorageclass.kubernetes.io/is-default-class : true\nIf missing or false, annotation will be:\nstorageclass.kubernetes.io/is-default-class : false", - "type": "boolean", - "x-go-name": "IsDefaultClass" - }, - "labels": { - "description": "Labels is a map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "regions": { - "description": "Regions represents a larger domain, made up of one or more zones. It is uncommon for Kubernetes clusters\nto span multiple regions", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Regions" - }, - "volumeBindingMode": { - "$ref": "#/definitions/VolumeBindingMode" - }, - "volumeProvisioner": { - "$ref": "#/definitions/KubeVirtVolumeProvisioner" - }, - "zones": { - "description": "Zones represent a logical failure domain. It is common for Kubernetes clusters to span multiple zones\nfor increased availability", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Zones" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubeVirtSubnet": { - "type": "object", - "title": "KubeVirtSubnet represents a KubeVirt Subnet.", - "properties": { - "cidr": { - "type": "string", - "x-go-name": "CIDR" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtSubnetList": { - "type": "array", - "title": "KubeVirtSubnetList represents an array of KubeVirt Subnets.", - "items": { - "$ref": "#/definitions/KubeVirtSubnet" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtVPC": { - "type": "object", - "title": "KubeVirtVPC represents a KubeVirt VPC.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtVPCList": { - "type": "array", - "title": "KubeVirtVPCList represents an array of KubeVirt VPCs.", - "items": { - "$ref": "#/definitions/KubeVirtVPC" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "KubeVirtVolumeProvisioner": { - "description": "KubeVirtVolumeProvisioner represents what is the provisioner of the storage class volume, whether it will be the csi driver\nand/or CDI for disk images.", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubermaticVersions": { - "type": "object", - "title": "KubermaticVersions describes the versions of running Kubermatic components.", - "properties": { - "api": { - "description": "Version of the Kubermatic API server.", - "type": "string", - "x-go-name": "API" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "KubernetesDashboard": { - "type": "object", - "title": "KubernetesDashboard contains settings for the kubernetes-dashboard component as part of the cluster control plane.", - "properties": { - "enabled": { - "description": "Controls whether kubernetes-dashboard is deployed to the user cluster or not.\nEnabled by default.", - "type": "boolean", - "x-go-name": "Enabled" - } - }, - 
"x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Kubevirt": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "kubeconfig": { - "description": "Kubeconfig is the cluster's kubeconfig file, encoded with base64.", - "type": "string", - "x-go-name": "Kubeconfig" - }, - "subnetName": { - "description": "SubnetName is the name of a subnet that is smaller, segmented portion of a larger network, like a Virtual Private Cloud (VPC).", - "type": "string", - "x-go-name": "SubnetName" - }, - "vpcName": { - "description": "VPCName is a virtual network name dedicated to a single tenant within a KubeVirt", - "type": "string", - "x-go-name": "VPCName" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubevirtCloudSpec": { - "type": "object", - "title": "KubevirtCloudSpec specifies the access data to Kubevirt.", - "properties": { - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "csiDriverOperator": { - "$ref": "#/definitions/KubeVirtCSIDriverOperator" - }, - "csiKubeconfig": { - "type": "string", - "x-go-name": "CSIKubeconfig" - }, - "imageCloningEnabled": { - "description": "ImageCloningEnabled flag enable/disable cloning for a cluster.", - "type": "boolean", - "x-go-name": "ImageCloningEnabled" - }, - "infraStorageClasses": { - "description": "Deprecated: in favor of StorageClasses.\nInfraStorageClasses is a list of storage classes from KubeVirt infra cluster that are used for\ninitialization of user cluster storage classes by the CSI driver kubevirt (hot pluggable disks)", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "InfraStorageClasses" - }, - "kubeconfig": { - "description": "The cluster's kubeconfig file, encoded with base64.", - "type": "string", - "x-go-name": "Kubeconfig" - }, - "preAllocatedDataVolumes": { - "description": "Custom Images are a good example of this use case.", - "type": "array", - "items": { - "$ref": "#/definitions/PreAllocatedDataVolume" - }, - "x-go-name": "PreAllocatedDataVolumes" - }, - "storageClasses": { - "description": "StorageClasses is a list of storage classes from KubeVirt infra cluster that are used for\ninitialization of user cluster storage classes by the CSI driver kubevirt (hot pluggable disks.\nIt contains also some flag specifying which one is the default one.", - "type": "array", - "items": { - "$ref": "#/definitions/KubeVirtInfraStorageClass" - }, - "x-go-name": "StorageClasses" - }, - "subnetName": { - "description": "SubnetName is the name of a subnet that is smaller, segmented portion of a larger network, like a Virtual Private Cloud (VPC).", - "type": "string", - "x-go-name": "SubnetName" - }, - "vpcName": { - "description": "VPCName is a virtual network 
name dedicated to a single tenant within a KubeVirt.", - "type": "string", - "x-go-name": "VPCName" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KubevirtNodeSize": { - "type": "object", - "required": [ - "cpus", - "memory", - "primaryDiskSize" - ], - "properties": { - "cpus": { - "description": "CPUs states how many cpus the kubevirt node will have.", - "type": "string", - "x-go-name": "CPUs" - }, - "memory": { - "description": "Memory states the memory that kubevirt node will have. The value should be in the\nformat of https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/", - "type": "string", - "x-go-name": "Memory" - }, - "primaryDiskSize": { - "description": "PrimaryDiskSize states the size of the provisioned pvc per node.", - "type": "string", - "x-go-name": "PrimaryDiskSize" - }, - "secondaryDisks": { - "description": "SecondaryDisks contains list of secondary-disks", - "type": "array", - "items": { - "$ref": "#/definitions/SecondaryDisks" - }, - "x-go-name": "SecondaryDisks" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "KubevirtNodeSpec": { - "description": "KubevirtNodeSpec kubevirt specific node settings", - "type": "object", - "required": [ - "cpus", - "memory", - "primaryDiskOSImage", - "primaryDiskStorageClassName", - "primaryDiskSize" - ], - "properties": { - "cpus": { - "description": "CPUs states how many cpus the kubevirt node will have.", - "type": "string", - "x-go-name": "CPUs" - }, - "evictionStrategy": { - "description": "EvictionStrategy describes the strategy to follow when a node drain occurs. If not set the default\nvalue is External and the VM will be protected by a PDB.", - "type": "string", - "x-go-name": "EvictionStrategy" - }, - "flavorName": { - "description": "FlavorName states name of the virtual-machine flavor.\n\nDeprecated. In favor of Instancetype and Preference.", - "type": "string", - "x-go-name": "FlavorName" - }, - "flavorProfile": { - "description": "FlavorProfile states name of virtual-machine profile.\n\nDeprecated. 
In favor of Instancetype and Preference.", - "type": "string", - "x-go-name": "FlavorProfile" - }, - "instancetype": { - "$ref": "#/definitions/InstancetypeMatcher" - }, - "memory": { - "description": "Memory states the memory that kubevirt node will have.", - "type": "string", - "x-go-name": "Memory" - }, - "nodeAffinityPreset": { - "$ref": "#/definitions/NodeAffinityPreset" - }, - "podAffinityPreset": { - "description": "PodAffinityPreset describes pod affinity scheduling rules\n\nDeprecated: in favor of topology spread constraints", - "type": "string", - "x-go-name": "PodAffinityPreset" - }, - "podAntiAffinityPreset": { - "description": "PodAntiAffinityPreset describes pod anti-affinity scheduling rules\n\nDeprecated: in favor of topology spread constraints", - "type": "string", - "x-go-name": "PodAntiAffinityPreset" - }, - "preference": { - "$ref": "#/definitions/PreferenceMatcher" - }, - "primaryDiskOSImage": { - "description": "PrimaryDiskOSImage states the source from which the imported image will be downloaded.\nThis field contains:\na URL to download an Os Image from a HTTP source.\na DataVolume Name as source for DataVolume cloning.", - "type": "string", - "x-go-name": "PrimaryDiskOSImage" - }, - "primaryDiskSize": { - "description": "PrimaryDiskSize states the size of the provisioned pvc per node.", - "type": "string", - "x-go-name": "PrimaryDiskSize" - }, - "primaryDiskStorageClassName": { - "description": "PrimaryDiskStorageClassName states the storage class name for the provisioned PVCs.", - "type": "string", - "x-go-name": "PrimaryDiskStorageClassName" - }, - "secondaryDisks": { - "description": "SecondaryDisks contains list of secondary-disks", - "type": "array", - "items": { - "$ref": "#/definitions/SecondaryDisks" - }, - "x-go-name": "SecondaryDisks" - }, - "subnet": { - "description": "Subnet is the name of the subnet to use for the VM.", - "type": "string", - "x-go-name": "Subnet" - }, - "topologySpreadConstraints": { - "description": "TopologySpreadConstraints describes topology spread constraints for VMs.", - "type": "array", - "items": { - "$ref": "#/definitions/TopologySpreadConstraint" - }, - "x-go-name": "TopologySpreadConstraints" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "KyvernoPolicyNamespace": { - "description": "This is relevant only if a Kyverno Policy resource is created because a Kyverno Policy is namespaced.\nFor Kyverno ClusterPolicy, this field is ignored.", - "type": "object", - "title": "KyvernoPolicyNamespace specifies the namespace to deploy the Kyverno Policy into.", - "properties": { - "annotations": { - "description": "Annotations to apply to this namespace.\n\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "labels": { - "description": "Labels to apply to this namespace.\n\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name is the name of the namespace to deploy the Kyverno Policy into.", - "type": "string", - "pattern": "=`^(|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)`", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "KyvernoSettings": { - "type": "object", - "title": "KyvernoSettings contains settings for the Kyverno component as part of the cluster control plane. 
This component is responsible for policy management.", - "properties": { - "enabled": { - "description": "Controls whether Kyverno is deployed or not.", - "type": "boolean", - "x-go-name": "Enabled" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "LBClass": { - "type": "object", - "properties": { - "floatingNetworkID": { - "description": "FloatingNetworkID is the external network used to create floating IP for the load balancer VIP.", - "type": "string", - "x-go-name": "FloatingNetworkID" - }, - "floatingSubnet": { - "description": "FloatingSubnet is a name pattern for the external network subnet used to create floating IP for the load balancer VIP.", - "type": "string", - "x-go-name": "FloatingSubnet" - }, - "floatingSubnetID": { - "description": "FloatingSubnetID is the external network subnet used to create floating IP for the load balancer VIP.", - "type": "string", - "x-go-name": "FloatingSubnetID" - }, - "floatingSubnetTags": { - "description": "FloatingSubnetTags is a comma separated list of tags for the external network subnet used to create floating IP for the load balancer VIP.", - "type": "string", - "x-go-name": "FloatingSubnetTags" - }, - "memberSubnetID": { - "description": "MemberSubnetID is the ID of the Neutron network on which to create the members of the load balancer.", - "type": "string", - "x-go-name": "MemberSubnetID" - }, - "networkID": { - "description": "NetworkID is the ID of the Neutron network on which to create load balancer VIP, not needed if subnet-id is set.", - "type": "string", - "x-go-name": "NetworkID" - }, - "subnetID": { - "description": "SubnetID is the ID of the Neutron subnet on which to create load balancer VIP.", - "type": "string", - "x-go-name": "SubnetID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "LBSKU": { - "type": "string", - "title": "Azure SKU for Load Balancers. Possible values are `basic` and `standard`.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "LabelKeyList": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "LabelSelector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects.\n+structType=atomic", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/LabelSelectorRequirement" - }, - "x-go-name": "MatchExpressions" - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "MatchLabels" - } - }, - "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" - }, - "LabelSelectorOperator": { - "type": "string", - "title": "A label selector operator is the set of operators that can be used in a selector requirement.", - "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" - }, - "LabelSelectorRequirement": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values.", - "type": "object", - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-go-name": "Key" - }, - "operator": { - "$ref": "#/definitions/LabelSelectorOperator" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Values" - } - }, - "x-go-package": "k8s.io/apimachinery/pkg/apis/meta/v1" - }, - "LoadBalancerClass": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/LBClass" - }, - "name": { - "description": "Name is the name of the load balancer class.\n\n+kubebuilder:validation:MinLength=1", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "LocalObjectReference": { - "description": "New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\nrestrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\nThose cannot be well described when embedded.\n2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n3. We cannot easily change it. Because this type is embedded in many locations, updates to this type\nwill affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+structType=atomic", - "type": "object", - "title": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace.", - "properties": { - "name": { - "description": "Name of the referent.\nThis field is effectively required, but due to backwards compatibility is\nallowed to be empty. 
Instances of this type with an empty value here are\nalmost certainly wrong.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional\n+default=\"\"\n+kubebuilder:default=\"\"\nTODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "LoggingRateLimitSettings": { - "type": "object", - "title": "LoggingRateLimitSettings contains rate-limiting configuration for logging in the user cluster.", - "properties": { - "ingestionBurstSize": { - "description": "IngestionBurstSize represents ingestion burst size in number of requests (nginx `burst`).", - "type": "integer", - "format": "int32", - "x-go-name": "IngestionBurstSize" - }, - "ingestionRate": { - "description": "IngestionRate represents ingestion rate limit in requests per second (nginx `rate` in `r/s`).", - "type": "integer", - "format": "int32", - "x-go-name": "IngestionRate" - }, - "queryBurstSize": { - "description": "QueryBurstSize represents query burst size in number of requests (nginx `burst`).", - "type": "integer", - "format": "int32", - "x-go-name": "QueryBurstSize" - }, - "queryRate": { - "description": "QueryRate represents query request rate limit per second (nginx `rate` in `r/s`).", - "type": "integer", - "format": "int32", - "x-go-name": "QueryRate" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MLA": { - "type": "object", - "properties": { - "user_cluster_mla_enabled": { - "description": "whether the user cluster MLA (Monitoring, Logging \u0026 Alerting) stack is enabled in the seed", - "type": "boolean", - "x-go-name": "UserClusterMLAEnabled" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "MLAAdminSetting": { - "type": "object", - "title": "MLAAdminSetting represents an object holding admin setting options for user cluster MLA (Monitoring, Logging and Alerting).", - "properties": { - "loggingRateLimits": { - "$ref": "#/definitions/LoggingRateLimitSettings" - }, - "monitoringRateLimits": { - "$ref": "#/definitions/MonitoringRateLimitSettings" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "MLASettings": { - "type": "object", - "properties": { - "loggingEnabled": { - "description": "LoggingEnabled is the flag for enabling logging in user cluster.", - "type": "boolean", - "x-go-name": "LoggingEnabled" - }, - "loggingResources": { - "$ref": "#/definitions/ResourceRequirements" - }, - "monitoringEnabled": { - "description": "MonitoringEnabled is the flag for enabling monitoring in user cluster.", - "type": "boolean", - "x-go-name": "MonitoringEnabled" - }, - "monitoringReplicas": { - "description": "MonitoringReplicas is the number of desired pods of user cluster prometheus deployment.", - "type": "integer", - "format": "int32", - "x-go-name": "MonitoringReplicas" - }, - "monitoringResources": { - "$ref": "#/definitions/ResourceRequirements" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MachineDeploymentOptions": { - "type": "object", - "properties": { - "autoUpdatesEnabled": { - "description": "AutoUpdatesEnabled enables the auto updates option for machine deployments on the dashboard.\nIn case of flatcar linux, this will enable automatic updates through update engine and for other operating systems,\nthis will enable package updates on boot for the machines.", - "type": "boolean", - "x-go-name": 
"AutoUpdatesEnabled" - }, - "autoUpdatesEnforced": { - "description": "AutoUpdatesEnforced enforces the auto updates option for machine deployments on the dashboard.\nIn case of flatcar linux, this will enable automatic updates through update engine and for other operating systems,\nthis will enable package updates on boot for the machines.", - "type": "boolean", - "x-go-name": "AutoUpdatesEnforced" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MachineDeploymentStatus": { - "description": "[MachineDeploymentStatus]\nMachineDeploymentStatus defines the observed state of MachineDeployment.", - "type": "object", - "properties": { - "availableReplicas": { - "description": "Total number of available machines (ready for at least minReadySeconds)\ntargeted by this deployment.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "AvailableReplicas" - }, - "observedGeneration": { - "description": "The generation observed by the deployment controller.\n+optional", - "type": "integer", - "format": "int64", - "x-go-name": "ObservedGeneration" - }, - "readyReplicas": { - "description": "Total number of ready machines targeted by this deployment.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "ReadyReplicas" - }, - "replicas": { - "description": "Total number of non-terminated machines targeted by this deployment\n(their labels match the selector).\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "Replicas" - }, - "unavailableReplicas": { - "description": "Total number of unavailable machines targeted by this deployment.\nThis is the total number of machines that are still required for\nthe deployment to have 100% available capacity. They may either\nbe machines that are running but not yet available or machines\nthat still have not been created.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "UnavailableReplicas" - }, - "updatedReplicas": { - "description": "Total number of non-terminated machines targeted by this deployment\nthat have the desired template spec.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "UpdatedReplicas" - } - }, - "x-go-package": "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" - }, - "MachineFlavorFilter": { - "type": "object", - "properties": { - "enableGPU": { - "description": "Include VMs with GPU", - "type": "boolean", - "x-go-name": "EnableGPU" - }, - "maxCPU": { - "description": "Maximum number of vCPU", - "type": "integer", - "format": "int64", - "x-go-name": "MaxCPU" - }, - "maxRAM": { - "description": "Maximum RAM size in GB", - "type": "integer", - "format": "int64", - "x-go-name": "MaxRAM" - }, - "minCPU": { - "description": "Minimum number of vCPU", - "type": "integer", - "format": "int64", - "x-go-name": "MinCPU" - }, - "minRAM": { - "description": "Minimum RAM size in GB", - "type": "integer", - "format": "int64", - "x-go-name": "MinRAM" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MachineNetworkingConfig": { - "type": "object", - "title": "MachineNetworkingConfig specifies the networking parameters used for IPAM.", - "properties": { - "cidr": { - "type": "string", - "x-go-name": "CIDR" - }, - "dnsServers": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "DNSServers" - }, - "gateway": { - "type": "string", - "x-go-name": "Gateway" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MasterVersion": { - "description": "MasterVersion 
describes a version of the master components", - "type": "object", - "properties": { - "default": { - "type": "boolean", - "x-go-name": "Default" - }, - "restrictedByKubeletVersion": { - "description": "If true, then given version control plane version is not compatible\nwith one of the kubelets inside cluster and shouldn't be used.", - "type": "boolean", - "x-go-name": "RestrictedByKubeletVersion" - }, - "version": { - "type": "string", - "x-go-name": "Version" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Match": { - "type": "object", - "title": "Match contains the constraint to resource matching data.", - "properties": { - "excludedNamespaces": { - "description": "ExcludedNamespaces is a list of namespace names. If defined, a constraint will only apply to resources not in a listed namespace.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ExcludedNamespaces" - }, - "kinds": { - "description": "Kinds accepts a list of objects with apiGroups and kinds fields that list the groups/kinds of objects to which\nthe constraint will apply. If multiple groups/kinds objects are specified, only one match is needed for the resource to be in scope", - "type": "array", - "items": { - "$ref": "#/definitions/Kind" - }, - "x-go-name": "Kinds" - }, - "labelSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "namespaceSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "namespaces": { - "description": "Namespaces is a list of namespace names. If defined, a constraint will only apply to resources in a listed namespace.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Namespaces" - }, - "scope": { - "description": "Scope accepts *, Cluster, or Namespaced which determines if cluster-scoped and/or namespace-scoped resources are selected. (defaults to *)", - "type": "string", - "x-go-name": "Scope" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MatchEntry": { - "type": "object", - "properties": { - "excludedNamespaces": { - "description": "Namespaces which will be excluded", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ExcludedNamespaces" - }, - "processes": { - "description": "Processes which will be excluded in the given namespaces (sync, webhook, audit, *)", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Processes" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "MeteringConfiguration": { - "type": "object", - "title": "MeteringConfiguration contains all the configuration for the metering tool.", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - }, - "reports": { - "description": "ReportConfigurations is a map of report configuration definitions.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/MeteringReportConfiguration" - }, - "x-go-name": "ReportConfigurations" - }, - "retentionDays": { - "description": "RetentionDays is the number of days for which data should be kept in Prometheus. Default value is 90.", - "type": "integer", - "format": "int64", - "x-go-name": "RetentionDays" - }, - "storageClassName": { - "description": "StorageClassName is the name of the storage class that the metering Prometheus instance uses to store metric data for reporting.", - "type": "string", - "x-go-name": "StorageClassName" - }, - "storageSize": { - "description": "StorageSize is the size of the storage class. Default value is 100Gi. 
Changing this value requires\nmanual deletion of the existing Prometheus PVC (and thereby removing all metering data).", - "type": "string", - "x-go-name": "StorageSize" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MeteringReport": { - "description": "MeteringReport holds objects names and metadata for available reports", - "type": "object", - "properties": { - "lastModified": { - "type": "string", - "format": "date-time", - "x-go-name": "LastModified" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "size": { - "type": "integer", - "format": "int64", - "x-go-name": "Size" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "MeteringReportConfiguration": { - "description": "MeteringReportConfiguration holds report configuration", - "type": "object", - "properties": { - "format": { - "$ref": "#/definitions/MeteringReportFormat" - }, - "interval": { - "description": "Interval defines the number of days consulted in the metering report.\nIgnored when `Monthly` is set to true", - "type": "integer", - "format": "uint32", - "x-go-name": "Interval" - }, - "monthly": { - "description": "+optional\nMonthly creates a report for the previous month.", - "type": "boolean", - "x-go-name": "Monthly" - }, - "retention": { - "description": "Retention defines a number of days after which reports are queued for removal. If not set, reports are kept forever.\nPlease note that this functionality works only for object storage that supports an object lifecycle management mechanism.", - "type": "integer", - "format": "uint32", - "x-go-name": "Retention" - }, - "schedule": { - "description": "Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Please take a note that Schedule is responsible\nonly for setting the time when a report generation mechanism kicks off. The Interval MUST be set independently.", - "type": "string", - "x-go-name": "Schedule" - }, - "type": { - "description": "Types of reports to generate. Available report types are cluster and namespace. 
By default, all types of reports are generated.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Types" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MeteringReportFormat": { - "description": "+kubebuilder:validation:Enum=csv;json", - "type": "string", - "title": "MeteringReportFormat maps directly to the values supported by the kubermatic-metering tool.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MeteringReportURL": { - "description": "ReportURL represent an S3 pre signed URL to download a report", - "type": "string", - "x-go-name": "ReportURL", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "MlaOptions": { - "type": "object", - "properties": { - "loggingEnabled": { - "type": "boolean", - "x-go-name": "LoggingEnabled" - }, - "loggingEnforced": { - "type": "boolean", - "x-go-name": "LoggingEnforced" - }, - "monitoringEnabled": { - "type": "boolean", - "x-go-name": "MonitoringEnabled" - }, - "monitoringEnforced": { - "type": "boolean", - "x-go-name": "MonitoringEnforced" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "MonitoringRateLimitSettings": { - "type": "object", - "title": "MonitoringRateLimitSettings contains rate-limiting configuration for monitoring in the user cluster.", - "properties": { - "ingestionBurstSize": { - "description": "IngestionBurstSize represents ingestion burst size in samples per second (Cortex `ingestion_burst_size`).", - "type": "integer", - "format": "int32", - "x-go-name": "IngestionBurstSize" - }, - "ingestionRate": { - "description": "IngestionRate represents the ingestion rate limit in samples per second (Cortex `ingestion_rate`).", - "type": "integer", - "format": "int32", - "x-go-name": "IngestionRate" - }, - "maxSamplesPerQuery": { - "description": "MaxSamplesPerQuery represents maximum number of samples during a query (Cortex `max_samples_per_query`).", - "type": "integer", - "format": "int32", - "x-go-name": "MaxSamplesPerQuery" - }, - "maxSeriesPerMetric": { - "description": "MaxSeriesPerMetric represents maximum number of series per metric (Cortex `max_series_per_metric`).", - "type": "integer", - "format": "int32", - "x-go-name": "MaxSeriesPerMetric" - }, - "maxSeriesPerQuery": { - "description": "MaxSeriesPerQuery represents maximum number of timeseries during a query (Cortex `max_series_per_query`).", - "type": "integer", - "format": "int32", - "x-go-name": "MaxSeriesPerQuery" - }, - "maxSeriesTotal": { - "description": "MaxSeriesTotal represents maximum number of series per this user cluster (Cortex `max_series_per_user`).", - "type": "integer", - "format": "int32", - "x-go-name": "MaxSeriesTotal" - }, - "queryBurstSize": { - "description": "QueryBurstSize represents query burst size in number of requests (nginx `burst`).", - "type": "integer", - "format": "int32", - "x-go-name": "QueryBurstSize" - }, - "queryRate": { - "description": "QueryRate represents query request rate limit per second (nginx `rate` in `r/s`).", - "type": "integer", - "format": "int32", - "x-go-name": "QueryRate" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Names": { - "type": "object", - "properties": { - "kind": { - "type": "string", - "x-go-name": "Kind" - }, - "shortNames": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ShortNames" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "Namespace": { - "description": 
"Namespace defines namespace", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NamespaceSpec": { - "type": "object", - "title": "NamespaceSpec describe the desired state of the namespace where application will be created.", - "properties": { - "annotations": { - "description": "Annotations of the namespace\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "create": { - "description": "Create defines whether the namespace should be created if it does not exist. Defaults to true", - "type": "boolean", - "x-go-name": "Create" - }, - "labels": { - "description": "Labels of the namespace\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name is the namespace to deploy the Application into.\nShould be a valid lowercase RFC1123 domain name", - "type": "string", - "pattern": "=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NamespacedMode": { - "type": "object", - "properties": { - "enabled": { - "description": "Enabled indicates whether the single namespace mode is enabled or not.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "name": { - "description": "Namespace is the name of the namespace to be used, if not specified the default \"kubevirt-workload\" will be used.\n+kubebuilder:default=kubevirt-workload", - "type": "string", - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NamespacedName": { - "type": "object", - "properties": { - "Name": { - "type": "string" - }, - "Namespace": { - "type": "string" - } - }, - "x-go-package": "k8s.io/apimachinery/pkg/types" - }, - "NetworkDefaults": { - "type": "object", - "title": "NetworkDefaults contains cluster network default settings.", - "properties": { - "clusterExposeStrategy": { - "$ref": "#/definitions/ExposeStrategy" - }, - "ipv4": { - "$ref": "#/definitions/NetworkDefaultsIPFamily" - }, - "ipv6": { - "$ref": "#/definitions/NetworkDefaultsIPFamily" - }, - "nodeLocalDNSCacheEnabled": { - "description": "NodeLocalDNSCacheEnabled controls whether the NodeLocal DNS Cache feature is enabled.", - "type": "boolean", - "x-go-name": "NodeLocalDNSCacheEnabled" - }, - "proxyMode": { - "description": "ProxyMode defines the default kube-proxy mode (\"ipvs\" / \"iptables\" / \"ebpf\").", - "type": "string", - "x-go-name": "ProxyMode" - }, - "tunnelingAgentIP": { - "description": "TunnelingAgentIP is the address used by the tunneling agents", - "type": "string", - "x-go-name": "TunnelingAgentIP" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "NetworkDefaultsIPFamily": { - "type": "object", - "title": "NetworkDefaultsIPFamily contains cluster network default settings for an IP family.", - "properties": { - "nodeCidrMaskSize": { - "description": "NodeCIDRMaskSize contains the default mask size used to address the nodes within provided Pods CIDR.", - "type": "integer", - "format": "int32", - "x-go-name": "NodeCIDRMaskSize" - }, - "nodePortsAllowedIPRange": { - "description": "NodePortsAllowedIPRange defines the default IP range from which access to NodePort services is allowed for 
applicable cloud providers.", - "type": "string", - "x-go-name": "NodePortsAllowedIPRange" - }, - "podsCidr": { - "description": "PodsCIDR contains the default network range from which POD networks are allocated.", - "type": "string", - "x-go-name": "PodsCIDR" - }, - "servicesCidr": { - "description": "ServicesCIDR contains the default network range from which service VIPs are allocated.", - "type": "string", - "x-go-name": "ServicesCIDR" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "NetworkPolicy": { - "type": "object", - "title": "NetworkPolicy describes if and which network policies will be deployed by default to kubevirt userclusters.", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - }, - "mode": { - "$ref": "#/definitions/NetworkPolicyMode" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NetworkPolicyEgressRule": { - "description": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods\nmatched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.\nThis type is beta-level in 1.8", - "type": "object", - "properties": { - "ports": { - "description": "ports is a list of destination ports for outgoing traffic.\nEach item in this list is combined using a logical OR. If this field is\nempty or missing, this rule matches all ports (traffic not restricted by port).\nIf this field is present and contains at least one item, then this rule allows\ntraffic only if the traffic matches at least one port in the list.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyPort" - }, - "x-go-name": "Ports" - }, - "to": { - "description": "to is a list of destinations for outgoing traffic of pods selected for this rule.\nItems in this list are combined using a logical OR operation. If this field is\nempty or missing, this rule matches all destinations (traffic not restricted by\ndestination). If this field is present and contains at least one item, this rule\nallows traffic only if the traffic matches at least one item in the to list.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyPeer" - }, - "x-go-name": "To" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "NetworkPolicyIngressRule": { - "description": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods\nmatched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "type": "object", - "properties": { - "from": { - "description": "from is a list of sources which should be able to access the pods selected for this rule.\nItems in this list are combined using a logical OR operation. If this field is\nempty or missing, this rule matches all sources (traffic not restricted by\nsource). If this field is present and contains at least one item, this rule\nallows traffic only if the traffic matches at least one item in the from list.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyPeer" - }, - "x-go-name": "From" - }, - "ports": { - "description": "ports is a list of ports which should be made accessible on the pods selected for\nthis rule. Each item in this list is combined using a logical OR. 
If this field is\nempty or missing, this rule matches all ports (traffic not restricted by port).\nIf this field is present and contains at least one item, then this rule allows\ntraffic only if the traffic matches at least one port in the list.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyPort" - }, - "x-go-name": "Ports" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "NetworkPolicyMode": { - "description": "NetworkPolicyMode maps directly to the values supported by the kubermatic network policy mode for kubevirt\nworker nodes in kube-ovn environments.\n+kubebuilder:validation:Enum=deny;allow", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NetworkPolicyPeer": { - "description": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of\nfields are allowed", - "type": "object", - "properties": { - "ipBlock": { - "$ref": "#/definitions/IPBlock" - }, - "namespaceSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "podSelector": { - "$ref": "#/definitions/LabelSelector" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "NetworkPolicyPort": { - "description": "NetworkPolicyPort describes a port to allow traffic on", - "type": "object", - "properties": { - "endPort": { - "description": "endPort indicates that the range of ports from port to endPort if set, inclusive,\nshould be allowed by the policy. This field cannot be defined if the port field\nis not defined or if the port field is defined as a named (string) port.\nThe endPort must be equal or greater than port.\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "EndPort" - }, - "port": { - "$ref": "#/definitions/IntOrString" - }, - "protocol": { - "$ref": "#/definitions/Protocol" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "NetworkPolicySpec": { - "description": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "type": "object", - "properties": { - "egress": { - "description": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic\nis allowed if there are no NetworkPolicies selecting the pod (and cluster policy\notherwise allows the traffic), OR if the traffic matches at least one egress rule\nacross all of the NetworkPolicy objects whose podSelector matches the pod. If\nthis field is empty then this NetworkPolicy limits all outgoing traffic (and serves\nsolely to ensure that the pods it selects are isolated by default).\nThis field is beta-level in 1.8\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyEgressRule" - }, - "x-go-name": "Egress" - }, - "ingress": { - "description": "ingress is a list of ingress rules to be applied to the selected pods.\nTraffic is allowed to a pod if there are no NetworkPolicies selecting the pod\n(and cluster policy otherwise allows the traffic), OR if the traffic source is\nthe pod's local node, OR if the traffic matches at least one ingress rule\nacross all of the NetworkPolicy objects whose podSelector matches the pod. 
If\nthis field is empty then this NetworkPolicy does not allow any traffic (and serves\nsolely to ensure that the pods it selects are isolated by default)\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/NetworkPolicyIngressRule" - }, - "x-go-name": "Ingress" - }, - "podSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "policyTypes": { - "description": "policyTypes is a list of rule types that the NetworkPolicy relates to.\nValid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"].\nIf this field is not specified, it will default based on the existence of ingress or egress rules;\npolicies that contain an egress section are assumed to affect egress, and all policies\n(whether or not they contain an ingress section) are assumed to affect ingress.\nIf you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ].\nLikewise, if you want to write a policy that specifies that no egress is allowed,\nyou must specify a policyTypes value that include \"Egress\" (since such a policy would not include\nan egress section and would otherwise default to just [ \"Ingress\" ]).\nThis field is beta-level in 1.8\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/PolicyType" - }, - "x-go-name": "PolicyTypes" - } - }, - "x-go-package": "k8s.io/api/networking/v1" - }, - "NetworkRanges": { - "type": "object", - "title": "NetworkRanges represents ranges of network addresses.", - "properties": { - "cidrBlocks": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "CIDRBlocks" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NetworkSpec": { - "description": "NetworkSpec machine static network configuration", - "type": "object", - "properties": { - "cidr": { - "type": "string", - "x-go-name": "CIDR" - }, - "dns": { - "$ref": "#/definitions/DNSConfig" - }, - "gateway": { - "type": "string", - "x-go-name": "Gateway" - }, - "ipFamily": { - "type": "string", - "x-go-name": "IPFamily" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Node": { - "description": "Node represents a worker node that is part of a cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/NodeSpec" - }, - "status": { - "$ref": "#/definitions/NodeStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeAddress": { - "type": "object", - "title": "NodeAddress contains information for the node's address.", - "properties": { - "address": { - "type": "string", - "x-go-name": "Address", - "example": "192.168.1.1, node1.my.dns" - }, - "type": { - "type": "string", - "x-go-name": "Type", - "example": "ExternalIP, InternalIP, InternalDNS, ExternalDNS" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeAffinityPreset": { - "type": "object", - "properties": { - "Key": { - "type": "string" - }, - "Type": { - "type": "string" - }, - "Values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeCloudSpec": { - "type": "object", - "title": "NodeCloudSpec represents the collection of cloud provider specific settings. Only one must be set at a time.", - "properties": { - "alibaba": { - "$ref": "#/definitions/AlibabaNodeSpec" - }, - "anexia": { - "$ref": "#/definitions/AnexiaNodeSpec" - }, - "aws": { - "$ref": "#/definitions/AWSNodeSpec" - }, - "azure": { - "$ref": "#/definitions/AzureNodeSpec" - }, - "baremetal": { - "$ref": "#/definitions/BaremetalNodeSpec" - }, - "digitalocean": { - "$ref": "#/definitions/DigitaloceanNodeSpec" - }, - "edge": { - "$ref": "#/definitions/EdgeNodeSpec" - }, - "gcp": { - "$ref": "#/definitions/GCPNodeSpec" - }, - "hetzner": { - "$ref": "#/definitions/HetznerNodeSpec" - }, - "kubevirt": { - "$ref": "#/definitions/KubevirtNodeSpec" - }, - "nutanix": { - "$ref": "#/definitions/NutanixNodeSpec" - }, - "opennebula": { - "$ref": "#/definitions/OpenNebulaNodeSpec" - }, - "openstack": { - "$ref": "#/definitions/OpenstackNodeSpec" - }, - "packet": { - "$ref": "#/definitions/PacketNodeSpec" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/VMwareCloudDirectorNodeSpec" - }, - "vsphere": { - "$ref": "#/definitions/VSphereNodeSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeDeployment": { - "description": "NodeDeployment represents a set of worker nodes that is part of a cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/NodeDeploymentSpec" - }, - "status": { - "$ref": "#/definitions/MachineDeploymentStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeDeploymentSpec": { - "description": "NodeDeploymentSpec node deployment specification", - "type": "object", - "required": [ - "replicas", - "template" - ], - "properties": { - "dynamicConfig": { - "description": "Only supported for nodes with Kubernetes 1.23 or less.", - "type": "boolean", - "x-go-name": "DynamicConfig" - }, - "maxReplicas": { - "type": "integer", - "format": "uint32", - "x-go-name": "MaxReplicas" - }, - "minReplicas": { - "type": "integer", - "format": "uint32", - "x-go-name": "MinReplicas" - }, - "paused": { - "type": "boolean", - "x-go-name": "Paused" - }, - "replicas": { - "type": "integer", - "format": "int32", - "x-go-name": "Replicas" - }, - "template": { - "$ref": "#/definitions/NodeSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeMetric": { - "description": "NodeMetric defines a metric for the given node", - "type": "object", - "properties": { - "cpuAvailableMillicores": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUAvailableMillicores" - }, - "cpuTotalMillicores": { - "description": "CPUTotalMillicores in m cores", - "type": "integer", - "format": "int64", - "x-go-name": "CPUTotalMillicores" - }, - "cpuUsedPercentage": { - "description": "CPUUsedPercentage in percentage", - "type": "integer", - "format": "int64", - "x-go-name": "CPUUsedPercentage" - }, - "memoryAvailableBytes": { - "description": "MemoryAvailableBytes available memory for node", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryAvailableBytes" - }, - "memoryTotalBytes": { - "description": "MemoryTotalBytes current memory usage in bytes", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryTotalBytes" - }, - "memoryUsedPercentage": { - "description": "MemoryUsedPercentage in percentage", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryUsedPercentage" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeResources": { - "description": "NodeResources cpu and memory of a node", - "type": "object", - "properties": { - "cpu": { - "type": "string", - "x-go-name": "CPU" - }, - "memory": { - "type": "string", - "x-go-name": "Memory" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeSettings": { - "type": "object", - "title": "NodeSettings are node specific flags which can be configured on datacenter level.", - "properties": { - "containerdRegistryMirrors": { - "$ref": "#/definitions/ContainerRuntimeContainerd" - }, - "httpProxy": { - "$ref": "#/definitions/ProxyValue" - }, - "insecureRegistries": { - "description": "Optional: These image registries will be configured as insecure\non the container runtime.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "InsecureRegistries" - }, - "noProxy": { - "$ref": "#/definitions/ProxyValue" - }, - "pauseImage": { - "description": "Optional: Translates to --pod-infra-container-image on the kubelet.\nIf not set, the kubelet will default it.", - "type": "string", - "x-go-name": "PauseImage" - }, - "registryMirrors": { - "description": "Optional: These image registries will be 
configured as registry mirrors\non the container runtime.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "RegistryMirrors" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NodeSpec": { - "description": "NodeSpec node specification", - "type": "object", - "required": [ - "cloud", - "operatingSystem", - "versions" - ], - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "cloud": { - "$ref": "#/definitions/NodeCloudSpec" - }, - "labels": { - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects.\nIt will be applied to Nodes allowing users to run their apps on a specific Node using labelSelector.", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "network": { - "$ref": "#/definitions/NetworkSpec" - }, - "operatingSystem": { - "$ref": "#/definitions/OperatingSystemSpec" - }, - "sshUserName": { - "type": "string", - "x-go-name": "SSHUserName" - }, - "taints": { - "description": "List of taints to set on new nodes", - "type": "array", - "items": { - "$ref": "#/definitions/TaintSpec" - }, - "x-go-name": "Taints" - }, - "versions": { - "$ref": "#/definitions/NodeVersionInfo" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeStatus": { - "type": "object", - "title": "NodeStatus is information about the current status of a node.", - "properties": { - "addresses": { - "description": "different addresses of a node", - "type": "array", - "items": { - "$ref": "#/definitions/NodeAddress" - }, - "x-go-name": "Addresses" - }, - "allocatable": { - "$ref": "#/definitions/NodeResources" - }, - "capacity": { - "$ref": "#/definitions/NodeResources" - }, - "errorMessage": { - "description": "in case of an error this will contain a detailed error explanation", - "type": "string", - "x-go-name": "ErrorMessage" - }, - "errorReason": { - "description": "in case of an error this will contain a short error message", - "type": "string", - "x-go-name": "ErrorReason" - }, - "machineName": { - "description": "name of the actual Machine object", - "type": "string", - "x-go-name": "MachineName" - }, - "nodeInfo": { - "$ref": "#/definitions/NodeSystemInfo" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeSystemInfo": { - "type": "object", - "title": "NodeSystemInfo is a set of versions/ids/uuids to uniquely identify the node.", - "properties": { - "architecture": { - "type": "string", - "x-go-name": "Architecture" - }, - "containerRuntime": { - "type": "string", - "x-go-name": "ContainerRuntime" - }, - "containerRuntimeVersion": { - "type": "string", - "x-go-name": "ContainerRuntimeVersion" - }, - "kernelVersion": { - "type": "string", - "x-go-name": "KernelVersion" - }, - "kubeletVersion": { - "type": "string", - "x-go-name": "KubeletVersion" - }, - "operatingSystem": { - "type": "string", - "x-go-name": "OperatingSystem" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodeVersionInfo": { - "description": "NodeVersionInfo node version information", - "type": "object", - "properties": { - "kubelet": { - "type": "string", - "x-go-name": "Kubelet" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NodesMetric": { - "description": "NodesMetric defines a metric for a group of nodes", - "type": "object", - "properties": { - "cpuAvailableMillicores": { - "type": "integer", - "format": "int64", - 
"x-go-name": "CPUAvailableMillicores" - }, - "cpuTotalMillicores": { - "description": "CPUTotalMillicores in m cores", - "type": "integer", - "format": "int64", - "x-go-name": "CPUTotalMillicores" - }, - "cpuUsedPercentage": { - "description": "CPUUsedPercentage in percentage", - "type": "integer", - "format": "int64", - "x-go-name": "CPUUsedPercentage" - }, - "memoryAvailableBytes": { - "description": "MemoryAvailableBytes available memory for node", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryAvailableBytes" - }, - "memoryTotalBytes": { - "description": "MemoryTotalBytes current memory usage in bytes", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryTotalBytes" - }, - "memoryUsedPercentage": { - "description": "MemoryUsedPercentage in percentage", - "type": "integer", - "format": "int64", - "x-go-name": "MemoryUsedPercentage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NotificationsOptions": { - "type": "object", - "properties": { - "hideErrorEvents": { - "description": "HideErrorEvents will silence error events for the dashboard.", - "type": "boolean", - "x-go-name": "HideErrorEvents" - }, - "hideErrors": { - "description": "HideErrors will silence error notifications for the dashboard.", - "type": "boolean", - "x-go-name": "HideErrors" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Nutanix": { - "type": "object", - "properties": { - "clusterName": { - "description": "The name of the Nutanix cluster to which the resources and nodes are deployed to.", - "type": "string", - "x-go-name": "ClusterName" - }, - "csiEndpoint": { - "description": "CSIEndpoint to access Nutanix Prism Element for CSI driver.", - "type": "string", - "x-go-name": "CSIEndpoint" - }, - "csiPassword": { - "description": "Prism Element Password for CSI driver.", - "type": "string", - "x-go-name": "CSIPassword" - }, - "csiPort": { - "description": "CSIPort to use when connecting to the Nutanix Prism Element endpoint (defaults to 9440).", - "type": "integer", - "format": "int32", - "x-go-name": "CSIPort" - }, - "csiUsername": { - "description": "Prism Element Username for CSI driver.", - "type": "string", - "x-go-name": "CSIUsername" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "password": { - "description": "Password corresponding to the provided user.", - "type": "string", - "x-go-name": "Password" - }, - "projectName": { - "description": "Optional: Nutanix project to use. 
If none is given,\nno project will be used.", - "type": "string", - "x-go-name": "ProjectName" - }, - "proxyURL": { - "description": "Optional: To configure a HTTP proxy to access Nutanix Prism Central.", - "type": "string", - "x-go-name": "ProxyURL" - }, - "username": { - "description": "Username that is used to access the Nutanix Prism Central API.", - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NutanixCSIConfig": { - "type": "object", - "title": "NutanixCSIConfig contains credentials and the endpoint for the Nutanix Prism Element to which the CSI driver connects.", - "properties": { - "endpoint": { - "description": "Prism Element Endpoint to access Nutanix Prism Element for CSI driver.", - "type": "string", - "x-go-name": "Endpoint" - }, - "fstype": { - "description": "Optional: defaults to \"xfs\"\n+optional", - "type": "string", - "x-go-name": "Fstype" - }, - "password": { - "description": "Prism Element Password for CSI driver.", - "type": "string", - "x-go-name": "Password" - }, - "port": { - "description": "Optional: Port to use when connecting to the Nutanix Prism Element endpoint (defaults to 9440).\n+optional", - "type": "integer", - "format": "int32", - "x-go-name": "Port" - }, - "ssSegmentedIscsiNetwork": { - "description": "Optional: defaults to \"false\".\n+optional", - "type": "boolean", - "x-go-name": "SsSegmentedIscsiNetwork" - }, - "storageContainer": { - "description": "Optional: defaults to \"SelfServiceContainer\".\n+optional", - "type": "string", - "x-go-name": "StorageContainer" - }, - "username": { - "description": "Prism Element Username for CSI driver.", - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NutanixCategory": { - "type": "object", - "title": "NutanixCategory represents a Nutanix category.", - "properties": { - "description": { - "type": "string", - "x-go-name": "Description" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "systemDefined": { - "type": "boolean", - "x-go-name": "SystemDefined" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixCategoryList": { - "type": "array", - "title": "NutanixCategoryList represents an array of Nutanix categories.", - "items": { - "$ref": "#/definitions/NutanixCategory" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixCategoryValue": { - "type": "object", - "title": "NutanixCategoryValue represents a Nutanix category value.", - "properties": { - "value": { - "type": "string", - "x-go-name": "Value" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixCategoryValueList": { - "type": "array", - "title": "NutanixCategoryValueList represents an array of Nutanix category values.", - "items": { - "$ref": "#/definitions/NutanixCategoryValue" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixCloudSpec": { - "type": "object", - "title": "NutanixCloudSpec specifies the access data to Nutanix.", - "properties": { - "clusterName": { - "description": "ClusterName is the Nutanix cluster that this user cluster will be deployed to.", - "type": "string", - "x-go-name": "ClusterName" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "csi": { - "$ref": "#/definitions/NutanixCSIConfig" - }, - "password": { - "description": "Password corresponding to the provided user.", - "type": "string", - "x-go-name": "Password" - }, - 
"projectName": { - "description": "The name of the project that this cluster is deployed into. If none is given, no project will be used.\n+optional", - "type": "string", - "x-go-name": "ProjectName" - }, - "proxyURL": { - "description": "Optional: Used to configure a HTTP proxy to access Nutanix Prism Central.", - "type": "string", - "x-go-name": "ProxyURL" - }, - "username": { - "description": "Username to access the Nutanix Prism Central API.", - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "NutanixCluster": { - "type": "object", - "title": "NutanixCluster represents a Nutanix cluster.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixClusterList": { - "type": "array", - "title": "NutanixClusterList represents an array of Nutanix clusters.", - "items": { - "$ref": "#/definitions/NutanixCluster" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixNodeSpec": { - "description": "NutanixNodeSpec nutanix specific node settings", - "type": "object", - "properties": { - "categories": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Categories" - }, - "cpuCores": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUCores" - }, - "cpuPassthrough": { - "type": "boolean", - "x-go-name": "CPUPassthrough" - }, - "cpus": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUs" - }, - "diskSize": { - "type": "integer", - "format": "int64", - "x-go-name": "DiskSize" - }, - "imageName": { - "type": "string", - "x-go-name": "ImageName" - }, - "memoryMB": { - "type": "integer", - "format": "int64", - "x-go-name": "MemoryMB" - }, - "subnetName": { - "type": "string", - "x-go-name": "SubnetName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixProject": { - "type": "object", - "title": "NutanixProject represents a Nutanix project.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixProjectList": { - "type": "array", - "title": "NutanixProjectList represents an array of Nutanix projects.", - "items": { - "$ref": "#/definitions/NutanixProject" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixSubnet": { - "type": "object", - "title": "NutanixSubnet represents a Nutanix subnet.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "type": { - "type": "string", - "x-go-name": "Type" - }, - "vlanID": { - "type": "integer", - "format": "int64", - "x-go-name": "VlanID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "NutanixSubnetList": { - "type": "array", - "title": "NutanixSubnetList represents an array of Nutanix subnets.", - "items": { - "$ref": "#/definitions/NutanixSubnet" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OIDCSettings": { - "type": "object", - "title": "OIDCSettings contains OIDC configuration parameters for enabling authentication mechanism for the cluster.", - "properties": { - "clientID": { - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "type": "string", - "x-go-name": "ClientSecret" - }, - "extraScopes": { - "type": "string", - "x-go-name": "ExtraScopes" - }, - "groupsClaim": { - "type": "string", - "x-go-name": "GroupsClaim" - }, - "groupsPrefix": { - "type": "string", - "x-go-name": "GroupsPrefix" - }, - 
"issuerURL": { - "type": "string", - "x-go-name": "IssuerURL" - }, - "requiredClaim": { - "type": "string", - "x-go-name": "RequiredClaim" - }, - "usernameClaim": { - "type": "string", - "x-go-name": "UsernameClaim" - }, - "usernamePrefix": { - "type": "string", - "x-go-name": "UsernamePrefix" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OIDCSpec": { - "type": "object", - "title": "OIDCSpec contains OIDC params that can be used to access user cluster.", - "properties": { - "clientId": { - "type": "string", - "x-go-name": "ClientID" - }, - "clientSecret": { - "type": "string", - "x-go-name": "ClientSecret" - }, - "issuerUrl": { - "type": "string", - "x-go-name": "IssuerURL" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "OPAIntegrationSettings": { - "type": "object", - "title": "OPAIntegrationSettings configures the usage of OPA (Open Policy Agent) Gatekeeper inside the user cluster.", - "properties": { - "auditResources": { - "$ref": "#/definitions/ResourceRequirements" - }, - "controllerResources": { - "$ref": "#/definitions/ResourceRequirements" - }, - "enabled": { - "description": "Enables OPA Gatekeeper integration.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "experimentalEnableMutation": { - "description": "Optional: Enables experimental mutation in Gatekeeper.", - "type": "boolean", - "x-go-name": "ExperimentalEnableMutation" - }, - "webhookTimeoutSeconds": { - "description": "The timeout in seconds that is set for the Gatekeeper validating webhook admission review calls.\nDefaults to `10` (seconds).", - "type": "integer", - "format": "int32", - "x-go-name": "WebhookTimeoutSeconds" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OSVersions": { - "type": "object", - "title": "OSVersions defines a map of OS version and the source to download the image.", - "additionalProperties": { - "type": "string" - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ObjectFieldSelector": { - "description": "+structType=atomic", - "type": "object", - "title": "ObjectFieldSelector selects an APIVersioned field of an object.", - "properties": { - "apiVersion": { - "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".\n+optional", - "type": "string", - "x-go-name": "APIVersion" - }, - "fieldPath": { - "description": "Path of the field to select in the specified API version.", - "type": "string", - "x-go-name": "FieldPath" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ObjectMeta": { - "description": "ObjectMeta defines the set of fields that objects returned from the API have", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ObjectReference": { - "description": "New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\nrestrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\nThose cannot be well described when embedded.\n3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\nduring interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\nand the version of the actual struct is irrelevant.\n5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\nwill affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic", - "type": "object", - "title": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "properties": { - "apiVersion": { - "description": "API version of the referent.\n+optional", - "type": "string", - "x-go-name": "APIVersion" - }, - "fieldPath": { - "description": "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). 
This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future.\n+optional", - "type": "string", - "x-go-name": "FieldPath" - }, - "kind": { - "description": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n+optional", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional", - "type": "string", - "x-go-name": "Namespace" - }, - "resourceVersion": { - "description": "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional", - "type": "string", - "x-go-name": "ResourceVersion" - }, - "uid": { - "$ref": "#/definitions/UID" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ObjectReferenceResource": { - "type": "object", - "title": "ObjectReferenceResource contains basic information about referred object.", - "properties": { - "name": { - "description": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional", - "type": "string", - "x-go-name": "Namespace" - }, - "type": { - "description": "Type of the referent.", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ObjectStorageLocation": { - "type": "object", - "title": "ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.", - "properties": { - "bucket": { - "description": "Bucket is the bucket to use for object storage.", - "type": "string", - "x-go-name": "Bucket" - }, - "caCert": { - "description": "CACert defines a CA bundle to use when verifying TLS connections to the provider.\n+optional", - "type": "array", - "items": { - "type": "integer", - "format": "uint8" - }, - "x-go-name": "CACert" - }, - "prefix": { - "description": "Prefix is the path inside a bucket to use for Velero storage. 
Optional.\n+optional", - "type": "string", - "x-go-name": "Prefix" - } - }, - "x-go-package": "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "OpaOptions": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - }, - "enforced": { - "type": "boolean", - "x-go-name": "Enforced" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OpenNebulaNodeSpec": { - "description": "OpenNebulaNodeSpec OpenNebula specific node settings", - "type": "object", - "properties": { - "cpu": { - "type": "number", - "format": "double", - "x-go-name": "CPU" - }, - "datastore": { - "type": "string", - "x-go-name": "Datastore" - }, - "diskSize": { - "type": "integer", - "format": "int64", - "x-go-name": "DiskSize" - }, - "enableVNC": { - "type": "boolean", - "x-go-name": "EnableVNC" - }, - "image": { - "type": "string", - "x-go-name": "Image" - }, - "memory": { - "type": "integer", - "format": "int64", - "x-go-name": "Memory" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "vcpu": { - "type": "integer", - "format": "int64", - "x-go-name": "VCPU" - }, - "vmTemplateExtra": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "VMTemplateExtra" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenStack": { - "type": "object", - "properties": { - "enforceCustomDisk": { - "description": "EnforceCustomDisk will enforce the custom disk option for machines for the dashboard.", - "type": "boolean", - "x-go-name": "EnforceCustomDisk" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OpenStackAPIPreset": { - "description": "OpenStackPreset represents a preset for OpenStack", - "type": "object", - "properties": { - "floatingIPPool": { - "type": "string", - "x-go-name": "FloatingIPPool" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "routerID": { - "type": "string", - "x-go-name": "RouterID" - }, - "securityGroups": { - "type": "string", - "x-go-name": "SecurityGroups" - }, - "subnetID": { - "type": "string", - "x-go-name": "SubnetID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Openstack": { - "type": "object", - "properties": { - "applicationCredentialID": { - "description": "Application credential ID to authenticate in combination with an application credential secret (which is not the user's password).", - "type": "string", - "x-go-name": "ApplicationCredentialID" - }, - "applicationCredentialSecret": { - "description": "Application credential secret (which is not the user's password) to authenticate in combination with an application credential ID.", - "type": "string", - "x-go-name": "ApplicationCredentialSecret" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "domain": { - "description": "Domain holds the name of the identity service (keystone) domain.", - "type": "string", - "x-go-name": "Domain" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "floatingIPPool": { - "description": "FloatingIPPool holds the name of the public network The public network is reachable from the outside world and should provide the pool of IP addresses to choose from.", - "type": "string", - "x-go-name": "FloatingIPPool" - }, - "isCustomizable": { - "description": 
"IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "network": { - "description": "Network holds the name of the internal network When specified, all worker nodes will be attached to this network. If not specified, a network, subnet \u0026 router will be created.", - "type": "string", - "x-go-name": "Network" - }, - "password": { - "type": "string", - "x-go-name": "Password" - }, - "project": { - "description": "Project, formally known as tenant.", - "type": "string", - "x-go-name": "Project" - }, - "projectID": { - "description": "ProjectID, formally known as tenantID.", - "type": "string", - "x-go-name": "ProjectID" - }, - "routerID": { - "type": "string", - "x-go-name": "RouterID" - }, - "securityGroups": { - "type": "string", - "x-go-name": "SecurityGroups" - }, - "subnetID": { - "type": "string", - "x-go-name": "SubnetID" - }, - "useToken": { - "type": "boolean", - "x-go-name": "UseToken" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OpenstackAvailabilityZone": { - "type": "object", - "title": "OpenstackAvailabilityZone is the object representing a openstack availability zone.", - "properties": { - "name": { - "description": "Name is the name of the availability zone", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackCloudSpec": { - "type": "object", - "title": "OpenstackCloudSpec specifies access data to an OpenStack cloud.", - "properties": { - "applicationCredentialID": { - "description": "Application credential ID to authenticate in combination with an application credential secret (which is not the user's password).", - "type": "string", - "x-go-name": "ApplicationCredentialID" - }, - "applicationCredentialSecret": { - "description": "Application credential secret (which is not the user's password) to authenticate in combination with an application credential ID.", - "type": "string", - "x-go-name": "ApplicationCredentialSecret" - }, - "cinderTopologyEnabled": { - "description": "Flag to configure enablement of topology support for the Cinder CSI plugin.\nThis requires Nova and Cinder to have matching availability zones configured.\n+optional", - "type": "boolean", - "x-go-name": "CinderTopologyEnabled" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "domain": { - "description": "Domain holds the name of the identity service (keystone) domain.", - "type": "string", - "x-go-name": "Domain" - }, - "enableIngressHostname": { - "description": "Enable the `enable-ingress-hostname` cloud provider option on the Openstack CCM. 
Can only be used with the\nexternal CCM and might be deprecated and removed in future versions as it is considered a workaround for the PROXY\nprotocol to preserve client IPs.\n+optional", - "type": "boolean", - "x-go-name": "EnableIngressHostname" - }, - "floatingIPPool": { - "description": "FloatingIPPool holds the name of the public network\nThe public network is reachable from the outside world\nand should provide the pool of IP addresses to choose from.\n\nWhen specified, all worker nodes will receive a public ip from this floating ip pool\n\nNote that the network is external if the \"External\" field is set to true", - "type": "string", - "x-go-name": "FloatingIPPool" - }, - "ingressHostnameSuffix": { - "description": "Set a specific suffix for the hostnames used for the PROXY protocol workaround that is enabled by EnableIngressHostname.\nThe suffix is set to `nip.io` by default. Can only be used with the external CCM and might be deprecated and removed in\nfuture versions as it is considered a workaround only.", - "type": "string", - "x-go-name": "IngressHostnameSuffix" - }, - "ipv6SubnetID": { - "description": "IPv6SubnetID holds the ID of the subnet used for IPv6 networking.\nIf not provided, a new subnet will be created if IPv6 is enabled.\n+optional", - "type": "string", - "x-go-name": "IPv6SubnetID" - }, - "ipv6SubnetPool": { - "description": "IPv6SubnetPool holds the name of the subnet pool used for creating new IPv6 subnets.\nIf not provided, the default IPv6 subnet pool will be used.\n+optional", - "type": "string", - "x-go-name": "IPv6SubnetPool" - }, - "network": { - "description": "Network holds the name of the internal network\nWhen specified, all worker nodes will be attached to this network. If not specified, a network, subnet \u0026 router will be created.\n\nNote that the network is internal if the \"External\" field is set to false", - "type": "string", - "x-go-name": "Network" - }, - "nodePortsAllowedIPRange": { - "description": "A CIDR range that will be used to allow access to the node port range in the security group to. 
Only applies if\nthe security group is generated by KKP and not preexisting.\nIf neither NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.", - "type": "string", - "x-go-name": "NodePortsAllowedIPRange" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "password": { - "type": "string", - "x-go-name": "Password" - }, - "project": { - "description": "project, formerly known as tenant.", - "type": "string", - "x-go-name": "Project" - }, - "projectID": { - "description": "project id, formerly known as tenantID.", - "type": "string", - "x-go-name": "ProjectID" - }, - "routerID": { - "type": "string", - "x-go-name": "RouterID" - }, - "securityGroups": { - "description": "SecurityGroups is the name of the security group (only supports a singular security group) that will be used for Machines in the cluster.\nIf this field is left empty, a default security group will be created and used.", - "type": "string", - "x-go-name": "SecurityGroups" - }, - "subnetID": { - "type": "string", - "x-go-name": "SubnetID" - }, - "token": { - "description": "Used internally during cluster creation", - "type": "string", - "x-go-name": "Token" - }, - "useOctavia": { - "description": "Whether or not to use Octavia for LoadBalancer type of Service\nimplementation instead of using Neutron-LBaaS.\nAttention: Openstack CCM uses Octavia as the default load balancer\nimplementation since v1.17.0\n\nTakes precedence over the 'use_octavia' flag provided at datacenter\nlevel if both are specified.\n+optional", - "type": "boolean", - "x-go-name": "UseOctavia" - }, - "useToken": { - "type": "boolean", - "x-go-name": "UseToken" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OpenstackNetwork": { - "type": "object", - "title": "OpenstackNetwork is the object representing a openstack network.", - "properties": { - "external": { - "description": "External set if network is the external network", - "type": "boolean", - "x-go-name": "External" - }, - "id": { - "description": "Id uniquely identifies the current network", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name is the name of the network", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackNodeSizeRequirements": { - "type": "object", - "properties": { - "minimumMemory": { - "description": "MinimumMemory is the minimum required amount of memory, measured in MB", - "type": "integer", - "format": "int64", - "x-go-name": "MinimumMemory" - }, - "minimumVCPUs": { - "description": "VCPUs is the minimum required amount of (virtual) CPUs", - "type": "integer", - "format": "int64", - "x-go-name": "MinimumVCPUs" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OpenstackNodeSpec": { - "description": "OpenstackNodeSpec openstack node settings", - "type": "object", - "required": [ - "flavor", - "image" - ], - "properties": { - "availabilityZone": { - "description": "if not set, the default AZ from the Datacenter spec will be used", - "type": "string", - "x-go-name": "AvailabilityZone" - }, - "configDrive": { - "description": "ConfigDrive enables a configuration drive that will be attached to the instance when it boots.", - "type": "boolean", - "x-go-name": "ConfigDrive" - }, - "diskSize": { - "description": "if set, the rootDisk will be a volume. 
If not, the rootDisk will be on ephemeral storage and its size will be derived from the flavor", - "type": "integer", - "format": "int64", - "x-go-name": "RootDiskSizeGB" - }, - "flavor": { - "description": "instance flavor", - "type": "string", - "x-go-name": "Flavor" - }, - "image": { - "description": "image to use", - "type": "string", - "x-go-name": "Image" - }, - "instanceReadyCheckPeriod": { - "description": "Period of time to check for instance ready status, i.e. 10s/1m", - "type": "string", - "x-go-name": "InstanceReadyCheckPeriod" - }, - "instanceReadyCheckTimeout": { - "description": "Max time to wait for the instance to be ready, i.e. 10s/1m", - "type": "string", - "x-go-name": "InstanceReadyCheckTimeout" - }, - "serverGroup": { - "description": "UUID of the server group, used to configure affinity or anti-affinity of the VM instances relative to hypervisor", - "type": "string", - "x-go-name": "ServerGroup" - }, - "tags": { - "description": "Additional metadata to set", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Tags" - }, - "useFloatingIP": { - "description": "Defines whether floating ip should be used", - "type": "boolean", - "x-go-name": "UseFloatingIP" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackSecurityGroup": { - "type": "object", - "title": "OpenstackSecurityGroup is the object representing a openstack security group.", - "properties": { - "id": { - "description": "Id uniquely identifies the current security group", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name is the name of the security group", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackServerGroup": { - "type": "object", - "title": "OpenstackServerGroup is the object representing a openstack server group.", - "properties": { - "id": { - "description": "Id uniquely identifies the current server group", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name is the name of the server group", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "OpenstackSize": { - "type": "object", - "title": "OpenstackSize is the object representing openstack's sizes.", - "properties": { - "disk": { - "description": "Disk is the amount of root disk, measured in GB", - "type": "integer", - "format": "int64", - "x-go-name": "Disk" - }, - "isPublic": { - "description": "IsPublic indicates whether the size is public (available to all projects) or scoped to a set of projects", - "type": "boolean", - "x-go-name": "IsPublic" - }, - "memory": { - "description": "MemoryTotalBytes is the amount of memory, measured in MB", - "type": "integer", - "format": "int64", - "x-go-name": "Memory" - }, - "region": { - "description": "Region specifies the geographic region in which the size resides", - "type": "string", - "x-go-name": "Region" - }, - "slug": { - "description": "Slug holds the name of the size", - "type": "string", - "x-go-name": "Slug" - }, - "swap": { - "description": "Swap is the amount of swap space, measured in MB", - "type": "integer", - "format": "int64", - "x-go-name": "Swap" - }, - "vcpus": { - "description": "VCPUs indicates how many (virtual) CPUs are available for this flavor", - "type": "integer", - "format": "int64", - "x-go-name": "VCPUs" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackSubnet": { - "type": "object", - "title": 
"OpenstackSubnet is the object representing a openstack subnet.", - "properties": { - "id": { - "description": "Id uniquely identifies the subnet", - "type": "string", - "x-go-name": "ID" - }, - "ipVersion": { - "description": "IPversion is the IP protocol version (4 or 6)", - "type": "integer", - "format": "int64", - "x-go-name": "IPVersion" - }, - "name": { - "description": "Name is human-readable name for the subnet", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OpenstackSubnetPool": { - "type": "object", - "title": "OpenstackSubnetPool is the object representing a openstack subnet pool.", - "properties": { - "id": { - "description": "Id uniquely identifies the subnet pool", - "type": "string", - "x-go-name": "ID" - }, - "ipVersion": { - "description": "IPversion is the IP protocol version (4 or 6)", - "type": "integer", - "format": "int64", - "x-go-name": "IPversion" - }, - "isDefault": { - "description": "IsDefault indicates if the subnetpool is default pool or not", - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "description": "Name is the name of the subnet pool", - "type": "string", - "x-go-name": "Name" - }, - "prefixes": { - "description": "Prefixes is the list of subnet prefixes", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Prefixes" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "OpenstackTenant": { - "type": "object", - "title": "OpenstackTenant is the object representing a openstack tenant.", - "properties": { - "id": { - "description": "Id uniquely identifies the current tenant", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name is the name of the tenant", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "OperatingSystemProfile": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "operatingSystem": { - "type": "string", - "x-go-name": "OperatingSystem" - }, - "supportedCloudProviders": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "SupportedCloudProviders" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "OperatingSystemProfileList": { - "type": "object", - "title": "OperatingSystemProfileList defines a map of operating system and the OperatingSystemProfile to use.", - "additionalProperties": { - "type": "string" - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "OperatingSystemSpec": { - "type": "object", - "title": "OperatingSystemSpec represents the collection of os specific settings. Only one must be set at a time.", - "properties": { - "amzn2": { - "$ref": "#/definitions/AmazonLinuxSpec" - }, - "flatcar": { - "$ref": "#/definitions/FlatcarSpec" - }, - "rhel": { - "$ref": "#/definitions/RHELSpec" - }, - "rockylinux": { - "$ref": "#/definitions/RockyLinuxSpec" - }, - "ubuntu": { - "$ref": "#/definitions/UbuntuSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Packet": { - "description": "This provider is no longer supported. 
Migrate your configurations away from \"packet\" immediately.", - "type": "object", - "title": "Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.", - "properties": { - "apiKey": { - "type": "string", - "x-go-name": "APIKey" - }, - "billingCycle": { - "type": "string", - "x-go-name": "BillingCycle" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PacketCPU": { - "type": "object", - "title": "PacketCPU represents an array of Packet CPUs. It is a part of PacketSize.", - "properties": { - "count": { - "type": "integer", - "format": "int64", - "x-go-name": "Count" - }, - "type": { - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PacketCloudSpec": { - "description": "This provider is no longer supported. Migrate your configurations away from \"packet\" immediately.\nPacketCloudSpec specifies access data to a Packet cloud.", - "type": "object", - "title": "Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.", - "properties": { - "apiKey": { - "type": "string", - "x-go-name": "APIKey" - }, - "billingCycle": { - "type": "string", - "x-go-name": "BillingCycle" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PacketDrive": { - "type": "object", - "title": "PacketDrive represents an array of Packet drives. 
It is a part of PacketSize.", - "properties": { - "count": { - "type": "integer", - "format": "int64", - "x-go-name": "Count" - }, - "size": { - "type": "string", - "x-go-name": "Size" - }, - "type": { - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PacketNodeSpec": { - "description": "PacketNodeSpec specifies packet specific node settings", - "type": "object", - "required": [ - "instanceType" - ], - "properties": { - "instanceType": { - "description": "InstanceType denotes the plan to which the device will be provisioned.", - "type": "string", - "x-go-name": "InstanceType" - }, - "tags": { - "description": "additional instance tags", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Tags" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PacketSize": { - "type": "object", - "title": "PacketSize is the object representing Packet VM sizes.", - "properties": { - "cpus": { - "type": "array", - "items": { - "$ref": "#/definitions/PacketCPU" - }, - "x-go-name": "CPUs" - }, - "drives": { - "type": "array", - "items": { - "$ref": "#/definitions/PacketDrive" - }, - "x-go-name": "Drives" - }, - "memory": { - "type": "string", - "x-go-name": "Memory" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PacketSizeList": { - "type": "array", - "title": "PacketSizeList represents an array of Packet VM sizes.", - "items": { - "$ref": "#/definitions/PacketSize" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Parameters": { - "type": "object", - "additionalProperties": { - "type": "object" - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Permission": { - "type": "object", - "title": "Permission represents the permissions (i.e. role and clusterRole) associated to an object.", - "properties": { - "namespace": { - "description": "Namespace on which the permission is given. Empty if scope is Cluster", - "type": "string", - "x-go-name": "Namespace" - }, - "roleRefName": { - "description": "RoleRefName is the name of the clusterRole or Role.", - "type": "string", - "x-go-name": "RoleRefName" - }, - "scope": { - "description": "Scope of the permission. Either \"Cluster\" or \"Namespace\".", - "type": "string", - "x-go-name": "Scope" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PodDNSConfig": { - "description": "PodDNSConfig defines the DNS parameters of a pod in addition to\nthose generated from DNSPolicy.", - "type": "object", - "properties": { - "nameservers": { - "description": "A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Nameservers" - }, - "options": { - "description": "A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. 
Resolution options given in Options\nwill override those that appear in the base DNSPolicy.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "$ref": "#/definitions/PodDNSConfigOption" - }, - "x-go-name": "Options" - }, - "searches": { - "description": "A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Searches" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "PodDNSConfigOption": { - "type": "object", - "title": "PodDNSConfigOption defines DNS resolver options of a pod.", - "properties": { - "name": { - "description": "Name is this DNS resolver option's name.\nRequired.", - "type": "string", - "x-go-name": "Name" - }, - "value": { - "description": "Value is this DNS resolver option's value.\n+optional", - "type": "string", - "x-go-name": "Value" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "PolicyBinding": { - "description": "PolicyBinding binds a PolicyTemplate to specific clusters/projects and\noptionally enables or disables it (if the template is not enforced).", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/PolicyBindingSpec" - }, - "status": { - "$ref": "#/definitions/PolicyBindingStatus" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PolicyBindingSpec": { - "type": "object", - "title": "PolicyBindingSpec describes how and where to apply the referenced PolicyTemplate.", - "properties": { - "kyvernoPolicyNamespace": { - "$ref": "#/definitions/KyvernoPolicyNamespace" - }, - "policyTemplateRef": { - "$ref": "#/definitions/ObjectReference" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PolicyBindingStatus": { - "type": "object", - "title": "PolicyBindingStatus is the status of the policy binding.", - "properties": { - "active": { - "description": "Active reflects whether the Kyverno policy exists and is active in this User Cluster.\n\n+optional", - "type": "boolean", - "x-go-name": "Active" - }, - "conditions": { - "description": "Conditions represents the latest available observations of the policy binding's current state\n+optional\n+listType=map\n+listMapKey=type", - "type": "array", - "items": { - "$ref": "#/definitions/Condition" - }, - "x-go-name": "Conditions" - }, - "observedGeneration": { - "description": "ObservedGeneration is the generation observed by the controller.\n\n+optional", - "type": "integer", - "format": "int64", - "x-go-name": "ObservedGeneration" - }, - "templateEnforced": { - "description": "TemplateEnforced reflects the value of `spec.enforced` from PolicyTemplate\n\n+optional", - "type": "boolean", - "x-go-name": "TemplateEnforced" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PolicyRule": { - "description": "PolicyRule holds information that describes a policy rule, but does not contain information\nabout who the rule applies to or which namespace the rule applies to.", - "type": "object", - "properties": { - "apiGroups": { - "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of\nthe enumerated resources in any API group will be allowed. 
\"\" represents the core API group and \"*\" represents all API groups.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "APIGroups" - }, - "nonResourceURLs": { - "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path\nSince non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.\nRules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "NonResourceURLs" - }, - "resourceNames": { - "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ResourceNames" - }, - "resources": { - "description": "Resources is a list of resources this rule applies to. '*' represents all resources.\n+optional\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Resources" - }, - "verbs": { - "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.\n+listType=atomic", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Verbs" - } - }, - "x-go-package": "k8s.io/api/rbac/v1" - }, - "PolicyTemplate": { - "type": "object", - "title": "PolicyTemplate defines a reusable blueprint of a Kyverno policy.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/PolicyTemplateSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PolicyTemplateSpec": { - "type": "object", - "properties": { - "category": { - "description": "Category is the category of the policy, specified as an annotation in the Kyverno policy\n\n+optional", - "type": "string", - "x-go-name": "Category" - }, - "default": { - "description": "Default determines whether we apply the policy (create policy binding) by default\n\n+optional", - "type": "boolean", - "x-go-name": "Default" - }, - "description": { - "description": "Description is the description of the policy, specified as an annotation in the Kyverno policy", - "type": "string", - "x-go-name": "Description" - }, - "enforced": { - "description": "Enforced indicates whether this policy is mandatory\n\nIf true, this policy is mandatory\nA PolicyBinding referencing it cannot disable it\n+optional", - "type": "boolean", - "x-go-name": "Enforced" - }, - "namespacedPolicy": { - "description": "NamespacedPolicy dictates the type of Kyverno resource to be created in this User Cluster.\n\n+optional", - "type": "boolean", - "x-go-name": "NamespacedPolicy" - }, - "policySpec": { - "$ref": "#/definitions/RawExtension" - }, - "projectID": { - "description": "ProjectID is the ID of the project for which the policy template is created\n\nRelevant only for project visibility policies\n+optional", - "type": "string", - "x-go-name": "ProjectID" - }, - "severity": { - "description": "Severity indicates the severity level of the policy\n\n+optional", - "type": "string", - "x-go-name": "Severity" - }, - "target": { - "$ref": "#/definitions/PolicyTemplateTarget" - }, - "title": { - "description": "Title is the title of the policy, specified as an 
annotation in the Kyverno policy", - "type": "string", - "x-go-name": "Title" - }, - "visibility": { - "description": "Visibility specifies where the policy is visible.\n\nCan be one of: global, project, or cluster\n+kubebuilder:validation:Enum=Global;Project;Cluster\n+kubebuilder:validation:Required", - "type": "string", - "x-go-name": "Visibility" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PolicyTemplateTarget": { - "type": "object", - "title": "PolicyTemplateTarget allows specifying label selectors for Projects and Clusters.", - "properties": { - "clusterSelector": { - "$ref": "#/definitions/LabelSelector" - }, - "projectSelector": { - "$ref": "#/definitions/LabelSelector" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PolicyType": { - "description": "PolicyType string describes the NetworkPolicy type\nThis type is beta-level in 1.8\n+enum", - "type": "string", - "x-go-package": "k8s.io/api/networking/v1" - }, - "PreAllocatedDataVolume": { - "type": "object", - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "size": { - "type": "string", - "x-go-name": "Size" - }, - "storageClass": { - "type": "string", - "x-go-name": "StorageClass" - }, - "url": { - "type": "string", - "x-go-name": "URL" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PreferenceMatcher": { - "type": "object", - "title": "PreferenceMatcher references a set of preference that is used to fill fields in the VMI template.", - "properties": { - "inferFromVolume": { - "description": "InferFromVolume lists the name of a volume that should be used to infer or discover the preference\nto be used through known annotations on the underlying resource. Once applied to the PreferenceMatcher\nthis field is removed.\n\n+optional", - "type": "string", - "x-go-name": "InferFromVolume" - }, - "inferFromVolumeFailurePolicy": { - "$ref": "#/definitions/InferFromVolumeFailurePolicy" - }, - "kind": { - "description": "Kind specifies which preference resource is referenced.\nAllowed values are: \"VirtualMachinePreference\" and \"VirtualMachineClusterPreference\".\nIf not specified, \"VirtualMachineClusterPreference\" is used by default.\n\n+optional", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name is the name of the VirtualMachinePreference or VirtualMachineClusterPreference\n\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "revisionName": { - "description": "RevisionName specifies a ControllerRevision containing a specific copy of the\nVirtualMachinePreference or VirtualMachineClusterPreference to be used. 
This is\ninitially captured the first time the instancetype is applied to the VirtualMachineInstance.\n\n+optional", - "type": "string", - "x-go-name": "RevisionName" - } - }, - "x-go-package": "kubevirt.io/api/core/v1" - }, - "Preset": { - "description": "Preset represents a preset", - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "providers": { - "type": "array", - "items": { - "$ref": "#/definitions/PresetProvider" - }, - "x-go-name": "Providers" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetBody": { - "description": "PresetBody represents the body of a created preset", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/PresetSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetBodyMetadata": { - "description": "PresetBodyMetadata represents metadata within the body of a created preset", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetLinkages": { - "description": "PresetLinkages represents detailed linkage information for a preset", - "type": "object", - "properties": { - "clusterTemplates": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterTemplateAssociation" - }, - "x-go-name": "ClusterTemplates" - }, - "clusters": { - "type": "array", - "items": { - "$ref": "#/definitions/ClusterAssociation" - }, - "x-go-name": "Clusters" - }, - "presetName": { - "type": "string", - "x-go-name": "PresetName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetList": { - "description": "PresetList represents a list of presets", - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/Preset" - }, - "x-go-name": "Items" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetProvider": { - "description": "PresetProvider represents a preset provider", - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "name": { - "$ref": "#/definitions/ProviderType" - }, - "openstack": { - "$ref": "#/definitions/OpenStackAPIPreset" - }, - "vmwareCloudDirector": { - "$ref": "#/definitions/VMwareCloudDirectorAPIPreset" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "PresetSpec": { - "type": "object", - "title": "Presets specifies default presets for supported providers.", - "properties": { - "aks": { - "$ref": "#/definitions/AKS" - }, - "alibaba": { - "$ref": "#/definitions/Alibaba" - }, - "anexia": { - "$ref": "#/definitions/Anexia" - }, - "aws": { - "$ref": "#/definitions/AWS" - }, - "azure": { - "$ref": "#/definitions/Azure" - }, - "baremetal": { - "$ref": "#/definitions/Baremetal" - }, - "digitalocean": { - "$ref": "#/definitions/Digitalocean" - }, - "eks": { - "$ref": "#/definitions/EKS" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "fake": { - "$ref": "#/definitions/Fake" - }, - "gcp": { - "$ref": "#/definitions/GCP" - }, - "gke": { - "$ref": "#/definitions/GKE" - }, - "hetzner": { - "$ref": "#/definitions/Hetzner" - }, - "kubevirt": { - "$ref": "#/definitions/Kubevirt" - }, - 
"nutanix": { - "$ref": "#/definitions/Nutanix" - }, - "openstack": { - "$ref": "#/definitions/Openstack" - }, - "packet": { - "$ref": "#/definitions/Packet" - }, - "projects": { - "description": "Projects is a list of project IDs that this preset is limited to.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Projects" - }, - "requiredEmails": { - "description": "RequiredEmails is a list of e-mail addresses that this presets should\nbe restricted to. Each item in the list can be either a full e-mail\naddress or just a domain name. This restriction is only enforced in the\nKKP API.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "RequiredEmails" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/VMwareCloudDirector" - }, - "vsphere": { - "$ref": "#/definitions/VSphere" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PresetStats": { - "type": "object", - "title": "PresetStats represents the statistics for a preset.", - "properties": { - "associatedClusterTemplates": { - "type": "integer", - "format": "int64", - "x-go-name": "AssociatedClusterTemplates" - }, - "associatedClusters": { - "type": "integer", - "format": "int64", - "x-go-name": "AssociatedClusters" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Project": { - "description": "Project is a top-level container for a set of resources", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "clustersNumber": { - "type": "integer", - "format": "int64", - "x-go-name": "ClustersNumber" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Labels" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "owners": { - "description": "Owners an optional owners list for the given project", - "type": "array", - "items": { - "$ref": "#/definitions/User" - }, - "x-go-name": "Owners" - }, - "spec": { - "$ref": "#/definitions/ProjectSpec" - }, - "status": { - "type": "string", - "x-go-name": "Status" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ProjectClusterList": { - "description": "An error message is added to the response in case when there was a problem with creating client for any of seeds.", - "type": "object", - "title": "ProjectClusterList contains a list of clusters for a project and an optional error message.", - "properties": { - "clusters": { - "$ref": "#/definitions/ClusterList" - }, - "errorMessage": { - "type": "string", - "x-go-name": "ErrorMessage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ProjectGroup": { - "description": "ProjectGroup is a helper data structure that\nstores the information about a project and a group prefix that a user belongs to.", - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "GroupPrefix" - }, - "id": { - "type": "string", - "x-go-name": "ID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ProjectResourceQuota": { - "type": "object", - "properties": { - "quota": { - "$ref": "#/definitions/Quota" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ProjectSpec": { - "type": "object", - "title": "ProjectSpec is a specification of a project.", - "properties": { - "allowedOperatingSystems": { - "description": "AllowedOperatingSystems defines a map of operating systems that can be used for the machines inside this project.", - "type": "object", - "additionalProperties": { - "type": "boolean" - }, - "x-go-name": "AllowedOperatingSystems" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Protocol": { - "description": "+enum", - "type": "string", - "title": "Protocol defines network protocols supported for things like container ports.", - "x-go-package": "k8s.io/api/core/v1" - }, - "ProviderConfiguration": { - "type": "object", - "properties": { - "openStack": { - "$ref": "#/definitions/OpenStack" - }, - "vmwareCloudDirector": { - "$ref": "#/definitions/VMwareCloudDirectorSettings" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ProviderNetwork": { - "type": "object", - "title": "ProviderNetwork describes the infra cluster network fabric that is being used.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "networkPolicy": { - "$ref": "#/definitions/NetworkPolicy" - }, - "networkPolicyEnabled": { - "description": "Deprecated: Use .networkPolicy.enabled instead.", - "type": "boolean", - "x-go-name": "NetworkPolicyEnabled" - }, - "vpcs": { - "type": "array", - "items": { - "$ref": "#/definitions/VPC" - }, - "x-go-name": "VPCs" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ProviderPreset": { - "type": "object", - "properties": { - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - 
"description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ProviderType": { - "description": "+kubebuilder:validation:Enum=digitalocean;hetzner;azure;vsphere;aws;openstack;packet;gcp;kubevirt;nutanix;alibaba;anexia;fake;vmwareclouddirector", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ProxySettings": { - "description": "ProxySettings allow configuring a HTTP proxy for the controlplanes\nand nodes.", - "type": "object", - "properties": { - "httpProxy": { - "$ref": "#/definitions/ProxyValue" - }, - "noProxy": { - "$ref": "#/definitions/ProxyValue" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ProxyValue": { - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "PublicAWSCloudSpec": { - "type": "object", - "title": "PublicAWSCloudSpec is a public counterpart of apiv1.AWSCloudSpec.", - "properties": { - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicAlibabaCloudSpec": { - "type": "object", - "title": "PublicAlibabaCloudSpec is a public counterpart of apiv1.AlibabaCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicAnexiaCloudSpec": { - "type": "object", - "title": "PublicAnexiaCloudSpec is a public counterpart of apiv1.AnexiaCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicAzureCloudSpec": { - "type": "object", - "title": "PublicAzureCloudSpec is a public counterpart of apiv1.AzureCloudSpec.", - "properties": { - "assignAvailabilitySet": { - "type": "boolean", - "x-go-name": "AssignAvailabilitySet" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicBaremetalCloudSpec": { - "type": "object", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicBringYourOwnCloudSpec": { - "type": "object", - "title": "PublicBringYourOwnCloudSpec is a public counterpart of apiv1.BringYourOwnCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicCloudSpec": { - "type": "object", - "title": "PublicCloudSpec is a public counterpart of apiv1.CloudSpec.", - "properties": { - "alibaba": { - "$ref": "#/definitions/PublicAlibabaCloudSpec" - }, - "anexia": { - "$ref": "#/definitions/PublicAnexiaCloudSpec" - }, - "aws": { - "$ref": "#/definitions/PublicAWSCloudSpec" - }, - "azure": { - "$ref": "#/definitions/PublicAzureCloudSpec" - }, - "baremetal": { - "$ref": "#/definitions/PublicBaremetalCloudSpec" - }, - "bringyourown": { - "$ref": "#/definitions/PublicBringYourOwnCloudSpec" - }, - "dc": { - "type": "string", - "x-go-name": "DatacenterName" - }, - "digitalocean": { - "$ref": "#/definitions/PublicDigitaloceanCloudSpec" - }, - "edge": { - "$ref": "#/definitions/PublicEdgeCloudSpec" - }, - "fake": { - 
"$ref": "#/definitions/PublicFakeCloudSpec" - }, - "gcp": { - "$ref": "#/definitions/PublicGCPCloudSpec" - }, - "hetzner": { - "$ref": "#/definitions/PublicHetznerCloudSpec" - }, - "kubevirt": { - "$ref": "#/definitions/PublicKubevirtCloudSpec" - }, - "nutanix": { - "$ref": "#/definitions/PublicNutanixCloudSpec" - }, - "openstack": { - "$ref": "#/definitions/PublicOpenstackCloudSpec" - }, - "packet": { - "$ref": "#/definitions/PublicPacketCloudSpec" - }, - "vmwareclouddirector": { - "$ref": "#/definitions/PublicVMwareCloudDirectorCloudSpec" - }, - "vsphere": { - "$ref": "#/definitions/PublicVSphereCloudSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicDigitaloceanCloudSpec": { - "type": "object", - "title": "PublicDigitaloceanCloudSpec is a public counterpart of apiv1.DigitaloceanCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicEdgeCloudSpec": { - "type": "object", - "title": "PublicEdgeCloudSpec is a public counterpart of apiv1.EdgeCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicFakeCloudSpec": { - "type": "object", - "title": "PublicFakeCloudSpec is a public counterpart of apiv1.FakeCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicGCPCloudSpec": { - "type": "object", - "title": "PublicGCPCloudSpec is a public counterpart of apiv1.GCPCloudSpec.", - "properties": { - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicHetznerCloudSpec": { - "type": "object", - "title": "PublicHetznerCloudSpec is a public counterpart of apiv1.HetznerCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicKubevirtCloudSpec": { - "type": "object", - "title": "PublicKubevirtCloudSpec is a public counterpart of apiv1.KubevirtCloudSpec.", - "properties": { - "preAllocatedDataVolumes": { - "type": "array", - "items": { - "$ref": "#/definitions/PreAllocatedDataVolume" - }, - "x-go-name": "PreAllocatedDataVolumes" - }, - "vpcName": { - "type": "string", - "x-go-name": "VPCName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicNutanixCloudSpec": { - "type": "object", - "title": "PublicNutanixCloudSpec is a public counterpart of apiv1.NutanixCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicOpenstackCloudSpec": { - "type": "object", - "title": "PublicOpenstackCloudSpec is a public counterpart of apiv1.OpenstackCloudSpec.", - "properties": { - "domain": { - "type": "string", - "x-go-name": "Domain" - }, - "floatingIPPool": { - "type": "string", - "x-go-name": "FloatingIPPool" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "nodePortsAllowedIPRanges": { - "$ref": "#/definitions/NetworkRanges" - }, - "project": { - "type": "string", - "x-go-name": "Project" - }, - "projectID": { - "type": "string", - "x-go-name": "ProjectID" - }, - "routerID": { - "type": "string", - "x-go-name": "RouterID" - }, - "securityGroups": { - "type": "string", - "x-go-name": "SecurityGroups" - }, - "subnetID": { - "type": "string", - "x-go-name": "SubnetID" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicPacketCloudSpec": { - "type": "object", - "title": "PublicPacketCloudSpec is a public counterpart of apiv1.PacketCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicServiceAccountToken": { - "description": "PublicServiceAccountToken represent an API service account token without secret 
fields", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "expiry": { - "description": "Expiry is a timestamp representing the time when this token will expire.", - "type": "string", - "format": "date-time", - "x-go-name": "Expiry" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "invalidated": { - "description": "Invalidated indicates if the token must be regenerated", - "type": "boolean", - "x-go-name": "Invalidated" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicVMwareCloudDirectorCloudSpec": { - "type": "object", - "title": "PublicVMwareCloudDirectorCloudSpec is a public counterpart of apiv1.VMwareCloudDirectorCloudSpec.", - "properties": { - "ovdcNetwork": { - "type": "string", - "x-go-name": "OVDCNetwork" - }, - "ovdcNetworks": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "OVDCNetworks" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "PublicVSphereCloudSpec": { - "type": "object", - "title": "PublicVSphereCloudSpec is a public counterpart of apiv1.VSphereCloudSpec.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Quantity": { - "description": "The serialization format is:\n\n```\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n```\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. 
Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n\nNo precision is lost\nNo fractional digits will be emitted\nThe exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n1.5 will be serialized as \"1500m\"\n1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. (So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true", - "type": "object", - "title": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and AsInt64() accessors.", - "x-go-package": "k8s.io/apimachinery/pkg/api/resource" - }, - "Quota": { - "type": "object", - "properties": { - "cpu": { - "description": "CPU holds the quantity of CPU.", - "type": "integer", - "format": "int64", - "x-go-name": "CPU" - }, - "memory": { - "description": "Memory represents the RAM amount. Denoted in GB, rounded to 2 decimal places.", - "type": "number", - "format": "double", - "x-go-name": "Memory" - }, - "storage": { - "description": "Storage represents the disk size. Denoted in GB, rounded to 2 decimal places.", - "type": "number", - "format": "double", - "x-go-name": "Storage" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "RHELSpec": { - "description": "RHELSpec contains rhel specific settings", - "type": "object", - "properties": { - "distUpgradeOnBoot": { - "description": "do a dist-upgrade on boot and reboot it required afterwards", - "type": "boolean", - "x-go-name": "DistUpgradeOnBoot" - }, - "rhelSubscriptionManagerPassword": { - "type": "string", - "x-go-name": "RHELSubscriptionManagerPassword" - }, - "rhelSubscriptionManagerUser": { - "type": "string", - "x-go-name": "RHELSubscriptionManagerUser" - }, - "rhsmOfflineToken": { - "type": "string", - "x-go-name": "RHSMOfflineToken" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RawExtension": { - "description": "To use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. 
You also need to register your\nvarious plugin types.\n\nInternal package:\n\ntype MyAPIObject struct {\nruntime.TypeMeta `json:\",inline\"`\nMyPlugin runtime.Object `json:\"myPlugin\"`\n}\n\ntype PluginA struct {\nAOption string `json:\"aOption\"`\n}\n\nExternal package:\n\ntype MyAPIObject struct {\nruntime.TypeMeta `json:\",inline\"`\nMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\n\ntype PluginA struct {\nAOption string `json:\"aOption\"`\n}\n\nOn the wire, the JSON will look something like this:\n\n{\n\"kind\":\"MyAPIObject\",\n\"apiVersion\":\"v1\",\n\"myPlugin\": {\n\"kind\":\"PluginA\",\n\"aOption\":\"foo\",\n},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true", - "type": "object", - "title": "RawExtension is used to hold extensions in external versions.", - "x-go-package": "k8s.io/apimachinery/pkg/runtime" - }, - "ReadinessSpec": { - "type": "object", - "properties": { - "statsEnabled": { - "description": "enables stats for gatekeeper audit", - "type": "boolean", - "x-go-name": "StatsEnabled" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ReplacedResources": { - "type": "object", - "title": "ReplacedResources is used to subtract replaced resources in the calculation. For instance, when user is changing instance type of an existing Machine Deployment, resources of an old instance type need to be subtracted.", - "properties": { - "alibabaInstanceType": { - "$ref": "#/definitions/AlibabaInstanceType" - }, - "anexiaNodeSpec": { - "$ref": "#/definitions/AnexiaNodeSpec" - }, - "awsSize": { - "$ref": "#/definitions/AWSSize" - }, - "azureSize": { - "$ref": "#/definitions/AzureSize" - }, - "diskSizeGB": { - "description": "DiskSizeGB will be processed only for those providers which don't have the disk size in their API objects, like AWS, Alibabla and GCP.", - "type": "integer", - "format": "int64", - "x-go-name": "DiskSizeGB" - }, - "doSize": { - "$ref": "#/definitions/DigitaloceanSize" - }, - "equinixSize": { - "$ref": "#/definitions/PacketSize" - }, - "gcpSize": { - "$ref": "#/definitions/GCPMachineSize" - }, - "hetznerSize": { - "$ref": "#/definitions/HetznerSize" - }, - "kubevirtNodeSize": { - "$ref": "#/definitions/KubevirtNodeSize" - }, - "nutanixNodeSpec": { - "$ref": "#/definitions/NutanixNodeSpec" - }, - "openstackSize": { - "$ref": "#/definitions/OpenstackSize" - }, - "replicas": { - "type": "integer", - "format": "int64", - "x-go-name": "Replicas" - }, - "vSphereNodeSpec": { - "$ref": "#/definitions/VSphereNodeSpec" - }, - "vmDirectorNodeSpec": { - "$ref": "#/definitions/VMwareCloudDirectorNodeSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/resource-quota" - }, - "ResourceClaim": { - "type": "object", - "title": "ResourceClaim references one entry in PodSpec.ResourceClaims.", - "properties": { - "name": { - "description": "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. 
It makes that resource available\ninside a container.", - "type": "string", - "x-go-name": "Name" - }, - "request": { - "description": "Request is the name chosen for a request in the referenced claim.\nIf empty, everything from the claim is made available, otherwise\nonly the result of this request.\n\n+optional", - "type": "string", - "x-go-name": "Request" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ResourceFieldSelector": { - "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format\n+structType=atomic", - "type": "object", - "properties": { - "containerName": { - "description": "Container name: required for volumes, optional for env vars\n+optional", - "type": "string", - "x-go-name": "ContainerName" - }, - "divisor": { - "$ref": "#/definitions/Quantity" - }, - "resource": { - "description": "Required: resource to select", - "type": "string", - "x-go-name": "Resource" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ResourceLabelMap": { - "type": "object", - "title": "ResourceLabelMap defines list of labels grouped by specific resource types.", - "additionalProperties": { - "$ref": "#/definitions/LabelKeyList" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ResourceList": { - "type": "object", - "title": "ResourceList is a set of (resource name, quantity) pairs.", - "additionalProperties": { - "$ref": "#/definitions/Quantity" - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ResourceQuota": { - "type": "object", - "properties": { - "isDefault": { - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "quota": { - "$ref": "#/definitions/Quota" - }, - "status": { - "$ref": "#/definitions/ResourceQuotaStatus" - }, - "subjectHumanReadableName": { - "description": "SubjectHumanReadableName contains the human-readable name for the subject(if applicable). Just filled as information in get/list.", - "type": "string", - "x-go-name": "SubjectHumanReadableName" - }, - "subjectKind": { - "type": "string", - "x-go-name": "SubjectKind" - }, - "subjectName": { - "type": "string", - "x-go-name": "SubjectName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ResourceQuotaStatus": { - "type": "object", - "properties": { - "globalUsage": { - "$ref": "#/definitions/Quota" - }, - "localUsage": { - "$ref": "#/definitions/Quota" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ResourceQuotaUpdateCalculation": { - "type": "object", - "properties": { - "calculatedQuota": { - "$ref": "#/definitions/Quota" - }, - "message": { - "description": "Message is filled if a resource in the calculated quota exceeds the resource quota limits.", - "type": "string", - "x-go-name": "Message" - }, - "resourceQuota": { - "$ref": "#/definitions/ResourceQuota" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "ResourceRequirements": { - "type": "object", - "title": "ResourceRequirements describes the compute resource requirements.", - "properties": { - "claims": { - "description": "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\nThis field is immutable. 
It can only be set for containers.\n\n+listType=map\n+listMapKey=name\n+featureGate=DynamicResourceAllocation\n+optional", - "type": "array", - "items": { - "$ref": "#/definitions/ResourceClaim" - }, - "x-go-name": "Claims" - }, - "limits": { - "$ref": "#/definitions/ResourceList" - }, - "requests": { - "$ref": "#/definitions/ResourceList" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "ResourceType": { - "type": "string", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RockyLinuxSpec": { - "description": "RockyLinuxSpec contains rocky-linux specific settings", - "type": "object", - "properties": { - "distUpgradeOnBoot": { - "description": "do a dist-upgrade on boot and reboot it required afterwards", - "type": "boolean", - "x-go-name": "DistUpgradeOnBoot" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Role": { - "description": "Role defines RBAC role for the user cluster", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Indicates the scope of this role.", - "type": "string", - "x-go-name": "Namespace" - }, - "rules": { - "description": "Rules holds all the PolicyRules for this Role", - "type": "array", - "items": { - "$ref": "#/definitions/PolicyRule" - }, - "x-go-name": "Rules" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RoleBinding": { - "type": "object", - "title": "RoleBinding references a role, but does not contain it.", - "properties": { - "namespace": { - "description": "Indicates the scope of this binding.", - "type": "string", - "x-go-name": "Namespace" - }, - "roleRefName": { - "type": "string", - "x-go-name": "RoleRefName" - }, - "subjects": { - "description": "Subjects holds references to the objects the role applies to.", - "type": "array", - "items": { - "$ref": "#/definitions/Subject" - }, - "x-go-name": "Subjects" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RoleName": { - "description": "RoleName defines RBAC role name object for the user cluster", - "type": "object", - "properties": { - "name": { - "description": "Name of the role.", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Indicates the scopes of this role.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RoleUser": { - "description": "RoleUser defines associated user with role", - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "serviceAccount": { - "type": "string", - "x-go-name": "ServiceAccount" - }, - 
"serviceAccountNamespace": { - "type": "string", - "x-go-name": "ServiceAccountNamespace" - }, - "userEmail": { - "type": "string", - "x-go-name": "UserEmail" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "RuleGroup": { - "type": "object", - "title": "RuleGroup represents a rule group of recording and alerting rules.", - "properties": { - "data": { - "description": "contains the RuleGroup data. Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group", - "type": "array", - "items": { - "type": "integer", - "format": "uint8" - }, - "x-go-name": "Data" - }, - "isDefault": { - "description": "IsDefault indicates whether the ruleGroup is default", - "type": "boolean", - "x-go-name": "IsDefault" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "type": { - "$ref": "#/definitions/RuleGroupType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "RuleGroupType": { - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "S3BackupCredentials": { - "description": "S3BackupCredentials contains credentials for S3 etcd backups", - "type": "object", - "properties": { - "accessKeyId": { - "type": "string", - "x-go-name": "AccessKeyID" - }, - "secretAccessKey": { - "type": "string", - "x-go-name": "SecretAccessKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "SSHKey": { - "description": "SSHKey represents a ssh key", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/SSHKeySpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SSHKeySpec": { - "type": "object", - "title": "SSHKeySpec represents the details of a ssh key.", - "properties": { - "fingerprint": { - "type": "string", - "x-go-name": "Fingerprint" - }, - "publicKey": { - "type": "string", - "x-go-name": "PublicKey" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SecondaryDisks": { - "type": "object", - "properties": { - "size": { - "type": "string", - "x-go-name": "Size" - }, - "storageClassName": { - "type": "string", - "x-go-name": "StorageClassName" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SecretKeySelector": { - "description": "+structType=atomic", - "type": "object", - "title": "SecretKeySelector selects a key of a Secret.", - "properties": { - "key": { - "description": "The key of the secret to select from. 
Must be a valid secret key.", - "type": "string", - "x-go-name": "Key" - }, - "name": { - "description": "Name of the referent.\nThis field is effectively required, but due to backwards compatibility is\nallowed to be empty. Instances of this type with an empty value here are\nalmost certainly wrong.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional\n+default=\"\"\n+kubebuilder:default=\"\"\nTODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.", - "type": "string", - "x-go-name": "Name" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined\n+optional", - "type": "boolean", - "x-go-name": "Optional" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "SecretReference": { - "description": "SecretReference represents a Secret Reference. It has enough information to retrieve secret\nin any namespace\n+structType=atomic", - "type": "object", - "properties": { - "name": { - "description": "name is unique within a namespace to reference a secret resource.\n+optional", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "namespace defines the space within which the secret name must be unique.\n+optional", - "type": "string", - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8s.io/api/core/v1" - }, - "Seed": { - "description": "Seed represents a seed object", - "type": "object", - "properties": { - "country": { - "description": "Optional: Country of the seed as ISO-3166 two-letter code, e.g. DE or UK.\nFor informational purposes in the Kubermatic dashboard only.", - "type": "string", - "x-go-name": "Country" - }, - "datacenters": { - "description": "Datacenters contains a map of the possible datacenters (DCs) in this seed.\nEach DC must have a globally unique identifier (i.e. 
names must be unique\nacross all seeds).", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/Datacenter" - }, - "x-go-name": "SeedDatacenters" - }, - "etcdBackupRestore": { - "$ref": "#/definitions/EtcdBackupRestore" - }, - "expose_strategy": { - "$ref": "#/definitions/ExposeStrategy" - }, - "kubeconfig": { - "$ref": "#/definitions/ObjectReference" - }, - "kubelb": { - "$ref": "#/definitions/KubeLBSeedSettings" - }, - "location": { - "description": "Optional: Detailed location of the cluster, like \"Hamburg\" or \"Datacenter 7\".\nFor informational purposes in the Kubermatic dashboard only.", - "type": "string", - "x-go-name": "Location" - }, - "mla": { - "$ref": "#/definitions/SeedMLASettings" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "proxy_settings": { - "$ref": "#/definitions/ProxySettings" - }, - "seed_dns_overwrite": { - "description": "Optional: This can be used to override the DNS name used for this seed.\nBy default the seed name is used.", - "type": "string", - "x-go-name": "SeedDNSOverwrite" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SeedMLASettings": { - "type": "object", - "title": "SeedMLASettings allow configuring seed level MLA (Monitoring, Logging \u0026 Alerting) stack settings.", - "properties": { - "userClusterMLAEnabled": { - "description": "Optional: UserClusterMLAEnabled controls whether the user cluster MLA (Monitoring, Logging \u0026 Alerting) stack is enabled in the seed.", - "type": "boolean", - "x-go-name": "UserClusterMLAEnabled" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "SeedNamesList": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SeedOverview": { - "type": "object", - "title": "SeedOverview stores details about a requested Seed object.", - "properties": { - "created": { - "type": "string", - "x-go-name": "Created" - }, - "location": { - "type": "string", - "x-go-name": "Location" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "phase": { - "$ref": "#/definitions/SeedPhase" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "SeedPhase": { - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "SeedSettings": { - "description": "SeedSettings represents settings for a Seed cluster", - "type": "object", - "properties": { - "kubelb": { - "$ref": "#/definitions/KubeLBSeedSettingsAPI" - }, - "metering": { - "$ref": "#/definitions/MeteringConfiguration" - }, - "mla": { - "$ref": "#/definitions/MLA" - }, - "seedDNSOverwrite": { - "description": "the Seed level seed dns overwrite", - "type": "string", - "x-go-name": "SeedDNSOverwrite" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "SeedSpec": { - "type": "object", - "title": "The spec for a seed data.", - "properties": { - "country": { - "description": "Optional: Country of the seed as ISO-3166 two-letter code, e.g. DE or UK.\nFor informational purposes in the Kubermatic dashboard only.", - "type": "string", - "x-go-name": "Country" - }, - "datacenters": { - "description": "Datacenters contains a map of the possible datacenters (DCs) in this seed.\nEach DC must have a globally unique identifier (i.e. 
names must be unique\nacross all seeds).", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/Datacenter" - }, - "x-go-name": "SeedDatacenters" - }, - "etcdBackupRestore": { - "$ref": "#/definitions/EtcdBackupRestore" - }, - "expose_strategy": { - "$ref": "#/definitions/ExposeStrategy" - }, - "kubeconfig": { - "$ref": "#/definitions/ObjectReference" - }, - "kubelb": { - "$ref": "#/definitions/KubeLBSeedSettings" - }, - "location": { - "description": "Optional: Detailed location of the cluster, like \"Hamburg\" or \"Datacenter 7\".\nFor informational purposes in the Kubermatic dashboard only.", - "type": "string", - "x-go-name": "Location" - }, - "mla": { - "$ref": "#/definitions/SeedMLASettings" - }, - "proxy_settings": { - "$ref": "#/definitions/ProxySettings" - }, - "seed_dns_overwrite": { - "description": "Optional: This can be used to override the DNS name used for this seed.\nBy default the seed name is used.", - "type": "string", - "x-go-name": "SeedDNSOverwrite" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "SeedStatus": { - "type": "object", - "title": "SeedStatus stores the current status of a Seed.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "phase": { - "$ref": "#/definitions/SeedPhase" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Semver": { - "type": "string", - "title": "Semver is a type that encapsulates github.com/Masterminds/semver/v3.Version struct so it can be used in our API.", - "x-go-package": "k8c.io/kubermatic/sdk/v2/semver" - }, - "ServiceAccount": { - "description": "ServiceAccount represent an API service account", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "group": { - "description": "Group that a service account belongs to", - "type": "string", - "x-go-name": "Group" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "status": { - "description": "Status describes three stages of ServiceAccount life including Active, Inactive and Terminating", - "type": "string", - "x-go-name": "Status" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "ServiceAccountSettings": { - "type": "object", - "properties": { - "apiAudiences": { - "description": "APIAudiences are the Identifiers of the API\nIf this is not specified, it will be set to a single element list containing the issuer URL", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "APIAudiences" - }, - "issuer": { - "description": "Issuer is the identifier of the service account token issuer\nIf this is not specified, it will be set to the URL of apiserver by default", - "type": "string", - "x-go-name": "Issuer" - }, - "tokenVolumeProjectionEnabled": { - "type": "boolean", - "x-go-name": "TokenVolumeProjectionEnabled" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "ServiceAccountToken": { - "description": "ServiceAccountToken represent an API service account token", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "expiry": { - "description": "Expiry is a timestamp representing the time when this token will expire.", - "type": "string", - "format": "date-time", - "x-go-name": "Expiry" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. 
Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "invalidated": { - "description": "Invalidated indicates if the token must be regenerated", - "type": "boolean", - "x-go-name": "Invalidated" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "token": { - "description": "Token the JWT token", - "type": "string", - "x-go-name": "Token" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "StaticLabel": { - "type": "object", - "title": "StaticLabel is a label that can be used for the clusters.", - "properties": { - "default": { - "type": "boolean", - "x-go-name": "Default" - }, - "key": { - "type": "string", - "x-go-name": "Key" - }, - "protected": { - "type": "boolean", - "x-go-name": "Protected" - }, - "values": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Values" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "StorageClass": { - "description": "StorageClass represents a Kubernetes StorageClass", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "StorageClassList": { - "type": "array", - "title": "StorageClassList represents a list of Kubernetes StorageClass.", - "items": { - "$ref": "#/definitions/StorageClass" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "StorageType": { - "description": "ObjectStorage must be non-nil, since it is currently the only supported StorageType.", - "type": "object", - "title": "StorageType represents the type of storage that a backup location uses.", - "properties": { - "objectStorage": { - "$ref": "#/definitions/ObjectStorageLocation" - } - }, - "x-go-package": "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - }, - "Subject": { - "description": "or a value for non-objects such as user and group names.\n+structType=atomic", - "type": "object", - "title": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,", - "properties": { - "apiGroup": { - "description": "APIGroup holds the API group of the referenced subject.\nDefaults to \"\" for ServiceAccount subjects.\nDefaults to \"rbac.authorization.k8s.io\" for User and Group subjects.\n+optional", - "type": "string", - "x-go-name": "APIGroup" - }, - "kind": { - "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\".\nIf the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "type": "string", - "x-go-name": "Kind" - }, - "name": { - "description": "Name of the object being referenced.", - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "description": "Namespace of the referenced object. 
If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty\nthe Authorizer should report an error.\n+optional", - "type": "string", - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8s.io/api/rbac/v1" - }, - "Subnet": { - "type": "object", - "title": "Subnet a smaller, segmented portion of a larger network, like a Virtual Private Cloud (VPC).", - "properties": { - "cidr": { - "description": "CIDR is the subnet IPV4 CIDR.", - "type": "string", - "x-go-name": "CIDR" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "regions": { - "description": "Regions represents a larger domain, made up of one or more zones. It is uncommon for Kubernetes clusters\nto span multiple regions", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Regions" - }, - "zones": { - "description": "Zones represent a logical failure domain. It is common for Kubernetes clusters to span multiple zones\nfor increased availability", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Zones" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "SubnetCIDR": { - "description": "SubnetCIDR is used to store IPv4/IPv6 CIDR.", - "type": "string", - "title": "+kubebuilder:validation:Pattern=\"((^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))/([0-9]|[1-2][0-9]|3[0-2])$)|(^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))/([0-9]|[0-9][0-9]|1[0-1][0-9]|12[0-8])$))\"", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "Sync": { - "type": "object", - "properties": { - "syncOnly": { - "description": "If non-empty, entries on this list will be replicated into OPA", - "type": "array", - "items": { - "$ref": "#/definitions/GVK" - }, - "x-go-name": "SyncOnly" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "TaintSpec": { - "type": "object", - "title": "TaintSpec defines a node taint.", - "properties": { - "effect": { - "type": "string", - "x-go-name": "Effect" - }, - "key": { - "type": "string", - "x-go-name": "Key" - }, - "value": { - "type": "string", - "x-go-name": "Value" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Target": { - "type": "object", - "properties": { - "code": { - "description": "The source code options for the constraint template. 
\"Rego\" can only\nbe specified in one place (either here or in the \"rego\" field)\n+listType=map\n+listMapKey=engine\n+kubebuilder:validation:Required", - "type": "array", - "items": { - "$ref": "#/definitions/Code" - }, - "x-go-name": "Code" - }, - "libs": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Libs" - }, - "rego": { - "type": "string", - "x-go-name": "Rego" - }, - "target": { - "type": "string", - "x-go-name": "Target" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "TemplateMethod": { - "description": "+kubebuilder:validation:Enum=helm", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/apps.kubermatic/v1" - }, - "Tinkerbell": { - "type": "object", - "properties": { - "kubeconfig": { - "description": "Kubeconfig is the cluster's kubeconfig file, encoded with base64.", - "type": "string", - "x-go-name": "Kubeconfig" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "TinkerbellCloudSpec": { - "type": "object", - "properties": { - "kubeconfig": { - "description": "The cluster's kubeconfig file, encoded with base64.", - "type": "string", - "x-go-name": "Kubeconfig" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "TinkerbellHTTPSource": { - "type": "object", - "title": "TinkerbellHTTPSource represents list of images and their versions that can be downloaded over HTTP.", - "properties": { - "operatingSystems": { - "description": "OperatingSystems represents list of supported operating-systems with their URLs.", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/OSVersions" - }, - "x-go-name": "OperatingSystems" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "TinkerbellImageSourceType": { - "type": "string", - "title": "TinkerbellImageSourceType represents a Tinkerbell image source type.", - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "TinkerbellImageSources": { - "type": "object", - "title": "TinkerbellImageSources represents Operating System image sources for Tinkerbell.", - "properties": { - "http": { - "$ref": "#/definitions/TinkerbellHTTPSource" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "TinkerbellImages": { - "type": "object", - "title": "TinkerbellImages represents images with versions and their source.", - "properties": { - "operatingSystems": { - "$ref": "#/definitions/ImageListWithVersions" - }, - "source": { - "$ref": "#/definitions/TinkerbellImageSourceType" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "TinkerbellImagesList": { - "type": "object", - "title": "TinkerbellImagesList represents list of available Tinkerbell images with their categories.", - "properties": { - "standard": { - "$ref": "#/definitions/TinkerbellImages" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "TinkerbellNodeSpec": { - "description": "TinkerbellNodeSpec tinkerbell specific node settings", - "type": "object", - "properties": { - "hardwareRef": { - "$ref": "#/definitions/NamespacedName" - }, - "osImageUrl": { - "description": "OsImageUrl is the link for Operating System.", - "type": "string", - "x-go-name": "OsImageUrl" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "TopologySpreadConstraint": { - "type": "object", - "properties": { - "maxSkew": { - "description": "MaxSkew describes the degree to which VMs may be unevenly distributed.", - "type": "integer", - 
"format": "int64", - "x-go-name": "MaxSkew" - }, - "topologyKey": { - "description": "TopologyKey is the key of infra-node labels.", - "type": "string", - "x-go-name": "TopologyKey" - }, - "whenUnsatisfiable": { - "description": "WhenUnsatisfiable indicates how to deal with a VM if it doesn't satisfy\nthe spread constraint.", - "type": "string", - "x-go-name": "WhenUnsatisfiable" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Trace": { - "type": "object", - "properties": { - "dump": { - "description": "Also dump the state of OPA with the trace. Set to `All` to dump everything.", - "type": "string", - "x-go-name": "Dump" - }, - "kind": { - "$ref": "#/definitions/GVK" - }, - "user": { - "description": "Only trace requests from the specified user", - "type": "string", - "x-go-name": "User" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "Type": { - "type": "integer", - "format": "int64", - "title": "Type represents the stored type of IntOrString.", - "x-go-package": "k8s.io/apimachinery/pkg/util/intstr" - }, - "UID": { - "description": "UID is a type that holds unique ID values, including UUIDs. Because we\ndon't ONLY use UUIDs, this is an alias to string. Being a type captures\nintent and helps make sure that UIDs and names do not get conflated.", - "type": "string", - "x-go-package": "k8s.io/apimachinery/pkg/types" - }, - "UbuntuSpec": { - "description": "UbuntuSpec ubuntu specific settings", - "type": "object", - "properties": { - "distUpgradeOnBoot": { - "description": "do a dist-upgrade on boot and reboot it required afterwards", - "type": "boolean", - "x-go-name": "DistUpgradeOnBoot" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "UpdateWindow": { - "description": "This is only applied to cluster nodes using Flatcar Linux.\nThe reference time for this is the node system time and might differ from\nthe user's timezone, which needs to be considered when configuring a window.", - "type": "object", - "title": "UpdateWindow allows defining windows for maintenance tasks related to OS updates.", - "properties": { - "length": { - "description": "Sets the length of the update window beginning with the start time. This needs to be a valid duration\nas parsed by Go's time.ParseDuration (https://pkg.go.dev/time#ParseDuration), e.g. `2h`.", - "type": "string", - "x-go-name": "Length" - }, - "start": { - "description": "Sets the start time of the update window. This can be a time of day in 24h format, e.g. `22:30`,\nor a day of week plus a time of day, for example `Mon 21:00`. Only short names for week days are supported,\ni.e. 
`Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat` and `Sun`.", - "type": "string", - "x-go-name": "Start" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "User": { - "description": "User represent an API user", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations that can be added to the resource", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Annotations" - }, - "creationTimestamp": { - "description": "CreationTimestamp is a timestamp representing the server time when this object was created.", - "type": "string", - "format": "date-time", - "x-go-name": "CreationTimestamp" - }, - "deletionTimestamp": { - "description": "DeletionTimestamp is a timestamp representing the server time when this object was deleted.", - "type": "string", - "format": "date-time", - "x-go-name": "DeletionTimestamp" - }, - "email": { - "description": "Email an email address of the user", - "type": "string", - "x-go-name": "Email" - }, - "groups": { - "description": "Groups holds the list of groups that the user belongs to", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Groups" - }, - "id": { - "description": "ID unique value that identifies the resource generated by the server. Read-Only.", - "type": "string", - "x-go-name": "ID" - }, - "isAdmin": { - "description": "IsAdmin indicates admin role", - "type": "boolean", - "x-go-name": "IsAdmin" - }, - "isGlobalViewer": { - "description": "IsGlobalViewer indicates GlobalViewer role", - "type": "boolean", - "x-go-name": "IsGlobalViewer" - }, - "lastSeen": { - "description": "LastSeen holds a time in UTC format when the user has been using the API last time", - "type": "string", - "format": "date-time", - "x-go-name": "LastSeen" - }, - "name": { - "description": "Name represents human readable name for the resource", - "type": "string", - "x-go-name": "Name" - }, - "projects": { - "description": "Projects holds the list of project the user belongs to\nalong with the group names", - "type": "array", - "items": { - "$ref": "#/definitions/ProjectGroup" - }, - "x-go-name": "Projects" - }, - "readAnnouncements": { - "description": "ReadAnnouncements holds the IDs of admin announcements that the user has read.\n+optional", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "ReadAnnouncements" - }, - "userSettings": { - "$ref": "#/definitions/UserSettings" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "UserSettings": { - "type": "object", - "title": "UserSettings represent an user settings.", - "properties": { - "collapseSidenav": { - "type": "boolean", - "x-go-name": "CollapseSidenav" - }, - "displayAllProjectsForAdmin": { - "type": "boolean", - "x-go-name": "DisplayAllProjectsForAdmin" - }, - "itemsPerPage": { - "type": "integer", - "format": "int8", - "x-go-name": "ItemsPerPage" - }, - "lastSeenChangelogVersion": { - "type": "string", - "x-go-name": "LastSeenChangelogVersion" - }, - "selectProjectTableView": { - "type": "boolean", - "x-go-name": "SelectProjectTableView" - }, - "selectedProjectID": { - "type": "string", - "x-go-name": "SelectedProjectID" - }, - "selectedTheme": { - "type": "string", - "x-go-name": "SelectedTheme" - }, - "useClustersView": { - "type": "boolean", - "x-go-name": "UseClustersView" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VMwareCloudDirector": { - "type": "object", - "properties": { - "apiToken": { - "description": "The VMware Cloud Director 
API token.", - "type": "string", - "x-go-name": "APIToken" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "organization": { - "description": "The name of organization to use.", - "type": "string", - "x-go-name": "Organization" - }, - "ovdcNetwork": { - "description": "The name of organizational virtual data center network that will be associated with the VMs and vApp.\nDeprecated: OVDCNetwork has been deprecated starting with KKP 2.25 and will be removed in KKP 2.27+. It is recommended to use OVDCNetworks instead.", - "type": "string", - "x-go-name": "OVDCNetwork" - }, - "ovdcNetworks": { - "description": "OVDCNetworks is the list of organizational virtual data center networks that will be attached to the vApp and can be consumed the VMs.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "OVDCNetworks" - }, - "password": { - "description": "The VMware Cloud Director user password.", - "type": "string", - "x-go-name": "Password" - }, - "username": { - "description": "The VMware Cloud Director user name.", - "type": "string", - "x-go-name": "Username" - }, - "vdc": { - "description": "The organizational virtual data center.", - "type": "string", - "x-go-name": "VDC" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VMwareCloudDirectorAPIPreset": { - "description": "VMwareCloudDirectorPreset represents a preset for VMware Cloud Director", - "type": "object", - "properties": { - "ovdcNetwork": { - "type": "string", - "x-go-name": "OVDCNetwork" - }, - "ovdcNetworks": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "OVDCNetworks" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VMwareCloudDirectorCSIConfig": { - "type": "object", - "properties": { - "filesystem": { - "description": "Filesystem to use for named disks, defaults to \"ext4\"\n+optional", - "type": "string", - "x-go-name": "Filesystem" - }, - "storageProfile": { - "description": "The name of the storage profile to use for disks created by CSI driver", - "type": "string", - "x-go-name": "StorageProfile" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VMwareCloudDirectorCatalog": { - "type": "object", - "title": "VMwareCloudDirectorCatalog represents a VMware Cloud Director catalog.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorCatalogList": { - "type": "array", - "title": "VMwareCloudDirectorCatalogList represents an array of VMware Cloud Director catalogs.", - "items": { - "$ref": "#/definitions/VMwareCloudDirectorCatalog" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorCloudSpec": { - "type": "object", - 
"title": "VMwareCloudDirectorCloudSpec specifies access data to VMware Cloud Director cloud.", - "properties": { - "apiToken": { - "description": "The VMware Cloud Director API token.\n+optional", - "type": "string", - "x-go-name": "APIToken" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "csi": { - "$ref": "#/definitions/VMwareCloudDirectorCSIConfig" - }, - "organization": { - "description": "The name of organization to use.\n+optional", - "type": "string", - "x-go-name": "Organization" - }, - "ovdcNetwork": { - "description": "The name of organizational virtual data center network that will be associated with the VMs and vApp.\nDeprecated: OVDCNetwork has been deprecated starting with KKP 2.25 and will be removed in KKP 2.27+. It is recommended to use OVDCNetworks instead.", - "type": "string", - "x-go-name": "OVDCNetwork" - }, - "ovdcNetworks": { - "description": "OVDCNetworks is the list of organizational virtual data center networks that will be attached to the vApp and can be consumed the VMs.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "OVDCNetworks" - }, - "password": { - "description": "The VMware Cloud Director user password.\n+optional", - "type": "string", - "x-go-name": "Password" - }, - "username": { - "description": "The VMware Cloud Director user name.\n+optional", - "type": "string", - "x-go-name": "Username" - }, - "vapp": { - "description": "VApp used for isolation of VMs and their associated network\n+optional", - "type": "string", - "x-go-name": "VApp" - }, - "vdc": { - "description": "The organizational virtual data center.\n+optional", - "type": "string", - "x-go-name": "VDC" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VMwareCloudDirectorComputePolicy": { - "type": "object", - "title": "VMwareCloudDirectorComputePolicy represents a VMware Cloud Director placement policy.", - "properties": { - "description": { - "type": "string", - "x-go-name": "Description" - }, - "id": { - "type": "string", - "x-go-name": "ID" - }, - "isSizingOnly": { - "type": "boolean", - "x-go-name": "IsSizingOnly" - }, - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorComputePolicyList": { - "type": "array", - "title": "VMwareCloudDirectorNetworkList represents an array of VMware Cloud Director placement policies.", - "items": { - "$ref": "#/definitions/VMwareCloudDirectorComputePolicy" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorNetwork": { - "type": "object", - "title": "VMwareCloudDirectorNetwork represents a VMware Cloud Director network.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorNetworkList": { - "type": "array", - "title": "VMwareCloudDirectorNetworkList represents an array of VMware Cloud Director networks.", - "items": { - "$ref": "#/definitions/VMwareCloudDirectorNetwork" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorNodeSpec": { - "description": "VMwareCloudDirectorNodeSpec VMware Cloud Director node settings", - "type": "object", - "properties": { - "catalog": { - "type": "string", - "x-go-name": "Catalog" - }, - "cpuCores": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUCores" - }, - "cpus": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUs" - }, - "diskIOPS": { - 
"type": "integer", - "format": "int64", - "x-go-name": "DiskIOPS" - }, - "diskSizeGB": { - "type": "integer", - "format": "int64", - "x-go-name": "DiskSizeGB" - }, - "ipAllocationMode": { - "$ref": "#/definitions/IPAllocationMode" - }, - "memoryMB": { - "type": "integer", - "format": "int64", - "x-go-name": "MemoryMB" - }, - "metadata": { - "description": "Additional metadata to set", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "x-go-name": "Metadata" - }, - "network": { - "type": "string", - "x-go-name": "Network" - }, - "placementPolicy": { - "type": "string", - "x-go-name": "PlacementPolicy" - }, - "sizingPolicy": { - "type": "string", - "x-go-name": "SizingPolicy" - }, - "storageProfile": { - "type": "string", - "x-go-name": "StorageProfile" - }, - "template": { - "type": "string", - "x-go-name": "Template" - }, - "vapp": { - "type": "string", - "x-go-name": "VApp" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorSettings": { - "type": "object", - "properties": { - "ipAllocationModes": { - "description": "IPAllocationModes are the allowed IP allocation modes for the VMware Cloud Director provider. If not set, all modes are allowed.", - "type": "array", - "items": { - "$ref": "#/definitions/ipAllocationMode" - }, - "x-go-name": "IPAllocationModes" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VMwareCloudDirectorStorageProfile": { - "type": "object", - "title": "VMwareCloudDirectorStorageProfile represents a VMware Cloud Director storage profile.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorStorageProfileList": { - "type": "array", - "title": "VMwareCloudDirectorStorageProfileList represents an array of VMware Cloud Director storage profiles.", - "items": { - "$ref": "#/definitions/VMwareCloudDirectorStorageProfile" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorTemplate": { - "type": "object", - "title": "VMwareCloudDirectorTemplate represents a VMware Cloud Director template.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VMwareCloudDirectorTemplateList": { - "type": "array", - "title": "VMwareCloudDirectorTemplateList represents an array of VMware Cloud Director templates.", - "items": { - "$ref": "#/definitions/VMwareCloudDirectorTemplate" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VPC": { - "description": "VPC is a virtual network dedicated to a single tenant within a KubeVirt, where the resources in the VPC\nis isolated from any other resources within the KubeVirt infra cluster.", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "subnets": { - "type": "array", - "items": { - "$ref": "#/definitions/Subnet" - }, - "x-go-name": "Subnets" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VSphere": { - "type": "object", - "properties": { - "basePath": { - "description": "BasePath configures a vCenter folder path that KKP will create an individual cluster folder in.\nIf it's an absolute path, the RootPath configured in the datacenter will be ignored. If it is a relative path,\nthe BasePath part will be appended to the RootPath to construct the full path. For both cases,\nthe full folder structure needs to exist. 
KKP will only try to create the cluster folder.", - "type": "string", - "x-go-name": "BasePath" - }, - "datacenter": { - "description": "If datacenter is set, this preset is only applicable to the\nconfigured datacenter.", - "type": "string", - "x-go-name": "Datacenter" - }, - "datastore": { - "description": "Datastore to be used for storing virtual machines and as a default for dynamic volume provisioning, it is mutually exclusive with DatastoreCluster.", - "type": "string", - "x-go-name": "Datastore" - }, - "datastoreCluster": { - "description": "DatastoreCluster to be used for storing virtual machines, it is mutually exclusive with Datastore.", - "type": "string", - "x-go-name": "DatastoreCluster" - }, - "enabled": { - "description": "Only enabled presets will be available in the KKP dashboard.", - "type": "boolean", - "x-go-name": "Enabled" - }, - "isCustomizable": { - "description": "IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.\nNOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.", - "type": "boolean", - "x-go-name": "IsCustomizable" - }, - "networks": { - "description": "List of vSphere networks.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Networks" - }, - "password": { - "description": "The vSphere user password.", - "type": "string", - "x-go-name": "Password" - }, - "resourcePool": { - "description": "ResourcePool is used to manage resources such as cpu and memory for vSphere virtual machines. The resource pool should be defined on vSphere cluster level.", - "type": "string", - "x-go-name": "ResourcePool" - }, - "username": { - "description": "The vSphere user name.", - "type": "string", - "x-go-name": "Username" - }, - "vmNetName": { - "description": "Deprecated: Use networks instead.", - "type": "string", - "x-go-name": "VMNetName" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VSphereCloudSpec": { - "type": "object", - "title": "VSphereCloudSpec specifies access data to VSphere cloud.", - "properties": { - "basePath": { - "description": "Optional: BasePath configures a vCenter folder path that KKP will create an individual cluster folder in.\nIf it's an absolute path, the RootPath configured in the datacenter will be ignored. If it is a relative path,\nthe BasePath part will be appended to the RootPath to construct the full path. For both cases,\nthe full folder structure needs to exist. 
KKP will only try to create the cluster folder.\n+optional", - "type": "string", - "x-go-name": "BasePath" - }, - "credentialsReference": { - "$ref": "#/definitions/GlobalSecretKeySelector" - }, - "datastore": { - "description": "Datastore to be used for storing virtual machines and as a default for\ndynamic volume provisioning, it is mutually exclusive with\nDatastoreCluster.\n+optional", - "type": "string", - "x-go-name": "Datastore" - }, - "datastoreCluster": { - "description": "DatastoreCluster to be used for storing virtual machines, it is mutually\nexclusive with Datastore.\n+optional", - "type": "string", - "x-go-name": "DatastoreCluster" - }, - "folder": { - "description": "Folder to be used to group the provisioned virtual\nmachines.\n+optional", - "type": "string", - "x-go-name": "Folder" - }, - "infraManagementUser": { - "$ref": "#/definitions/VSphereCredentials" - }, - "networks": { - "description": "List of vSphere networks.\n+optional", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Networks" - }, - "password": { - "description": "The vSphere user password.\n+optional", - "type": "string", - "x-go-name": "Password" - }, - "resourcePool": { - "description": "ResourcePool is used to manage resources such as cpu and memory for vSphere virtual machines. The resource pool\nshould be defined on vSphere cluster level.\n+optional", - "type": "string", - "x-go-name": "ResourcePool" - }, - "storagePolicy": { - "description": "StoragePolicy to be used for storage provisioning", - "type": "string", - "x-go-name": "StoragePolicy" - }, - "tags": { - "$ref": "#/definitions/VSphereTag" - }, - "username": { - "description": "The vSphere user name.\n+optional", - "type": "string", - "x-go-name": "Username" - }, - "vmNetName": { - "description": "The name of the vSphere network.\nDeprecated: Use networks instead.\n+optional", - "type": "string", - "x-go-name": "VMNetName" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VSphereCredentials": { - "type": "object", - "title": "VSphereCredentials credentials represents a credential for accessing vSphere.", - "properties": { - "password": { - "type": "string", - "x-go-name": "Password" - }, - "username": { - "type": "string", - "x-go-name": "Username" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VSphereDatastoreList": { - "type": "object", - "title": "VSphereDatastoreList is the object representing a vsphere datastores.", - "properties": { - "datastores": { - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Datastores" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VSphereFolder": { - "type": "object", - "title": "VSphereFolder is the object representing a vsphere folder.", - "properties": { - "path": { - "description": "Path is the path of the folder", - "type": "string", - "x-go-name": "Path" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VSphereNetwork": { - "type": "object", - "title": "VSphereNetwork is the object representing a vsphere network.", - "properties": { - "absolutePath": { - "description": "AbsolutePath is the absolute path inside vCenter", - "type": "string", - "x-go-name": "AbsolutePath" - }, - "name": { - "description": "Name is the name of the network", - "type": "string", - "x-go-name": "Name" - }, - "relativePath": { - "description": "RelativePath is the relative path inside the datacenter", - "type": "string", - "x-go-name": "RelativePath" - }, - "type": { - "description": 
"Type defines the type of network", - "type": "string", - "x-go-name": "Type" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VSphereNodeSpec": { - "description": "VSphereNodeSpec VSphere node settings", - "type": "object", - "properties": { - "cpus": { - "type": "integer", - "format": "int64", - "x-go-name": "CPUs" - }, - "diskSizeGB": { - "type": "integer", - "format": "int64", - "x-go-name": "DiskSizeGB" - }, - "memory": { - "type": "integer", - "format": "int64", - "x-go-name": "Memory" - }, - "tags": { - "description": "Additional metadata to set", - "type": "array", - "items": { - "$ref": "#/definitions/VSphereTag" - }, - "x-go-name": "Tags" - }, - "template": { - "type": "string", - "x-go-name": "Template" - }, - "vmAntiAffinity": { - "description": "Automatically create anti affinity rules for machines.", - "type": "boolean", - "x-go-name": "VMAntiAffinity" - }, - "vmGroup": { - "type": "string", - "x-go-name": "VMGroup" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VSphereTag": { - "description": "VSphereTag represents the tags that are attached or created on the cluster level, that are then propagated down to the\nMachineDeployments. In order to attach tags on MachineDeployment, users must create the tag on a cluster level first\nthen attach that tag on the MachineDeployment.", - "type": "object", - "properties": { - "categoryID": { - "description": "CategoryID is the id of the vsphere category that the tag belongs to. If the category id is left empty, the default\ncategory id for the cluster will be used.", - "type": "string", - "x-go-name": "CategoryID" - }, - "tags": { - "description": "Tags represents the name of the created tags.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "Tags" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "VSphereTagCategory": { - "type": "object", - "title": "VSphereTagCategory is the object representing a vsphere tag category.", - "properties": { - "id": { - "description": "ID is the unique identifier of the tag category.", - "type": "string", - "x-go-name": "ID" - }, - "name": { - "description": "Name is the name of the tag category.", - "type": "string", - "x-go-name": "Name" - }, - "usedBy": { - "description": "UsedBy is the list of actors that have resources attached to this tag category.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "UsedBy" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VSphereVMGroup": { - "type": "object", - "title": "VSphereVMGroup is the object representing a vsphere VM Group.", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "VSphereVMGroupList": { - "type": "array", - "title": "VSphereVMGroupList represents an array of vSphere VM Groups.", - "items": { - "$ref": "#/definitions/VSphereVMGroup" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Validation": { - "type": "object", - "properties": { - "legacySchema": { - "description": "+kubebuilder:default=false", - "type": "boolean", - "x-go-name": "LegacySchema" - }, - "openAPIV3Schema": { - "$ref": "#/definitions/JSONSchemaProps" - } - }, - "x-go-package": "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1" - }, - "ValidationRule": { - "type": "object", - "title": "ValidationRule describes a validation rule written in the CEL expression language.", - "properties": { - "fieldPath": { - "description": 
"fieldPath represents the field path returned when the validation fails.\nIt must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.\ne.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`\nIf the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList`\nIt does not support list numeric index.\nIt supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.\nNumeric index of array is not supported.\nFor field name which contains special characters, use `['specialName']` to refer the field name.\ne.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`\n+optional", - "type": "string", - "x-go-name": "FieldPath" - }, - "message": { - "description": "Message represents the message displayed when validation fails. The message is required if the Rule contains\nline breaks. The message must not contain line breaks.\nIf unset, the message is \"failed rule: {Rule}\".\ne.g. \"must be a URL with the host matching spec.host\"", - "type": "string", - "x-go-name": "Message" - }, - "messageExpression": { - "description": "MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.\nSince messageExpression is used as a failure message, it must evaluate to a string.\nIf both message and messageExpression are present on a rule, then messageExpression will be used if validation\nfails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced\nas if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string\nthat contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and\nthe fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.\nmessageExpression has access to all the same variables as the rule; the only difference is the return type.", - "type": "string", - "x-go-name": "MessageExpression" - }, - "optionalOldSelf": { - "description": "optionalOldSelf is used to opt a transition rule into evaluation\neven when the object is first created, or if the old object is\nmissing the value.\n\nWhen enabled `oldSelf` will be a CEL optional whose value will be\n`None` if there is no old value, or when the object is initially created.\n\nYou may check for presence of oldSelf using `oldSelf.hasValue()` and\nunwrap it after checking using `oldSelf.value()`. 
Check the CEL\ndocumentation for Optional types for more information:\nhttps://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes\n\nMay not be set unless `oldSelf` is used in `rule`.\n\n+featureGate=CRDValidationRatcheting\n+optional", - "type": "boolean", - "x-go-name": "OptionalOldSelf" - }, - "reason": { - "$ref": "#/definitions/FieldValueErrorReason" - }, - "rule": { - "description": "Rule represents the expression which will be evaluated by CEL.\nref: https://github.com/google/cel-spec\nThe Rule is scoped to the location of the x-kubernetes-validations extension in the schema.\nThe `self` variable in the CEL expression is bound to the scoped value.", - "type": "string", - "x-go-name": "Rule" - } - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "ValidationRules": { - "type": "array", - "title": "ValidationRules describes a list of validation rules written in the CEL expression language.", - "items": { - "$ref": "#/definitions/ValidationRule" - }, - "x-go-package": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - }, - "VersionList": { - "description": "VersionList represents a list of versions", - "type": "array", - "items": { - "$ref": "#/definitions/MasterVersion" - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v1" - }, - "Violation": { - "type": "object", - "title": "Violation represents a gatekeeper constraint violation.", - "properties": { - "enforcementAction": { - "type": "string", - "x-go-name": "EnforcementAction" - }, - "kind": { - "type": "string", - "x-go-name": "Kind" - }, - "message": { - "type": "string", - "x-go-name": "Message" - }, - "name": { - "type": "string", - "x-go-name": "Name" - }, - "namespace": { - "type": "string", - "x-go-name": "Namespace" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VirtualMachineInstancetype": { - "description": "VirtualMachineInstanctype represents a KubeVirt VirtualMachineInstanctype", - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "description": "Spec contains the kvinstancetypealpha1v1.VirtualMachineInstanctype.Spec object marshalled\nRequired by UI to not embed the whole kubevirt.io API object, but a marshalled spec.", - "type": "string", - "x-go-name": "Spec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VirtualMachineInstancetypeList": { - "description": "VirtualMachineInstancetype are divided into 2 categories: \"custom\" or \"kubermatic\".", - "type": "object", - "title": "VirtualMachineInstancetypeList represents a list of VirtualMachineInstancetype.", - "properties": { - "instancetypes": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/definitions/VirtualMachineInstancetype" - } - }, - "x-go-name": "Instancetypes" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VirtualMachinePreference": { - "description": "VirtualMachinePreference represents a KubeVirt VirtualMachinePreference", - "$ref": "#/definitions/VirtualMachineInstancetype" - }, - "VirtualMachinePreferenceList": { - "description": "VirtualMachinePreference are divided into 2 categories: \"custom\" or \"kubermatic\".", - "type": "object", - "title": "VirtualMachinePreferenceList represents a list of VirtualMachinePreference.", - "properties": { - "preferences": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/definitions/VirtualMachinePreference" - } - }, - "x-go-name": "Preferences" - } - }, - 
"x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "VolumeBindingMode": { - "description": "+enum", - "type": "string", - "title": "VolumeBindingMode indicates how PersistentVolumeClaims should be bound.", - "x-go-package": "k8s.io/api/storage/v1" - }, - "VpcConfigRequest": { - "type": "object", - "properties": { - "securityGroupIds": { - "description": "Specify one or more security groups for the cross-account elastic network\ninterfaces that Amazon EKS creates to use to allow communication between\nyour nodes and the Kubernetes control plane.\nFor more information, see Amazon EKS security group considerations (https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html)\nin the Amazon EKS User Guide .", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "SecurityGroupIds" - }, - "subnetIds": { - "description": "Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account\nelastic network interfaces in these subnets to allow communication between\nyour nodes and the Kubernetes control plane.", - "type": "array", - "items": { - "type": "string" - }, - "x-go-name": "SubnetIds" - }, - "vpcId": { - "description": "The VPC associated with your cluster.", - "type": "string", - "x-go-name": "VpcId" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/api/v2" - }, - "WebTerminalOptions": { - "type": "object", - "properties": { - "additionalEnvironmentVariables": { - "description": "AdditionalEnvironmentVariables are the additional environment variables that can be set for the Web Terminal.", - "type": "array", - "items": { - "$ref": "#/definitions/EnvVar" - }, - "x-go-name": "AdditionalEnvironmentVariables" - }, - "enableInternetAccess": { - "description": "EnableInternetAccess enables the Web Terminal feature to access the internet.", - "type": "boolean", - "x-go-name": "EnableInternetAccess" - }, - "enabled": { - "description": "Enabled enables the Web Terminal feature for the user clusters.", - "type": "boolean", - "x-go-name": "Enabled" - } - }, - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "bcBody": { - "type": "object", - "properties": { - "backup_credentials": { - "$ref": "#/definitions/BackupCredentials" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/backupcredentials" - }, - "body": { - "type": "object", - "properties": { - "cloud": { - "$ref": "#/definitions/ExternalClusterCloudSpec" - }, - "kubeconfig": { - "description": "Kubeconfig Base64 encoded kubeconfig", - "type": "string", - "x-go-name": "Kubeconfig" - }, - "name": { - "description": "Name is human readable name for the external cluster", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/ExternalClusterSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/external_cluster" - }, - "constraintBody": { - "type": "object", - "properties": { - "Spec": { - "$ref": "#/definitions/ConstraintSpec" - }, - "name": { - "description": "Name is the name for the constraint", - "type": "string", - "x-go-name": "Name" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/constraint" - }, - "createPolicyBindingBody": { - "type": "object", - "properties": { - "name": { - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/PolicyBindingSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/kyverno/policy-binding" - }, - "ctBody": { - "type": "object", - "properties": { - "name": { - "description": "Name of the constraint template", - "type": "string", - "x-go-name": 
"Name" - }, - "spec": { - "$ref": "#/definitions/ConstraintTemplateSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/constraint_template" - }, - "ebcBody": { - "type": "object", - "properties": { - "name": { - "description": "Name of the etcd backup config", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/EtcdBackupConfigSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/etcdbackupconfig" - }, - "erBody": { - "type": "object", - "properties": { - "name": { - "description": "Name of the etcd backup restore. If not set, it will be generated", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/EtcdRestoreSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/etcdrestore" - }, - "errorResponse": { - "description": "ErrorResponse is the default representation of an error", - "type": "object", - "properties": { - "error": { - "$ref": "#/definitions/ErrorDetails" - } - }, - "x-go-name": "ErrorResponse", - "x-go-package": "k8c.io/dashboard/v2/pkg/handler" - }, - "groupProjectBindingBody": { - "type": "object", - "properties": { - "group": { - "type": "string", - "x-go-name": "Group" - }, - "role": { - "type": "string", - "x-go-name": "Role" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/group-project-binding/handler" - }, - "ipAllocationMode": { - "description": "+kubebuilder:validation:Enum=DHCP;POOL", - "type": "string", - "x-go-package": "k8c.io/kubermatic/sdk/v2/apis/kubermatic/v1" - }, - "patchPolicyBindingBody": { - "type": "object", - "properties": { - "Spec": { - "$ref": "#/definitions/PolicyBindingSpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/ee/kyverno/policy-binding" - }, - "wrBody": { - "type": "object", - "properties": { - "name": { - "description": "Name of the allowed registry", - "type": "string", - "x-go-name": "Name" - }, - "spec": { - "$ref": "#/definitions/AllowedRegistrySpec" - } - }, - "x-go-package": "k8c.io/dashboard/v2/pkg/handler/v2/allowed_registry" - } - }, - "responses": { - "Kubeconfig": { - "description": "Kubeconfig is a clusters kubeconfig", - "schema": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8" - } - } - }, - "empty": { - "description": "EmptyResponse is a empty response" - } - }, - "securityDefinitions": { - "api_key": { - "type": "apiKey", - "name": "Authorization", - "in": "header" - } - }, - "security": [ - { - "api_key": [] - } - ] -} \ No newline at end of file diff --git a/content/kubermatic/main/how-to-contribute/_index.en.md b/content/kubermatic/main/how-to-contribute/_index.en.md index 3136c9988..7f5ba5e0a 100644 --- a/content/kubermatic/main/how-to-contribute/_index.en.md +++ b/content/kubermatic/main/how-to-contribute/_index.en.md @@ -12,34 +12,34 @@ KKP is an open-source project to centrally manage the global automation of thous There are few things to note when contributing to the KKP project, which are highlighted below: -* KKP project is hosted on GitHub; thus, GitHub knowledge is one of the essential pre-requisites -* The KKP documentation is written in markdown (.md) and located in the [docs repository](https://github.com/kubermatic/docs/tree/main/content/kubermatic) -* See [CONTRIBUTING.md](https://github.com/kubermatic/kubermatic/blob/main/CONTRIBUTING.md) for instructions on the developer certificate of origin that we require -* Familiarization with Hugo for building static site locally is suggested for documentation contribution -* Kubernetes knowledge is also recommended 
-* The KKP documentation is currently available only in English
-* We have a simple code of conduct that should be adhered to
+- The KKP project is hosted on GitHub; thus, GitHub knowledge is one of the essential prerequisites
+- The KKP documentation is written in markdown (.md) and located in the [docs repository](https://github.com/kubermatic/docs/tree/main/content/kubermatic)
+- See [CONTRIBUTING.md](https://github.com/kubermatic/kubermatic/blob/main/CONTRIBUTING.md) for instructions on the developer certificate of origin that we require
+- Familiarity with Hugo for building the static site locally is suggested for documentation contributions
+- Kubernetes knowledge is also recommended
+- The KKP documentation is currently available only in English
+- We have a simple code of conduct that should be adhered to

## Steps in Contributing to KKP

-* Please familiarise yourself with our [Code of Conduct](https://github.com/kubermatic/kubermatic/blob/main/CODE_OF_CONDUCT.md)
-* Check the [opened issues](https://github.com/kubermatic/kubermatic/issues) on our GitHub repo peradventure there might be anyone that will be of interest
-* Fork the repository on GitHub
-* Read the [README](https://github.com/kubermatic/kubermatic/blob/main/README.md) for build and test instructions
+- Please familiarise yourself with our [Code of Conduct](https://github.com/kubermatic/kubermatic/blob/main/CODE_OF_CONDUCT.md)
+- Check the [opened issues](https://github.com/kubermatic/kubermatic/issues) on our GitHub repo in case any of them are of interest to you
+- Fork the repository on GitHub
+- Read the [README](https://github.com/kubermatic/kubermatic/blob/main/README.md) for build and test instructions

## Contribution Workflow

The below outlines show an example of what a contributor's workflow looks like:

-* Fork the repository on GitHub
-* Create a topic branch from where you want to base your work (usually main)
-* Make commits of logical units.
-* Make sure your commit messages are in the proper format
-* Push your changes to the topic branch in your fork repository
-* Make sure the tests pass and add any new tests as appropriate
-* Submit a pull request to the original repository
-* Assign a reviewer if you wish and wait for the PR to be reviewed
-* If everything works fine, your PR will be merged into the project's main branch
+- Fork the repository on GitHub
+- Create a topic branch from where you want to base your work (usually main)
+- Make commits of logical units
+- Make sure your commit messages are in the proper format
+- Push your changes to the topic branch in your fork repository
+- Make sure the tests pass and add any new tests as appropriate
+- Submit a pull request to the original repository (a git sketch of this workflow follows below)
+- Assign a reviewer if you wish and wait for the PR to be reviewed
+- If everything works fine, your PR will be merged into the project's main branch

Congratulations! You have successfully contributed to the KKP project.
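The workflow above maps to only a handful of git commands. The following is a minimal sketch, assuming a fork of `kubermatic/kubermatic`; the fork URL, branch name, and commit message are placeholders, not prescribed values:

```bash
# Clone your fork and track the upstream repository (names are placeholders).
git clone git@github.com:<your-username>/kubermatic.git
cd kubermatic
git remote add upstream https://github.com/kubermatic/kubermatic.git
git fetch upstream

# Create a topic branch based on upstream's main branch.
git checkout -b my-topic-branch upstream/main

# Commit logical units; -s adds the Signed-off-by trailer used for the
# developer certificate of origin mentioned above.
git add .
git commit -s -m "Fix typo in contribution guide"

# Push the branch to your fork, then open a pull request on GitHub.
git push origin my-topic-branch
```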
diff --git a/content/kubermatic/main/images/ui/add-provider-first-step.png b/content/kubermatic/main/images/ui/add-provider-first-step.png index 3d8eaaff5..c2b01134a 100644 Binary files a/content/kubermatic/main/images/ui/add-provider-first-step.png and b/content/kubermatic/main/images/ui/add-provider-first-step.png differ diff --git a/content/kubermatic/main/images/ui/download-kubeconfig.png b/content/kubermatic/main/images/ui/download-kubeconfig.png new file mode 100644 index 000000000..4f16da95f Binary files /dev/null and b/content/kubermatic/main/images/ui/download-kubeconfig.png differ diff --git a/content/kubermatic/main/images/ui/page-title.png b/content/kubermatic/main/images/ui/page-title.png new file mode 100644 index 000000000..f0d16cc8c Binary files /dev/null and b/content/kubermatic/main/images/ui/page-title.png differ diff --git a/content/kubermatic/main/images/ui/share.png b/content/kubermatic/main/images/ui/share.png index 93cb8bb10..5d0c6eefd 100644 Binary files a/content/kubermatic/main/images/ui/share.png and b/content/kubermatic/main/images/ui/share.png differ diff --git a/content/kubermatic/main/installation/install-kkp-ce/_index.en.md b/content/kubermatic/main/installation/install-kkp-ce/_index.en.md index 18b70c3bc..2e8afcbb6 100644 --- a/content/kubermatic/main/installation/install-kkp-ce/_index.en.md +++ b/content/kubermatic/main/installation/install-kkp-ce/_index.en.md @@ -35,6 +35,7 @@ For this guide you need to have [kubectl](https://kubernetes.io/docs/tasks/tools You should be familiar with core Kubernetes concepts and the YAML file format before proceeding. + In addition, we recommend familiarizing yourself with the resource quota system of your infrastructure provider. It is important to provide enough capacity to let KKP provision infrastructure for your future user clusters, but also to enforce a maximum to protect against overspending. {{< tabs name="resource-quotas" >}} @@ -132,14 +133,14 @@ The release archive hosted on GitHub contains examples for both of the configura The key items to consider while preparing your configuration files are described in the table below. -| Description | YAML Paths and File | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------- | -| The base domain under which KKP shall be accessible (e.g. `kkp.example.com`). | `.spec.ingress.domain` (`kubermatic.yaml`), `.dex.ingress.hosts[0].host` and `dex.ingress.tls[0].hosts[0]` (`values.yaml`); also adjust `.dex.config.staticClients[*].RedirectURIs` (`values.yaml`) according to your domain. | -| The certificate issuer for KKP (KKP requires it since the dashboard and Dex are accessible only via HTTPS); by default cert-manager is used, but you have to reference an issuer that you need to create later on. | `.spec.ingress.certificateIssuer.name` (`kubermatic.yaml`) | -| For proper authentication, shared secrets must be configured between Dex and KKP. Likewise, Dex uses yet another random secret to encrypt cookies stored in the users' browsers. | `.dex.config.staticClients[*].secret` (`values.yaml`), `.spec.auth.issuerClientSecret` (`kubermatic.yaml`); this needs to be equal to `.dex.config.staticClients[name=="kubermaticIssuer"].secret` (`values.yaml`), `.spec.auth.issuerCookieKey` and `.spec.auth.serviceAccountKey` (both `kubermatic.yaml`) | -| To authenticate via an external identity provider, you need to set up connectors in Dex. 
Check out [the Dex documentation](https://dexidp.io/docs/connectors/) for a list of available providers. This is not required, but highly recommended for multi-user installations. | `.dex.config.connectors` (`values.yaml`; commented in example file) | -| The expose strategy which controls how control plane components of a User Cluster are exposed to worker nodes and users. See [the expose strategy documentation]({{< ref "../../tutorials-howtos/networking/expose-strategies/" >}}) for available options. Defaults to `NodePort` strategy, if not set. | `.spec.exposeStrategy` (`kubermatic.yaml`; not included in example file) | -| Telemetry used to track the KKP and k8s cluster usage, uuid field is required and will print an error message when that entry is missing. | `.telemetry.uuid` (`values.yaml`) | +| Description | YAML Paths and File | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| The base domain under which KKP shall be accessible (e.g. `kkp.example.com`). | `.spec.ingress.domain` (`kubermatic.yaml`), `.dex.ingress.hosts[0].host` and `dex.ingress.tls[0].hosts[0]` (`values.yaml`); also adjust `.dex.config.staticClients[*].RedirectURIs` (`values.yaml`) according to your domain. | +| The certificate issuer for KKP (KKP requires it since the dashboard and Dex are accessible only via HTTPS); by default cert-manager is used, but you have to reference an issuer that you need to create later on. | `.spec.ingress.certificateIssuer.name` (`kubermatic.yaml`) | +| For proper authentication, shared secrets must be configured between Dex and KKP. Likewise, Dex uses yet another random secret to encrypt cookies stored in the users' browsers. | `.dex.config.staticClients[*].secret` (`values.yaml`), `.spec.auth.issuerClientSecret` (`kubermatic.yaml`); this needs to be equal to `.dex.config.staticClients[name=="kubermaticIssuer"].secret` (`values.yaml`), `.spec.auth.issuerCookieKey` and `.spec.auth.serviceAccountKey` (both `kubermatic.yaml`) | +| To authenticate via an external identity provider, you need to set up connectors in Dex. Check out [the Dex documentation](https://dexidp.io/docs/connectors/) for a list of available providers. This is not required, but highly recommended for multi-user installations. | `.dex.config.connectors` (`values.yaml`; commented in example file) | +| The expose strategy which controls how control plane components of a User Cluster are exposed to worker nodes and users. See [the expose strategy documentation]({{< ref "../../tutorials-howtos/networking/expose-strategies/" >}}) for available options. Defaults to `NodePort` strategy, if not set. | `.spec.exposeStrategy` (`kubermatic.yaml`; not included in example file) | +| Telemetry used to track the KKP and k8s cluster usage, uuid field is required and will print an error message when that entry is missing. | `.telemetry.uuid` (`values.yaml`) | There are many more options, but these are essential to get a minimal system up and running. 
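As a rough sketch of how the essential `kubermatic.yaml` settings from the table above fit together (the domain follows the table's example, while the issuer name and secret values are placeholders you must replace):

```yaml
apiVersion: kubermatic.k8c.io/v1
kind: KubermaticConfiguration
metadata:
  name: kubermatic
  namespace: kubermatic
spec:
  ingress:
    # The base domain under which KKP shall be accessible.
    domain: kkp.example.com
    certificateIssuer:
      # References a cert-manager issuer that you create later on;
      # the name used here is an assumption.
      name: letsencrypt-prod
  auth:
    # Must equal .dex.config.staticClients[name=="kubermaticIssuer"].secret
    # in values.yaml.
    issuerClientSecret: <generated-secret>
    issuerCookieKey: <generated-secret>
    serviceAccountKey: <generated-secret>
```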
A full reference of all options can be found in the [KubermaticConfiguration Reference]({{< relref "../../references/crds/#kubermaticconfigurationspec" >}}). The secret keys mentioned above can be generated using any password generator or on the shell using diff --git a/content/kubermatic/main/installation/install-kkp-ce/add-seed-cluster/_index.en.md b/content/kubermatic/main/installation/install-kkp-ce/add-seed-cluster/_index.en.md index c0aa6b013..52cc68ae9 100644 --- a/content/kubermatic/main/installation/install-kkp-ce/add-seed-cluster/_index.en.md +++ b/content/kubermatic/main/installation/install-kkp-ce/add-seed-cluster/_index.en.md @@ -29,9 +29,9 @@ about the cluster relationships. In this chapter, you will find the following KKP-specific terms: -* **Master Cluster** -- A Kubernetes cluster which is responsible for storing central information about users, projects and SSH keys. It hosts the KKP master components and might also act as a seed cluster. -* **Seed Cluster** -- A Kubernetes cluster which is responsible for hosting the control plane components (kube-apiserver, kube-scheduler, kube-controller-manager, etcd and more) of a User Cluster. -* **User Cluster** -- A Kubernetes cluster created and managed by KKP, hosting applications managed by users. +- **Master Cluster** -- A Kubernetes cluster which is responsible for storing central information about users, projects and SSH keys. It hosts the KKP master components and might also act as a seed cluster. +- **Seed Cluster** -- A Kubernetes cluster which is responsible for hosting the control plane components (kube-apiserver, kube-scheduler, kube-controller-manager, etcd and more) of a User Cluster. +- **User Cluster** -- A Kubernetes cluster created and managed by KKP, hosting applications managed by users. ## Overview @@ -82,6 +82,7 @@ a separate storage class with a different location/security level. The following {{< tabs name="StorageClass Creation" >}} {{% tab name="AWS" %}} + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -91,8 +92,10 @@ provisioner: kubernetes.io/aws-ebs parameters: type: sc1 ``` + {{% /tab %}} {{% tab name="Azure" %}} + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -103,8 +106,10 @@ parameters: kind: Managed storageaccounttype: Standard_LRS ``` + {{% /tab %}} {{% tab name="GCP" %}} + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -114,8 +119,10 @@ provisioner: kubernetes.io/gce-pd parameters: type: pd-ssd ``` + {{% /tab %}} {{% tab name="vSphere" %}} + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -123,8 +130,10 @@ metadata: name: kubermatic-backup provisioner: csi.vsphere.vmware.com ``` + {{% /tab %}} {{% tab name="Other Providers" %}} + For other providers, please refer to the respective CSI driver documentation. It should guide you through setting up a `StorageClass`. Ensure that the `StorageClass` you create is named `kubermatic-backup`. The final resource should look something like this: ```yaml @@ -139,6 +148,7 @@ parameters: parameter1: value1 parameter2: value2 ``` + {{% /tab %}} {{< /tabs >}} @@ -174,7 +184,7 @@ cluster backups. ## Installation -The Kubermatic Installler is the recommended way to setup new seed clusters. A manual installation is possible if you do not want +The Kubermatic Installer is the recommended way to setup new seed clusters. A manual installation is possible if you do not want to use the installer. {{% notice note %}} @@ -199,7 +209,7 @@ a suitable `StorageClass` and is therefore still recommended to use. 
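Before proceeding with either installation option below, it can be worth verifying that the backup `StorageClass` described above actually exists on the seed cluster; a quick check could look like this (kubeconfig/context are assumed to point at the seed cluster):

```bash
# Should list the class created above; an error here means the
# StorageClass still needs to be created.
kubectl get storageclass kubermatic-backup
```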
### Option 1: Use the Installer -Similar to how the Master Cluster can be installed with the installler, run the `deploy kubermatic-seed` command. You still need to +Similar to how the Master Cluster can be installed with the installer, run the `deploy kubermatic-seed` command. You still need to manually ensure that the StorageClass you configured for MinIO exists already. ```bash @@ -369,7 +379,7 @@ Key considerations for creating your `Seed` resource are: ### Configure Datacenters -Each `Seed` has a map of so-called _Datacenters_ (under `.spec.datacenters`), which define the cloud +Each `Seed` has a map of so-called *Datacenters* (under `.spec.datacenters`), which define the cloud provider locations that User Clusters can be deployed to. Every datacenter name is globally unique in a KKP setup. Users will select from a list of datacenters when creating User Clusters and their clusters will automatically get scheduled to the seed that defines that datacenter. @@ -380,6 +390,7 @@ datacenters: {{< tabs name="Datacenter Examples" >}} {{% tab name="AWS" %}} + ```yaml # Datacenter for AWS 'eu-central-1' region aws-eu-central-1a: @@ -396,8 +407,10 @@ aws-eu-west-1a: aws: region: eu-west-1 ``` + {{% /tab %}} {{% tab name="Azure" %}} + ```yaml # Datacenter for Azure 'westeurope' location azure-westeurope: @@ -407,8 +420,10 @@ azure-westeurope: azure: location: westeurope ``` + {{% /tab %}} {{% tab name="GCP" %}} + ```yaml # Datacenter for GCP 'europe-west3' region # this is configured to use three availability zones and spread cluster resources across them @@ -421,8 +436,10 @@ gce-eu-west-3: regional: true zoneSuffixes: [a,b,c] ``` + {{% /tab %}} {{% tab name="vSphere" %}} + ```yaml # Datacenter for a vSphere setup available under https://vsphere.hamburg.example.com vsphere-hamburg: @@ -438,10 +455,13 @@ vsphere-hamburg: templates: ubuntu: ubuntu-20.04-server-cloudimg-amd64 ``` + {{% /tab %}} {{% tab name="Other Providers" %}} + For additional providers supported by KKP, please check out our [DatacenterSpec CRD documentation]({{< ref "../../../references/crds/#datacenterspec" >}}) for the respective provider you want to use. + {{% /tab %}} {{< /tabs >}} @@ -535,6 +555,7 @@ kubectl apply -f seed-with-secret.yaml #Secret/kubeconfig-kubermatic created. #Seed/kubermatic created. ``` + You can watch the progress by using `kubectl` and `watch` on the master cluster: ```bash @@ -543,7 +564,7 @@ watch kubectl -n kubermatic get seeds #kubermatic 0 Hamburg v2.21.2 v1.24.8 Healthy 5m ``` -Watch the `PHASE` column until it shows "_Healthy_". If it does not after a couple of minutes, you can check +Watch the `PHASE` column until it shows "*Healthy*". If it does not after a couple of minutes, you can check the `kubermatic` namespace on the new seed cluster and verify if there are any Pods showing signs of issues: ```bash diff --git a/content/kubermatic/main/installation/install-kkp-ee/add-seed-cluster/_index.en.md b/content/kubermatic/main/installation/install-kkp-ee/add-seed-cluster/_index.en.md index 86ca0cdcb..8874885ca 100644 --- a/content/kubermatic/main/installation/install-kkp-ee/add-seed-cluster/_index.en.md +++ b/content/kubermatic/main/installation/install-kkp-ee/add-seed-cluster/_index.en.md @@ -18,9 +18,9 @@ Please [contact sales](mailto:sales@kubermatic.com) to receive your credentials. In this chapter, you will find the following KKP-specific terms: -* **Master Cluster** -- A Kubernetes cluster which is responsible for storing central information about users, projects and SSH keys. 
It hosts the KKP master components and might also act as a seed cluster. -* **Seed Cluster** -- A Kubernetes cluster which is responsible for hosting the control plane components (kube-apiserver, kube-scheduler, kube-controller-manager, etcd and more) of a User Cluster. -* **User Cluster** -- A Kubernetes cluster created and managed by KKP, hosting applications managed by users. +- **Master Cluster** -- A Kubernetes cluster which is responsible for storing central information about users, projects and SSH keys. It hosts the KKP master components and might also act as a seed cluster. +- **Seed Cluster** -- A Kubernetes cluster which is responsible for hosting the control plane components (kube-apiserver, kube-scheduler, kube-controller-manager, etcd and more) of a User Cluster. +- **User Cluster** -- A Kubernetes cluster created and managed by KKP, hosting applications managed by users. It is also recommended to make yourself familiar with our [architecture documentation]({{< ref "../../../architecture/" >}}). diff --git a/content/kubermatic/main/installation/local-installation/_index.en.md b/content/kubermatic/main/installation/local-installation/_index.en.md index 122caeb8b..b8463282c 100644 --- a/content/kubermatic/main/installation/local-installation/_index.en.md +++ b/content/kubermatic/main/installation/local-installation/_index.en.md @@ -80,7 +80,6 @@ tar -xzvf "kubermatic-${KUBERMATIC_EDITION}-v${VERSION}-darwin-${ARCH}.tar.gz" You can find more information regarding the download instructions in the [CE installation guide](../install-kkp-ce/#download-the-installer) or [EE installation guide](../install-kkp-ee/#download-the-installer). **2. Provide the image pull secret (EE)** - This step is only required if you are using the enterprise edition installer. Replace `${AUTH_TOKEN}` with the Docker authentication JSON provided by Kubermatic and run the following command: ```bash @@ -135,8 +134,8 @@ By default, KubeVirt is configured to use hardware virtualization. If this is no On Linux, KubeVirt uses the inode notify kernel subsystem `inotify` to watch for changes in certain files. Usually you shouldn't need to configure this but in case you can observe the `virt-handler` failing with -``` -kubectl log -nkubevirt ds/virt-handler +```bash +kubectl log -n kubevirt ds/virt-handler ... {"component":"virt-handler","level":"fatal","msg":"Failed to create an inotify watcher","pos":"cert-manager.go:105","reason":"too many open files","timestamp":"2023-06-22T09:58:24.284130Z"} ``` diff --git a/content/kubermatic/main/installation/offline-mode/_index.en.md b/content/kubermatic/main/installation/offline-mode/_index.en.md index 5b182f9c7..fb9f63c7a 100644 --- a/content/kubermatic/main/installation/offline-mode/_index.en.md +++ b/content/kubermatic/main/installation/offline-mode/_index.en.md @@ -23,13 +23,13 @@ without Docker. There are a number of sources for container images used in a KKP setup: -* The container images used by KKP itself (e.g. `quay.io/kubermatic/kubermatic`) -* The images used by the various Helm charts used to deploy KKP (nginx, cert-manager, +- The container images used by KKP itself (e.g. `quay.io/kubermatic/kubermatic`) +- The images used by the various Helm charts used to deploy KKP (nginx, cert-manager, Grafana, ...) -* The images used for creating a user cluster control plane (the Kubernetes apiserver, +- The images used for creating a user cluster control plane (the Kubernetes apiserver, scheduler, metrics-server, ...). 
-* The images referenced by cluster [Addons]({{< ref "../../architecture/concept/kkp-concepts/addons/" >}}). -* The images referenced in system [Applications]({{< ref "../../tutorials-howtos/applications/" >}}). +- The images referenced by cluster [Addons]({{< ref "../../architecture/concept/kkp-concepts/addons/" >}}). +- The images referenced in system [Applications]({{< ref "../../tutorials-howtos/applications/" >}}). To make it easier to collect all required images, the `kubermatic-installer mirror-images` utility is provided. It will scan KKP source code and Helm charts included in a KKP release to determine all images that need to be mirrored. @@ -93,7 +93,6 @@ pass `--registry-prefix 'docker.io'` to `kubermatic-installer mirror-images`. ### Addons - Note that by default, `kubermatic-installer mirror-images` will determine the addons container image based on the `KubermaticConfiguration` file, pull it down and then extract the addon manifests from the image, so that it can then scan them for container images to mirror. @@ -117,6 +116,7 @@ you should pass the `--addons-image` flag instead to reference a non-standard ad The `mirrorImages` field in the `KubermaticConfiguration` allows you to specify additional container images to mirror during the `kubermatic-installer mirror-images` command, simplifying air-gapped setups. Example: + ```yaml apiVersion: kubermatic.k8c.io/v1 kind: KubermaticConfiguration @@ -130,7 +130,8 @@ spec: ## Mirroring Binaries -The `kubermatic-installer mirror-binaries` command is designed to **mirror and host essential binaries** required by the Operating System Profiles for provisioning user clusters in **offline/airgapped environments**. This includes critical components like: +The `kubermatic-installer mirror-binaries` command is designed to **mirror and host essential binaries** required by the Operating System Profiles for provisioning user clusters in **offline/airgapped environments**. This includes critical components like: + - **Kubernetes binaries**: `kubeadm`, `kubelet`, `kubectl` - **CNI plugins** (e.g., bridge, ipvlan, loopback, macvlan, etc) - **CRI tools** (e.g., `crictl`) @@ -142,7 +143,8 @@ The default output directory (`/usr/share/nginx/html/`) requires root permission ### Key Features -#### Mirrors Original Domain Structure: +#### Mirrors Original Domain Structure + Binaries are stored in the **exact directory hierarchy** as their original domains (e.g., `containernetworking/plugins/releases/v1.5.1/...`). This allows **DNS-based redirection** of domains like `github.com` or `k8s.gcr.io` to your local/offline server, ensuring the OSP fetches binaries from the mirrored paths **without URL reconfiguration** or **Operating System Profile** changes. ### Example Workflow @@ -162,7 +164,7 @@ INFO[0033] ✅ Finished loading images. ### Example of the Directory Structure -``` +```bash . ├── containernetworking # CNI plugins (Container Network Interface) │ └── plugins @@ -248,6 +250,7 @@ kubectl -n kubermatic get seeds ``` Output will be similar to this: + ```bash #NAME AGE #hamburg 143d diff --git a/content/kubermatic/main/installation/single-node-setup/_index.en.md b/content/kubermatic/main/installation/single-node-setup/_index.en.md index aa2daaf66..984cf2424 100644 --- a/content/kubermatic/main/installation/single-node-setup/_index.en.md +++ b/content/kubermatic/main/installation/single-node-setup/_index.en.md @@ -18,7 +18,7 @@ In this **Get Started with KKP** guide, we will be using AWS Cloud as our underl ## Prerequisites 1. 
[Terraform >v1.0.0](https://www.terraform.io/downloads)
-2. [KubeOne](https://github.com/kubermatic/kubeone/releases)
+1. [KubeOne](https://github.com/kubermatic/kubeone/releases)

## Download the Repository

@@ -95,18 +95,18 @@ export KUBECONFIG=$PWD/aws/-kubeconfig

## Validate the KKP Master Setup

-* Get the LoadBalancer External IP by following command.
+- Get the LoadBalancer External IP with the following command.

```bash
kubectl get svc -n ingress-nginx
```

-* Update DNS mapping with External IP of the nginx ingress controller service. In case of AWS, the CNAME record mapping for $TODO_DNS with External IP should be created.
+- Update the DNS mapping with the External IP of the nginx ingress controller service. In case of AWS, a CNAME record mapping $TODO_DNS to the External IP should be created.

-* Nginx Ingress Controller Load Balancer configuration - Add the node to backend pool manually.
+- Nginx Ingress Controller Load Balancer configuration: add the node to the backend pool manually.

> **Known Issue**: Should be supported in the future as part of Feature request [#1822](https://github.com/kubermatic/kubeone/issues/1822)

-* Verify the Kubermatic resources and certificates
+- Verify the Kubermatic resources and certificates

```bash
kubectl -n kubermatic get deployments,pods
@@ -122,5 +122,6 @@ export KUBECONFIG=$PWD/aws/-kubeconfig

Finally, you should be able to login to KKP dashboard!

-Login to https://$TODO_DNS/
+Login to <https://$TODO_DNS/>
+
> Use username/password configured as part of Kubermatic configuration.
diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.19-to-2.20/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.19-to-2.20/_index.en.md
index 56de7db27..8e84c6133 100644
--- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.19-to-2.20/_index.en.md
+++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.19-to-2.20/_index.en.md
@@ -20,13 +20,13 @@ Migrating to KKP 2.20 requires a downtime of all reconciling and includes restar

The general migration procedure is as follows:

-* Shutdown KKP controllers/dashboard/API.
-* Create duplicate of all KKP resources in the new API groups.
-* Adjust the owner references in the new resources.
-* Remove finalizers and owner references from old objects.
-* Delete old objects.
-* Deploy new KKP 2.20 Operator.
-* The operator will reconcile and restart the remaining KKP controllers, dashboard and API.
+- Shutdown KKP controllers/dashboard/API.
+- Create duplicate of all KKP resources in the new API groups.
+- Adjust the owner references in the new resources.
+- Remove finalizers and owner references from old objects.
+- Delete old objects.
+- Deploy new KKP 2.20 Operator.
+- The operator will reconcile and restart the remaining KKP controllers, dashboard and API.

{{% notice note %}}
Creating clones of, for example, Secrets in a cluster namespace will lead to new resource versions on those cloned Secrets. These new resource versions will affect Deployments like the kube-apiserver once KKP is restarted and reconciles. This will in turn cause all Deployments/StatefulSets to rotate.
@@ -52,11 +52,11 @@ tar xzf kubermatic-ce-v2.20.0-linux-amd64.tar.gz

Before the migration can begin, a number of preflight checks need to happen first:

-* No KKP resource must be marked as deleted.
-* The new CRD files must be available on disk.
-* All seed clusters must be reachable.
-* Deprecated features which were removed in KKP 2.20 must not be used anymore.
-* (only before actual migration) No KKP controllers/webhooks must be running. +- No KKP resource must be marked as deleted. +- The new CRD files must be available on disk. +- All seed clusters must be reachable. +- Deprecated features which were removed in KKP 2.20 must not be used anymore. +- (only before actual migration) No KKP controllers/webhooks must be running. The first step is to get the kubeconfig file for the KKP **master** cluster. Set the `KUBECONFIG` variable pointing to it: @@ -199,12 +199,12 @@ When you're ready, start the migration: The installer will now -* perform the same preflight checks as the `preflight` command, plus it checks that no KKP controllers are running, -* create a backup of all KKP resources per seed cluster, -* install the new CRDs, -* migrate all KKP resources, -* adjust the owner references and -* optionally remove the old resources if `--remove-old-resources` was given (this can be done manually at any time later on). +- perform the same preflight checks as the `preflight` command, plus it checks that no KKP controllers are running, +- create a backup of all KKP resources per seed cluster, +- install the new CRDs, +- migrate all KKP resources, +- adjust the owner references and +- optionally remove the old resources if `--remove-old-resources` was given (this can be done manually at any time later on). {{% notice note %}} The command is idempotent and can be interrupted and restarted at any time. It will have to go through already migrated resources again, though. diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.21-to-2.22/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.21-to-2.22/_index.en.md index 128b20062..67f3e3379 100644 --- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.21-to-2.22/_index.en.md +++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.21-to-2.22/_index.en.md @@ -21,7 +21,7 @@ container runtime in KKP 2.22 is therefore containerd. As such, the upgrade will with Docker as container runtime. It is necessary to migrate **existing clusters and cluster templates** to containerd before proceeding. This can be done either via the Kubermatic Dashboard -or with `kubectl`. On the Dashboard, just edit the cluster or cluster template, change the _Container Runtime_ field to `containerd` and save your changes. +or with `kubectl`. On the Dashboard, just edit the cluster or cluster template, change the *Container Runtime* field to `containerd` and save your changes. ![Change Container Runtime](upgrade-container-runtime.png?classes=shadow,border&height=200 "Change Container Runtime") @@ -68,8 +68,8 @@ Before starting the upgrade, make sure your KKP Master and Seed clusters are hea Download the latest 2.22.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer. Make sure you have the `values.yaml` you used to deploy KKP 2.21 available and already adjusted for any 2.22 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation. 
From within the extracted directory, run the installer: -```sh -$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml +```bash +./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml # example output for a successful upgrade INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.22.0 @@ -120,8 +120,8 @@ Upgrading seed clusters is no longer necessary in KKP 2.22, unless you are runni You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be: -```sh -$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" +```bash +kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" kubermatic - {"clusters":5,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2023-02-16T10:53:34Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2023-02-14T16:50:09Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2023-02-14T16:50:14Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.24.10","kubermatic":"v2.22.0"}} ``` @@ -183,7 +183,7 @@ If a custom values file is required and is ready for use, `kubermatic-installer` uncomment the command flags that you need (e.g. `--helm-values` if you have a `mlavalues.yaml` to pass and `--mla-include-iap` if you are using IAP for MLA; both flags are optional). -```sh +```bash ./kubermatic-installer deploy usercluster-mla \ # uncomment if you are providing non-standard values # --helm-values mlavalues.yaml \ @@ -194,7 +194,6 @@ using IAP for MLA; both flags are optional). ## Post-Upgrade Considerations - ### KubeVirt Migration KubeVirt cloud provider support graduates to GA in KKP 2.22 and has gained several new features. However, KubeVirt clusters need to be migrated after the KKP 2.22 upgrade. [Instructions are available in KubeVirt provider documentation]({{< ref "../../../architecture/supported-providers/kubevirt#migration-from-kkp-221" >}}). diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.22-to-2.23/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.22-to-2.23/_index.en.md index 66e4c4620..f2229b1a3 100644 --- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.22-to-2.23/_index.en.md +++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.22-to-2.23/_index.en.md @@ -32,31 +32,31 @@ The JSON file contains a `format` key. If the output looks like {"version":"1","format":"xl-single","id":"5dc676ac-92f3-4c19-81d0-2304b366293c","xl":{"version":"3","this":"888f699a-2f22-402a-9e49-2e0fc9abd5c5","sets":[["888f699a-2f22-402a-9e49-2e0fc9abd5c5"]],"distributionAlgo":"SIPMOD+PARITY"}} ``` -you're good to go, no migration required. However if you receive +You're good to go, no migration required. 
However if you receive ```json {"version":"1","format":"fs","id":"baa787b5-43b6-4bcb-b1d7-acf46bcc0a05","fs":{"version":"2"}} ``` -you must either +You must either -* migrate according to the [migration guide](https://min.io/docs/minio/container/operations/install-deploy-manage/migrate-fs-gateway.html), which effectively involves setting up a second MinIO and copying each file over, or -* wipe your MinIO's storage (e.g. by deleting the PVC, see below), or -* pin the MinIO version to the last version that supports `fs`, which is `RELEASE.2022-10-24T18-35-07Z`, using the Helm values file (set `minio.image.tag=RELEASE.2022-10-24T18-35-07Z`). +- migrate according to the [migration guide](https://min.io/docs/minio/container/operations/install-deploy-manage/migrate-fs-gateway.html), which effectively involves setting up a second MinIO and copying each file over, or +- wipe your MinIO's storage (e.g. by deleting the PVC, see below), or +- pin the MinIO version to the last version that supports `fs`, which is `RELEASE.2022-10-24T18-35-07Z`, using the Helm values file (set `minio.image.tag=RELEASE.2022-10-24T18-35-07Z`). The KKP installer will, when installing the seed dependencies, perform an automated check and will refuse to upgrade if the existing MinIO volume uses the old `fs` driver. If the contents of MinIO is expendable, instead of migrating it's also possible to wipe (**deleting all data**) MinIO's storage entirely. There are several ways to go about this, for example: ```bash -$ kubectl --namespace minio scale deployment/minio --replicas=0 +kubectl --namespace minio scale deployment/minio --replicas=0 #deployment.apps/minio scaled -$ kubectl --namespace minio delete pvc minio-data +kubectl --namespace minio delete pvc minio-data #persistentvolumeclaim "minio-data" deleted # re-install MinIO chart manually -$ helm --namespace minio upgrade minio ./charts/minio --values myhelmvalues.yaml +helm --namespace minio upgrade minio ./charts/minio --values myhelmvalues.yaml #Release "minio" has been upgraded. Happy Helming! #NAME: minio #LAST DEPLOYED: Mon Jul 24 13:40:51 2023 @@ -65,7 +65,7 @@ $ helm --namespace minio upgrade minio ./charts/minio --values myhelmvalues.yaml #REVISION: 2 #TEST SUITE: None -$ kubectl --namespace minio scale deployment/minio --replicas=1 +kubectl --namespace minio scale deployment/minio --replicas=1 #deployment.apps/minio scaled ``` @@ -97,8 +97,8 @@ Before starting the upgrade, make sure your KKP Master and Seed clusters are hea Download the latest 2.23.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer. Make sure you have the `values.yaml` you used to deploy KKP 2.22 available and already adjusted for any 2.23 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation. 
From within the extracted directory, run the installer: -```sh -$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml +```bash +./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml # example output for a successful upgrade INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.23.0 @@ -152,12 +152,13 @@ A breaking change in the `minio` Helm chart shipped in KKP v2.23.0 has been iden Upgrading seed cluster is not necessary unless User Cluster MLA has been installed. All other KKP components on the seed will be upgraded automatically. + You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be: -```sh -$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" +```bash +kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" kubermatic - {"clusters":5,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2023-02-16T10:53:34Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2023-02-14T16:50:09Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2023-02-14T16:50:14Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.24.10","kubermatic":"v2.23.0"}} ``` diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.24-to-2.25/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.24-to-2.25/_index.en.md index 428f82e70..f413cff4f 100644 --- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.24-to-2.25/_index.en.md +++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.24-to-2.25/_index.en.md @@ -59,31 +59,31 @@ The JSON file contains a `format` key. If the output looks like {"version":"1","format":"xl-single","id":"5dc676ac-92f3-4c19-81d0-2304b366293c","xl":{"version":"3","this":"888f699a-2f22-402a-9e49-2e0fc9abd5c5","sets":[["888f699a-2f22-402a-9e49-2e0fc9abd5c5"]],"distributionAlgo":"SIPMOD+PARITY"}} ``` -you're good to go, no migration required. However if you receive +You're good to go, no migration required. However if you receive ```json {"version":"1","format":"fs","id":"baa787b5-43b6-4bcb-b1d7-acf46bcc0a05","fs":{"version":"2"}} ``` -you must either +You must either -* migrate according to the [migration guide](https://min.io/docs/minio/container/operations/install-deploy-manage/migrate-fs-gateway.html), which effectively involves setting up a second MinIO and copying each file over, or -* wipe your MinIO's storage (e.g. by deleting the PVC, see below), or -* pin the MinIO version to the last version that supports `fs`, which is `RELEASE.2022-10-24T18-35-07Z`, using the Helm values file (set `minio.image.tag=RELEASE.2022-10-24T18-35-07Z`). +- migrate according to the [migration guide](https://min.io/docs/minio/container/operations/install-deploy-manage/migrate-fs-gateway.html), which effectively involves setting up a second MinIO and copying each file over, or +- wipe your MinIO's storage (e.g. 
by deleting the PVC, see below), or +- pin the MinIO version to the last version that supports `fs`, which is `RELEASE.2022-10-24T18-35-07Z`, using the Helm values file (set `minio.image.tag=RELEASE.2022-10-24T18-35-07Z`). The KKP installer will, when installing the `usercluster-mla` stack, perform an automated check and will refuse to upgrade if the existing MinIO volume uses the old `fs` driver. If the contents of MinIO is expendable, instead of migrating it's also possible to wipe (**deleting all data**) MinIO's storage entirely. There are several ways to go about this, for example: ```bash -$ kubectl --namespace mla scale deployment/minio --replicas=0 +kubectl --namespace mla scale deployment/minio --replicas=0 #deployment.apps/minio scaled -$ kubectl --namespace mla delete pvc minio-data +kubectl --namespace mla delete pvc minio-data #persistentvolumeclaim "minio-data" deleted # re-install MinIO chart manually -$ helm --namespace mla upgrade minio ./charts/minio --values myhelmvalues.yaml +helm --namespace mla upgrade minio ./charts/minio --values myhelmvalues.yaml #Release "minio" has been upgraded. Happy Helming! #NAME: minio #LAST DEPLOYED: Mon Jul 24 13:40:51 2023 @@ -92,7 +92,7 @@ $ helm --namespace mla upgrade minio ./charts/minio --values myhelmvalues.yaml #REVISION: 2 #TEST SUITE: None -$ kubectl --namespace mla scale deployment/minio --replicas=1 +kubectl --namespace mla scale deployment/minio --replicas=1 #deployment.apps/minio scaled ``` @@ -108,8 +108,8 @@ Before starting the upgrade, make sure your KKP Master and Seed clusters are hea Download the latest 2.25.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer. Make sure you have the `values.yaml` you used to deploy KKP 2.24 available and already adjusted for any 2.25 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation. From within the extracted directory, run the installer: -```sh -$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml +```bash +./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml # example output for a successful upgrade INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.25.0 @@ -160,8 +160,8 @@ Upgrading seed clusters is not necessary, unless you are running the `minio` Hel You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. 
A possible command to extract the current status by seed would be: -```sh -$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" +```bash +kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" kubermatic - {"clusters":5,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2024-03-11T10:53:34Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2024-03-11T16:50:09Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2024-03-11T16:50:14Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.27.11","kubermatic":"v2.25.0"}} ``` diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.25-to-2.26/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.25-to-2.26/_index.en.md index 72dde1168..bbfcab911 100644 --- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.25-to-2.26/_index.en.md +++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.25-to-2.26/_index.en.md @@ -26,8 +26,8 @@ Beginning with KKP 2.26, Helm chart versions now use strict semvers without a le KKP 2.26 ships a lot of major version upgrades for the Helm charts, most notably -* Loki & Promtail v2.5 to v2.9.x -* Grafana 9.x to 10.4.x +- Loki & Promtail v2.5 to v2.9.x +- Grafana 9.x to 10.4.x Some of these updates require manual intervention or at least checking whether a given KKP system is affected by upstream changes. Please read the following sections carefully before beginning the upgrade. @@ -41,17 +41,18 @@ Due to labelling changes, and in-place upgrade of Velero is not possible. It's r The switch to the upstream Helm chart requires adjusting the `values.yaml` used to install Velero. Most existing settings have a 1:1 representation in the new chart: -* `velero.podAnnotations` is now `velero.annotations` -* `velero.serverFlags` is now `velero.configuration.*` (each CLI flag is its own field in the YAML file, e.g. `serverFlags:["--log-format=json"]` would become `configuration.logFormat: "json"`) -* `velero.uploaderType` is now `velero.configuration.uploaderType`; note that the default has changed from restic to Kopia, see the next section below for more information. -* `velero.credentials` is now `velero.credentials.*` -* `velero.schedulesPath` is not available anymore, since putting additional files into a Helm chart before installing it is a rather unusual process. Instead, specify the desired schedules directly inside the `values.yaml` in `velero.schedules` -* `velero.backupStorageLocations` is now `velero.configuration.backupStorageLocation` -* `velero.volumeSnapshotLocations` is now `velero.configuration.volumeSnapshotLocation` -* `velero.defaultVolumeSnapshotLocations` is now `velero.configuration.defaultBackupStorageLocation` +- `velero.podAnnotations` is now `velero.annotations` +- `velero.serverFlags` is now `velero.configuration.*` (each CLI flag is its own field in the YAML file, e.g. `serverFlags:["--log-format=json"]` would become `configuration.logFormat: "json"`) +- `velero.uploaderType` is now `velero.configuration.uploaderType`; note that the default has changed from restic to Kopia, see the next section below for more information. 
+- `velero.credentials` is now `velero.credentials.*`
+- `velero.schedulesPath` is not available anymore, since putting additional files into a Helm chart before installing it is a rather unusual process. Instead, specify the desired schedules directly inside the `values.yaml` in `velero.schedules`
+- `velero.backupStorageLocations` is now `velero.configuration.backupStorageLocation`
+- `velero.volumeSnapshotLocations` is now `velero.configuration.volumeSnapshotLocation`
+- `velero.defaultVolumeSnapshotLocations` is now `velero.configuration.defaultBackupStorageLocation`

{{< tabs name="Velero Helm Chart Upgrades" >}}
{{% tab name="old Velero Chart" %}}
+
```yaml
velero:
  podAnnotations:
@@ -89,9 +90,11 @@ velero:

  schedulesPath: schedules/*
```
+
{{% /tab %}}

{{% tab name="new Velero Chart" %}}
+
```yaml
velero:
  annotations:
@@ -136,6 +139,7 @@ velero:
      aws_access_key_id=itsme
      aws_secret_access_key=andthisismypassword
```
+
{{% /tab %}}
{{< /tabs >}}

@@ -155,15 +159,15 @@ If you decide to switch to Kopia and do not need the restic repository anymore,

The configuration syntax for cert-manager has changed slightly.

-* Breaking: If you have `.featureGates` value set in `values.yaml`, the features defined there will no longer be passed to cert-manager webhook, only to cert-manager controller. Use the `webhook.featureGates` field instead to define features to be enabled on webhook.
-* Potentially breaking: Webhook validation of CertificateRequest resources is stricter now: all `KeyUsages` and `ExtendedKeyUsages` must be defined directly in the CertificateRequest resource, the encoded CSR can never contain more usages that defined there.
+- Breaking: If you have `.featureGates` value set in `values.yaml`, the features defined there will no longer be passed to cert-manager webhook, only to cert-manager controller. Use the `webhook.featureGates` field instead to define features to be enabled on webhook.
+- Potentially breaking: Webhook validation of CertificateRequest resources is stricter now: all `KeyUsages` and `ExtendedKeyUsages` must be defined directly in the CertificateRequest resource, the encoded CSR can never contain more usages than defined there.

### oauth2-proxy (IAP) 7.6

This upgrade includes one breaking change:

-* A change to how auth routes are evaluated using the flags `skip-auth-route`/`skip-auth-regex`: the new behaviour uses the regex you specify to evaluate the full path including query parameters. For more details please read the [detailed PR description](https://github.com/oauth2-proxy/oauth2-proxy/issues/2271).
-* The environment variable `OAUTH2_PROXY_GOOGLE_GROUP` has been deprecated in favor of `OAUTH2_PROXY_GOOGLE_GROUPS`. Next major release will remove this option.
+- A change to how auth routes are evaluated using the flags `skip-auth-route`/`skip-auth-regex`: the new behaviour uses the regex you specify to evaluate the full path including query parameters. For more details please read the [detailed PR description](https://github.com/oauth2-proxy/oauth2-proxy/issues/2271).
+- The environment variable `OAUTH2_PROXY_GOOGLE_GROUP` has been deprecated in favor of `OAUTH2_PROXY_GOOGLE_GROUPS`. Next major release will remove this option.
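If you rely on `skip-auth-route`/`skip-auth-regex`, it may be worth auditing your IAP Helm values before upgrading, since the regexes are now matched against the full path including query parameters. A minimal sketch, assuming your oauth2-proxy settings live in a `values.yaml` (file name and layout are assumptions; adjust to your setup):

```bash
# Flag regexes that assume the path ends without query parameters; with 7.6
# the regex is evaluated against path + query string.
grep -nE 'skip-auth-(route|regex)' values.yaml

# Find uses of the deprecated singular variable that should be renamed to
# OAUTH2_PROXY_GOOGLE_GROUPS before the next major release.
grep -nE 'OAUTH2_PROXY_GOOGLE_GROUP([^S]|$)' values.yaml
```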
### Loki & Promtail 2.9 (Seed MLA) @@ -171,16 +175,16 @@ The Loki upgrade from 2.5 to 2.9 might be the most significant bump in this KKP Before upgrading, review your `values.yaml` for Loki, as a number of syntax changes were made: -* Most importantly, `loki.config` is now a templated string that aggregates many other individual values specified in `loki`, for example `loki.tableManager` gets rendered into `loki.config.table_manager`, and `loki.loki.schemaConfig` gets rendered into `loki.config.schema_config`. To follow these changes, if you have `loki.config` in your `values.yaml`, rename it to `loki.loki`. Ideally you should not need to manually override the templating string in `loki.config` from the upstream chart anymore. Additionally, some values are moved out or renamed slightly: - * `loki.config.schema_config` becomes `loki.loki.schemaConfig` - * `loki.config.table_manager` becomes `loki.tableManager` (sic) - * `loki.config.server` was removed, if you need to specify something, use `loki.loki.server` -* The base volume path for the Loki PVC was changed from `/data/loki` to `/var/loki`. -* Configuration for the default image has changed, there is no `loki.image.repository` field anymore, it's now `loki.image.registry` and `loki.image.repository`. -* `loki.affinity` is now a templated string and enabled by default; if you use multiple Loki replicas, your cluster needs to have multiple nodes to host these pods. -* All fields related to the Loki pod (`loki.tolerations`, `loki.resources`, `loki.nodeSelector` etc.) were moved below `loki.singleBinary`. -* Self-monitoring, Grafana Agent and selftests are disabled by default now, reducing the default resource requirements for the logging stack. -* `loki.singleBinary.persistence.enableStatefulSetAutoDeletePVC` is set to `false` to ensure that when the StatefulSet is deleted, the PVCs will not also be deleted. This allows for easier upgrades in the +- Most importantly, `loki.config` is now a templated string that aggregates many other individual values specified in `loki`, for example `loki.tableManager` gets rendered into `loki.config.table_manager`, and `loki.loki.schemaConfig` gets rendered into `loki.config.schema_config`. To follow these changes, if you have `loki.config` in your `values.yaml`, rename it to `loki.loki`. Ideally you should not need to manually override the templating string in `loki.config` from the upstream chart anymore. Additionally, some values are moved out or renamed slightly: + - `loki.config.schema_config` becomes `loki.loki.schemaConfig` + - `loki.config.table_manager` becomes `loki.tableManager` (sic) + - `loki.config.server` was removed, if you need to specify something, use `loki.loki.server` +- The base volume path for the Loki PVC was changed from `/data/loki` to `/var/loki`. +- Configuration for the default image has changed, there is no `loki.image.repository` field anymore, it's now `loki.image.registry` and `loki.image.repository`. +- `loki.affinity` is now a templated string and enabled by default; if you use multiple Loki replicas, your cluster needs to have multiple nodes to host these pods. +- All fields related to the Loki pod (`loki.tolerations`, `loki.resources`, `loki.nodeSelector` etc.) were moved below `loki.singleBinary`. +- Self-monitoring, Grafana Agent and selftests are disabled by default now, reducing the default resource requirements for the logging stack. 
+- `loki.singleBinary.persistence.enableStatefulSetAutoDeletePVC` is set to `false` to ensure that when the StatefulSet is deleted, the PVCs will not also be deleted. This allows for easier upgrades in the
future, but if you scale down Loki, you would have to manually delete the leftover PVCs.

### Alertmanager 0.27 (Seed MLA)

@@ -205,39 +209,39 @@ Afterwards you can install the new release from the chart.

As is typical for kube-state-metrics, the upgrade is simple, but the devil is in the details. There were many minor changes since v2.8, please review [the changelog](https://github.com/kubernetes/kube-state-metrics/releases) carefully if you built upon metrics provided by kube-state-metrics:

-* The deprecated experimental VerticalPodAutoscaler metrics are no longer supported, and have been removed. It's recommend to use CustomResourceState metrics to gather metrics from custom resources like the Vertical Pod Autoscaler.
-* Label names were regulated to adhere with OTel-Prometheus standards, so existing label names that do not follow the same may be replaced by the ones that do. Please refer to [the PR](https://github.com/kubernetes/kube-state-metrics/pull/2004) for more details.
-* Label and annotation metrics aren't exposed by default anymore to reduce the memory usage of the default configuration of kube-state-metrics. Before this change, they used to only include the name and namespace of the objects which is not relevant to users not opting in these metrics.
+- The deprecated experimental VerticalPodAutoscaler metrics are no longer supported, and have been removed. It's recommended to use CustomResourceState metrics to gather metrics from custom resources like the Vertical Pod Autoscaler.
+- Label names were regulated to adhere to OTel-Prometheus standards, so existing label names that do not follow the same may be replaced by the ones that do. Please refer to [the PR](https://github.com/kubernetes/kube-state-metrics/pull/2004) for more details.
+- Label and annotation metrics aren't exposed by default anymore to reduce the memory usage of the default configuration of kube-state-metrics. Before this change, they used to only include the name and namespace of the objects which is not relevant to users not opting in these metrics.
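Since label and annotation metrics are now opt-in, it may be worth checking whether any of your alerting or recording rules still reference them before upgrading. A rough, non-authoritative check (the `monitoring` namespace and ConfigMap-based rule storage are assumptions based on the Seed MLA defaults):

```bash
# Search rule ConfigMaps for references to the now opt-in
# kube_*_labels / kube_*_annotations metric families.
kubectl -n monitoring get configmaps -o yaml \
  | grep -nE 'kube_[a-z]+_(labels|annotations)'
```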
### node-exporter 1.7 (Seed MLA)

This new version comes with a few minor backwards-incompatible changes:

-* metrics of offline CPUs in CPU collector were removed
-* bcache cache_readaheads_totals metrics were removed
-* ntp collector was deprecated
-* supervisord collector was deprecated
+- metrics of offline CPUs in CPU collector were removed
+- bcache cache_readaheads_totals metrics were removed
+- ntp collector was deprecated
+- supervisord collector was deprecated

### Prometheus 2.51 (Seed MLA)

Prometheus had many improvements and some changes to the remote-write functionality that might affect you:

-* Remote-write:
-  * raise default samples per send to 2,000
-  * respect `Retry-After` header on 5xx errors
-  * error `storage.ErrTooOldSample` is now generating HTTP error 400 instead of HTTP error 500
-* Scraping:
-  * Do experimental timestamp alignment even if tolerance is bigger than 1% of scrape interval
+- Remote-write:
+  - raise default samples per send to 2,000
+  - respect `Retry-After` header on 5xx errors
+  - error `storage.ErrTooOldSample` is now generating HTTP error 400 instead of HTTP error 500
+- Scraping:
+  - Do experimental timestamp alignment even if tolerance is bigger than 1% of scrape interval

### nginx-ingress-controller 1.10

nginx v1.10 brings quite a few potentially breaking changes:

-* does not support chroot image (this will be fixed on a future minor patch release)
-* dropped Opentracing and zipkin modules, just Opentelemetry is supported as of this release
-* dropped support for PodSecurityPolicy
-* dropped support for GeoIP (legacy), only GeoIP2 is supported
-* The automatically generated `NetworkPolicy` from nginx 1.9.3 is now disabled by default, refer to https://github.com/kubernetes/ingress-nginx/pull/10238 for more information.
+- does not support chroot image (this will be fixed on a future minor patch release)
+- dropped Opentracing and zipkin modules, just Opentelemetry is supported as of this release
+- dropped support for PodSecurityPolicy
+- dropped support for GeoIP (legacy), only GeoIP2 is supported
+- The automatically generated `NetworkPolicy` from nginx 1.9.3 is now disabled by default, refer to <https://github.com/kubernetes/ingress-nginx/pull/10238> for more information.

### Dex 2.40

@@ -253,8 +257,8 @@ Before starting the upgrade, make sure your KKP Master and Seed clusters are hea

Download the latest 2.26.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer. Make sure you have the `values.yaml` you used to deploy KKP 2.26 available and already adjusted for any 2.26 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation.
From within the extracted directory, run the installer: -```sh -$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml +```bash +./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml # example output for a successful upgrade INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.26.0 @@ -305,8 +309,8 @@ Upgrading seed clusters is not necessary, unless you are running the `minio` Hel You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be: -```sh -$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" +```bash +kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" kubermatic - {"clusters":5,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2024-03-11T10:53:34Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2024-03-11T16:50:09Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2024-03-11T16:50:14Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.27.11","kubermatic":"v2.25.0"}} ``` diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.26-to-2.27/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.26-to-2.27/_index.en.md index 37b92567c..6ecd399fd 100644 --- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.26-to-2.27/_index.en.md +++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.26-to-2.27/_index.en.md @@ -32,17 +32,20 @@ A regression in KKP v2.26.0 caused the floatingIPPool field in OpenStack cluster If your OpenStack clusters use a floating IP pool other than the default, you may need to manually update Cluster objects after upgrading to v2.27. -* Action Required: - * After the upgrade, check your OpenStack clusters and manually reset the correct floating IP pool if needed. - * Example command to check the floating IP pool - ```sh - kubectl get clusters -o jsonpath="{.items[*].spec.cloud.openstack.floatingIPPool}" - ``` - * If incorrect, manually edit the Cluster object: - ```sh - kubectl edit cluster - ``` +- Action Required: + - After the upgrade, check your OpenStack clusters and manually reset the correct floating IP pool if needed. + - Example command to check the floating IP pool + ```bash + kubectl get clusters -o jsonpath="{.items[*].spec.cloud.openstack.floatingIPPool}" + ``` + + - If incorrect, manually edit the Cluster object: + + ```bash + kubectl edit cluster + ``` + ### Velero Configuration Changes By default, Velero backups and snapshots are turned off. If you were using Velero for etcd backups and/or volume backups, you must explicitly enable them in your values.yaml file. 
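Once backups and snapshots are explicitly enabled again, a quick sanity check after the upgrade is to confirm that Velero's resources are still present and reconciling. A sketch, assuming the chart's default `velero` namespace:

```bash
# Verify that schedules and a backup storage location exist and that recent
# backups completed (adjust the namespace if you deploy Velero elsewhere).
kubectl -n velero get schedules,backupstoragelocations
kubectl -n velero get backups --sort-by=.metadata.creationTimestamp
```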
@@ -88,19 +91,20 @@ Because the namespace changes, both old and new Dex can temporarily live side-by To begin the migration, create a new `values.yaml` section for Dex (both old and new chart use `dex` as the top-level key in the YAML file) and migrate your existing configuration as follows: -* `dex.replicas` is now `dex.replicaCount` -* `dex.env` is now `dex.envVars` -* `dex.extraVolumes` is now `dex.volumes` -* `dex.extraVolumeMounts` is now `dex.volumeMounts` -* `dex.certIssuer` has been removed, admins must manually set the necessary annotations on the +- `dex.replicas` is now `dex.replicaCount` +- `dex.env` is now `dex.envVars` +- `dex.extraVolumes` is now `dex.volumes` +- `dex.extraVolumeMounts` is now `dex.volumeMounts` +- `dex.certIssuer` has been removed, admins must manually set the necessary annotations on the ingress to integrate with cert-manager. -* `dex.ingress` has changed internally: - * `class` is now `className` (the value "non-existent" is not supported anymore, use the `dex.ingress.enabled` field instead) - * `host` and `path` are gone, instead admins will have to manually define their Ingress configuration - * `scheme` is likewise gone and admins have to configure the `tls` section in the Ingress configuration +- `dex.ingress` has changed internally: + - `class` is now `className` (the value "non-existent" is not supported anymore, use the `dex.ingress.enabled` field instead) + - `host` and `path` are gone, instead admins will have to manually define their Ingress configuration + - `scheme` is likewise gone and admins have to configure the `tls` section in the Ingress configuration {{< tabs name="Dex Helm Chart values" >}} {{% tab name="old oauth Chart" %}} + ```yaml dex: replicas: 2 @@ -129,9 +133,11 @@ dex: name: letsencrypt-prod kind: ClusterIssuer ``` + {{% /tab %}} {{% tab name="new dex Chart" %}} + ```yaml # Tell the KKP installer to install the new dex Chart into the # "dex" namespace, instead of the old oauth Chart. @@ -166,19 +172,20 @@ dex: # above. - "kkp.example.com" ``` + {{% /tab %}} {{< /tabs >}} Additionally, Dex's own configuration is now more clearly separated from how Dex's Kubernetes manifests are configured. The following changes are required: -* In general, Dex's configuration is everything under `dex.config`. -* `dex.config.issuer` has to be set explicitly (the old `oauth` Chart automatically set it), usually to `https:///dex`, e.g. `https://kkp.example.com/dex`. -* `dex.connectors` is now `dex.config.connectors` -* `dex.expiry` is now `dex.config.expiry` -* `dex.frontend` is now `dex.config.frontend` -* `dex.grpc` is now `dex.config.grpc` -* `dex.clients` is now `dex.config.staticClients` -* `dex.staticPasswords` is now `dex.config.staticPasswords` (when using static passwords, you also have to set `dex.config.enablePasswordDB` to `true`) +- In general, Dex's configuration is everything under `dex.config`. +- `dex.config.issuer` has to be set explicitly (the old `oauth` Chart automatically set it), usually to `https:///dex`, e.g. `https://kkp.example.com/dex`. +- `dex.connectors` is now `dex.config.connectors` +- `dex.expiry` is now `dex.config.expiry` +- `dex.frontend` is now `dex.config.frontend` +- `dex.grpc` is now `dex.config.grpc` +- `dex.clients` is now `dex.config.staticClients` +- `dex.staticPasswords` is now `dex.config.staticPasswords` (when using static passwords, you also have to set `dex.config.enablePasswordDB` to `true`) Finally, theming support has changed. 
The old `oauth` Helm chart allowed inlining certain assets, like logos, as base64-encoded blobs into the Helm values. This mechanism is not available in the new `dex` Helm chart and admins have to manually provision the desired theme. KKP's Dex chart will set up a `dex-theme-kkp` ConfigMap, which is mounted into Dex and then overlays files over the default theme that ships with Dex. To customize, create your own ConfigMap/Secret and adjust `dex.volumes`, `dex.volumeMounts` and `dex.config.frontend.theme` / `dex.config.frontend.dir` accordingly.

@@ -192,6 +199,7 @@ kubectl rollout restart deploy kubermatic-api -n kubermatic
```

#### Important: Update OIDC Provider URL for Hostname Changes
+
Before configuring the UI to use the new URL, ensure that the new Dex installation is healthy by checking that the pods are running and the logs show no suspicious errors.

```bash
@@ -200,6 +208,7 @@ kubectl get pods -n dex

# To check the logs
kubectl logs -n dex deploy/dex
```
+
Next, verify the OpenID configuration by running:

```bash
@@ -236,16 +245,16 @@ spec:

Once you have verified that the new Dex installation is up and running, you can either

-* point KKP to the new Dex installation (if its new URL is meant to be permanent) by changing the `tokenIssuer` in the `KubermaticConfiguration`, or
-* delete the old `oauth` release (`helm -n oauth delete oauth`) and then re-deploy the new Dex release, but with the same host+path as the old `oauth` chart used, so that no further changes are necessary in downstream components like KKP. This will incur a short downtime, while no Ingress exists for the issuer URL configured in KKP.
+- point KKP to the new Dex installation (if its new URL is meant to be permanent) by changing the `tokenIssuer` in the `KubermaticConfiguration`, or
+- delete the old `oauth` release (`helm -n oauth delete oauth`) and then re-deploy the new Dex release, but with the same host+path as the old `oauth` chart used, so that no further changes are necessary in downstream components like KKP. This will incur a short downtime, while no Ingress exists for the issuer URL configured in KKP.

### API Changes

-* New Prometheus Overrides
-  * Added `spec.componentsOverride.prometheus` to allow overriding Prometheus replicas and tolerations.

-* Container Image Tagging
-  * Tagged KKP releases will no longer tag KKP images twice (with the Git tag and the Git hash), but only once with the Git tag. This ensures that existing hash-based container images do not suddenly change when a Git tag is set and the release job is run. Users of tagged KKP releases are not affected by this change.
+- New Prometheus Overrides
+  - Added `spec.componentsOverride.prometheus` to allow overriding Prometheus replicas and tolerations.

+- Container Image Tagging
+  - Tagged KKP releases will no longer tag KKP images twice (with the Git tag and the Git hash), but only once with the Git tag. This ensures that existing hash-based container images do not suddenly change when a Git tag is set and the release job is run. Users of tagged KKP releases are not affected by this change.

## Upgrade Procedure

@@ -255,8 +264,8 @@ Before starting the upgrade, make sure your KKP Master and Seed clusters are hea

Download the latest 2.27.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer.
Make sure you have the `values.yaml` you used to deploy KKP 2.27 available and already adjusted for any 2.27 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation. From within the extracted directory, run the installer: -```sh -$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml +```bash +./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml # example output for a successful upgrade INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.27.0 @@ -307,8 +316,8 @@ Upgrading seed clusters is not necessary, unless you are running the `minio` Hel You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be: -```sh -$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" +```bash +kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}" kubermatic - {"clusters":5,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2025-02-20T10:53:34Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2025-02-20T16:50:09Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2025-02-20T16:50:14Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.29.13","kubermatic":"v2.27.0"}} ``` @@ -320,13 +329,13 @@ Of particular interest to the upgrade process is if the `ResourcesReconciled` co Some functionality of KKP has been deprecated or removed with KKP 2.27. You should review the full [changelog](https://github.com/kubermatic/kubermatic/blob/main/docs/changelogs/CHANGELOG-2.27.md) and adjust any automation or scripts that might be using deprecated fields or features. Below is a list of changes that might affect you: -* The custom `oauth` Helm chart in KKP has been deprecated and will be replaced with a new Helm chart, `dex`, which is based on the [official upstream chart](https://github.com/dexidp/helm-charts/tree/master/charts/dex), in KKP 2.27. +- The custom `oauth` Helm chart in KKP has been deprecated and will be replaced with a new Helm chart, `dex`, which is based on the [official upstream chart](https://github.com/dexidp/helm-charts/tree/master/charts/dex), in KKP 2.27. -* Canal v3.19 and v3.20 addons have been removed. +- Canal v3.19 and v3.20 addons have been removed. -* kubermatic-installer `--docker-binary` flag has been removed from the kubermatic-installer `mirror-images` subcommand. +- kubermatic-installer `--docker-binary` flag has been removed from the kubermatic-installer `mirror-images` subcommand. -* The `K8sgpt` non-operator application has been deprecated and replaced by the `K8sgpt-operator`. The old application will be removed in future releases. +- The `K8sgpt` non-operator application has been deprecated and replaced by the `K8sgpt-operator`. The old application will be removed in future releases. 
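Given the removal of the Canal v3.19 and v3.20 addons noted above, it may help to check whether any user clusters on a seed are still pinned to those CNI versions before upgrading. A sketch (the jsonpath fields follow the `Cluster` CRD layout; verify them against your KKP version):

```bash
# Print each user cluster with its CNI plugin type and version; anything
# still on canal v3.19/v3.20 should be migrated to a newer Canal first.
kubectl get clusters -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.cniPlugin.type}{"/"}{.spec.cniPlugin.version}{"\n"}{end}'
```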
## Next Steps
diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.27-to-2.28/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.27-to-2.28/_index.en.md
index 0ff69f675..ac6f3bb1d 100644
--- a/content/kubermatic/main/installation/upgrading/upgrade-from-2.27-to-2.28/_index.en.md
+++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.27-to-2.28/_index.en.md
@@ -16,6 +16,26 @@ This guide will walk you through upgrading Kubermatic Kubernetes Platform (KKP)
Please review [known issues]({{< ref "../../../architecture/known-issues/" >}}) before upgrading to understand if any issues might affect you.
{{% /notice %}}

+### KubeVirt CSI driver operator version upgrade
+
+KKP 2.28 bumps the KubeVirt CSI driver operator version. Due to breaking upstream changes, the operator pod will crash on existing user clusters with the following error in its logs:
+
+```
+ kubevirt-csi-driver.go:120] cannot infer infra vm namespace
+```
+
+#### Migration Procedure
+
+To avoid this issue, add the following annotation to all worker nodes of every existing KubeVirt user cluster:
+
+```bash
+export ClusterID=
+
+kubectl annotate node --all "cluster.x-k8s.io/cluster-namespace=cluster-${ClusterID}" --overwrite
+```
+
+Alternatively, if rotating worker nodes is acceptable, rotating the worker nodes of all user clusters after the KKP upgrade also fixes the issue. Clusters created after the KKP upgrade are not affected.
+
### Node Exporter Upgrade (Seed MLA)

KKP 2.28 removes the custom Helm chart for Node Exporter and instead now reuses the official [upstream Helm chart](https://prometheus-community.github.io/helm-charts).
@@ -111,6 +131,7 @@ kubectl -n monitoring delete statefulset alertmanager

Afterwards you can install the new release from the chart using Helm CLI or using your favourite GitOps tool.

Finally, clean up the leftover PVC resources from the old Helm chart installation.
+
```bash
kubectl delete pvc -n monitoring -l app=alertmanager
```
@@ -253,9 +274,9 @@ Additionally, Dex's own configuration is now more clearly separated from how Dex

Finally, theming support has changed. The old `oauth` Helm chart allowed inlining certain assets, like logos, as base64-encoded blobs into the Helm values. This mechanism is not available in the new `dex` Helm chart and admins have to manually provision the desired theme. KKP's Dex chart will set up a `dex-theme-kkp` ConfigMap, which is mounted into Dex and then overlays files over the default theme that ships with Dex. To customize, create your own ConfigMap/Secret and adjust `dex.volumes`, `dex.volumeMounts` and `dex.config.frontend.theme` / `dex.config.frontend.dir` accordingly.

-**Note that you cannot have two Ingress objects with the same host names and paths. So if you install the new Dex in parallel to the old one, you will have to temporarily use a different hostname (e.g. `kkp.example.com/dex` for the old one and `kkp.example.com/dex2` for the new Dex installation).**
+**Note** that you cannot have two Ingress objects with the same host names and paths. So if you install the new Dex in parallel to the old one, you will have to temporarily use a different hostname (e.g. `kkp.example.com/dex` for the old one and `kkp.example.com/dex2` for the new Dex installation).
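Before installing the new chart in parallel, one way to spot a would-be collision is to list the Ingress hosts already in use on the master cluster (`kkp.example.com` is the example hostname from above):

```bash
# Show all Ingress hosts across namespaces; the new Dex Ingress must not
# duplicate an existing host+path combination.
kubectl get ingress -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.rules[*].host}{"\n"}{end}'
```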
-**Restarting Kubermatic API After Dex Migration**:
+**Restarting Kubermatic API After Dex Migration:**
If you choose to delete the `oauth` chart and immediately switch to the new `dex` chart without using a different hostname, it is recommended to restart the `kubermatic-api` to ensure proper functionality. You can do this by running the following command:

```bash
@@ -331,7 +352,7 @@ Upgrading seed clusters is not necessary, unless you are running the `minio` Hel

You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be:

-```sh
+```bash
$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}"

# Place holder for output
@@ -351,10 +372,10 @@ This retirement affects all customers using the Azure Basic Load Balancer SKU, w

If you have Basic Load Balancers deployed within Azure Cloud Services (extended support), these deployments will not be affected by this retirement, and no action is required for these specific instances.

For more details about this deprecation, please refer to the official Azure announcement:
-[https://azure.microsoft.com/en-us/updates?id=azure-basic-load-balancer-will-be-retired-on-30-september-2025-upgrade-to-standard-load-balancer](https://azure.microsoft.com/en-us/updates?id=azure-basic-load-balancer-will-be-retired-on-30-september-2025-upgrade-to-standard-load-balancer)
+<https://azure.microsoft.com/en-us/updates?id=azure-basic-load-balancer-will-be-retired-on-30-september-2025-upgrade-to-standard-load-balancer>

The Azure team has created an upgrade guideline, including required scripts to automate the migration process.
-Please refer to the official documentation for detailed upgrade instructions: [https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-basic-upgrade-guidance#upgrade-using-automated-scripts-recommended](https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-basic-upgrade-guidance#upgrade-using-automated-scripts-recommended)
+Please refer to the official documentation for detailed upgrade instructions: <https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-basic-upgrade-guidance#upgrade-using-automated-scripts-recommended>

## Next Steps

diff --git a/content/kubermatic/main/installation/upgrading/upgrade-from-2.28-to-2.29/_index.en.md b/content/kubermatic/main/installation/upgrading/upgrade-from-2.28-to-2.29/_index.en.md
new file mode 100644
index 000000000..a41a3d0f0
--- /dev/null
+++ b/content/kubermatic/main/installation/upgrading/upgrade-from-2.28-to-2.29/_index.en.md
@@ -0,0 +1,117 @@
++++
+title = "Upgrading to KKP 2.29"
+date = 2025-10-21T12:00:00+02:00
+weight = 10
++++
+
+{{% notice note %}}
+Upgrading to KKP 2.29 is only supported from version 2.28. Do not attempt to upgrade from versions prior to that and apply the upgrade step by step over minor versions instead (e.g. from [2.27 to 2.28]({{< ref "../upgrade-from-2.27-to-2.28/" >}}) and then to 2.29). It is also strongly advised to be on the latest 2.28.x patch release before upgrading to 2.29.
+{{% /notice %}}
+
+This guide will walk you through upgrading Kubermatic Kubernetes Platform (KKP) to version 2.29. For the full list of changes in this release, please check out the [KKP changelog for v2.29](https://github.com/kubermatic/kubermatic/blob/main/docs/changelogs/CHANGELOG-2.29.md). Please read the full document before proceeding with the upgrade.
+
+## Pre-Upgrade Considerations
+
+{{% notice warning %}}
+Please review [known issues]({{< ref "../../../architecture/known-issues/" >}}) before upgrading to understand if any issues might affect you.
+{{% /notice %}}
+
+### Alloy Helm Chart Integration and Promtail Removal (Seed MLA)
+
+KKP 2.29 fully replaces Promtail with Grafana Alloy for log shipping in seed clusters. When upgrading, the installer will remove Promtail if it was previously installed. **Alloy is now the only supported log shipper for seed cluster logs.**
+
+#### Migration Procedure
+
+If you are using the `kubermatic-installer` for the Seed MLA installation, it will take care of removing the resources of the deprecated Promtail Helm chart and installing the new Grafana Alloy Helm chart. You just need to run the following command:
+
+```bash
+./kubermatic-installer deploy seed-mla --helm-values values.yaml
+```
+
+If you are installing MLA via GitOps or manually using the Helm CLI, you must delete the existing Promtail Helm release before upgrading:
+
+```bash
+helm uninstall promtail -n logging
+```
+
+Afterwards you can install the new release from the chart using Helm CLI or using your favourite GitOps tool.
+
+## Upgrade Procedure
+
+Before starting the upgrade, make sure your KKP Master and Seed clusters are healthy with no failing or pending Pods. If any Pod is showing problems, investigate and fix the individual problems before applying the upgrade. This includes the control plane components for user clusters; unhealthy user clusters should not be submitted to an upgrade.
+
+### KKP Master Upgrade
+
+Download the latest 2.29.x release archive for the correct edition (`ce` for Community Edition, `ee` for Enterprise Edition) from [the release page](https://github.com/kubermatic/kubermatic/releases) and extract it locally on your computer. Make sure you have the `values.yaml` you used to deploy KKP 2.28 available and already adjusted for any 2.29 changes (also see [Pre-Upgrade Considerations](#pre-upgrade-considerations)), as you need to pass it to the installer. The `KubermaticConfiguration` is no longer necessary (unless you are adjusting it), as the KKP operator will use its in-cluster representation. From within the extracted directory, run the installer:
+
+```sh
+$ ./kubermatic-installer deploy kubermatic-master --helm-values path/to/values.yaml
+
+# example output for a successful upgrade
+INFO[0000] 🚀 Initializing installer… edition="Enterprise Edition" version=v2.29.0
+INFO[0000] 🚦 Validating the provided configuration…
+INFO[0000] ✅ Provided configuration is valid.
+INFO[0000] 🚦 Validating existing installation…
+INFO[0002] Checking seed cluster… seed=kubermatic
+INFO[0005] ✅ Existing installation is valid.
+INFO[0005] 🛫 Deploying KKP master stack…
+INFO[0005] 💾 Deploying kubermatic-fast StorageClass…
+INFO[0005] ✅ StorageClass exists, nothing to do.
+INFO[0005] 📦 Deploying nginx-ingress-controller…
+INFO[0008] Deploying Helm chart…
+INFO[0016] ✅ Success.
+INFO[0016] 📦 Deploying cert-manager…
+INFO[0017] Deploying Custom Resource Definitions…
+INFO[0028] Deploying Helm chart…
+INFO[0035] ✅ Success.
+INFO[0035] 📦 Deploying Dex…
+INFO[0037] Updating release from 2.28.3 to 2.29.0…
+INFO[0090] ✅ Success.
+INFO[0090] 📦 Deploying Kubermatic Operator…
+INFO[0090] Deploying Custom Resource Definitions…
+INFO[0118] Deploying Helm chart…
+INFO[0121] Updating release from 2.28.3 to 2.29.0…
+INFO[0211] ✅ Success.
+INFO[0211] 📝 Applying Kubermatic Configuration…
+INFO[0211] ✅ Success.
+INFO[0211] 📦 Deploying Telemetry…
+INFO[0212] Updating release from 2.28.3 to 2.29.0…
+INFO[0219] ✅ Success.
+INFO[0219] Deploying default Policy Template catalog
+INFO[0219] 📡 Determining DNS settings…
+INFO[0219] The main Ingress is ready.
+INFO[0220] Service : nginx-ingress-controller / nginx-ingress-controller
+INFO[0220] Ingress via hostname: <hostname>.eu-central-1.elb.amazonaws.com
+INFO[0220]
+INFO[0220] Please ensure your DNS settings for "<domain>" include the following records:
+INFO[0220]
+INFO[0220] <domain>. IN CNAME <hostname>.eu-central-1.elb.amazonaws.com.
+INFO[0220] *.<domain>. IN CNAME <hostname>.eu-central-1.elb.amazonaws.com.
+INFO[0220]
+INFO[0220] 🛬 Installation completed successfully. ✌
+
+```
+
+Upgrading seed clusters is not necessary, unless you are running the `minio` Helm chart or User Cluster MLA as distributed by KKP on them. They will be automatically upgraded by KKP components.
+
+You can follow the upgrade process by either supervising the Pods on master and seed clusters (by simply checking `kubectl get pods -n kubermatic` frequently) or checking status information for the `Seed` objects. A possible command to extract the current status by seed would be:
+
+```bash
+$ kubectl get seeds -A -o jsonpath="{range .items[*]}{.metadata.name} - {.status}{'\n'}{end}"
+
+kubermatic - {"clusters":0,"conditions":{"ClusterInitialized":{"lastHeartbeatTime":"2025-10-21T12:48:12Z","message":"All KKP CRDs have been installed successfully.","reason":"CRDsUpdated","status":"True"},"KubeconfigValid":{"lastHeartbeatTime":"2025-10-21T12:48:08Z","reason":"KubeconfigValid","status":"True"},"ResourcesReconciled":{"lastHeartbeatTime":"2025-10-21T12:48:16Z","reason":"ReconcilingSuccess","status":"True"}},"phase":"Healthy","versions":{"cluster":"v1.33.5","kubermatic":"v2.29.0"}}
+```
+
+Of particular interest to the upgrade process is if the `ResourcesReconciled` condition succeeded and if the `versions.kubermatic` field is showing the target KKP version. If this is not the case yet, the upgrade is still in flight. If the upgrade is stuck, try `kubectl -n kubermatic describe seed <seed-name>` to see what exactly is keeping the KKP Operator from updating the Seed cluster.
+
+## Post-Upgrade Considerations
+
+### Deprecations and Removals
+
+Some functionality of KKP has been deprecated or removed with KKP 2.29. You should review the full [changelog](https://github.com/kubermatic/kubermatic/blob/main/docs/changelogs/CHANGELOG-2.29.md) and adjust any automation or scripts that might be using deprecated fields or features. Below is a list of changes that might affect you:
+
+- **Promtail Removal**: As mentioned in the [Pre-Upgrade Considerations](#pre-upgrade-considerations), Promtail has been removed in favor of Grafana Alloy for log shipping in seed clusters. Ensure that any references to Promtail in your documentation or automation are updated accordingly.
+
+## Next Steps
+
+- Try out Kubernetes 1.34, the latest Kubernetes release shipping with this version of KKP.

diff --git a/content/kubermatic/main/references/crds/_index.en.md b/content/kubermatic/main/references/crds/_index.en.md
index c728feaf3..7421914b4 100644
--- a/content/kubermatic/main/references/crds/_index.en.md
+++ b/content/kubermatic/main/references/crds/_index.en.md
@@ -56,7 +56,7 @@ _Appears in:_
| --- | --- |
| `apiVersion` _string_ | `apps.kubermatic.k8c.io/v1`
| `kind` _string_ | `ApplicationDefinition`
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`.
| +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ApplicationDefinitionSpec](#applicationdefinitionspec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -76,7 +76,7 @@ ApplicationDefinitionList contains a list of ApplicationDefinition. | --- | --- | | `apiVersion` _string_ | `apps.kubermatic.k8c.io/v1` | `kind` _string_ | `ApplicationDefinitionList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ApplicationDefinition](#applicationdefinition) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -98,7 +98,7 @@ _Appears in:_ | `displayName` _string_ | {{< unsafe >}}DisplayName is the name for the application that will be displayed in the UI.{{< /unsafe >}} | | `description` _string_ | {{< unsafe >}}Description of the application. what is its purpose{{< /unsafe >}} | | `method` _[TemplateMethod](#templatemethod)_ | {{< unsafe >}}Method used to install the application{{< /unsafe >}} | -| `defaultValues` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#rawextension-runtime-pkg)_ | {{< unsafe >}}DefaultValues specify default values for the UI which are passed to helm templating when creating an application. Comments are not preserved.
Deprecated: Use DefaultValuesBlock instead. This field was deprecated in KKP 2.25 and will be removed in KKP 2.27+.{{< /unsafe >}} | +| `defaultValues` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#rawextension-runtime-pkg)_ | {{< unsafe >}}DefaultValues specify default values for the UI which are passed to helm templating when creating an application. Comments are not preserved.
Deprecated: Use DefaultValuesBlock instead. This field was deprecated in KKP 2.25 and will be removed in KKP 2.27+.{{< /unsafe >}} | | `defaultValuesBlock` _string_ | {{< unsafe >}}DefaultValuesBlock specifies default values for the UI which are passed to helm templating when creating an application. Comments are preserved.{{< /unsafe >}} | | `defaultNamespace` _[AppNamespaceSpec](#appnamespacespec)_ | {{< unsafe >}}DefaultNamespace specifies the default namespace which is used if a referencing ApplicationInstallation has no target namespace defined.
If unset, the name of the ApplicationDefinition is being used instead.{{< /unsafe >}} | | `defaultDeployOptions` _[DeployOptions](#deployoptions)_ | {{< unsafe >}}DefaultDeployOptions holds the settings specific to the templating method used to deploy the application.
These settings can be overridden in applicationInstallation.{{< /unsafe >}} | @@ -130,7 +130,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `apps.kubermatic.k8c.io/v1` | `kind` _string_ | `ApplicationInstallation` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ApplicationInstallationSpec](#applicationinstallationspec)_ | {{< unsafe >}}{{< /unsafe >}} | | `status` _[ApplicationInstallationStatus](#applicationinstallationstatus)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -150,9 +150,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | -| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | -| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | +| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | +| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | +| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | | `reason` _string_ | {{< unsafe >}}(brief) reason for the condition's last transition.{{< /unsafe >}} | | `message` _string_ | {{< unsafe >}}Human readable message indicating details about last transition.{{< /unsafe >}} | | `observedGeneration` _integer_ | {{< unsafe >}}observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.{{< /unsafe >}} | @@ -186,7 +186,7 @@ ApplicationInstallationList is a list of ApplicationInstallations. | --- | --- | | `apiVersion` _string_ | `apps.kubermatic.k8c.io/v1` | `kind` _string_ | `ApplicationInstallationList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ApplicationInstallation](#applicationinstallation) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -207,9 +207,9 @@ _Appears in:_ | --- | --- | | `namespace` _[AppNamespaceSpec](#appnamespacespec)_ | {{< unsafe >}}Namespace describe the desired state of the namespace where application will be created.{{< /unsafe >}} | | `applicationRef` _[ApplicationRef](#applicationref)_ | {{< unsafe >}}ApplicationRef is a reference to identify which Application should be deployed{{< /unsafe >}} | -| `values` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#rawextension-runtime-pkg)_ | {{< unsafe >}}Values specify values overrides that are passed to helm templating. Comments are not preserved.
Deprecated: Use ValuesBlock instead. This field was deprecated in KKP 2.25 and will be removed in KKP 2.27+.{{< /unsafe >}} | +| `values` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#rawextension-runtime-pkg)_ | {{< unsafe >}}Values specify values overrides that are passed to helm templating. Comments are not preserved.
Deprecated: Use ValuesBlock instead. This field was deprecated in KKP 2.25 and will be removed in KKP 2.27+.{{< /unsafe >}} | | `valuesBlock` _string_ | {{< unsafe >}}ValuesBlock specifies values overrides that are passed to helm templating. Comments are preserved.{{< /unsafe >}} | -| `reconciliationInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#duration-v1-meta)_ | {{< unsafe >}}ReconciliationInterval is the interval at which to force the reconciliation of the application. By default, Applications are only reconciled
on changes on spec, annotations, or the parent application definition. Meaning that if the user manually deletes the workload
deployed by the application, nothing will happen until the application CR change.

Setting a value greater than zero force reconciliation even if no changes occurred on application CR.
Setting a value equal to 0 disables the force reconciliation of the application (default behavior).
Setting this too low can cause a heavy load and may disrupt your application workload depending on the template method.{{< /unsafe >}} | +| `reconciliationInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}ReconciliationInterval is the interval at which to force the reconciliation of the application. By default, Applications are only reconciled
on changes on spec, annotations, or the parent application definition. Meaning that if the user manually deletes the workload
deployed by the application, nothing will happen until the application CR change.
Setting a value greater than zero force reconciliation even if no changes occurred on application CR.
Setting a value equal to 0 disables the force reconciliation of the application (default behavior).
Setting this too low can cause a heavy load and may disrupt your application workload depending on the template method.{{< /unsafe >}} | | `deployOptions` _[DeployOptions](#deployoptions)_ | {{< unsafe >}}DeployOptions holds the settings specific to the templating method used to deploy the application.{{< /unsafe >}} | @@ -394,10 +394,10 @@ _Appears in:_ | Field | Description | | --- | --- | | `method` _[GitAuthMethod](#gitauthmethod)_ | {{< unsafe >}}Authentication method. Either password or token or ssh-key.
If method is password then username and password must be defined.
If method is token then token must be defined.
If method is ssh-key then ssh-key must be defined.{{< /unsafe >}} | -| `username` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Username holds the ref and key in the secret for the username credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | -| `password` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Password holds the ref and key in the secret for the Password credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | -| `token` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Token holds the ref and key in the secret for the token credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | -| `sshKey` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}SSHKey holds the ref and key in the secret for the SshKey credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | +| `username` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Username holds the ref and key in the secret for the username credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | +| `password` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Password holds the ref and key in the secret for the Password credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | +| `token` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Token holds the ref and key in the secret for the token credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | +| `sshKey` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}SSHKey holds the ref and key in the secret for the SshKey credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git".{{< /unsafe >}} | [Back to top](#top) @@ -416,7 +416,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `branch` _string_ | {{< unsafe >}}Branch to checkout. Only the last commit of the branch will be checkout in order to reduce the amount of data to download.{{< /unsafe >}} | -| `commit` _string_ | {{< unsafe >}}Commit SHA in a Branch to checkout.

It must be used in conjunction with branch field.{{< /unsafe >}} | +| `commit` _string_ | {{< unsafe >}}Commit SHA in a Branch to checkout.
It must be used in conjunction with branch field.{{< /unsafe >}} | | `tag` _string_ | {{< unsafe >}}Tag to check out.
It can not be used in conjunction with commit or branch.{{< /unsafe >}} | @@ -457,9 +457,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `username` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Username holds the ref and key in the secret for the username credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | -| `password` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Password holds the ref and key in the secret for the password credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | -| `registryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}RegistryConfigFile holds the ref and key in the secret for the registry credential file.
The value is dockercfg file that follows the same format rules as ~/.docker/config.json.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | +| `username` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Username holds the ref and key in the secret for the username credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | +| `password` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Password holds the ref and key in the secret for the password credential.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | +| `registryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}RegistryConfigFile holds the ref and key in the secret for the registry credential file.
The value is dockercfg file that follows the same format rules as ~/.docker/config.json.
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm" or "git"{{< /unsafe >}} | [Back to top](#top) @@ -478,7 +478,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `wait` _boolean_ | {{< unsafe >}}Wait corresponds to the --wait flag on Helm cli.
if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as timeout{{< /unsafe >}} | -| `timeout` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#duration-v1-meta)_ | {{< unsafe >}}Timeout corresponds to the --timeout flag on Helm cli.
time to wait for any individual Kubernetes operation.{{< /unsafe >}} | +| `timeout` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}Timeout corresponds to the --timeout flag on Helm cli.
time to wait for any individual Kubernetes operation.{{< /unsafe >}} | | `atomic` _boolean_ | {{< unsafe >}}Atomic corresponds to the --atomic flag on Helm cli.
if set, the installation process deletes the installation on failure; the upgrade process rolls back changes made in case of failed upgrade.{{< /unsafe >}} | | `enableDNS` _boolean_ | {{< unsafe >}}EnableDNS corresponds to the --enable-dns flag on Helm cli.
enable DNS lookups when rendering templates.
if you enable this flag, you have to verify that helm template function 'getHostByName' is not being used in a chart to disclose any information you do not want to be passed to DNS servers.(c.f. CVE-2023-25165){{< /unsafe >}} | @@ -519,9 +519,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `firstDeployed` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}FirstDeployed is when the release was first deployed.{{< /unsafe >}} | -| `lastDeployed` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}LastDeployed is when the release was last deployed.{{< /unsafe >}} | -| `deleted` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Deleted tracks when this object was deleted.{{< /unsafe >}} | +| `firstDeployed` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}FirstDeployed is when the release was first deployed.{{< /unsafe >}} | +| `lastDeployed` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}LastDeployed is when the release was last deployed.{{< /unsafe >}} | +| `deleted` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Deleted tracks when this object was deleted.{{< /unsafe >}} | | `description` _string_ | {{< unsafe >}}Description is human-friendly "log entry" about this release.{{< /unsafe >}} | | `status` _[HelmReleaseStatus](#helmreleasestatus)_ | {{< unsafe >}}Status is the current state of the release.{{< /unsafe >}} | | `notes` _string_ | {{< unsafe >}}Notes is the rendered templates/NOTES.txt if available.{{< /unsafe >}} | @@ -556,7 +556,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `url` _string_ | {{< unsafe >}}URL of the Helm repository the following schemes are supported:

* http://example.com/myrepo (HTTP)
* https://example.com/myrepo (HTTPS)
* oci://example.com:5000/myrepo (OCI, HTTPS by default, use plainHTTP to enable unencrypted HTTP){{< /unsafe >}} | +| `url` _string_ | {{< unsafe >}}URL of the Helm repository the following schemes are supported:
* http://example.com/myrepo (HTTP)
* https://example.com/myrepo (HTTPS)
* oci://example.com:5000/myrepo (OCI, HTTPS by default, use plainHTTP to enable unencrypted HTTP){{< /unsafe >}} | | `insecure` _boolean_ | {{< unsafe >}}Insecure disables certificate validation when using an HTTPS registry. This setting has no
effect when using a plaintext connection.{{< /unsafe >}} | | `plainHTTP` _boolean_ | {{< unsafe >}}PlainHTTP will enable HTTP-only (i.e. unencrypted) traffic for oci:// URLs. By default HTTPS
is used when communicating with an oci:// URL.{{< /unsafe >}} | | `chartName` _string_ | {{< unsafe >}}Name of the Chart.{{< /unsafe >}} | @@ -684,8 +684,8 @@ _Appears in:_ | Field | Description | | --- | --- | | `replicas` _integer_ | {{< unsafe >}}{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | | `endpointReconcilingDisabled` _boolean_ | {{< unsafe >}}{{< /unsafe >}} | | `nodePortRange` _string_ | {{< unsafe >}}{{< /unsafe >}} | @@ -769,7 +769,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Addon` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[AddonSpec](#addonspec)_ | {{< unsafe >}}Spec describes the desired addon state.{{< /unsafe >}} | | `status` _[AddonStatus](#addonstatus)_ | {{< unsafe >}}Status contains information about the reconciliation status.{{< /unsafe >}} | @@ -789,9 +789,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | -| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | -| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transitioned from one status to another.{{< /unsafe >}} | +| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | +| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | +| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transitioned from one status to another.{{< /unsafe >}} | | `kubermaticVersion` _string_ | {{< unsafe >}}KubermaticVersion is the version of KKP that last _successfully_ reconciled this
addon.{{< /unsafe >}} | @@ -826,7 +826,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AddonConfig` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[AddonConfigSpec](#addonconfigspec)_ | {{< unsafe >}}Spec describes the configuration of an addon.{{< /unsafe >}} | @@ -846,7 +846,7 @@ AddonConfigList is a list of addon configs. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AddonConfigList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[AddonConfig](#addonconfig) array_ | {{< unsafe >}}Items refers to the list of AddonConfig objects.{{< /unsafe >}} | @@ -910,7 +910,7 @@ AddonList is a list of addons. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AddonList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Addon](#addon) array_ | {{< unsafe >}}Items refers to the list of the cluster addons.{{< /unsafe >}} | @@ -941,8 +941,8 @@ _Appears in:_ | Field | Description | | --- | --- | | `name` _string_ | {{< unsafe >}}Name defines the name of the addon to install{{< /unsafe >}} | -| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster the addon should be installed in{{< /unsafe >}} | -| `variables` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#rawextension-runtime-pkg)_ | {{< unsafe >}}Variables is free form data to use for parsing the manifest templates{{< /unsafe >}} | +| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster the addon should be installed in{{< /unsafe >}} | +| `variables` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#rawextension-runtime-pkg)_ | {{< unsafe >}}Variables is free form data to use for parsing the manifest templates{{< /unsafe >}} | | `requiredResourceTypes` _[GroupVersionKind](#groupversionkind) array_ | {{< unsafe >}}RequiredResourceTypes allows to indicate that this addon needs some resource type before it
can be installed. This can be used to indicate that a specific CRD and/or extension
apiserver must be installed before this addon can be installed. The addon will not
be installed until that resource is served.{{< /unsafe >}} | | `isDefault` _boolean_ | {{< unsafe >}}IsDefault indicates whether the addon is installed because it was configured in
the default addon section in the KubermaticConfiguration. User-installed addons
must not set this field to true, as extra default Addon objects (that are not in
the KubermaticConfiguration) will be garbage-collected.{{< /unsafe >}} | @@ -983,7 +983,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AdmissionPlugin` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[AdmissionPluginSpec](#admissionpluginspec)_ | {{< unsafe >}}Spec describes an admission plugin name and in which k8s version it is supported.{{< /unsafe >}} | @@ -1003,7 +1003,7 @@ AdmissionPluginList is the type representing a AdmissionPluginList. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AdmissionPluginList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[AdmissionPlugin](#admissionplugin) array_ | {{< unsafe >}}Items refers to the list of Admission Plugins{{< /unsafe >}} | @@ -1043,7 +1043,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Alertmanager` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[AlertmanagerSpec](#alertmanagerspec)_ | {{< unsafe >}}Spec describes the configuration of the Alertmanager.{{< /unsafe >}} | | `status` _[AlertmanagerStatus](#alertmanagerstatus)_ | {{< unsafe >}}Status stores status information about the Alertmanager.{{< /unsafe >}} | @@ -1063,8 +1063,8 @@ _Appears in:_ | Field | Description | | --- | --- | -| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}LastUpdated stores the last successful time when the configuration was successfully applied{{< /unsafe >}} | -| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of whether the configuration was applied, one of True, False{{< /unsafe >}} | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}LastUpdated stores the last successful time when the configuration was successfully applied{{< /unsafe >}} | +| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of whether the configuration was applied, one of True, False{{< /unsafe >}} | | `errorMessage` _string_ | {{< unsafe >}}ErrorMessage contains a default error message in case the configuration could not be applied.
Will be reset if the error was resolved and condition becomes True{{< /unsafe >}} | @@ -1084,7 +1084,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AlertmanagerList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Alertmanager](#alertmanager) array_ | {{< unsafe >}}Items refers to the list of Alertmanager objects.{{< /unsafe >}} | @@ -1103,7 +1103,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `configSecret` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#localobjectreference-v1-core)_ | {{< unsafe >}}ConfigSecret refers to the Secret in the same namespace as the Alertmanager object,
which contains configuration for this Alertmanager.{{< /unsafe >}} | +| `configSecret` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#localobjectreference-v1-core)_ | {{< unsafe >}}ConfigSecret refers to the Secret in the same namespace as the Alertmanager object,
which contains configuration for this Alertmanager.{{< /unsafe >}} | [Back to top](#top) @@ -1183,7 +1183,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AllowedRegistry` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[AllowedRegistrySpec](#allowedregistryspec)_ | {{< unsafe >}}Spec describes the desired state for an allowed registry.{{< /unsafe >}} | @@ -1203,7 +1203,7 @@ AllowedRegistryList specifies a list of allowed registries. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `AllowedRegistryList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[AllowedRegistry](#allowedregistry) array_ | {{< unsafe >}}Items refers to the list of the allowed registries.{{< /unsafe >}} | @@ -1301,8 +1301,8 @@ _Appears in:_ | --- | --- | | `message` _string_ | {{< unsafe >}}The message content of the announcement.{{< /unsafe >}} | | `isActive` _boolean_ | {{< unsafe >}}Indicates whether the announcement is active.{{< /unsafe >}} | -| `createdAt` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Timestamp when the announcement was created.{{< /unsafe >}} | -| `expires` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Expiration date for the announcement.{{< /unsafe >}} | +| `createdAt` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Timestamp when the announcement was created.{{< /unsafe >}} | +| `expires` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Expiration date for the announcement.{{< /unsafe >}} | [Back to top](#top) @@ -1321,6 +1321,43 @@ _Appears in:_ +### ApplicationCatalogLimit + + + + + +_Appears in:_ +- [CatalogManagerConfiguration](#catalogmanagerconfiguration) + +| Field | Description | +| --- | --- | +| `metadataSelector` _[ApplicationDefinitionMetadataSelector](#applicationdefinitionmetadataselector)_ | {{< unsafe >}}MetadataSelector defines criteria for selecting ApplicationDefinitions based on their metadata attributes.
For example, to select ApplicationDefinitions with a specific support tier (e.g., 'gold'),
specify that tier here.
When multiple tiers are specified, the Application Catalog Manager uses additive logic
to determine which ApplicationDefinitions to retrieve from the OCI registry.{{< /unsafe >}} | +| `nameSelector` _string array_ | {{< unsafe >}}NameSelector defines criteria for selecting ApplicationDefinitions by name.
Each name must correspond to an ApplicationDefinition's `metadata.name` field.
When multiple names are specified, the Application Catalog Manager uses additive logic
to retrieve all matching ApplicationDefinitions from the OCI registry.
Example: Specifying ['nginx', 'cert-manager'] will retrieve only those specific ApplicationDefinitions.{{< /unsafe >}} | + + +[Back to top](#top) + + + +### ApplicationDefinitionMetadataSelector + + + + + +_Appears in:_ +- [ApplicationCatalogLimit](#applicationcataloglimit) + +| Field | Description | +| --- | --- | +| `tiers` _string array_ | {{< unsafe >}}Tiers specifies the support tiers to filter ApplicationDefinitions.
ApplicationDefinitions matching any of the specified tiers will be selected.{{< /unsafe >}} | + + +[Back to top](#top) + + + ### ApplicationDefinitionsConfiguration @@ -1334,6 +1371,7 @@ _Appears in:_ | --- | --- | | `systemApplications` _[SystemApplicationsSettings](#systemapplicationssettings)_ | {{< unsafe >}}SystemApplications contains configuration for system applications.{{< /unsafe >}} | | `defaultApplicationCatalog` _[DefaultApplicationCatalogSettings](#defaultapplicationcatalogsettings)_ | {{< unsafe >}}DefaultApplicationCatalog contains configuration for the default application catalog.{{< /unsafe >}} | +| `catalogManager` _[CatalogManagerConfiguration](#catalogmanagerconfiguration)_ | {{< unsafe >}}CatalogManager configures the Application Catalog CatalogManager, which is responsible for managing ApplicationDefinitions
in the cluster from specified OCI registries.
Note: The Application Catalog CatalogManager requires its feature flag to be enabled as it is currently in beta.{{< /unsafe >}} | [Back to top](#top) @@ -1435,9 +1473,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | | `config` _[AuditSidecarConfiguration](#auditsidecarconfiguration)_ | {{< unsafe >}}{{< /unsafe >}} | -| `extraEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#envvar-v1-core) array_ | {{< unsafe >}}ExtraEnvs are the additional environment variables that can be set for the audit logging sidecar.
Additional environment variables can be set and passed to the AuditSidecarConfiguration field
to allow passing variables to the fluent-bit configuration.
Only, `Value` field is supported for the environment variables; `ValueFrom` field is not supported.
By default, `CLUSTER_ID` is set as an environment variable in the audit-logging sidecar.{{< /unsafe >}} | +| `extraEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#envvar-v1-core) array_ | {{< unsafe >}}ExtraEnvs are the additional environment variables that can be set for the audit logging sidecar.
Additional environment variables can be set and passed to the AuditSidecarConfiguration field
to allow passing variables to the fluent-bit configuration.
Only, `Value` field is supported for the environment variables; `ValueFrom` field is not supported.
By default, `CLUSTER_ID` is set as an environment variable in the audit-logging sidecar.{{< /unsafe >}} | [Back to top](#top) @@ -1456,7 +1494,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `auditWebhookConfig` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretreference-v1-core)_ | {{< unsafe >}}Required : AuditWebhookConfig contains reference to secret holding the audit webhook config file{{< /unsafe >}} | +| `auditWebhookConfig` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretreference-v1-core)_ | {{< unsafe >}}Required : AuditWebhookConfig contains reference to secret holding the audit webhook config file{{< /unsafe >}} | | `auditWebhookInitialBackoff` _string_ | {{< unsafe >}}{{< /unsafe >}} | @@ -1518,6 +1556,8 @@ _Appears in:_ | `secretName` _string_ | {{< unsafe >}}The secret containing the webhook configuration{{< /unsafe >}} | | `secretKey` _string_ | {{< unsafe >}}The secret Key inside the secret{{< /unsafe >}} | | `webhookVersion` _string_ | {{< unsafe >}}the Webhook Version, by default "v1"{{< /unsafe >}} | +| `cacheAuthorizedTTL` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}Optional: The duration to cache authorization decisions for successful authorization webhook calls.{{< /unsafe >}} | +| `cacheUnauthorizedTTL` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}Optional: The duration to cache authorization decisions for failed authorization webhook calls.{{< /unsafe >}} | [Back to top](#top) @@ -1599,7 +1639,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `backupStorageLocation` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#localobjectreference-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | +| `backupStorageLocation` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#localobjectreference-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | [Back to top](#top) @@ -1619,7 +1659,7 @@ _Appears in:_ | --- | --- | | `endpoint` _string_ | {{< unsafe >}}Endpoint is the API endpoint to use for backup and restore.{{< /unsafe >}} | | `bucketName` _string_ | {{< unsafe >}}BucketName is the bucket name to use for backup and restore.{{< /unsafe >}} | -| `credentials` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretreference-v1-core)_ | {{< unsafe >}}Credentials hold the ref to the secret with backup credentials{{< /unsafe >}} | +| `credentials` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretreference-v1-core)_ | {{< unsafe >}}Credentials hold the ref to the secret with backup credentials{{< /unsafe >}} | [Back to top](#top) @@ -1637,16 +1677,16 @@ _Appears in:_ | Field | Description | | --- | --- | -| `scheduledTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}ScheduledTime will always be set when the BackupStatus is created, so it'll never be nil{{< /unsafe >}} | +| `scheduledTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}ScheduledTime will always be set when the BackupStatus is created, so it'll never be nil{{< /unsafe >}} | | `backupName` _string_ | {{< unsafe >}}{{< /unsafe >}} | | `jobName` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| 
`backupStartTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | -| `backupFinishedTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `backupStartTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `backupFinishedTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | | `backupPhase` _[BackupStatusPhase](#backupstatusphase)_ | {{< unsafe >}}{{< /unsafe >}} | | `backupMessage` _string_ | {{< unsafe >}}{{< /unsafe >}} | | `deleteJobName` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| `deleteStartTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | -| `deleteFinishedTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `deleteStartTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `deleteFinishedTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | | `deletePhase` _[BackupStatusPhase](#backupstatusphase)_ | {{< unsafe >}}{{< /unsafe >}} | | `deleteMessage` _string_ | {{< unsafe >}}{{< /unsafe >}} | @@ -1761,6 +1801,48 @@ _Appears in:_ +### CatalogManagerConfiguration + + + + + +_Appears in:_ +- [ApplicationDefinitionsConfiguration](#applicationdefinitionsconfiguration) + +| Field | Description | +| --- | --- | +| `logLevel` _string_ | {{< unsafe >}}LogLevel specifies the logging verbosity level for the Application Catalog Manager.{{< /unsafe >}} | +| `registrySettings` _[RegistrySettings](#registrysettings)_ | {{< unsafe >}}RegistrySettings configures the OCI registry from which the Application Catalog Manager
retrieves ApplicationDefinition manifests.{{< /unsafe >}} | +| `limit` _[ApplicationCatalogLimit](#applicationcataloglimit)_ | {{< unsafe >}}Limit defines filtering criteria for ApplicationDefinitions to be reconciled from the OCI registry.
When undefined, all ApplicationDefinitions from the registry are pulled and reconciled.
When defined, only ApplicationDefinitions matching the specified criteria are processed.{{< /unsafe >}} | +| `image` _[CatalogManagerImageConfiguration](#catalogmanagerimageconfiguration)_ | {{< unsafe >}}Image configures the container image for the application-catalog manager.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `reconciliationInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}ReconciliationInterval is the interval at which application-catalog manager reconcile ApplicationDefinitions.
By default, ApplicationsDefinitions are reconciled at every 10 minutes.
Setting a value equal to 0 disables the force reconciliation of the default Application Catalog.{{< /unsafe >}} | + + +[Back to top](#top) + + + +### CatalogManagerImageConfiguration + + + +CatalogManagerImageConfiguration configures the container image settings. + +_Appears in:_ +- [CatalogManagerConfiguration](#catalogmanagerconfiguration) + +| Field | Description | +| --- | --- | +| `repository` _string_ | {{< unsafe >}}Repository is used to override the application-catalog manager image repository.
The default value is "quay.io/kubermatic/application-catalog-manager"{{< /unsafe >}} | +| `tag` _string_ | {{< unsafe >}}Tag is used to override the application-catalog manager image tag.{{< /unsafe >}} | + + +[Back to top](#top) + + + ### CleanupOptions @@ -1800,7 +1882,6 @@ _Appears in:_ | `aws` _[AWSCloudSpec](#awscloudspec)_ | {{< unsafe >}}AWS defines the configuration data of the Amazon Web Services(AWS) cloud provider.{{< /unsafe >}} | | `azure` _[AzureCloudSpec](#azurecloudspec)_ | {{< unsafe >}}Azure defines the configuration data of the Microsoft Azure cloud.{{< /unsafe >}} | | `openstack` _[OpenstackCloudSpec](#openstackcloudspec)_ | {{< unsafe >}}Openstack defines the configuration data of an OpenStack cloud.{{< /unsafe >}} | -| `packet` _[PacketCloudSpec](#packetcloudspec)_ | {{< unsafe >}}Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.
This provider is no longer supported. Migrate your configurations away from "packet" immediately.
Packet defines the configuration data of a Packet / Equinix Metal cloud.{{< /unsafe >}} | | `hetzner` _[HetznerCloudSpec](#hetznercloudspec)_ | {{< unsafe >}}Hetzner defines the configuration data of the Hetzner cloud.{{< /unsafe >}} | | `vsphere` _[VSphereCloudSpec](#vspherecloudspec)_ | {{< unsafe >}}VSphere defines the configuration data of the vSphere.{{< /unsafe >}} | | `gcp` _[GCPCloudSpec](#gcpcloudspec)_ | {{< unsafe >}}GCP defines the configuration data of the Google Cloud Platform(GCP).{{< /unsafe >}} | @@ -1831,7 +1912,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Cluster` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ClusterSpec](#clusterspec)_ | {{< unsafe >}}Spec describes the desired cluster state.{{< /unsafe >}} | | `status` _[ClusterStatus](#clusterstatus)_ | {{< unsafe >}}Status contains reconciliation information for the cluster.{{< /unsafe >}} | @@ -1875,7 +1956,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `defaultChecksumAlgorithm` _string_ | {{< unsafe >}}DefaultChecksumAlgorithm allows setting a default checksum algorithm used by Velero for uploading objects to S3.

Optional{{< /unsafe >}} | +| `defaultChecksumAlgorithm` _string_ | {{< unsafe >}}DefaultChecksumAlgorithm allows setting a default checksum algorithm used by Velero for uploading objects to S3.
Optional{{< /unsafe >}} | [Back to top](#top) @@ -1895,7 +1976,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterBackupStorageLocation` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[BackupStorageLocationSpec](#backupstoragelocationspec)_ | {{< unsafe >}}Spec is a Velero BSL spec{{< /unsafe >}} | | `status` _[BackupStorageLocationStatus](#backupstoragelocationstatus)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -1916,7 +1997,7 @@ ClusterBackupStorageLocationList is a list of ClusterBackupStorageLocations. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterBackupStorageLocationList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ClusterBackupStorageLocation](#clusterbackupstoragelocation) array_ | {{< unsafe >}}Items is a list of EtcdBackupConfig objects.{{< /unsafe >}} | @@ -1935,10 +2016,10 @@ _Appears in:_ | Field | Description | | --- | --- | -| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | +| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | | `kubermaticVersion` _string_ | {{< unsafe >}}KubermaticVersion current kubermatic version.{{< /unsafe >}} | -| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | -| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | +| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | +| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | | `reason` _string_ | {{< unsafe >}}(brief) reason for the condition's last transition.{{< /unsafe >}} | | `message` _string_ | {{< unsafe >}}Human readable message indicating details about last transition.{{< /unsafe >}} | @@ -2003,7 +2084,7 @@ ClusterList specifies a list of user clusters. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Cluster](#cluster) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -2068,7 +2149,7 @@ _Appears in:_ | `version` _[Semver](#semver)_ | {{< unsafe >}}Version defines the wanted version of the control plane.{{< /unsafe >}} | | `cloud` _[CloudSpec](#cloudspec)_ | {{< unsafe >}}Cloud contains information regarding the cloud provider that
is responsible for hosting the cluster's workload.{{< /unsafe >}} | | `containerRuntime` _string_ | {{< unsafe >}}ContainerRuntime to use, i.e. `docker` or `containerd`. By default `containerd` will be used.{{< /unsafe >}} | -| `imagePullSecret` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretreference-v1-core)_ | {{< unsafe >}}Optional: ImagePullSecret references a secret with container registry credentials. This is passed to the machine-controller which sets the registry credentials on node level.{{< /unsafe >}} | +| `imagePullSecret` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretreference-v1-core)_ | {{< unsafe >}}Optional: ImagePullSecret references a secret with container registry credentials. This is passed to the machine-controller which sets the registry credentials on node level.{{< /unsafe >}} | | `cniPlugin` _[CNIPluginSettings](#cnipluginsettings)_ | {{< unsafe >}}Optional: CNIPlugin refers to the spec of the CNI plugin used by the Cluster.{{< /unsafe >}} | | `clusterNetwork` _[ClusterNetworkingConfig](#clusternetworkingconfig)_ | {{< unsafe >}}Optional: ClusterNetwork specifies the different networking parameters for a cluster.{{< /unsafe >}} | | `machineNetworks` _[MachineNetworkingConfig](#machinenetworkingconfig) array_ | {{< unsafe >}}Optional: MachineNetworks is the list of the networking parameters used for IPAM.{{< /unsafe >}} | @@ -2120,9 +2201,9 @@ _Appears in:_ | Field | Description | | --- | --- | | `address` _[ClusterAddress](#clusteraddress)_ | {{< unsafe >}}Address contains the IPs/URLs to access the cluster control plane.{{< /unsafe >}} | -| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Deprecated: LastUpdated contains the timestamp at which the cluster was last modified.
It is kept only for KKP 2.20 release to not break the backwards-compatibility and not being set for KKP higher releases.{{< /unsafe >}} | +| `lastUpdated` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Deprecated: LastUpdated contains the timestamp at which the cluster was last modified.
It is kept only for the KKP 2.20 release to avoid breaking backwards compatibility and is not set in later KKP releases.{{< /unsafe >}} |
| `extendedHealth` _[ExtendedClusterHealth](#extendedclusterhealth)_ | {{< unsafe >}}ExtendedHealth exposes information about the current health state.
Extends standard health status for new states.{{< /unsafe >}} | -| `lastProviderReconciliation` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}LastProviderReconciliation is the time when the cloud provider resources
were last fully reconciled (during normal cluster reconciliation, KKP does
not re-check things like security groups, networks etc.).{{< /unsafe >}} | +| `lastProviderReconciliation` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}LastProviderReconciliation is the time when the cloud provider resources
were last fully reconciled (during normal cluster reconciliation, KKP does
not re-check things like security groups, networks etc.).{{< /unsafe >}} | | `namespaceName` _string_ | {{< unsafe >}}NamespaceName defines the namespace the control plane of this cluster is deployed in.{{< /unsafe >}} | | `versions` _[ClusterVersionsStatus](#clusterversionsstatus)_ | {{< unsafe >}}Versions contains information regarding the current and desired versions
of the cluster control plane and worker nodes.{{< /unsafe >}} | | `userName` _string_ | {{< unsafe >}}Deprecated: UserName contains the name of the owner of this cluster.
This field is not actively used and will be removed in the future.{{< /unsafe >}} | @@ -2164,7 +2245,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterTemplate` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `clusterLabels` _object (keys:string, values:string)_ | {{< unsafe >}}{{< /unsafe >}} | | `inheritedClusterLabels` _object (keys:string, values:string)_ | {{< unsafe >}}{{< /unsafe >}} | | `credential` _string_ | {{< unsafe >}}{{< /unsafe >}} | @@ -2189,7 +2270,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterTemplateInstance` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ClusterTemplateInstanceSpec](#clustertemplateinstancespec)_ | {{< unsafe >}}Spec specifies the data for cluster instances.{{< /unsafe >}} | @@ -2209,7 +2290,7 @@ ClusterTemplateInstanceList specifies a list of cluster template instances. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterTemplateInstanceList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ClusterTemplateInstance](#clustertemplateinstance) array_ | {{< unsafe >}}Items refers to the list of ClusterTemplateInstance objects.{{< /unsafe >}} | @@ -2250,7 +2331,7 @@ ClusterTemplateList specifies a list of cluster templates. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ClusterTemplateList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ClusterTemplate](#clustertemplate) array_ | {{< unsafe >}}Items refers to the list of the ClusterTemplate objects.{{< /unsafe >}} | @@ -2353,7 +2434,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Constraint` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| | `spec` _[ConstraintSpec](#constraintspec)_ | {{< unsafe >}}Spec describes the desired state for the constraint.{{< /unsafe >}} | @@ -2373,7 +2454,7 @@ ConstraintList specifies a list of constraints. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ConstraintList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Constraint](#constraint) array_ | {{< unsafe >}}Items is a list of Gatekeeper Constraints{{< /unsafe >}} | @@ -2393,7 +2474,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `providers` _string array_ | {{< unsafe >}}Providers is a list of cloud providers to which the Constraint applies to. Empty means all providers are selected.{{< /unsafe >}} | -| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector selects the Clusters to which the Constraint applies based on their labels{{< /unsafe >}} | +| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector selects the Clusters to which the Constraint applies based on their labels{{< /unsafe >}} | [Back to top](#top) @@ -2414,7 +2495,7 @@ _Appears in:_ | `constraintType` _string_ | {{< unsafe >}}ConstraintType specifies the type of gatekeeper constraint that the constraint applies to{{< /unsafe >}} | | `disabled` _boolean_ | {{< unsafe >}}Disabled is the flag for disabling OPA constraints{{< /unsafe >}} | | `match` _[Match](#match)_ | {{< unsafe >}}Match contains the constraint to resource matching data{{< /unsafe >}} | -| `parameters` _[Parameters](#parameters)_ | {{< unsafe >}}Parameters specifies the parameters used by the constraint template REGO.
It supports both the legacy rawJSON parameters, in which all the parameters are set in a JSON string, and regular
parameters like in Gatekeeper Constraints.
If rawJSON is set, during constraint syncing to the user cluster, the other parameters are ignored
Example with rawJSON parameters:

parameters:
rawJSON: '\{"labels":["gatekeeper"]\}'

And with regular parameters:

parameters:
labels: ["gatekeeper"]{{< /unsafe >}} | +| `parameters` _[Parameters](#parameters)_ | {{< unsafe >}}Parameters specifies the parameters used by the constraint template REGO.
It supports both the legacy rawJSON parameters, in which all the parameters are set in a JSON string, and regular
parameters like in Gatekeeper Constraints.
If rawJSON is set, during constraint syncing to the user cluster, the other parameters are ignored.
Example with rawJSON parameters:
parameters:
rawJSON: '\{"labels":["gatekeeper"]\}'
And with regular parameters:
parameters:
labels: ["gatekeeper"]{{< /unsafe >}} | | `selector` _[ConstraintSelector](#constraintselector)_ | {{< unsafe >}}Selector specifies the cluster selection filters{{< /unsafe >}} | | `enforcementAction` _string_ | {{< unsafe >}}EnforcementAction defines the action to take in response to a constraint being violated.
By default, EnforcementAction is set to deny as the default behavior is to deny admission requests with any violation.{{< /unsafe >}} | @@ -2436,7 +2517,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ConstraintTemplate` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ConstraintTemplateSpec](#constrainttemplatespec)_ | {{< unsafe >}}Spec specifies the gatekeeper constraint template and KKP related spec.{{< /unsafe >}} | @@ -2456,7 +2537,7 @@ ConstraintTemplateList specifies a list of constraint templates. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ConstraintTemplateList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ConstraintTemplate](#constrainttemplate) array_ | {{< unsafe >}}Items refers to the list of ConstraintTemplate objects.{{< /unsafe >}} | @@ -2476,7 +2557,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `providers` _string array_ | {{< unsafe >}}Providers is a list of cloud providers to which the Constraint Template applies to. Empty means all providers are selected.{{< /unsafe >}} | -| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector selects the Clusters to which the Constraint Template applies based on their labels{{< /unsafe >}} | +| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector selects the Clusters to which the Constraint Template applies based on their labels{{< /unsafe >}} | [Back to top](#top) @@ -2538,6 +2619,7 @@ _Appears in:_ | `registryMirrors` _string array_ | {{< unsafe >}}Optional: These image registries will be configured as registry mirrors
on the container runtime.{{< /unsafe >}} | | `pauseImage` _string_ | {{< unsafe >}}Optional: Translates to --pod-infra-container-image on the kubelet.
If not set, the kubelet will default it.{{< /unsafe >}} | | `containerdRegistryMirrors` _[ContainerRuntimeContainerd](#containerruntimecontainerd)_ | {{< unsafe >}}Optional: ContainerdRegistryMirrors configure registry mirrors endpoints. Can be used multiple times to specify multiple mirrors.{{< /unsafe >}} | +| `enableNonRootDeviceOwnership` _boolean_ | {{< unsafe >}}Optional: EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime.{{< /unsafe >}} | [Back to top](#top) @@ -2575,8 +2657,8 @@ _Appears in:_ | Field | Description | | --- | --- | | `replicas` _integer_ | {{< unsafe >}}{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | | `leaderElection` _[LeaderElectionSettings](#leaderelectionsettings)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -2677,7 +2759,6 @@ _Appears in:_ | `aws` _[DatacenterSpecAWS](#datacenterspecaws)_ | {{< unsafe >}}AWS configures an Amazon Web Services (AWS) datacenter.{{< /unsafe >}} | | `azure` _[DatacenterSpecAzure](#datacenterspecazure)_ | {{< unsafe >}}Azure configures an Azure datacenter.{{< /unsafe >}} | | `openstack` _[DatacenterSpecOpenstack](#datacenterspecopenstack)_ | {{< unsafe >}}Openstack configures an Openstack datacenter.{{< /unsafe >}} | -| `packet` _[DatacenterSpecPacket](#datacenterspecpacket)_ | {{< unsafe >}}Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.
This provider is no longer supported. Migrate your configurations away from "packet" immediately.
Packet configures an Equinix Metal datacenter.{{< /unsafe >}} | | `hetzner` _[DatacenterSpecHetzner](#datacenterspechetzner)_ | {{< unsafe >}}Hetzner configures a Hetzner datacenter.{{< /unsafe >}} | | `vsphere` _[DatacenterSpecVSphere](#datacenterspecvsphere)_ | {{< unsafe >}}VSphere configures a VMware vSphere datacenter.{{< /unsafe >}} | | `vmwareclouddirector` _[DatacenterSpecVMwareCloudDirector](#datacenterspecvmwareclouddirector)_ | {{< unsafe >}}VMwareCloudDirector configures a VMware Cloud Director datacenter.{{< /unsafe >}} | @@ -2690,12 +2771,12 @@ _Appears in:_ | `enforceAuditLogging` _boolean_ | {{< unsafe >}}Optional: EnforceAuditLogging enforces audit logging on every cluster within the DC,
ignoring cluster-specific settings.{{< /unsafe >}} | | `enforcedAuditWebhookSettings` _[AuditWebhookBackendSettings](#auditwebhookbackendsettings)_ | {{< unsafe >}}Optional: EnforcedAuditWebhookSettings allows admins to control webhook backend for audit logs of all the clusters within the DC,
ignoring cluster-specific settings.{{< /unsafe >}} | | `enforcePodSecurityPolicy` _boolean_ | {{< unsafe >}}Optional: EnforcePodSecurityPolicy enforces pod security policy plugin on every clusters within the DC,
ignoring cluster-specific settings.{{< /unsafe >}} | -| `providerReconciliationInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#duration-v1-meta)_ | {{< unsafe >}}Optional: ProviderReconciliationInterval is the time that must have passed since a
Cluster's status.lastProviderReconciliation to make the cluster controller
perform an in-depth provider reconciliation, where for example missing security
groups will be reconciled.
Setting this too low can cause rate limits by the cloud provider, setting this
too high means that *if* a resource at a cloud provider is removed/changed outside
of KKP, it will take this long to fix it.{{< /unsafe >}} | +| `providerReconciliationInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}Optional: ProviderReconciliationInterval is the time that must have passed since a
Cluster's status.lastProviderReconciliation to make the cluster controller
perform an in-depth provider reconciliation, where for example missing security
groups will be reconciled.
Setting this too low can cause rate limiting by the cloud provider; setting this
too high means that *if* a resource at a cloud provider is removed/changed outside
of KKP, it will take this long to fix it.{{< /unsafe >}} |
| `operatingSystemProfiles` _[OperatingSystemProfileList](#operatingsystemprofilelist)_ | {{< unsafe >}}Optional: DefaultOperatingSystemProfiles specifies the OperatingSystemProfiles to use for each supported operating system.{{< /unsafe >}} |
| `machineFlavorFilter` _[MachineFlavorFilter](#machineflavorfilter)_ | {{< unsafe >}}Optional: MachineFlavorFilter is used to filter out allowed machine flavors based on the specified resource limits like CPU, Memory, and GPU etc.{{< /unsafe >}} |
| `disableCsiDriver` _boolean_ | {{< unsafe >}}Optional: DisableCSIDriver disables the installation of CSI driver on every clusters within the DC
If true it can't be over-written in the cluster configuration{{< /unsafe >}} | | `kubelb` _[KubeLBDatacenterSettings](#kubelbdatacentersettings)_ | {{< unsafe >}}Optional: KubeLB holds the configuration for the kubeLB at the data center level.
Only available in Enterprise Edition.{{< /unsafe >}} | -| `apiServerServiceType` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#servicetype-v1-core)_ | {{< unsafe >}}APIServerServiceType is the service type used for API Server service `apiserver-external` for the user clusters.
By default, the type of service that will be used is determined by the `ExposeStrategy` used for the cluster.{{< /unsafe >}} | +| `apiServerServiceType` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#servicetype-v1-core)_ | {{< unsafe >}}APIServerServiceType is the service type used for API Server service `apiserver-external` for the user clusters.
By default, the type of service that will be used is determined by the `ExposeStrategy` used for the cluster.{{< /unsafe >}} | [Back to top](#top) @@ -2887,7 +2968,7 @@ _Appears in:_ | --- | --- | | `namespacedMode` _[NamespacedMode](#namespacedmode)_ | {{< unsafe >}}NamespacedMode represents the configuration for enabling the single namespace mode for all user-clusters in the KubeVirt datacenter.{{< /unsafe >}} | | `dnsPolicy` _string_ | {{< unsafe >}}DNSPolicy represents the dns policy for the pod. Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst',
'Default' or 'None'. Defaults to "ClusterFirst". DNS parameters given in DNSConfig will be merged with the
policy selected with DNSPolicy.{{< /unsafe >}} | -| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#poddnsconfig-v1-core)_ | {{< unsafe >}}DNSConfig represents the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS
configuration based on DNSPolicy.{{< /unsafe >}} | +| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#poddnsconfig-v1-core)_ | {{< unsafe >}}DNSConfig represents the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS
configuration based on DNSPolicy.{{< /unsafe >}} | | `enableDefaultNetworkPolicies` _boolean_ | {{< unsafe >}}Optional: EnableDefaultNetworkPolicies enables deployment of default network policies like cluster isolation.
Defaults to true.{{< /unsafe >}} | | `enableDedicatedCpus` _boolean_ | {{< unsafe >}}Optional: EnableDedicatedCPUs enables the assignment of dedicated cpus instead of resource requests and limits for a virtual machine.
Defaults to false.
Deprecated: Use .kubevirt.usePodResourcesCPU instead.{{< /unsafe >}} | | `usePodResourcesCPU` _boolean_ | {{< unsafe >}}Optional: UsePodResourcesCPU enables CPU assignment via Kubernetes Pod resource requests/limits.
When false (default), CPUs are assigned via KubeVirt's spec.domain.cpu.{{< /unsafe >}} | @@ -2965,25 +3046,6 @@ _Appears in:_ -### DatacenterSpecPacket - - - -Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. -This provider is no longer supported. Migrate your configurations away from "packet" immediately. -DatacenterSpecPacket describes a Packet datacenter. - -_Appears in:_ -- [DatacenterSpec](#datacenterspec) - -| Field | Description | -| --- | --- | -| `facilities` _string array_ | {{< unsafe >}}The list of enabled facilities, for example "ams1", for a full list of available
facilities see https://metal.equinix.com/developers/docs/locations/facilities/{{< /unsafe >}} | -| `metro` _string_ | {{< unsafe >}}Metros are facilities that are grouped together geographically and share capacity
and networking features, see https://metal.equinix.com/developers/docs/locations/metros/{{< /unsafe >}} | - - -[Back to top](#top) - ### DatacenterSpecTinkerbell @@ -3061,7 +3123,7 @@ _Appears in:_ | `enable` _boolean_ | {{< unsafe >}}Enable is used to enable the installation of application definitions in the master cluster.{{< /unsafe >}} | | `applications` _string array_ | {{< unsafe >}}Applications is a list of application definition names that should be installed in the master cluster.
If not set, all the applications from the catalog are installed.{{< /unsafe >}} | | `helmRepository` _string_ | {{< unsafe >}}HelmRepository specifies OCI repository containing Helm charts of Applications from the default application catalog e.g. oci://localhost:5000/myrepo.{{< /unsafe >}} | -| `helmRegistryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}HelmRegistryConfigFile optionally holds the ref and key in the secret for the OCI registry credential file.
The value is dockercfg file that follows the same format rules as ~/.docker/config.json
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm".{{< /unsafe >}} | +| `helmRegistryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}HelmRegistryConfigFile optionally holds the ref and key in the secret for the OCI registry credential file.
The value is a dockercfg file that follows the same format rules as ~/.docker/config.json
The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm".{{< /unsafe >}} |

[Back to top](#top)

@@ -3102,8 +3164,8 @@

_Appears in:_

| Field | Description |
| --- | --- |
| `replicas` _integer_ | {{< unsafe >}}{{< /unsafe >}} |
-| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} |
-| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} |
+| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} |
+| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} |

[Back to top](#top)

@@ -3238,7 +3300,7 @@

_Appears in:_

| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `EtcdBackupConfig`
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `spec` _[EtcdBackupConfigSpec](#etcdbackupconfigspec)_ | {{< unsafe >}}Spec describes details of an Etcd backup.{{< /unsafe >}} |
| `status` _[EtcdBackupConfigStatus](#etcdbackupconfigstatus)_ | {{< unsafe >}}{{< /unsafe >}} |

@@ -3258,9 +3320,9 @@

_Appears in:_

| Field | Description |
| --- | --- |
-| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} |
-| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} |
-| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} |
+| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} |
+| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} |
+| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transitioned from one status to another.{{< /unsafe >}} |
| `reason` _string_ | {{< unsafe >}}(brief) reason for the condition's last transition.{{< /unsafe >}} |
| `message` _string_ | {{< unsafe >}}Human readable message indicating details about last transition.{{< /unsafe >}} |

@@ -3294,7 +3356,7 @@

EtcdBackupConfigList is a list of etcd backup configs. 
| --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `EtcdBackupConfigList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[EtcdBackupConfig](#etcdbackupconfig) array_ | {{< unsafe >}}Items is a list of EtcdBackupConfig objects.{{< /unsafe >}} | @@ -3314,7 +3376,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `name` _string_ | {{< unsafe >}}Name defines the name of the backup
The name of the backup file in S3 will be -
If a schedule is set (see below), - will be appended.{{< /unsafe >}} | -| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster whose etcd will be backed up{{< /unsafe >}} | +| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster whose etcd will be backed up{{< /unsafe >}} | | `schedule` _string_ | {{< unsafe >}}Schedule is a cron expression defining when to perform
the backup. If not set, the backup is performed exactly
once, immediately.{{< /unsafe >}} | | `keep` _integer_ | {{< unsafe >}}Keep is the number of backups to keep around before deleting the oldest one
If not set, defaults to DefaultKeptBackupsCount. Only used if Schedule is set.{{< /unsafe >}} | | `destination` _string_ | {{< unsafe >}}Destination indicates where the backup will be stored. The destination name must correspond to a destination in
the cluster's Seed.Spec.EtcdBackupRestore.{{< /unsafe >}} | @@ -3357,7 +3419,7 @@ _Appears in:_ | --- | --- | | `destinations` _object (keys:string, values:[BackupDestination](#backupdestination))_ | {{< unsafe >}}Destinations stores all the possible destinations where the backups for the Seed can be stored. If not empty,
it enables automatic backup and restore for the seed.{{< /unsafe >}} | | `defaultDestination` _string_ | {{< unsafe >}}DefaultDestination marks the default destination that will be used for the default etcd backup config which is
created for every user cluster. Has to correspond to a destination in Destinations.
If removed, it removes the related default etcd backup configs.{{< /unsafe >}} | -| `backupInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#duration-v1-meta)_ | {{< unsafe >}}BackupInterval defines the time duration between consecutive etcd backups.
Must be a valid time.Duration string format. Only takes effect when backup scheduling is enabled.{{< /unsafe >}} | +| `backupInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}BackupInterval defines the time duration between consecutive etcd backups.
Must be a valid time.Duration string format. Only takes effect when backup scheduling is enabled.{{< /unsafe >}} | | `backupCount` _integer_ | {{< unsafe >}}BackupCount specifies the maximum number of backups to retain (defaults to DefaultKeptBackupsCount).
Oldest backups are automatically deleted when this limit is exceeded. Only applies when Schedule is configured.{{< /unsafe >}} | @@ -3378,7 +3440,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `EtcdRestore` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[EtcdRestoreSpec](#etcdrestorespec)_ | {{< unsafe >}}Spec describes details of an etcd restore.{{< /unsafe >}} | | `status` _[EtcdRestoreStatus](#etcdrestorestatus)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -3399,7 +3461,7 @@ EtcdRestoreList is a list of etcd restores. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `EtcdRestoreList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[EtcdRestore](#etcdrestore) array_ | {{< unsafe >}}Items is the list of the Etcd restores.{{< /unsafe >}} | @@ -3430,7 +3492,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `name` _string_ | {{< unsafe >}}Name defines the name of the restore
The name of the restore file in S3 will be -
If a schedule is set (see below), - will be appended.{{< /unsafe >}} | -| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster whose etcd will be backed up{{< /unsafe >}} | +| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster whose etcd will be backed up{{< /unsafe >}} | | `backupName` _string_ | {{< unsafe >}}BackupName is the name of the backup to restore from{{< /unsafe >}} | | `backupDownloadCredentialsSecret` _string_ | {{< unsafe >}}BackupDownloadCredentialsSecret is the name of a secret in the cluster-xxx namespace containing
credentials needed to download the backup{{< /unsafe >}} | | `destination` _string_ | {{< unsafe >}}Destination indicates where the backup was stored. The destination name should correspond to a destination in
the cluster's Seed.Spec.EtcdBackupRestore. If empty, it will use the legacy destination configured in Seed.Spec.BackupRestore{{< /unsafe >}} | @@ -3452,7 +3514,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `phase` _[EtcdRestorePhase](#etcdrestorephase)_ | {{< unsafe >}}{{< /unsafe >}} | -| `restoreTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `restoreTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | [Back to top](#top) @@ -3472,12 +3534,12 @@ _Appears in:_ | --- | --- | | `clusterSize` _integer_ | {{< unsafe >}}ClusterSize is the number of replicas created for etcd. This should be an
odd number to guarantee consensus, e.g. 3, 5 or 7.{{< /unsafe >}} | | `storageClass` _string_ | {{< unsafe >}}StorageClass is the Kubernetes StorageClass used for persistent storage
which stores the etcd WAL and other data persisted across restarts. Defaults to
`kubermatic-fast` (the global default).{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources allows to override the resource requirements for etcd Pods.{{< /unsafe >}} | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}Tolerations allows to override the scheduling tolerations for etcd Pods.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources allows to override the resource requirements for etcd Pods.{{< /unsafe >}} | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}Tolerations allows to override the scheduling tolerations for etcd Pods.{{< /unsafe >}} | | `hostAntiAffinity` _[AntiAffinityType](#antiaffinitytype)_ | {{< unsafe >}}HostAntiAffinity allows to enforce a certain type of host anti-affinity on etcd
pods. Options are "preferred" (default) and "required". Please note that
enforcing anti-affinity via "required" can mean that pods are never scheduled.{{< /unsafe >}} | | `zoneAntiAffinity` _[AntiAffinityType](#antiaffinitytype)_ | {{< unsafe >}}ZoneAntiAffinity allows to enforce a certain type of availability zone anti-affinity on etcd
pods. Options are "preferred" (default) and "required". Please note that
enforcing anti-affinity via "required" can mean that pods are never scheduled.{{< /unsafe >}} | | `nodeSelector` _object (keys:string, values:string)_ | {{< unsafe >}}NodeSelector is a selector which restricts the set of nodes where etcd Pods can run.{{< /unsafe >}} | -| `quotaBackendGb` _integer_ | {{< unsafe >}}QuotaBackendGB is the maximum backend size of etcd in GB (0 means use etcd default).

For more details, please see https://etcd.io/docs/v3.5/op-guide/maintenance/{{< /unsafe >}} | +| `quotaBackendGb` _integer_ | {{< unsafe >}}QuotaBackendGB is the maximum backend size of etcd in GB (0 means use etcd default).
For more details, please see https://etcd.io/docs/v3.5/op-guide/maintenance/{{< /unsafe >}} | [Back to top](#top) @@ -3592,7 +3654,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ExternalCluster` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ExternalClusterSpec](#externalclusterspec)_ | {{< unsafe >}}Spec describes the desired cluster state.{{< /unsafe >}} | | `status` _[ExternalClusterStatus](#externalclusterstatus)_ | {{< unsafe >}}Status contains reconciliation information for the cluster.{{< /unsafe >}} | @@ -3739,7 +3801,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `providerName` _string_ | {{< unsafe >}}The name of the cloud provider used, one of
"aws", "azure", "digitalocean", "gcp",
"hetzner", "nutanix", "openstack", "packet", "vsphere" KubeOne natively-supported providers{{< /unsafe >}} | +| `providerName` _string_ | {{< unsafe >}}The name of the cloud provider used, one of
"aws", "azure", "digitalocean", "gcp",
"hetzner", "nutanix", "openstack", "vsphere" KubeOne natively-supported providers{{< /unsafe >}} | | `region` _string_ | {{< unsafe >}}The cloud provider region in which the cluster resides.
This field is used only to display information.{{< /unsafe >}} | | `credentialsReference` _[GlobalSecretKeySelector](#globalsecretkeyselector)_ | {{< unsafe >}}{{< /unsafe >}} | | `sshReference` _[GlobalSecretKeySelector](#globalsecretkeyselector)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -3762,7 +3824,7 @@ ExternalClusterList specifies a list of external kubernetes clusters. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ExternalClusterList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ExternalCluster](#externalcluster) array_ | {{< unsafe >}}Items holds the list of the External Kubernetes cluster.{{< /unsafe >}} | @@ -3988,7 +4050,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `GroupProjectBinding` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[GroupProjectBindingSpec](#groupprojectbindingspec)_ | {{< unsafe >}}Spec describes an oidc group binding to a project.{{< /unsafe >}} | @@ -4008,7 +4070,7 @@ GroupProjectBindingList is a list of group project bindings. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `GroupProjectBindingList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[GroupProjectBinding](#groupprojectbinding) array_ | {{< unsafe >}}Items holds the list of the group and project bindings.{{< /unsafe >}} | @@ -4123,7 +4185,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `IPAMAllocation` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[IPAMAllocationSpec](#ipamallocationspec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4143,7 +4205,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `IPAMAllocationList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. 
| | `items` _[IPAMAllocation](#ipamallocation) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4187,7 +4249,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `IPAMPool` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[IPAMPoolSpec](#ipampoolspec)_ | {{< unsafe >}}Spec describes the Multi-Cluster IP Address Management (IPAM) configuration for KKP user clusters.{{< /unsafe >}} | @@ -4244,7 +4306,7 @@ configuration for KKP user clusters. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `IPAMPoolList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[IPAMPool](#ipampool) array_ | {{< unsafe >}}Items holds the list of IPAM pool objects.{{< /unsafe >}} | @@ -4369,7 +4431,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources configure limits/requests for Konnectivity components.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources configure limits/requests for Konnectivity components.{{< /unsafe >}} | | `keepaliveTime` _string_ | {{< unsafe >}}KeepaliveTime represents a duration of time to check if the transport is still alive.
The option is propagated to agents and server.
Defaults to 1m.{{< /unsafe >}} | | `args` _string array_ | {{< unsafe >}}Args configures arguments (flags) for the Konnectivity deployments.{{< /unsafe >}} | @@ -4392,7 +4454,7 @@ _Appears in:_ | --- | --- | | `enabled` _boolean_ | {{< unsafe >}}Controls whether kubeLB is deployed or not.{{< /unsafe >}} | | `useLoadBalancerClass` _boolean_ | {{< unsafe >}}UseLoadBalancerClass is used to configure the use of load balancer class `kubelb` for kubeLB. If false, kubeLB will manage all load balancers in the
user cluster irrespective of the load balancer class.{{< /unsafe >}} | -| `enableGatewayAPI` _boolean_ | {{< unsafe >}}EnableGatewayAPI is used to enable Gateway API for KubeLB. Once enabled, KKP installs the Gateway API CRDs for the user cluster.{{< /unsafe >}} | +| `enableGatewayAPI` _boolean_ | {{< unsafe >}}EnableGatewayAPI is used to enable Gateway API for KubeLB. Once enabled, KubeLB installs the Gateway API CRDs in the user cluster.{{< /unsafe >}} | | `extraArgs` _object (keys:string, values:string)_ | {{< unsafe >}}ExtraArgs are additional arbitrary flags to pass to the kubeLB CCM for the user cluster.{{< /unsafe >}} | @@ -4411,12 +4473,12 @@ _Appears in:_ | Field | Description | | --- | --- | -| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | +| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | | `enabled` _boolean_ | {{< unsafe >}}Enabled is used to enable/disable kubeLB for the datacenter. This is used to control whether installing kubeLB is allowed or not for the datacenter.{{< /unsafe >}} | | `enforced` _boolean_ | {{< unsafe >}}Enforced is used to enforce kubeLB installation for all the user clusters belonging to this datacenter. Setting enforced to false will not uninstall kubeLB from the user clusters and it needs to be disabled manually.{{< /unsafe >}} | | `nodeAddressType` _string_ | {{< unsafe >}}NodeAddressType is used to configure the address type from node, used for load balancing.
Optional: Defaults to ExternalIP.{{< /unsafe >}} | | `useLoadBalancerClass` _boolean_ | {{< unsafe >}}UseLoadBalancerClass is used to configure the use of load balancer class `kubelb` for kubeLB. If false, kubeLB will manage all load balancers in the
user cluster irrespective of the load balancer class.{{< /unsafe >}} | -| `enableGatewayAPI` _boolean_ | {{< unsafe >}}EnableGatewayAPI is used to configure the use of gateway API for kubeLB.
When this option is enabled for the user cluster, KKP installs the Gateway API CRDs for the user cluster.{{< /unsafe >}} | +| `enableGatewayAPI` _boolean_ | {{< unsafe >}}EnableGatewayAPI is used to configure the use of gateway API for kubeLB. Once enabled, Gateway API CRDs are installed for the user cluster.{{< /unsafe >}} | | `enableSecretSynchronizer` _boolean_ | {{< unsafe >}}EnableSecretSynchronizer is used to configure the use of secret synchronizer for kubeLB.{{< /unsafe >}} | | `disableIngressClass` _boolean_ | {{< unsafe >}}DisableIngressClass is used to disable the ingress class `kubelb` filter for kubeLB.{{< /unsafe >}} | | `extraArgs` _object (keys:string, values:string)_ | {{< unsafe >}}ExtraArgs are additional arbitrary flags to pass to the kubeLB CCM for the user cluster. These args are propagated to all the user clusters unless overridden at a cluster level.{{< /unsafe >}} | @@ -4437,7 +4499,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | +| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | | `enableForAllDatacenters` _boolean_ | {{< unsafe >}}EnableForAllDatacenters is used to enable kubeLB for all the datacenters belonging to this seed.
This is only used to control whether installing kubeLB is allowed or not for the datacenter.{{< /unsafe >}} | @@ -4457,7 +4519,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | +| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Kubeconfig is reference to the Kubeconfig for the kubeLB management cluster.{{< /unsafe >}} | [Back to top](#top) @@ -4517,7 +4579,7 @@ _Appears in:_ | --- | --- | | `name` _string_ | {{< unsafe >}}{{< /unsafe >}} | | `isDefaultClass` _boolean_ | {{< unsafe >}}Optional: IsDefaultClass. If true, the created StorageClass in the tenant cluster will be annotated with:
storageclass.kubernetes.io/is-default-class : true
If missing or false, annotation will be:
storageclass.kubernetes.io/is-default-class : false{{< /unsafe >}} | -| `volumeBindingMode` _[VolumeBindingMode](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#volumebindingmode-v1-storage)_ | {{< unsafe >}}VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset,
VolumeBindingImmediate is used.{{< /unsafe >}} | +| `volumeBindingMode` _[VolumeBindingMode](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#volumebindingmode-v1-storage)_ | {{< unsafe >}}VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset,
VolumeBindingImmediate is used.{{< /unsafe >}} | | `labels` _object (keys:string, values:string)_ | {{< unsafe >}}Labels is a map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.{{< /unsafe >}} | | `zones` _string array_ | {{< unsafe >}}Zones represent a logical failure domain. It is common for Kubernetes clusters to span multiple zones
for increased availability{{< /unsafe >}} | | `regions` _string array_ | {{< unsafe >}}Regions represents a larger domain, made up of one or more zones. It is uncommon for Kubernetes clusters
to span multiple regions{{< /unsafe >}} | @@ -4552,11 +4614,11 @@ _Appears in:_ | Field | Description | | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the Kubermatic REST API image.{{< /unsafe >}} | -| `dockerTag` _string_ | {{< unsafe >}}DockerTag is used to overwrite the Kubermatic API Docker image tag and is only for development
purposes. This field must not be set in production environments. If DockerTag is specified then
DockerTagSuffix will be ignored.
---{{< /unsafe >}} | +| `dockerTag` _string_ | {{< unsafe >}}DockerTag is used to overwrite the Kubermatic API Docker image tag and is only for development
purposes. This field must not be set in production environments. If DockerTag is specified then
DockerTagSuffix will be ignored.{{< /unsafe >}} | | `dockerTagSuffix` _string_ | {{< unsafe >}}DockerTagSuffix is appended to the KKP version used for referring to the custom Kubermatic API image.
If left empty, either the `DockerTag` if specified or the original Kubermatic API Docker image tag will be used.
With DockerTagSuffix the tag becomes i.e. "v2.15.0-SUFFIX".{{< /unsafe >}} | | `accessibleAddons` _string array_ | {{< unsafe >}}AccessibleAddons is a list of addons that should be enabled in the API.{{< /unsafe >}} | | `pprofEndpoint` _string_ | {{< unsafe >}}PProfEndpoint controls the port the API should listen on to provide pprof
data. This port is never exposed from the container and only available via port-forwardings.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `debugLog` _boolean_ | {{< unsafe >}}DebugLog enables more verbose logging.{{< /unsafe >}} | | `replicas` _integer_ | {{< unsafe >}}Replicas sets the number of pod replicas for the API deployment.{{< /unsafe >}} | @@ -4624,7 +4686,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `KubermaticConfiguration` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[KubermaticConfigurationSpec](#kubermaticconfigurationspec)_ | {{< unsafe >}}{{< /unsafe >}} | | `status` _[KubermaticConfigurationStatus](#kubermaticconfigurationstatus)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4645,7 +4707,7 @@ KubermaticConfigurationList is a collection of KubermaticConfigurations. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `KubermaticConfigurationList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[KubermaticConfiguration](#kubermaticconfiguration) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4664,7 +4726,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `caBundle` _[TypedLocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#typedlocalobjectreference-v1-core)_ | {{< unsafe >}}CABundle references a ConfigMap in the same namespace as the KubermaticConfiguration.
This ConfigMap must contain a ca-bundle.pem with PEM-encoded certificates. This bundle
automatically synchronized into each seed and each usercluster. APIGroup and Kind are
currently ignored.{{< /unsafe >}} | +| `caBundle` _[TypedLocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#typedlocalobjectreference-v1-core)_ | {{< unsafe >}}CABundle references a ConfigMap in the same namespace as the KubermaticConfiguration.
This ConfigMap must contain a ca-bundle.pem with PEM-encoded certificates. This bundle
is automatically synchronized into each seed and each usercluster. APIGroup and Kind are
currently ignored.{{< /unsafe >}} | | `imagePullSecret` _string_ | {{< unsafe >}}ImagePullSecret is used to authenticate against Docker registries.{{< /unsafe >}} | | `auth` _[KubermaticAuthConfiguration](#kubermaticauthconfiguration)_ | {{< unsafe >}}Auth defines keys and URLs for Dex. These must be defined unless the HeadlessInstallation
feature gate is set, which will disable the UI/API and its need for an OIDC provider entirely.{{< /unsafe >}} | | `featureGates` _object (keys:string, values:boolean)_ | {{< unsafe >}}FeatureGates are used to optionally enable certain features.{{< /unsafe >}} | @@ -4721,7 +4783,7 @@ _Appears in:_ | `className` _string_ | {{< unsafe >}}ClassName is the Ingress resource's class name, used for selecting the appropriate
ingress controller.{{< /unsafe >}} | | `namespaceOverride` _string_ | {{< unsafe >}}NamespaceOverride need to be set if a different ingress-controller is used than the KKP default one.{{< /unsafe >}} | | `disable` _boolean_ | {{< unsafe >}}Disable will prevent an Ingress from being created at all. This is mostly useful
during testing. If the Ingress is disabled, the CertificateIssuer setting can also
be left empty, as no Certificate resource will be created.{{< /unsafe >}} | -| `certificateIssuer` _[TypedLocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#typedlocalobjectreference-v1-core)_ | {{< unsafe >}}CertificateIssuer is the name of a cert-manager Issuer or ClusterIssuer (default)
that will be used to acquire the certificate for the configured domain.
To use a namespaced Issuer, set the Kind to "Issuer" and manually create the
matching Issuer in Kubermatic's namespace.
Setting an empty name disables the automatic creation of certificates and disables
the TLS settings on the Kubermatic Ingress.{{< /unsafe >}} | +| `certificateIssuer` _[TypedLocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#typedlocalobjectreference-v1-core)_ | {{< unsafe >}}CertificateIssuer is the name of a cert-manager Issuer or ClusterIssuer (default)
that will be used to acquire the certificate for the configured domain.
To use a namespaced Issuer, set the Kind to "Issuer" and manually create the
matching Issuer in Kubermatic's namespace.
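As a sketch, assuming this section sits under `spec.ingress` of the KubermaticConfiguration and that a cert-manager ClusterIssuer named `letsencrypt-prod` already exists:

```yaml
spec:
  ingress:
    className: nginx            # illustrative ingress class
    certificateIssuer:
      kind: ClusterIssuer       # set to "Issuer" for a namespaced issuer
      name: letsencrypt-prod    # hypothetical issuer name
```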
Setting an empty name disables the automatic creation of certificates and disables
the TLS settings on the Kubermatic Ingress.{{< /unsafe >}} | [Back to top](#top) @@ -4742,7 +4804,7 @@ _Appears in:_ | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the Kubermatic master-controller-manager image.{{< /unsafe >}} | | `projectsMigrator` _[KubermaticProjectsMigratorConfiguration](#kubermaticprojectsmigratorconfiguration)_ | {{< unsafe >}}ProjectsMigrator configures the migrator for user projects.{{< /unsafe >}} | | `pprofEndpoint` _string_ | {{< unsafe >}}PProfEndpoint controls the port the master-controller-manager should listen on to provide pprof
data. This port is never exposed from the container and only available via port-forwardings.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `debugLog` _boolean_ | {{< unsafe >}}DebugLog enables more verbose logging.{{< /unsafe >}} | | `replicas` _integer_ | {{< unsafe >}}Replicas sets the number of pod replicas for the master-controller-manager.{{< /unsafe >}} | @@ -4809,11 +4871,11 @@ _Appears in:_ | `backupCleanupContainer` _string_ | {{< unsafe >}}Deprecated: BackupCleanupContainer is the container used for removing expired backups from the storage location.
This field is a no-op and is no longer used. The old backup controller it was used for has been
removed. Do not set this field.{{< /unsafe >}} | | `maximumParallelReconciles` _integer_ | {{< unsafe >}}MaximumParallelReconciles limits the number of cluster reconciliations
that are active at any given time.{{< /unsafe >}} | | `pprofEndpoint` _string_ | {{< unsafe >}}PProfEndpoint controls the port the seed-controller-manager should listen on to provide pprof
data. This port is never exposed from the container and only available via port-forwardings.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `debugLog` _boolean_ | {{< unsafe >}}DebugLog enables more verbose logging.{{< /unsafe >}} | | `replicas` _integer_ | {{< unsafe >}}Replicas sets the number of pod replicas for the seed-controller-manager.{{< /unsafe >}} | | `disabledCollectors` _[MetricsCollector](#metricscollector) array_ | {{< unsafe >}}DisabledCollectors contains a list of metrics collectors that should be disabled.
Acceptable values are "Addon", "Cluster", "ClusterBackup", "Project", and "None".{{< /unsafe >}} | -| `backupInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#duration-v1-meta)_ | {{< unsafe >}}BackupInterval defines the time duration between consecutive etcd backups.
Must be a valid time.Duration string format. Only takes effect when backup scheduling is enabled.{{< /unsafe >}} | +| `backupInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#duration-v1-meta)_ | {{< unsafe >}}BackupInterval defines the time duration between consecutive etcd backups.
Must be a valid time.Duration string format. Only takes effect when backup scheduling is enabled.{{< /unsafe >}} | | `backupCount` _integer_ | {{< unsafe >}}BackupCount specifies the maximum number of backups to retain (defaults to DefaultKeptBackupsCount).
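A hedged sketch of how these two backup fields might be set together, assuming they live under the seed-controller-manager section of the KubermaticConfiguration (values are illustrative only):

```yaml
spec:
  seedController:
    backupInterval: 20m   # a valid time.Duration string
    backupCount: 7        # retain at most 7 backups
```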
Oldest backups are automatically deleted when this limit is exceeded. Only applies when Schedule is configured.{{< /unsafe >}} | @@ -4836,7 +4898,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `KubermaticSetting` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[SettingSpec](#settingspec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4856,7 +4918,7 @@ KubermaticSettingList is a list of settings. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `KubermaticSettingList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[KubermaticSetting](#kubermaticsetting) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -4876,13 +4938,13 @@ _Appears in:_ | Field | Description | | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the Kubermatic dashboard image.{{< /unsafe >}} | -| `dockerTag` _string_ | {{< unsafe >}}DockerTag is used to overwrite the dashboard Docker image tag and is only for development
purposes. This field must not be set in production environments. If DockerTag is specified then
DockerTagSuffix will be ignored.
---{{< /unsafe >}} | +| `dockerTag` _string_ | {{< unsafe >}}DockerTag is used to overwrite the dashboard Docker image tag and is only for development
purposes. This field must not be set in production environments. If DockerTag is specified, then&lt;br /&gt;
DockerTagSuffix will be ignored.{{< /unsafe >}} | | `dockerTagSuffix` _string_ | {{< unsafe >}}DockerTagSuffix is appended to the KKP version used for referring to the custom dashboard image.
If left empty, either the `DockerTag` if specified or the original dashboard Docker image tag will be used.
With DockerTagSuffix the tag becomes i.e. "v2.15.0-SUFFIX".{{< /unsafe >}} | | `config` _string_ | {{< unsafe >}}Config sets flags for various dashboard features.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `replicas` _integer_ | {{< unsafe >}}Replicas sets the number of pod replicas for the UI deployment.{{< /unsafe >}} | -| `extraVolumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#volumemount-v1-core) array_ | {{< unsafe >}}ExtraVolumeMounts allows to mount additional volumes into the UI container.{{< /unsafe >}} | -| `extraVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#volume-v1-core) array_ | {{< unsafe >}}ExtraVolumes allows to mount additional volumes into the UI container.{{< /unsafe >}} | +| `extraVolumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#volumemount-v1-core) array_ | {{< unsafe >}}ExtraVolumeMounts allows to mount additional volumes into the UI container.{{< /unsafe >}} | +| `extraVolumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#volume-v1-core) array_ | {{< unsafe >}}ExtraVolumes allows to mount additional volumes into the UI container.{{< /unsafe >}} | [Back to top](#top) @@ -4954,7 +5016,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the component's image.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | [Back to top](#top) @@ -5016,7 +5078,7 @@ _Appears in:_ | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the Kubermatic webhook image.{{< /unsafe >}} | | `pprofEndpoint` _string_ | {{< unsafe >}}PProfEndpoint controls the port the webhook should listen on to provide pprof
data. This port is never exposed from the container and only available via port-forwardings.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `debugLog` _boolean_ | {{< unsafe >}}DebugLog enables more verbose logging.{{< /unsafe >}} | | `replicas` _integer_ | {{< unsafe >}}Replicas sets the number of pod replicas for the webhook.{{< /unsafe >}} | @@ -5133,6 +5195,28 @@ _Appears in:_ +### LBClass + + + + + +_Appears in:_ +- [LoadBalancerClass](#loadbalancerclass) + +| Field | Description | +| --- | --- | +| `floatingNetworkID` _string_ | {{< unsafe >}}FloatingNetworkID is the external network used to create floating IP for the load balancer VIP.{{< /unsafe >}} | +| `floatingSubnetID` _string_ | {{< unsafe >}}FloatingSubnetID is the external network subnet used to create floating IP for the load balancer VIP.{{< /unsafe >}} | +| `floatingSubnet` _string_ | {{< unsafe >}}FloatingSubnet is a name pattern for the external network subnet used to create floating IP for the load balancer VIP.{{< /unsafe >}} | +| `floatingSubnetTags` _string_ | {{< unsafe >}}FloatingSubnetTags is a comma separated list of tags for the external network subnet used to create floating IP for the load balancer VIP.{{< /unsafe >}} | +| `networkID` _string_ | {{< unsafe >}}NetworkID is the ID of the Neutron network on which to create load balancer VIP, not needed if subnet-id is set.{{< /unsafe >}} | +| `subnetID` _string_ | {{< unsafe >}}SubnetID is the ID of the Neutron subnet on which to create load balancer VIP.{{< /unsafe >}} | +| `memberSubnetID` _string_ | {{< unsafe >}}MemberSubnetID is the ID of the Neutron network on which to create the members of the load balancer.{{< /unsafe >}} | + + +[Back to top](#top) + ### LBSKU @@ -5170,12 +5254,21 @@ _Appears in:_ ### LoadBalancerClass -_Underlying type:_ `[struct{Name string "json:\"name\""; Config LBClass "json:\"config\""}](#struct{name-string-"json:\"name\"";-config-lbclass-"json:\"config\""})` + _Appears in:_ - [DatacenterSpecOpenstack](#datacenterspecopenstack) +- [OpenstackCloudSpec](#openstackcloudspec) + +| Field | Description | +| --- | --- | +| `name` _string_ | {{< unsafe >}}Name is the name of the load balancer class.{{< /unsafe >}} | +| `config` _[LBClass](#lbclass)_ | {{< unsafe >}}Config is the configuration for the specified LoadBalancerClass section in the cloud config.{{< /unsafe >}} | + + +[Back to top](#top) @@ -5214,7 +5307,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `MLAAdminSetting` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[MLAAdminSettingSpec](#mlaadminsettingspec)_ | {{< unsafe >}}Spec describes the cluster-specific administrator settings for KKP user cluster MLA
(monitoring, logging & alerting) stack.{{< /unsafe >}} | @@ -5235,7 +5328,7 @@ user cluster MLA (monitoring, logging & alerting) stack. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `MLAAdminSettingList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[MLAAdminSetting](#mlaadminsetting) array_ | {{< unsafe >}}Items holds the list of the cluster-specific administrative settings
for KKP user cluster MLA.{{< /unsafe >}} | @@ -5277,8 +5370,8 @@ _Appears in:_ | --- | --- | | `monitoringEnabled` _boolean_ | {{< unsafe >}}MonitoringEnabled is the flag for enabling monitoring in user cluster.{{< /unsafe >}} | | `loggingEnabled` _boolean_ | {{< unsafe >}}LoggingEnabled is the flag for enabling logging in user cluster.{{< /unsafe >}} | -| `monitoringResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}MonitoringResources is the resource requirements for user cluster prometheus.{{< /unsafe >}} | -| `loggingResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}LoggingResources is the resource requirements for user cluster promtail.{{< /unsafe >}} | +| `monitoringResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}MonitoringResources is the resource requirements for user cluster prometheus.{{< /unsafe >}} | +| `loggingResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}LoggingResources is the resource requirements for user cluster promtail.{{< /unsafe >}} | | `monitoringReplicas` _integer_ | {{< unsafe >}}MonitoringReplicas is the number of desired pods of user cluster prometheus deployment.{{< /unsafe >}} | @@ -5402,8 +5495,8 @@ _Appears in:_ | `scope` _string_ | {{< unsafe >}}Scope accepts *, Cluster, or Namespaced which determines if cluster-scoped and/or namespace-scoped resources are selected. (defaults to *){{< /unsafe >}} | | `namespaces` _string array_ | {{< unsafe >}}Namespaces is a list of namespace names. If defined, a constraint will only apply to resources in a listed namespace.{{< /unsafe >}} | | `excludedNamespaces` _string array_ | {{< unsafe >}}ExcludedNamespaces is a list of namespace names. If defined, a constraint will only apply to resources not in a listed namespace.{{< /unsafe >}} | -| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector is a standard Kubernetes label selector.{{< /unsafe >}} | -| `namespaceSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}NamespaceSelector is a standard Kubernetes namespace selector. If defined, make sure to add Namespaces to your
configs.config.gatekeeper.sh object to ensure namespaces are synced into OPA{{< /unsafe >}} | +| `labelSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}LabelSelector is a standard Kubernetes label selector.{{< /unsafe >}} | +| `namespaceSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}NamespaceSelector is a standard Kubernetes namespace selector. If defined, make sure to add Namespaces to your
configs.config.gatekeeper.sh object to ensure namespaces are synced into OPA{{< /unsafe >}} | [Back to top](#top) @@ -5585,7 +5678,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the component's image.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | | `loadBalancerService` _[EnvoyLoadBalancerService](#envoyloadbalancerservice)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -5610,6 +5703,7 @@ _Appears in:_ | `registryMirrors` _string array_ | {{< unsafe >}}Optional: These image registries will be configured as registry mirrors
on the container runtime.{{< /unsafe >}} | | `pauseImage` _string_ | {{< unsafe >}}Optional: Translates to --pod-infra-container-image on the kubelet.
If not set, the kubelet will default it.{{< /unsafe >}} | | `containerdRegistryMirrors` _[ContainerRuntimeContainerd](#containerruntimecontainerd)_ | {{< unsafe >}}Optional: ContainerdRegistryMirrors configure registry mirrors endpoints. Can be used multiple times to specify multiple mirrors.{{< /unsafe >}} | +| `enableNonRootDeviceOwnership` _boolean_ | {{< unsafe >}}Optional: EnableNonRootDeviceOwnership enables the non-root device ownership feature in the container runtime.{{< /unsafe >}} | [Back to top](#top) @@ -5630,7 +5724,7 @@ _Appears in:_ | Field | Description | | --- | --- | | `dockerRepository` _string_ | {{< unsafe >}}DockerRepository is the repository containing the component's image.{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Resources describes the requested and maximum allowed CPU/memory usage.{{< /unsafe >}} | [Back to top](#top) @@ -5653,8 +5747,8 @@ _Appears in:_ | `envoy` _[NodePortProxyComponentEnvoy](#nodeportproxycomponentenvoy)_ | {{< unsafe >}}Envoy configures the Envoy application itself.{{< /unsafe >}} | | `envoyManager` _[NodeportProxyComponent](#nodeportproxycomponent)_ | {{< unsafe >}}EnvoyManager configures the Kubermatic-internal Envoy manager.{{< /unsafe >}} | | `updater` _[NodeportProxyComponent](#nodeportproxycomponent)_ | {{< unsafe >}}Updater configures the component responsible for updating the LoadBalancer
service.{{< /unsafe >}} | -| `ipFamilyPolicy` _[IPFamilyPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#ipfamilypolicy-v1-core)_ | {{< unsafe >}}IPFamilyPolicy configures the IP family policy for the LoadBalancer service.{{< /unsafe >}} | -| `ipFamilies` _[IPFamily](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#ipfamily-v1-core) array_ | {{< unsafe >}}IPFamilies configures the IP families to use for the LoadBalancer service.{{< /unsafe >}} | +| `ipFamilyPolicy` _[IPFamilyPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#ipfamilypolicy-v1-core)_ | {{< unsafe >}}IPFamilyPolicy configures the IP family policy for the LoadBalancer service.{{< /unsafe >}} | +| `ipFamilies` _[IPFamily](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#ipfamily-v1-core) array_ | {{< unsafe >}}IPFamilies configures the IP families to use for the LoadBalancer service.{{< /unsafe >}} | [Back to top](#top) @@ -5824,8 +5918,8 @@ _Appears in:_ | `enabled` _boolean_ | {{< unsafe >}}Enables OPA Gatekeeper integration.{{< /unsafe >}} | | `webhookTimeoutSeconds` _integer_ | {{< unsafe >}}The timeout in seconds that is set for the Gatekeeper validating webhook admission review calls.
Defaults to `10` (seconds).{{< /unsafe >}} | | `experimentalEnableMutation` _boolean_ | {{< unsafe >}}Optional: Enables experimental mutation in Gatekeeper.{{< /unsafe >}} | -| `controllerResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Optional: ControllerResources is the resource requirements for user cluster gatekeeper controller.{{< /unsafe >}} | -| `auditResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}Optional: AuditResources is the resource requirements for user cluster gatekeeper audit.{{< /unsafe >}} | +| `controllerResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Optional: ControllerResources is the resource requirements for user cluster gatekeeper controller.{{< /unsafe >}} | +| `auditResources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}Optional: AuditResources is the resource requirements for user cluster gatekeeper audit.{{< /unsafe >}} | [Back to top](#top) @@ -5844,8 +5938,8 @@ _Appears in:_ | Field | Description | | --- | --- | | `replicas` _integer_ | {{< unsafe >}}{{< /unsafe >}} | -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} | | `leaderElection` _[LeaderElectionSettings](#leaderelectionsettings)_ | {{< unsafe >}}{{< /unsafe >}} | | `proxy` _[ProxySettings](#proxysettings)_ | {{< unsafe >}}ProxySettings defines optional flags for OperatingSystemManager deployment to allow
setting specific proxy configurations for specific user clusters.{{< /unsafe >}} | @@ -5958,19 +6052,23 @@ _Appears in:_ | `applicationCredentialSecret` _string_ | {{< unsafe >}}Application credential secret (which is not the user's password) to authenticate in combination with an application credential ID.{{< /unsafe >}} | | `useToken` _boolean_ | {{< unsafe >}}{{< /unsafe >}} | | `token` _string_ | {{< unsafe >}}Used internally during cluster creation{{< /unsafe >}} | -| `network` _string_ | {{< unsafe >}}Network holds the name of the internal network
When specified, all worker nodes will be attached to this network. If not specified, a network, subnet & router will be created.

Note that the network is internal if the "External" field is set to false{{< /unsafe >}} | +| `network` _string_ | {{< unsafe >}}Network holds the name of the internal network.&lt;br /&gt;
When specified, all worker nodes will be attached to this network. If not specified, a network, subnet & router will be created.
Note that the network is internal if the "External" field is set to false.{{< /unsafe >}} | | `securityGroups` _string_ | {{< unsafe >}}SecurityGroups is the name of the security group (only supports a singular security group) that will be used for Machines in the cluster.&lt;br /&gt;
If this field is left empty, a default security group will be created and used.{{< /unsafe >}} | | `nodePortsAllowedIPRange` _string_ | {{< unsafe >}}A CIDR range that will be used to allow access to the node port range in the security group. Only applies if&lt;br /&gt;
the security group is generated by KKP and not preexisting.&lt;br /&gt;
If neither NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.{{< /unsafe >}} | | `nodePortsAllowedIPRanges` _[NetworkRanges](#networkranges)_ | {{< unsafe >}}Optional: CIDR ranges that will be used to allow access to the node port range in the security group. Only applies if&lt;br /&gt;
the security group is generated by KKP and not preexisting.&lt;br /&gt;
If neither NodePortsAllowedIPRange nor NodePortsAllowedIPRanges is set, the node port range can be accessed from anywhere.{{< /unsafe >}} | -| `floatingIPPool` _string_ | {{< unsafe >}}FloatingIPPool holds the name of the public network&lt;br /&gt;
The public network is reachable from the outside world
and should provide the pool of IP addresses to choose from.

When specified, all worker nodes will receive a public ip from this floating ip pool

Note that the network is external if the "External" field is set to true{{< /unsafe >}} | +| `floatingIPPool` _string_ | {{< unsafe >}}FloatingIPPool holds the name of the public network.&lt;br /&gt;
The public network is reachable from the outside world
and should provide the pool of IP addresses to choose from.
When specified, all worker nodes will receive a public IP from this floating IP pool.&lt;br /&gt;
Note that the network is external if the "External" field is set to true.{{< /unsafe >}} | | `routerID` _string_ | {{< unsafe >}}{{< /unsafe >}} | | `subnetID` _string_ | {{< unsafe >}}{{< /unsafe >}} | +| `subnetCidr` _string_ | {{< unsafe >}}SubnetCIDR is the CIDR that will be assigned to the subnet that is created for the cluster if the cluster spec&lt;br /&gt;
didn't specify a subnet id.{{< /unsafe >}} | +| `subnetAllocationPool` _string_ | {{< unsafe >}}SubnetAllocationPool represents a pool of usable IPs that can be assigned to resources via DHCP. The format is&lt;br /&gt;
the first usable IP and the last usable IP, separated by a dash (e.g. 10.10.0.1-10.10.0.254).{{< /unsafe >}} | +| `ipv6SubnetCidr` _string_ | {{< unsafe >}}IPv6SubnetCIDR is the CIDR that will be assigned to the subnet that is created for the cluster if the cluster spec&lt;br /&gt;
didn't specify a subnet id for the IPv6 networking.{{< /unsafe >}} | | `ipv6SubnetID` _string_ | {{< unsafe >}}IPv6SubnetID holds the ID of the subnet used for IPv6 networking.
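Taken together, a minimal sketch of the new subnet-related fields as a fragment of a cluster's OpenstackCloudSpec (all values illustrative):

```yaml
cloud:
  openstack:
    subnetCidr: 10.10.0.0/24                     # IPv4 CIDR for the created subnet
    subnetAllocationPool: 10.10.0.1-10.10.0.254  # first and last usable IP
    ipv6SubnetCidr: "fd00:10:10::/64"            # only relevant when IPv6 is enabled
```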
If not provided, a new subnet will be created if IPv6 is enabled.{{< /unsafe >}} | | `ipv6SubnetPool` _string_ | {{< unsafe >}}IPv6SubnetPool holds the name of the subnet pool used for creating new IPv6 subnets.
If not provided, the default IPv6 subnet pool will be used.{{< /unsafe >}} | -| `useOctavia` _boolean_ | {{< unsafe >}}Whether or not to use Octavia for LoadBalancer type of Service
implementation instead of using Neutron-LBaaS.
Attention:Openstack CCM use Octavia as default load balancer
implementation since v1.17.0

Takes precedence over the 'use_octavia' flag provided at datacenter
level if both are specified.{{< /unsafe >}} | +| `useOctavia` _boolean_ | {{< unsafe >}}Whether or not to use Octavia for LoadBalancer type of Service
implementation instead of using Neutron-LBaaS.
Attention: The OpenStack CCM uses Octavia as the default load balancer&lt;br /&gt;
implementation since v1.17.0.&lt;br /&gt;
Takes precedence over the 'use_octavia' flag provided at datacenter
level if both are specified.{{< /unsafe >}} | | `enableIngressHostname` _boolean_ | {{< unsafe >}}Enable the `enable-ingress-hostname` cloud provider option on the Openstack CCM. Can only be used with the
external CCM and might be deprecated and removed in future versions as it is considered a workaround for the PROXY
protocol to preserve client IPs.{{< /unsafe >}} | | `ingressHostnameSuffix` _string_ | {{< unsafe >}}Set a specific suffix for the hostnames used for the PROXY protocol workaround that is enabled by EnableIngressHostname.
The suffix is set to `nip.io` by default. Can only be used with the external CCM and might be deprecated and removed in
future versions as it is considered a workaround only.{{< /unsafe >}} | | `cinderTopologyEnabled` _boolean_ | {{< unsafe >}}Flag to configure enablement of topology support for the Cinder CSI plugin.
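For the new `loadBalancerClasses` field listed below, a hedged sketch of one class entry; the class name and the placeholder IDs are assumptions:

```yaml
cloud:
  openstack:
    loadBalancerClasses:
      - name: public-lb                            # hypothetical class name
        config:
          floatingNetworkID: "<external-network-id>"  # replace with a real Neutron network ID
          floatingSubnet: "public-*"                  # illustrative name pattern for the external subnet
```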
This requires Nova and Cinder to have matching availability zones configured.{{< /unsafe >}} | +| `loadBalancerClasses` _[LoadBalancerClass](#loadbalancerclass) array_ | {{< unsafe >}}List of LoadBalancerClass configurations to be used for the OpenStack cloud provider.{{< /unsafe >}} | [Back to top](#top) @@ -6030,51 +6128,6 @@ _Appears in:_ -### Packet - - - -Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. -This provider is no longer supported. Migrate your configurations away from "packet" immediately. - -_Appears in:_ -- [PresetSpec](#presetspec) - -| Field | Description | -| --- | --- | -| `enabled` _boolean_ | {{< unsafe >}}Only enabled presets will be available in the KKP dashboard.{{< /unsafe >}} | -| `isCustomizable` _boolean_ | {{< unsafe >}}IsCustomizable marks a preset as editable on the KKP UI; Customizable presets still have the credentials obscured on the UI, but other fields that are not considered private are displayed during cluster creation. Users can then update those fields, if required.
NOTE: This is only supported for OpenStack Cloud Provider in KKP 2.26. Support for other providers will be added later on.{{< /unsafe >}} | -| `datacenter` _string_ | {{< unsafe >}}If datacenter is set, this preset is only applicable to the
configured datacenter.{{< /unsafe >}} | -| `apiKey` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| `projectID` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| `billingCycle` _string_ | {{< unsafe >}}{{< /unsafe >}} | - - -[Back to top](#top) - - - -### PacketCloudSpec - - - -Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29. -This provider is no longer supported. Migrate your configurations away from "packet" immediately. -PacketCloudSpec specifies access data to a Packet cloud. - -_Appears in:_ -- [CloudSpec](#cloudspec) - -| Field | Description | -| --- | --- | -| `credentialsReference` _[GlobalSecretKeySelector](#globalsecretkeyselector)_ | {{< unsafe >}}{{< /unsafe >}} | -| `apiKey` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| `projectID` _string_ | {{< unsafe >}}{{< /unsafe >}} | -| `billingCycle` _string_ | {{< unsafe >}}{{< /unsafe >}} | - - -[Back to top](#top) - ### Parameters @@ -6102,7 +6155,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `PolicyBinding` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[PolicyBindingSpec](#policybindingspec)_ | {{< unsafe >}}{{< /unsafe >}} | | `status` _[PolicyBindingStatus](#policybindingstatus)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -6125,7 +6178,7 @@ PolicyBindingList is a list of PolicyBinding objects. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `PolicyBindingList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[PolicyBinding](#policybinding) array_ | {{< unsafe >}}Items refers to the list of PolicyBinding objects{{< /unsafe >}} | @@ -6144,8 +6197,8 @@ _Appears in:_ | Field | Description | | --- | --- | -| `policyTemplateRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}PolicyTemplateRef references the PolicyTemplate by name{{< /unsafe >}} | -| `kyvernoPolicyNamespace` _[KyvernoPolicyNamespace](#kyvernopolicynamespace)_ | {{< unsafe >}}KyvernoPolicyNamespace specifies the Kyverno namespace to deploy the Kyverno Policy into.

Relevant only if the referenced PolicyTemplate has spec.enforced=false.
If Template.NamespacedPolicy is true and this field is omitted, no Kyverno Policy resources will be created.{{< /unsafe >}} | +| `policyTemplateRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}PolicyTemplateRef references the PolicyTemplate by name{{< /unsafe >}} | +| `kyvernoPolicyNamespace` _[KyvernoPolicyNamespace](#kyvernopolicynamespace)_ | {{< unsafe >}}KyvernoPolicyNamespace specifies the Kyverno namespace to deploy the Kyverno Policy into.
Relevant only if the referenced PolicyTemplate has spec.enforced=false.
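A minimal PolicyBinding sketch; the template name `require-labels` is hypothetical and must match an existing PolicyTemplate:

```yaml
apiVersion: kubermatic.k8c.io/v1
kind: PolicyBinding
metadata:
  name: require-labels-binding
spec:
  policyTemplateRef:
    name: require-labels   # references a PolicyTemplate by name
```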
If Template.NamespacedPolicy is true and this field is omitted, no Kyverno Policy resources will be created.{{< /unsafe >}} | [Back to top](#top) @@ -6166,7 +6219,7 @@ _Appears in:_ | `observedGeneration` _integer_ | {{< unsafe >}}ObservedGeneration is the generation observed by the controller.{{< /unsafe >}} | | `templateEnforced` _boolean_ | {{< unsafe >}}TemplateEnforced reflects the value of `spec.enforced` from PolicyTemplate{{< /unsafe >}} | | `active` _boolean_ | {{< unsafe >}}Active reflects whether the Kyverno policy exists and is active in this User Cluster.{{< /unsafe >}} | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#condition-v1-meta) array_ | {{< unsafe >}}Conditions represents the latest available observations of the policy binding's current state{{< /unsafe >}} | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#condition-v1-meta) array_ | {{< unsafe >}}Conditions represents the latest available observations of the policy binding's current state{{< /unsafe >}} | [Back to top](#top) @@ -6186,7 +6239,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `PolicyTemplate` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[PolicyTemplateSpec](#policytemplatespec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -6206,7 +6259,7 @@ PolicyTemplateList is a list of PolicyTemplate objects. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `PolicyTemplateList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[PolicyTemplate](#policytemplate) array_ | {{< unsafe >}}Items refers to the list of PolicyTemplate objects{{< /unsafe >}} | @@ -6229,13 +6282,13 @@ _Appears in:_ | `description` _string_ | {{< unsafe >}}Description is the description of the policy, specified as an annotation in the Kyverno policy{{< /unsafe >}} | | `category` _string_ | {{< unsafe >}}Category is the category of the policy, specified as an annotation in the Kyverno policy{{< /unsafe >}} | | `severity` _string_ | {{< unsafe >}}Severity indicates the severity level of the policy{{< /unsafe >}} | -| `visibility` _string_ | {{< unsafe >}}Visibility specifies where the policy is visible.

Can be one of: global, project, or cluster{{< /unsafe >}} | -| `projectID` _string_ | {{< unsafe >}}ProjectID is the ID of the project for which the policy template is created

Relevant only for project visibility policies{{< /unsafe >}} | +| `visibility` _string_ | {{< unsafe >}}Visibility specifies where the policy is visible.
Can be one of: global, project, or cluster{{< /unsafe >}} | +| `projectID` _string_ | {{< unsafe >}}ProjectID is the ID of the project for which the policy template is created
Relevant only for project visibility policies{{< /unsafe >}} | | `default` _boolean_ | {{< unsafe >}}Default determines whether we apply the policy (create policy binding) by default{{< /unsafe >}} | -| `enforced` _boolean_ | {{< unsafe >}}Enforced indicates whether this policy is mandatory

If true, this policy is mandatory
A PolicyBinding referencing it cannot disable it{{< /unsafe >}} | +| `enforced` _boolean_ | {{< unsafe >}}Enforced indicates whether this policy is mandatory
If true, this policy is mandatory.&lt;br /&gt;
A PolicyBinding referencing it cannot disable it.{{< /unsafe >}} | | `namespacedPolicy` _boolean_ | {{< unsafe >}}NamespacedPolicy dictates the type of Kyverno resource to be created in this User Cluster.{{< /unsafe >}} | | `target` _[PolicyTemplateTarget](#policytemplatetarget)_ | {{< unsafe >}}Target allows selection of projects and clusters where this template applies.&lt;br /&gt;
If 'Target' itself is omitted, the scope defaults based on 'Visibility' and 'ProjectID'.{{< /unsafe >}} | -| `policySpec` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#rawextension-runtime-pkg)_ | {{< unsafe >}}PolicySpec is the policy spec of the Kyverno Policy we want to apply on the cluster.&lt;br /&gt;

The structure of this spec should follow the rules defined in Kyverno
[Writing Policies Docs](https://kyverno.io/docs/writing-policies/).

For example, a simple policy spec could be defined as:

policySpec:
validationFailureAction: Audit
background: true
rules:
- name: check-for-labels
match:
any:
- resources:
kinds:
- Pod
validate:
message: "The label `app.kubernetes.io/name` is required."
pattern:
metadata:
labels:
app.kubernetes.io/name: "?*"

There are also further examples of Kyverno policies in the
[Kyverno Policies Examples](https://kyverno.io/policies/).{{< /unsafe >}} | +| `policySpec` _[RawExtension](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#rawextension-runtime-pkg)_ | {{< unsafe >}}PolicySpec is the policy spec of the Kyverno Policy we want to apply on the cluster.
The structure of this spec should follow the rules defined in Kyverno
[Writing Policies Docs](https://kyverno.io/docs/writing-policies/).
For example, a simple policy spec could be defined as:
policySpec:
validationFailureAction: Audit
background: true
rules:
- name: check-for-labels
match:
any:
- resources:
kinds:
- Pod
validate:
message: "The label `app.kubernetes.io/name` is required."
pattern:
metadata:
labels:
app.kubernetes.io/name: "?*"
There are also further examples of Kyverno policies in the
[Kyverno Policies Examples](https://kyverno.io/policies/).{{< /unsafe >}} | [Back to top](#top) @@ -6253,8 +6306,8 @@ _Appears in:_ | Field | Description | | --- | --- | -| `projectSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}ProjectSelector filters KKP Projects based on their labels.{{< /unsafe >}} | -| `clusterSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)_ | {{< unsafe >}}ClusterSelector filters individual KKP Cluster resources based on their labels.{{< /unsafe >}} | +| `projectSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}ProjectSelector filters KKP Projects based on their labels.{{< /unsafe >}} | +| `clusterSelector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#labelselector-v1-meta)_ | {{< unsafe >}}ClusterSelector filters individual KKP Cluster resources based on their labels.{{< /unsafe >}} | [Back to top](#top) @@ -6300,7 +6353,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Preset` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[PresetSpec](#presetspec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -6322,7 +6375,7 @@ PresetList is the type representing a PresetList. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `PresetList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Preset](#preset) array_ | {{< unsafe >}}List of presets{{< /unsafe >}} | @@ -6348,7 +6401,6 @@ _Appears in:_ | `baremetal` _[Baremetal](#baremetal)_ | {{< unsafe >}}Access data for Baremetal (Tinkerbell only for now).{{< /unsafe >}} | | `aws` _[AWS](#aws)_ | {{< unsafe >}}Access data for Amazon Web Services(AWS) Cloud.{{< /unsafe >}} | | `openstack` _[Openstack](#openstack)_ | {{< unsafe >}}Access data for OpenStack.{{< /unsafe >}} | -| `packet` _[Packet](#packet)_ | {{< unsafe >}}Deprecated: The Packet / Equinix Metal provider is deprecated and will be REMOVED IN VERSION 2.29.
This provider is no longer supported. Migrate your configurations away from "packet" immediately.
Access data for Packet Cloud.{{< /unsafe >}} | | `gcp` _[GCP](#gcp)_ | {{< unsafe >}}Access data for Google Cloud Platform(GCP).{{< /unsafe >}} | | `kubevirt` _[Kubevirt](#kubevirt)_ | {{< unsafe >}}Access data for KuberVirt.{{< /unsafe >}} | | `alibaba` _[Alibaba](#alibaba)_ | {{< unsafe >}}Access data for Alibaba Cloud.{{< /unsafe >}} | @@ -6382,7 +6434,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Project` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ProjectSpec](#projectspec)_ | {{< unsafe >}}Spec describes the configuration of the project.{{< /unsafe >}} | | `status` _[ProjectStatus](#projectstatus)_ | {{< unsafe >}}Status holds the current status of the project.{{< /unsafe >}} | @@ -6405,7 +6457,7 @@ ProjectList is a collection of projects. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ProjectList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Project](#project) array_ | {{< unsafe >}}Items is the list of the projects.{{< /unsafe >}} | @@ -6437,6 +6489,7 @@ _Appears in:_ | --- | --- | | `name` _string_ | {{< unsafe >}}Name is the human-readable name given to the project.{{< /unsafe >}} | | `allowedOperatingSystems` _[allowedOperatingSystems](#allowedoperatingsystems)_ | {{< unsafe >}}AllowedOperatingSystems defines a map of operating systems that can be used for the machines inside this project.{{< /unsafe >}} | +| `defaultTenantSpec` _[TenantSpec](#tenantspec)_ | {{< unsafe >}}DefaultTenantSpec{{< /unsafe >}} | [Back to top](#top) @@ -6512,7 +6565,6 @@ _Appears in:_ - [Kubevirt](#kubevirt) - [Nutanix](#nutanix) - [Openstack](#openstack) -- [Packet](#packet) - [VMwareCloudDirector](#vmwareclouddirector) - [VSphere](#vsphere) @@ -6563,6 +6615,46 @@ _Appears in:_ +### RegistryCredentials + + + + + +_Appears in:_ +- [RegistrySettings](#registrysettings) + +| Field | Description | +| --- | --- | +| `username` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Username references the secret containing the registry username credential.
The referenced Secret must exist in the KKP installation namespace (default: "kubermatic").{{< /unsafe >}} | +| `password` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Password references the secret containing the registry password credential.
The referenced Secret must exist in the KKP installation namespace (default: "kubermatic").{{< /unsafe >}} | +| `registryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}RegistryConfigFile references the secret containing the Docker registry configuration file.
The value must be a dockercfg file following the same format as ~/.docker/config.json.
The referenced Secret must exist in the KKP installation namespace (default: "kubermatic").{{< /unsafe >}} | + + +[Back to top](#top) + + + +### RegistrySettings + + + + + +_Appears in:_ +- [CatalogManagerConfiguration](#catalogmanagerconfiguration) + +| Field | Description | +| --- | --- | +| `registryURL` _string_ | {{< unsafe >}}RegistryURL specifies the OCI registry URL where ApplicationDefinitions are stored.
Example: oci://localhost:5000/myrepo{{< /unsafe >}} | +| `tag` _string_ | {{< unsafe >}}Tag specifies the version tag for ApplicationDefinitions in the OCI registry.
Example: v1.0.0{{< /unsafe >}} | +| `credentials` _[RegistryCredentials](#registrycredentials)_ | {{< unsafe >}}Credentials optionally references a secret containing Helm registry authentication credentials.
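A hedged sketch combining these fields; the surrounding key name under CatalogManagerConfiguration and the Secret name `registry-creds` are assumptions:

```yaml
registrySettings:
  registryURL: oci://localhost:5000/myrepo   # example value from the field docs
  tag: v1.0.0
  credentials:
    username:
      name: registry-creds   # hypothetical Secret in the KKP installation namespace
      key: username
    password:
      name: registry-creds
      key: password
```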
Either username/password or registryConfigFile can be specified, but not both.{{< /unsafe >}} | + + +[Back to top](#top) + + + ### ResourceDetails @@ -6590,7 +6682,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ResourceQuota` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[ResourceQuotaSpec](#resourcequotaspec)_ | {{< unsafe >}}Spec describes the desired state of the resource quota.{{< /unsafe >}} | | `status` _[ResourceQuotaStatus](#resourcequotastatus)_ | {{< unsafe >}}Status holds the current state of the resource quota.{{< /unsafe >}} | @@ -6611,7 +6703,7 @@ ResourceQuotaList is a collection of resource quotas. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `ResourceQuotaList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[ResourceQuota](#resourcequota) array_ | {{< unsafe >}}Items is the list of the resource quotas.{{< /unsafe >}} | @@ -6670,7 +6762,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `RuleGroup` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[RuleGroupSpec](#rulegroupspec)_ | {{< unsafe >}}{{< /unsafe >}} | @@ -6690,7 +6782,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `RuleGroupList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[RuleGroup](#rulegroup) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -6711,7 +6803,7 @@ _Appears in:_ | --- | --- | | `isDefault` _boolean_ | {{< unsafe >}}IsDefault indicates whether the ruleGroup is default{{< /unsafe >}} | | `ruleGroupType` _[RuleGroupType](#rulegrouptype)_ | {{< unsafe >}}RuleGroupType is the type of this ruleGroup applies to. It can be `Metrics` or `Logs`.{{< /unsafe >}} | -| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster the ruleGroup should be created in. All fields
except for the name are ignored.{{< /unsafe >}} | +| `cluster` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}Cluster is the reference to the cluster the ruleGroup should be created in. All fields
except for the name are ignored.{{< /unsafe >}} | | `data` _integer array_ | {{< unsafe >}}Data contains the RuleGroup data. Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group{{< /unsafe >}} | @@ -6784,7 +6876,7 @@ _Appears in:_ | --- | --- | | `name` _string_ | {{< unsafe >}}Identifier of a key, used in various places to refer to the key.{{< /unsafe >}} | | `value` _string_ | {{< unsafe >}}Value contains a 32-byte random key that is base64 encoded. This is the key used
for encryption. Can be generated via `head -c 32 /dev/urandom \| base64`, for example.{{< /unsafe >}} | -| `secretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}Instead of passing the sensitive encryption key via the `value` field, a secret can be
referenced. The key of the secret referenced here needs to hold a key equivalent to the `value` field.{{< /unsafe >}} | +| `secretRef` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}Instead of passing the sensitive encryption key via the `value` field, a secret can be
referenced. The key of the secret referenced here needs to hold a key equivalent to the `value` field.{{< /unsafe >}} | [Back to top](#top) @@ -6805,7 +6897,7 @@ _Appears in:_ | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `Seed` -| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `spec` _[SeedSpec](#seedspec)_ | {{< unsafe >}}Spec describes the configuration of the Seed cluster.{{< /unsafe >}} | | `status` _[SeedStatus](#seedstatus)_ | {{< unsafe >}}Status holds the runtime information of the Seed cluster.{{< /unsafe >}} | @@ -6825,9 +6917,9 @@ _Appears in:_ | Field | Description | | --- | --- | -| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | -| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | -| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | +| `status` _[ConditionStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#conditionstatus-v1-core)_ | {{< unsafe >}}Status of the condition, one of True, False, Unknown.{{< /unsafe >}} | +| `lastHeartbeatTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time we got an update on a given condition.{{< /unsafe >}} | +| `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}Last time the condition transit from one status to another.{{< /unsafe >}} | | `reason` _string_ | {{< unsafe >}}(brief) reason for the condition's last transition.{{< /unsafe >}} | | `message` _string_ | {{< unsafe >}}Human readable message indicating details about last transition.{{< /unsafe >}} | @@ -6861,7 +6953,7 @@ SeedDatacenterList is the type representing a SeedDatacenterList. | --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `SeedList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[Seed](#seed) array_ | {{< unsafe >}}List of seeds{{< /unsafe >}} | @@ -6911,14 +7003,14 @@ _Appears in:_ | --- | --- | | `country` _string_ | {{< unsafe >}}Optional: Country of the seed as ISO-3166 two-letter code, e.g. DE or UK.
For informational purposes in the Kubermatic dashboard only.{{< /unsafe >}} | | `location` _string_ | {{< unsafe >}}Optional: Detailed location of the cluster, like "Hamburg" or "Datacenter 7".
For informational purposes in the Kubermatic dashboard only.{{< /unsafe >}} | -| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectreference-v1-core)_ | {{< unsafe >}}A reference to the Kubeconfig of this cluster. The Kubeconfig must
have cluster-admin privileges. This field is mandatory for every
seed, even if there are no datacenters defined yet.{{< /unsafe >}} | +| `kubeconfig` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectreference-v1-core)_ | {{< unsafe >}}A reference to the Kubeconfig of this cluster. The Kubeconfig must
have cluster-admin privileges. This field is mandatory for every
seed, even if there are no datacenters defined yet.{{< /unsafe >}} | | `datacenters` _object (keys:string, values:[Datacenter](#datacenter))_ | {{< unsafe >}}Datacenters contains a map of the possible datacenters (DCs) in this seed.
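Putting the mandatory pieces together, a skeletal Seed could look like this; all names are illustrative, and the referenced Secret must hold a cluster-admin kubeconfig:

```yaml
apiVersion: kubermatic.k8c.io/v1
kind: Seed
metadata:
  name: europe-west    # hypothetical seed name
  namespace: kubermatic
spec:
  kubeconfig:
    name: kubeconfig-europe-west   # Secret containing the seed cluster kubeconfig
    namespace: kubermatic
  datacenters:
    hamburg-1:         # datacenter keys must be unique across all seeds
      country: DE
      location: Hamburg
      spec: {}         # provider-specific datacenter spec omitted in this sketch
```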
Each DC must have a globally unique identifier (i.e. names must be unique
across all seeds).{{< /unsafe >}} | | `seedDNSOverwrite` _string_ | {{< unsafe >}}Optional: This can be used to override the DNS name used for this seed.
By default the seed name is used.{{< /unsafe >}} | | `nodeportProxy` _[NodeportProxyConfig](#nodeportproxyconfig)_ | {{< unsafe >}}NodeportProxy can be used to configure the NodePort proxy service that is
responsible for making user-cluster control planes accessible from the outside.{{< /unsafe >}} | | `proxySettings` _[ProxySettings](#proxysettings)_ | {{< unsafe >}}Optional: ProxySettings can be used to configure HTTP proxy settings on the
worker nodes in user clusters. However, proxy settings on nodes take precedence.{{< /unsafe >}} | | `exposeStrategy` _[ExposeStrategy](#exposestrategy)_ | {{< unsafe >}}Optional: ExposeStrategy explicitly sets the expose strategy for this seed cluster, if not set, the default provided by the master is used.{{< /unsafe >}} | | `mla` _[SeedMLASettings](#seedmlasettings)_ | {{< unsafe >}}Optional: MLA allows configuring seed level MLA (Monitoring, Logging & Alerting) stack settings.{{< /unsafe >}} | -| `defaultComponentSettings` _[ComponentSettings](#componentsettings)_ | {{< unsafe >}}DefaultComponentSettings are default values to set for newly created clusters.
Deprecated: Use DefaultClusterTemplate instead.{{< /unsafe >}} | +| `defaultComponentSettings` _[ComponentSettings](#componentsettings)_ | {{< unsafe >}}DefaultComponentSettings are default values to set for newly created clusters.{{< /unsafe >}} | | `defaultClusterTemplate` _string_ | {{< unsafe >}}DefaultClusterTemplate is the name of a cluster template of scope "seed" that is used
to default all newly created clusters.{{< /unsafe >}} |
| `metering` _[MeteringConfiguration](#meteringconfiguration)_ | {{< unsafe >}}Metering configures the metering tool on user clusters across the seed.{{< /unsafe >}} |
| `etcdBackupRestore` _[EtcdBackupRestore](#etcdbackuprestore)_ | {{< unsafe >}}EtcdBackupRestore holds the configuration of the automatic etcd backup restores for the Seed;

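if this is set, the new backup/restore controllers are enabled for this Seed.{{< /unsafe >}} |

Taken together, the rows above describe a complete `SeedSpec`. A minimal sketch of a matching Seed manifest (the object names, the datacenter key, and the empty provider-specific datacenter spec are illustrative placeholders, not values from this diff):

```yaml
apiVersion: kubermatic.k8c.io/v1
kind: Seed
metadata:
  name: europe-west              # illustrative name
  namespace: kubermatic
spec:
  country: DE                    # informational only, ISO-3166 two-letter code
  location: "Datacenter 7"       # informational only
  kubeconfig:                    # mandatory ObjectReference to a Secret holding
    name: kubeconfig-europe-west # a cluster-admin kubeconfig, required even
    namespace: kubermatic        # if no datacenters are defined yet
  datacenters:
    hamburg-dc7:                 # key must be unique across ALL seeds
      country: DE
      spec: {}                   # provider-specific Datacenter configuration
```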
@@ -7058,8 +7150,8 @@ _Appears in:_
| Field | Description |
| --- | --- |
| `replicas` _integer_ | {{< unsafe >}}{{< /unsafe >}} |
-| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} |
-| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} |
+| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#resourcerequirements-v1-core)_ | {{< unsafe >}}{{< /unsafe >}} |
+| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#toleration-v1-core) array_ | {{< unsafe >}}{{< /unsafe >}} |

[Back to top](#top)

@@ -7150,7 +7242,7 @@ _Appears in:_
| Field | Description |
| --- | --- |
| `helmRepository` _string_ | {{< unsafe >}}HelmRepository specifies OCI repository containing Helm charts of system Applications e.g. oci://localhost:5000/myrepo.{{< /unsafe >}} |
-| `helmRegistryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core)_ | {{< unsafe >}}HelmRegistryConfigFile optionally holds the ref and key in the secret for the OCI registry credential file.

The value is a dockercfg file that follows the same format rules as ~/.docker/config.json

The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm".{{< /unsafe >}} | +| `helmRegistryConfigFile` _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#secretkeyselector-v1-core)_ | {{< unsafe >}}HelmRegistryConfigFile optionally holds the ref and key in the secret for the OCI registry credential file.
The value is a dockercfg file that follows the same format rules as ~/.docker/config.json

The Secret must exist in the namespace where KKP is installed (default is "kubermatic").
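The Secret must be annotated with `apps.kubermatic.k8c.io/secret-type:` set to "helm".{{< /unsafe >}} |

[Back to top](#top)

Per the `helmRegistryConfigFile` description above, the referenced Secret lives in the KKP installation namespace and carries the `helm` secret-type annotation. A minimal sketch (the Secret name, key, registry host, and credentials are illustrative placeholders):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: oci-registry-creds                      # illustrative name
  namespace: kubermatic                         # KKP installation namespace
  annotations:
    apps.kubermatic.k8c.io/secret-type: "helm"  # required annotation
stringData:
  # Key to point helmRegistryConfigFile at; dockercfg format,
  # same rules as ~/.docker/config.json.
  .dockerconfigjson: |
    {"auths": {"localhost:5000": {"auth": "dXNlcjpwYXNz"}}}
```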
@@ -7245,8 +7337,8 @@ _Appears in:_
| --- | --- |
| `from` _string_ | {{< unsafe >}}From is the version from which an update is allowed. Wildcards are allowed, e.g. "1.18.*".{{< /unsafe >}} |
| `to` _string_ | {{< unsafe >}}To is the version to which an update is allowed.

Must be a valid version if `automatic` is set to true, e.g. "1.20.13".
Can be a wildcard otherwise, e.g. "1.20.*".{{< /unsafe >}} | -| `automatic` _boolean_ | {{< unsafe >}}Automatic controls whether this update is executed automatically
for the control plane of all matching user clusters.
---{{< /unsafe >}} | -| `automaticNodeUpdate` _boolean_ | {{< unsafe >}}Automatic controls whether this update is executed automatically
for the worker nodes of all matching user clusters.
---{{< /unsafe >}} | +| `automatic` _boolean_ | {{< unsafe >}}Automatic controls whether this update is executed automatically
for the control plane of all matching user clusters.{{< /unsafe >}} | +| `automaticNodeUpdate` _boolean_ | {{< unsafe >}}Automatic controls whether this update is executed automatically
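for the worker nodes of all matching user clusters.{{< /unsafe >}} |

[Back to top](#top)

A `from`/`to` pair plus the two `automatic*` flags form one update rule. A hedged sketch, assuming the rules sit under `spec.versions.updates` of a KubermaticConfiguration (the versions shown are illustrative):

```yaml
spec:
  versions:
    updates:
      - from: "1.20.*"            # wildcard source version
        to: "1.20.13"             # must be a concrete version because
        automatic: true           # this rule is applied automatically
      - from: "1.20.*"
        to: "1.21.*"              # wildcards are fine for manual updates
        automaticNodeUpdate: false
```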
@@ -7289,7 +7381,7 @@ _Appears in:_
| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `User`
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `spec` _[UserSpec](#userspec)_ | {{< unsafe >}}Spec describes a KKP user.{{< /unsafe >}} |
| `status` _[UserStatus](#userstatus)_ | {{< unsafe >}}Status holds the information about the KKP user.{{< /unsafe >}} |

@@ -7310,7 +7402,7 @@ UserList is a list of users.
| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `UserList`
-| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `items` _[User](#user) array_ | {{< unsafe >}}Items is the list of KKP users.{{< /unsafe >}} |

@@ -7332,7 +7424,7 @@ _Appears in:_
| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `UserProjectBinding`
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `spec` _[UserProjectBindingSpec](#userprojectbindingspec)_ | {{< unsafe >}}Spec describes a KKP user and project binding.{{< /unsafe >}} |

@@ -7352,7 +7444,7 @@ UserProjectBindingList is a list of KKP user and project bindings.
| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `UserProjectBindingList`
-| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `items` _[UserProjectBinding](#userprojectbinding) array_ | {{< unsafe >}}Items is the list of KKP user and project bindings.{{< /unsafe >}} |

@@ -7393,7 +7485,7 @@ _Appears in:_
| --- | --- |
| `apiVersion` _string_ | `kubermatic.k8c.io/v1`
| `kind` _string_ | `UserSSHKey`
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. |
| `spec` _[SSHKeySpec](#sshkeyspec)_ | {{< unsafe >}}{{< /unsafe >}} |

@@ -7413,7 +7505,7 @@ UserSSHKeyList specifies a user's UserSSHKey. 

| --- | --- | | `apiVersion` _string_ | `kubermatic.k8c.io/v1` | `kind` _string_ | `UserSSHKeyList` -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | `items` _[UserSSHKey](#usersshkey) array_ | {{< unsafe >}}{{< /unsafe >}} | @@ -7484,7 +7576,7 @@ _Appears in:_ | Field | Description | | --- | --- | -| `lastSeen` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | +| `lastSeen` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#time-v1-meta)_ | {{< unsafe >}}{{< /unsafe >}} | [Back to top](#top) @@ -7696,7 +7788,7 @@ _Appears in:_ | --- | --- | | `enabled` _boolean_ | {{< unsafe >}}Enabled enables the Web Terminal feature for the user clusters.{{< /unsafe >}} | | `enableInternetAccess` _boolean_ | {{< unsafe >}}EnableInternetAccess enables the Web Terminal feature to access the internet.{{< /unsafe >}} | -| `additionalEnvironmentVariables` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#envvar-v1-core) array_ | {{< unsafe >}}AdditionalEnvironmentVariables are the additional environment variables that can be set for the Web Terminal.{{< /unsafe >}} | +| `additionalEnvironmentVariables` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#envvar-v1-core) array_ | {{< unsafe >}}AdditionalEnvironmentVariables are the additional environment variables that can be set for the Web Terminal.{{< /unsafe >}} | [Back to top](#top) diff --git a/content/kubermatic/main/references/rest-api-reference/index.html b/content/kubermatic/main/references/rest-api-reference/index.html index 0c467c22c..019859951 100644 --- a/content/kubermatic/main/references/rest-api-reference/index.html +++ b/content/kubermatic/main/references/rest-api-reference/index.html @@ -11,7 +11,7 @@ - + + + + diff --git a/content/kubermatic/v2.29/references/rest-api-reference/swagger-ui-bundle.js b/content/kubermatic/v2.29/references/rest-api-reference/swagger-ui-bundle.js new file mode 100644 index 000000000..3474100df --- /dev/null +++ b/content/kubermatic/v2.29/references/rest-api-reference/swagger-ui-bundle.js @@ -0,0 +1,3 @@ +/*! 
For license information please see swagger-ui-bundle.js.LICENSE.txt */ +!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(function(){try{return require("esprima")}catch(e){}}()):"function"==typeof define&&define.amd?define(["esprima"],t):"object"==typeof exports?exports.SwaggerUIBundle=t(function(){try{return require("esprima")}catch(e){}}()):e.SwaggerUIBundle=t(e.esprima)}(this,(function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="/dist",n(n.s=549)}([function(e,t,n){"use strict";e.exports=n(129)},function(e,t,n){e.exports=function(){"use strict";var e=Array.prototype.slice;function t(e,t){t&&(e.prototype=Object.create(t.prototype)),e.prototype.constructor=e}function n(e){return i(e)?e:$(e)}function r(e){return s(e)?e:K(e)}function o(e){return u(e)?e:Y(e)}function a(e){return i(e)&&!c(e)?e:G(e)}function i(e){return!(!e||!e[p])}function s(e){return!(!e||!e[f])}function u(e){return!(!e||!e[h])}function c(e){return s(e)||u(e)}function l(e){return!(!e||!e[d])}t(r,n),t(o,n),t(a,n),n.isIterable=i,n.isKeyed=s,n.isIndexed=u,n.isAssociative=c,n.isOrdered=l,n.Keyed=r,n.Indexed=o,n.Set=a;var p="@@__IMMUTABLE_ITERABLE__@@",f="@@__IMMUTABLE_KEYED__@@",h="@@__IMMUTABLE_INDEXED__@@",d="@@__IMMUTABLE_ORDERED__@@",m="delete",v=5,g=1<>>0;if(""+n!==t||4294967295===n)return NaN;t=n}return t<0?A(e)+t:t}function O(){return!0}function j(e,t,n){return(0===e||void 0!==n&&e<=-n)&&(void 0===t||void 0!==n&&t>=n)}function T(e,t){return P(e,t,0)}function I(e,t){return P(e,t,t)}function P(e,t,n){return void 0===e?n:e<0?Math.max(0,t+e):void 0===t?e:Math.min(t,e)}var N=0,M=1,R=2,D="function"==typeof Symbol&&Symbol.iterator,L="@@iterator",B=D||L;function F(e){this.next=e}function U(e,t,n,r){var o=0===e?t:1===e?n:[t,n];return r?r.value=o:r={value:o,done:!1},r}function q(){return{value:void 0,done:!0}}function z(e){return!!H(e)}function V(e){return e&&"function"==typeof e.next}function W(e){var t=H(e);return t&&t.call(e)}function H(e){var t=e&&(D&&e[D]||e[L]);if("function"==typeof t)return t}function J(e){return e&&"number"==typeof e.length}function $(e){return null==e?ie():i(e)?e.toSeq():ce(e)}function K(e){return null==e?ie().toKeyedSeq():i(e)?s(e)?e.toSeq():e.fromEntrySeq():se(e)}function Y(e){return null==e?ie():i(e)?s(e)?e.entrySeq():e.toIndexedSeq():ue(e)}function G(e){return(null==e?ie():i(e)?s(e)?e.entrySeq():e:ue(e)).toSetSeq()}F.prototype.toString=function(){return"[Iterator]"},F.KEYS=N,F.VALUES=M,F.ENTRIES=R,F.prototype.inspect=F.prototype.toSource=function(){return this.toString()},F.prototype[B]=function(){return this},t($,n),$.of=function(){return $(arguments)},$.prototype.toSeq=function(){return 
this},$.prototype.toString=function(){return this.__toString("Seq {","}")},$.prototype.cacheResult=function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},$.prototype.__iterate=function(e,t){return pe(this,e,t,!0)},$.prototype.__iterator=function(e,t){return fe(this,e,t,!0)},t(K,$),K.prototype.toKeyedSeq=function(){return this},t(Y,$),Y.of=function(){return Y(arguments)},Y.prototype.toIndexedSeq=function(){return this},Y.prototype.toString=function(){return this.__toString("Seq [","]")},Y.prototype.__iterate=function(e,t){return pe(this,e,t,!1)},Y.prototype.__iterator=function(e,t){return fe(this,e,t,!1)},t(G,$),G.of=function(){return G(arguments)},G.prototype.toSetSeq=function(){return this},$.isSeq=ae,$.Keyed=K,$.Set=G,$.Indexed=Y;var Z,X,Q,ee="@@__IMMUTABLE_SEQ__@@";function te(e){this._array=e,this.size=e.length}function ne(e){var t=Object.keys(e);this._object=e,this._keys=t,this.size=t.length}function re(e){this._iterable=e,this.size=e.length||e.size}function oe(e){this._iterator=e,this._iteratorCache=[]}function ae(e){return!(!e||!e[ee])}function ie(){return Z||(Z=new te([]))}function se(e){var t=Array.isArray(e)?new te(e).fromEntrySeq():V(e)?new oe(e).fromEntrySeq():z(e)?new re(e).fromEntrySeq():"object"==typeof e?new ne(e):void 0;if(!t)throw new TypeError("Expected Array or iterable object of [k, v] entries, or keyed object: "+e);return t}function ue(e){var t=le(e);if(!t)throw new TypeError("Expected Array or iterable object of values: "+e);return t}function ce(e){var t=le(e)||"object"==typeof e&&new ne(e);if(!t)throw new TypeError("Expected Array or iterable object of values, or keyed object: "+e);return t}function le(e){return J(e)?new te(e):V(e)?new oe(e):z(e)?new re(e):void 0}function pe(e,t,n,r){var o=e._cache;if(o){for(var a=o.length-1,i=0;i<=a;i++){var s=o[n?a-i:i];if(!1===t(s[1],r?s[0]:i,e))return i+1}return i}return e.__iterateUncached(t,n)}function fe(e,t,n,r){var o=e._cache;if(o){var a=o.length-1,i=0;return new F((function(){var e=o[n?a-i:i];return i++>a?q():U(t,r?e[0]:i-1,e[1])}))}return e.__iteratorUncached(t,n)}function he(e,t){return t?de(t,e,"",{"":e}):me(e)}function de(e,t,n,r){return Array.isArray(t)?e.call(r,n,Y(t).map((function(n,r){return de(e,n,r,t)}))):ve(t)?e.call(r,n,K(t).map((function(n,r){return de(e,n,r,t)}))):t}function me(e){return Array.isArray(e)?Y(e).map(me).toList():ve(e)?K(e).map(me).toMap():e}function ve(e){return e&&(e.constructor===Object||void 0===e.constructor)}function ge(e,t){if(e===t||e!=e&&t!=t)return!0;if(!e||!t)return!1;if("function"==typeof e.valueOf&&"function"==typeof t.valueOf){if((e=e.valueOf())===(t=t.valueOf())||e!=e&&t!=t)return!0;if(!e||!t)return!1}return!("function"!=typeof e.equals||"function"!=typeof t.equals||!e.equals(t))}function ye(e,t){if(e===t)return!0;if(!i(t)||void 0!==e.size&&void 0!==t.size&&e.size!==t.size||void 0!==e.__hash&&void 0!==t.__hash&&e.__hash!==t.__hash||s(e)!==s(t)||u(e)!==u(t)||l(e)!==l(t))return!1;if(0===e.size&&0===t.size)return!0;var n=!c(e);if(l(e)){var r=e.entries();return t.every((function(e,t){var o=r.next().value;return o&&ge(o[1],e)&&(n||ge(o[0],t))}))&&r.next().done}var o=!1;if(void 0===e.size)if(void 0===t.size)"function"==typeof e.cacheResult&&e.cacheResult();else{o=!0;var a=e;e=t,t=a}var p=!0,f=t.__iterate((function(t,r){if(n?!e.has(t):o?!ge(t,e.get(r,b)):!ge(e.get(r,b),t))return p=!1,!1}));return p&&e.size===f}function be(e,t){if(!(this instanceof be))return new be(e,t);if(this._value=e,this.size=void 
0===t?1/0:Math.max(0,t),0===this.size){if(X)return X;X=this}}function _e(e,t){if(!e)throw new Error(t)}function we(e,t,n){if(!(this instanceof we))return new we(e,t,n);if(_e(0!==n,"Cannot step a Range by 0"),e=e||0,void 0===t&&(t=1/0),n=void 0===n?1:Math.abs(n),tr?q():U(e,o,n[t?r-o++:o++])}))},t(ne,K),ne.prototype.get=function(e,t){return void 0===t||this.has(e)?this._object[e]:t},ne.prototype.has=function(e){return this._object.hasOwnProperty(e)},ne.prototype.__iterate=function(e,t){for(var n=this._object,r=this._keys,o=r.length-1,a=0;a<=o;a++){var i=r[t?o-a:a];if(!1===e(n[i],i,this))return a+1}return a},ne.prototype.__iterator=function(e,t){var n=this._object,r=this._keys,o=r.length-1,a=0;return new F((function(){var i=r[t?o-a:a];return a++>o?q():U(e,i,n[i])}))},ne.prototype[d]=!0,t(re,Y),re.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);var n=W(this._iterable),r=0;if(V(n))for(var o;!(o=n.next()).done&&!1!==e(o.value,r++,this););return r},re.prototype.__iteratorUncached=function(e,t){if(t)return this.cacheResult().__iterator(e,t);var n=W(this._iterable);if(!V(n))return new F(q);var r=0;return new F((function(){var t=n.next();return t.done?t:U(e,r++,t.value)}))},t(oe,Y),oe.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);for(var n,r=this._iterator,o=this._iteratorCache,a=0;a=r.length){var t=n.next();if(t.done)return t;r[o]=t.value}return U(e,o,r[o++])}))},t(be,Y),be.prototype.toString=function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},be.prototype.get=function(e,t){return this.has(e)?this._value:t},be.prototype.includes=function(e){return ge(this._value,e)},be.prototype.slice=function(e,t){var n=this.size;return j(e,t,n)?this:new be(this._value,I(t,n)-T(e,n))},be.prototype.reverse=function(){return this},be.prototype.indexOf=function(e){return ge(this._value,e)?0:-1},be.prototype.lastIndexOf=function(e){return ge(this._value,e)?this.size:-1},be.prototype.__iterate=function(e,t){for(var n=0;n=0&&t=0&&nn?q():U(e,a++,i)}))},we.prototype.equals=function(e){return e instanceof we?this._start===e._start&&this._end===e._end&&this._step===e._step:ye(this,e)},t(xe,n),t(Ee,xe),t(Se,xe),t(Ce,xe),xe.Keyed=Ee,xe.Indexed=Se,xe.Set=Ce;var Ae="function"==typeof Math.imul&&-2===Math.imul(4294967295,2)?Math.imul:function(e,t){var n=65535&(e|=0),r=65535&(t|=0);return n*r+((e>>>16)*r+n*(t>>>16)<<16>>>0)|0};function ke(e){return e>>>1&1073741824|3221225471&e}function Oe(e){if(!1===e||null==e)return 0;if("function"==typeof e.valueOf&&(!1===(e=e.valueOf())||null==e))return 0;if(!0===e)return 1;var t=typeof e;if("number"===t){if(e!=e||e===1/0)return 0;var n=0|e;for(n!==e&&(n^=4294967295*e);e>4294967295;)n^=e/=4294967295;return ke(n)}if("string"===t)return e.length>Fe?je(e):Te(e);if("function"==typeof e.hashCode)return e.hashCode();if("object"===t)return Ie(e);if("function"==typeof e.toString)return Te(e.toString());throw new Error("Value type "+t+" cannot be hashed.")}function je(e){var t=ze[e];return void 0===t&&(t=Te(e),qe===Ue&&(qe=0,ze={}),qe++,ze[e]=t),t}function Te(e){for(var t=0,n=0;n0)switch(e.nodeType){case 1:return e.uniqueID;case 9:return e.documentElement&&e.documentElement.uniqueID}}var Re,De="function"==typeof WeakMap;De&&(Re=new WeakMap);var Le=0,Be="__immutablehash__";"function"==typeof Symbol&&(Be=Symbol(Be));var Fe=16,Ue=255,qe=0,ze={};function Ve(e){_e(e!==1/0,"Cannot perform this action with an infinite size.")}function We(e){return 
null==e?ot():He(e)&&!l(e)?e:ot().withMutations((function(t){var n=r(e);Ve(n.size),n.forEach((function(e,n){return t.set(n,e)}))}))}function He(e){return!(!e||!e[$e])}t(We,Ee),We.of=function(){var t=e.call(arguments,0);return ot().withMutations((function(e){for(var n=0;n=t.length)throw new Error("Missing value for key: "+t[n]);e.set(t[n],t[n+1])}}))},We.prototype.toString=function(){return this.__toString("Map {","}")},We.prototype.get=function(e,t){return this._root?this._root.get(0,void 0,e,t):t},We.prototype.set=function(e,t){return at(this,e,t)},We.prototype.setIn=function(e,t){return this.updateIn(e,b,(function(){return t}))},We.prototype.remove=function(e){return at(this,e,b)},We.prototype.deleteIn=function(e){return this.updateIn(e,(function(){return b}))},We.prototype.update=function(e,t,n){return 1===arguments.length?e(this):this.updateIn([e],t,n)},We.prototype.updateIn=function(e,t,n){n||(n=t,t=void 0);var r=vt(this,xn(e),t,n);return r===b?void 0:r},We.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):ot()},We.prototype.merge=function(){return ft(this,void 0,arguments)},We.prototype.mergeWith=function(t){return ft(this,t,e.call(arguments,1))},We.prototype.mergeIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,ot(),(function(e){return"function"==typeof e.merge?e.merge.apply(e,n):n[n.length-1]}))},We.prototype.mergeDeep=function(){return ft(this,ht,arguments)},We.prototype.mergeDeepWith=function(t){var n=e.call(arguments,1);return ft(this,dt(t),n)},We.prototype.mergeDeepIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,ot(),(function(e){return"function"==typeof e.mergeDeep?e.mergeDeep.apply(e,n):n[n.length-1]}))},We.prototype.sort=function(e){return zt(pn(this,e))},We.prototype.sortBy=function(e,t){return zt(pn(this,t,e))},We.prototype.withMutations=function(e){var t=this.asMutable();return e(t),t.wasAltered()?t.__ensureOwner(this.__ownerID):this},We.prototype.asMutable=function(){return this.__ownerID?this:this.__ensureOwner(new S)},We.prototype.asImmutable=function(){return this.__ensureOwner()},We.prototype.wasAltered=function(){return this.__altered},We.prototype.__iterator=function(e,t){return new et(this,e,t)},We.prototype.__iterate=function(e,t){var n=this,r=0;return this._root&&this._root.iterate((function(t){return r++,e(t[1],t[0],n)}),t),r},We.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?rt(this.size,this._root,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},We.isMap=He;var Je,$e="@@__IMMUTABLE_MAP__@@",Ke=We.prototype;function Ye(e,t){this.ownerID=e,this.entries=t}function Ge(e,t,n){this.ownerID=e,this.bitmap=t,this.nodes=n}function Ze(e,t,n){this.ownerID=e,this.count=t,this.nodes=n}function Xe(e,t,n){this.ownerID=e,this.keyHash=t,this.entries=n}function Qe(e,t,n){this.ownerID=e,this.keyHash=t,this.entry=n}function et(e,t,n){this._type=t,this._reverse=n,this._stack=e._root&&nt(e._root)}function tt(e,t){return U(e,t[0],t[1])}function nt(e,t){return{node:e,index:0,__prev:t}}function rt(e,t,n,r){var o=Object.create(Ke);return o.size=e,o._root=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function ot(){return Je||(Je=rt(0))}function at(e,t,n){var r,o;if(e._root){var a=x(_),i=x(w);if(r=it(e._root,e.__ownerID,0,void 0,t,n,a,i),!i.value)return e;o=e.size+(a.value?n===b?-1:1:0)}else{if(n===b)return e;o=1,r=new Ye(e.__ownerID,[[t,n]])}return e.__ownerID?(e.size=o,e._root=r,e.__hash=void 0,e.__altered=!0,e):r?rt(o,r):ot()}function 
it(e,t,n,r,o,a,i,s){return e?e.update(t,n,r,o,a,i,s):a===b?e:(E(s),E(i),new Qe(t,r,[o,a]))}function st(e){return e.constructor===Qe||e.constructor===Xe}function ut(e,t,n,r,o){if(e.keyHash===r)return new Xe(t,r,[e.entry,o]);var a,i=(0===n?e.keyHash:e.keyHash>>>n)&y,s=(0===n?r:r>>>n)&y;return new Ge(t,1<>>=1)i[s]=1&n?t[a++]:void 0;return i[r]=o,new Ze(e,a+1,i)}function ft(e,t,n){for(var o=[],a=0;a>1&1431655765))+(e>>2&858993459))+(e>>4)&252645135,e+=e>>8,127&(e+=e>>16)}function yt(e,t,n,r){var o=r?e:C(e);return o[t]=n,o}function bt(e,t,n,r){var o=e.length+1;if(r&&t+1===o)return e[t]=n,e;for(var a=new Array(o),i=0,s=0;s=wt)return ct(e,u,r,o);var f=e&&e===this.ownerID,h=f?u:C(u);return p?s?c===l-1?h.pop():h[c]=h.pop():h[c]=[r,o]:h.push([r,o]),f?(this.entries=h,this):new Ye(e,h)}},Ge.prototype.get=function(e,t,n,r){void 0===t&&(t=Oe(n));var o=1<<((0===e?t:t>>>e)&y),a=this.bitmap;return 0==(a&o)?r:this.nodes[gt(a&o-1)].get(e+v,t,n,r)},Ge.prototype.update=function(e,t,n,r,o,a,i){void 0===n&&(n=Oe(r));var s=(0===t?n:n>>>t)&y,u=1<=xt)return pt(e,f,c,s,d);if(l&&!d&&2===f.length&&st(f[1^p]))return f[1^p];if(l&&d&&1===f.length&&st(d))return d;var m=e&&e===this.ownerID,g=l?d?c:c^u:c|u,_=l?d?yt(f,p,d,m):_t(f,p,m):bt(f,p,d,m);return m?(this.bitmap=g,this.nodes=_,this):new Ge(e,g,_)},Ze.prototype.get=function(e,t,n,r){void 0===t&&(t=Oe(n));var o=(0===e?t:t>>>e)&y,a=this.nodes[o];return a?a.get(e+v,t,n,r):r},Ze.prototype.update=function(e,t,n,r,o,a,i){void 0===n&&(n=Oe(r));var s=(0===t?n:n>>>t)&y,u=o===b,c=this.nodes,l=c[s];if(u&&!l)return this;var p=it(l,e,t+v,n,r,o,a,i);if(p===l)return this;var f=this.count;if(l){if(!p&&--f0&&r=0&&e>>t&y;if(r>=this.array.length)return new Ot([],e);var o,a=0===r;if(t>0){var i=this.array[r];if((o=i&&i.removeBefore(e,t-v,n))===i&&a)return this}if(a&&!o)return this;var s=Lt(this,e);if(!a)for(var u=0;u>>t&y;if(o>=this.array.length)return this;if(t>0){var a=this.array[o];if((r=a&&a.removeAfter(e,t-v,n))===a&&o===this.array.length-1)return this}var i=Lt(this,e);return i.array.splice(o+1),r&&(i.array[o]=r),i};var jt,Tt,It={};function Pt(e,t){var n=e._origin,r=e._capacity,o=qt(r),a=e._tail;return i(e._root,e._level,0);function i(e,t,n){return 0===t?s(e,n):u(e,t,n)}function s(e,i){var s=i===o?a&&a.array:e&&e.array,u=i>n?0:n-i,c=r-i;return c>g&&(c=g),function(){if(u===c)return It;var e=t?--c:u++;return s&&s[e]}}function u(e,o,a){var s,u=e&&e.array,c=a>n?0:n-a>>o,l=1+(r-a>>o);return l>g&&(l=g),function(){for(;;){if(s){var e=s();if(e!==It)return e;s=null}if(c===l)return It;var n=t?--l:c++;s=i(u&&u[n],o-v,a+(n<=e.size||t<0)return e.withMutations((function(e){t<0?Ft(e,t).set(0,n):Ft(e,0,t+1).set(t,n)}));t+=e._origin;var r=e._tail,o=e._root,a=x(w);return t>=qt(e._capacity)?r=Dt(r,e.__ownerID,0,t,n,a):o=Dt(o,e.__ownerID,e._level,t,n,a),a.value?e.__ownerID?(e._root=o,e._tail=r,e.__hash=void 0,e.__altered=!0,e):Nt(e._origin,e._capacity,e._level,o,r):e}function Dt(e,t,n,r,o,a){var i,s=r>>>n&y,u=e&&s0){var c=e&&e.array[s],l=Dt(c,t,n-v,r,o,a);return l===c?e:((i=Lt(e,t)).array[s]=l,i)}return u&&e.array[s]===o?e:(E(a),i=Lt(e,t),void 0===o&&s===i.array.length-1?i.array.pop():i.array[s]=o,i)}function Lt(e,t){return t&&e&&t===e.ownerID?e:new Ot(e?e.array.slice():[],t)}function Bt(e,t){if(t>=qt(e._capacity))return e._tail;if(t<1<0;)n=n.array[t>>>r&y],r-=v;return n}}function Ft(e,t,n){void 0!==t&&(t|=0),void 0!==n&&(n|=0);var r=e.__ownerID||new S,o=e._origin,a=e._capacity,i=o+t,s=void 0===n?a:n<0?a+n:o+n;if(i===o&&s===a)return e;if(i>=s)return e.clear();for(var u=e._level,c=e._root,l=0;i+l<0;)c=new 
Ot(c&&c.array.length?[void 0,c]:[],r),l+=1<<(u+=v);l&&(i+=l,o+=l,s+=l,a+=l);for(var p=qt(a),f=qt(s);f>=1<p?new Ot([],r):h;if(h&&f>p&&iv;g-=v){var b=p>>>g&y;m=m.array[b]=Lt(m.array[b],r)}m.array[p>>>v&y]=h}if(s=f)i-=f,s-=f,u=v,c=null,d=d&&d.removeBefore(r,0,i);else if(i>o||f>>u&y;if(_!==f>>>u&y)break;_&&(l+=(1<o&&(c=c.removeBefore(r,u,i-l)),c&&fa&&(a=c.size),i(u)||(c=c.map((function(e){return he(e)}))),r.push(c)}return a>e.size&&(e=e.setSize(a)),mt(e,t,r)}function qt(e){return e>>v<=g&&i.size>=2*a.size?(r=(o=i.filter((function(e,t){return void 0!==e&&s!==t}))).toKeyedSeq().map((function(e){return e[0]})).flip().toMap(),e.__ownerID&&(r.__ownerID=o.__ownerID=e.__ownerID)):(r=a.remove(t),o=s===i.size-1?i.pop():i.set(s,void 0))}else if(u){if(n===i.get(s)[1])return e;r=a,o=i.set(s,[t,n])}else r=a.set(t,i.size),o=i.set(i.size,[t,n]);return e.__ownerID?(e.size=r.size,e._map=r,e._list=o,e.__hash=void 0,e):Wt(r,o)}function $t(e,t){this._iter=e,this._useKeys=t,this.size=e.size}function Kt(e){this._iter=e,this.size=e.size}function Yt(e){this._iter=e,this.size=e.size}function Gt(e){this._iter=e,this.size=e.size}function Zt(e){var t=bn(e);return t._iter=e,t.size=e.size,t.flip=function(){return e},t.reverse=function(){var t=e.reverse.apply(this);return t.flip=function(){return e.reverse()},t},t.has=function(t){return e.includes(t)},t.includes=function(t){return e.has(t)},t.cacheResult=_n,t.__iterateUncached=function(t,n){var r=this;return e.__iterate((function(e,n){return!1!==t(n,e,r)}),n)},t.__iteratorUncached=function(t,n){if(t===R){var r=e.__iterator(t,n);return new F((function(){var e=r.next();if(!e.done){var t=e.value[0];e.value[0]=e.value[1],e.value[1]=t}return e}))}return e.__iterator(t===M?N:M,n)},t}function Xt(e,t,n){var r=bn(e);return r.size=e.size,r.has=function(t){return e.has(t)},r.get=function(r,o){var a=e.get(r,b);return a===b?o:t.call(n,a,r,e)},r.__iterateUncached=function(r,o){var a=this;return e.__iterate((function(e,o,i){return!1!==r(t.call(n,e,o,i),o,a)}),o)},r.__iteratorUncached=function(r,o){var a=e.__iterator(R,o);return new F((function(){var o=a.next();if(o.done)return o;var i=o.value,s=i[0];return U(r,s,t.call(n,i[1],s,e),o)}))},r}function Qt(e,t){var n=bn(e);return n._iter=e,n.size=e.size,n.reverse=function(){return e},e.flip&&(n.flip=function(){var t=Zt(e);return t.reverse=function(){return e.flip()},t}),n.get=function(n,r){return e.get(t?n:-1-n,r)},n.has=function(n){return e.has(t?n:-1-n)},n.includes=function(t){return e.includes(t)},n.cacheResult=_n,n.__iterate=function(t,n){var r=this;return e.__iterate((function(e,n){return t(e,n,r)}),!n)},n.__iterator=function(t,n){return e.__iterator(t,!n)},n}function en(e,t,n,r){var o=bn(e);return r&&(o.has=function(r){var o=e.get(r,b);return o!==b&&!!t.call(n,o,r,e)},o.get=function(r,o){var a=e.get(r,b);return a!==b&&t.call(n,a,r,e)?a:o}),o.__iterateUncached=function(o,a){var i=this,s=0;return e.__iterate((function(e,a,u){if(t.call(n,e,a,u))return s++,o(e,r?a:s-1,i)}),a),s},o.__iteratorUncached=function(o,a){var i=e.__iterator(R,a),s=0;return new F((function(){for(;;){var a=i.next();if(a.done)return a;var u=a.value,c=u[0],l=u[1];if(t.call(n,l,c,e))return U(o,r?c:s++,l,a)}}))},o}function tn(e,t,n){var r=We().asMutable();return e.__iterate((function(o,a){r.update(t.call(n,o,a,e),0,(function(e){return e+1}))})),r.asImmutable()}function nn(e,t,n){var r=s(e),o=(l(e)?zt():We()).asMutable();e.__iterate((function(a,i){o.update(t.call(n,a,i,e),(function(e){return(e=e||[]).push(r?[i,a]:a),e}))}));var a=yn(e);return o.map((function(t){return 
mn(e,a(t))}))}function rn(e,t,n,r){var o=e.size;if(void 0!==t&&(t|=0),void 0!==n&&(n===1/0?n=o:n|=0),j(t,n,o))return e;var a=T(t,o),i=I(n,o);if(a!=a||i!=i)return rn(e.toSeq().cacheResult(),t,n,r);var s,u=i-a;u==u&&(s=u<0?0:u);var c=bn(e);return c.size=0===s?s:e.size&&s||void 0,!r&&ae(e)&&s>=0&&(c.get=function(t,n){return(t=k(this,t))>=0&&ts)return q();var e=o.next();return r||t===M?e:U(t,u-1,t===N?void 0:e.value[1],e)}))},c}function on(e,t,n){var r=bn(e);return r.__iterateUncached=function(r,o){var a=this;if(o)return this.cacheResult().__iterate(r,o);var i=0;return e.__iterate((function(e,o,s){return t.call(n,e,o,s)&&++i&&r(e,o,a)})),i},r.__iteratorUncached=function(r,o){var a=this;if(o)return this.cacheResult().__iterator(r,o);var i=e.__iterator(R,o),s=!0;return new F((function(){if(!s)return q();var e=i.next();if(e.done)return e;var o=e.value,u=o[0],c=o[1];return t.call(n,c,u,a)?r===R?e:U(r,u,c,e):(s=!1,q())}))},r}function an(e,t,n,r){var o=bn(e);return o.__iterateUncached=function(o,a){var i=this;if(a)return this.cacheResult().__iterate(o,a);var s=!0,u=0;return e.__iterate((function(e,a,c){if(!s||!(s=t.call(n,e,a,c)))return u++,o(e,r?a:u-1,i)})),u},o.__iteratorUncached=function(o,a){var i=this;if(a)return this.cacheResult().__iterator(o,a);var s=e.__iterator(R,a),u=!0,c=0;return new F((function(){var e,a,l;do{if((e=s.next()).done)return r||o===M?e:U(o,c++,o===N?void 0:e.value[1],e);var p=e.value;a=p[0],l=p[1],u&&(u=t.call(n,l,a,i))}while(u);return o===R?e:U(o,a,l,e)}))},o}function sn(e,t){var n=s(e),o=[e].concat(t).map((function(e){return i(e)?n&&(e=r(e)):e=n?se(e):ue(Array.isArray(e)?e:[e]),e})).filter((function(e){return 0!==e.size}));if(0===o.length)return e;if(1===o.length){var a=o[0];if(a===e||n&&s(a)||u(e)&&u(a))return a}var c=new te(o);return n?c=c.toKeyedSeq():u(e)||(c=c.toSetSeq()),(c=c.flatten(!0)).size=o.reduce((function(e,t){if(void 0!==e){var n=t.size;if(void 0!==n)return e+n}}),0),c}function un(e,t,n){var r=bn(e);return r.__iterateUncached=function(r,o){var a=0,s=!1;function u(e,c){var l=this;e.__iterate((function(e,o){return(!t||c0}function dn(e,t,r){var o=bn(e);return o.size=new te(r).map((function(e){return e.size})).min(),o.__iterate=function(e,t){for(var n,r=this.__iterator(M,t),o=0;!(n=r.next()).done&&!1!==e(n.value,o++,this););return o},o.__iteratorUncached=function(e,o){var a=r.map((function(e){return e=n(e),W(o?e.reverse():e)})),i=0,s=!1;return new F((function(){var n;return s||(n=a.map((function(e){return e.next()})),s=n.some((function(e){return e.done}))),s?q():U(e,i++,t.apply(null,n.map((function(e){return e.value}))))}))},o}function mn(e,t){return ae(e)?t:e.constructor(t)}function vn(e){if(e!==Object(e))throw new TypeError("Expected [K, V] tuple: "+e)}function gn(e){return Ve(e.size),A(e)}function yn(e){return s(e)?r:u(e)?o:a}function bn(e){return Object.create((s(e)?K:u(e)?Y:G).prototype)}function _n(){return this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):$.prototype.cacheResult.call(this)}function wn(e,t){return e>t?1:e=0;n--)t={value:arguments[n],next:t};return this.__ownerID?(this.size=e,this._head=t,this.__hash=void 0,this.__altered=!0,this):Kn(e,t)},Vn.prototype.pushAll=function(e){if(0===(e=o(e)).size)return this;Ve(e.size);var t=this.size,n=this._head;return e.reverse().forEach((function(e){t++,n={value:e,next:n}})),this.__ownerID?(this.size=t,this._head=n,this.__hash=void 0,this.__altered=!0,this):Kn(t,n)},Vn.prototype.pop=function(){return this.slice(1)},Vn.prototype.unshift=function(){return 
this.push.apply(this,arguments)},Vn.prototype.unshiftAll=function(e){return this.pushAll(e)},Vn.prototype.shift=function(){return this.pop.apply(this,arguments)},Vn.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):Yn()},Vn.prototype.slice=function(e,t){if(j(e,t,this.size))return this;var n=T(e,this.size);if(I(t,this.size)!==this.size)return Se.prototype.slice.call(this,e,t);for(var r=this.size-n,o=this._head;n--;)o=o.next;return this.__ownerID?(this.size=r,this._head=o,this.__hash=void 0,this.__altered=!0,this):Kn(r,o)},Vn.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?Kn(this.size,this._head,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},Vn.prototype.__iterate=function(e,t){if(t)return this.reverse().__iterate(e);for(var n=0,r=this._head;r&&!1!==e(r.value,n++,this);)r=r.next;return n},Vn.prototype.__iterator=function(e,t){if(t)return this.reverse().__iterator(e);var n=0,r=this._head;return new F((function(){if(r){var t=r.value;return r=r.next,U(e,n++,t)}return q()}))},Vn.isStack=Wn;var Hn,Jn="@@__IMMUTABLE_STACK__@@",$n=Vn.prototype;function Kn(e,t,n,r){var o=Object.create($n);return o.size=e,o._head=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Yn(){return Hn||(Hn=Kn(0))}function Gn(e,t){var n=function(n){e.prototype[n]=t[n]};return Object.keys(t).forEach(n),Object.getOwnPropertySymbols&&Object.getOwnPropertySymbols(t).forEach(n),e}$n[Jn]=!0,$n.withMutations=Ke.withMutations,$n.asMutable=Ke.asMutable,$n.asImmutable=Ke.asImmutable,$n.wasAltered=Ke.wasAltered,n.Iterator=F,Gn(n,{toArray:function(){Ve(this.size);var e=new Array(this.size||0);return this.valueSeq().__iterate((function(t,n){e[n]=t})),e},toIndexedSeq:function(){return new Kt(this)},toJS:function(){return this.toSeq().map((function(e){return e&&"function"==typeof e.toJS?e.toJS():e})).__toJS()},toJSON:function(){return this.toSeq().map((function(e){return e&&"function"==typeof e.toJSON?e.toJSON():e})).__toJS()},toKeyedSeq:function(){return new $t(this,!0)},toMap:function(){return We(this.toKeyedSeq())},toObject:function(){Ve(this.size);var e={};return this.__iterate((function(t,n){e[n]=t})),e},toOrderedMap:function(){return zt(this.toKeyedSeq())},toOrderedSet:function(){return Ln(s(this)?this.valueSeq():this)},toSet:function(){return jn(s(this)?this.valueSeq():this)},toSetSeq:function(){return new Yt(this)},toSeq:function(){return u(this)?this.toIndexedSeq():s(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return Vn(s(this)?this.valueSeq():this)},toList:function(){return St(s(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(e,t){return 0===this.size?e+t:e+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+t},concat:function(){return mn(this,sn(this,e.call(arguments,0)))},includes:function(e){return this.some((function(t){return ge(t,e)}))},entries:function(){return this.__iterator(R)},every:function(e,t){Ve(this.size);var n=!0;return this.__iterate((function(r,o,a){if(!e.call(t,r,o,a))return n=!1,!1})),n},filter:function(e,t){return mn(this,en(this,e,t,!0))},find:function(e,t,n){var r=this.findEntry(e,t);return r?r[1]:n},forEach:function(e,t){return Ve(this.size),this.__iterate(t?e.bind(t):e)},join:function(e){Ve(this.size),e=void 0!==e?""+e:",";var t="",n=!0;return this.__iterate((function(r){n?n=!1:t+=e,t+=null!=r?r.toString():""})),t},keys:function(){return this.__iterator(N)},map:function(e,t){return 
mn(this,Xt(this,e,t))},reduce:function(e,t,n){var r,o;return Ve(this.size),arguments.length<2?o=!0:r=t,this.__iterate((function(t,a,i){o?(o=!1,r=t):r=e.call(n,r,t,a,i)})),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return mn(this,Qt(this,!0))},slice:function(e,t){return mn(this,rn(this,e,t,!0))},some:function(e,t){return!this.every(tr(e),t)},sort:function(e){return mn(this,pn(this,e))},values:function(){return this.__iterator(M)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 0!==this.size?0===this.size:!this.some((function(){return!0}))},count:function(e,t){return A(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return tn(this,e,t)},equals:function(e){return ye(this,e)},entrySeq:function(){var e=this;if(e._cache)return new te(e._cache);var t=e.toSeq().map(er).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(tr(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate((function(n,o,a){if(e.call(t,n,o,a))return r=[o,n],!1})),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(O)},flatMap:function(e,t){return mn(this,cn(this,e,t))},flatten:function(e){return mn(this,un(this,e,!0))},fromEntrySeq:function(){return new Gt(this)},get:function(e,t){return this.find((function(t,n){return ge(n,e)}),void 0,t)},getIn:function(e,t){for(var n,r=this,o=xn(e);!(n=o.next()).done;){var a=n.value;if((r=r&&r.get?r.get(a,b):b)===b)return t}return r},groupBy:function(e,t){return nn(this,e,t)},has:function(e){return this.get(e,b)!==b},hasIn:function(e){return this.getIn(e,b)!==b},isSubset:function(e){return e="function"==typeof e.includes?e:n(e),this.every((function(t){return e.includes(t)}))},isSuperset:function(e){return(e="function"==typeof e.isSubset?e:n(e)).isSubset(this)},keyOf:function(e){return this.findKey((function(t){return ge(t,e)}))},keySeq:function(){return this.toSeq().map(Qn).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return fn(this,e)},maxBy:function(e,t){return fn(this,t,e)},min:function(e){return fn(this,e?nr(e):ar)},minBy:function(e,t){return fn(this,t?nr(t):ar,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return mn(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return mn(this,an(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(tr(e),t)},sortBy:function(e,t){return mn(this,pn(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return mn(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return mn(this,on(this,e,t))},takeUntil:function(e,t){return this.takeWhile(tr(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=ir(this))}});var Zn=n.prototype;Zn[p]=!0,Zn[B]=Zn.values,Zn.__toJS=Zn.toArray,Zn.__toStringMapper=rr,Zn.inspect=Zn.toSource=function(){return this.toString()},Zn.chain=Zn.flatMap,Zn.contains=Zn.includes,Gn(r,{flip:function(){return mn(this,Zt(this))},mapEntries:function(e,t){var n=this,r=0;return 
mn(this,this.toSeq().map((function(o,a){return e.call(t,[a,o],r++,n)})).fromEntrySeq())},mapKeys:function(e,t){var n=this;return mn(this,this.toSeq().flip().map((function(r,o){return e.call(t,r,o,n)})).flip())}});var Xn=r.prototype;function Qn(e,t){return t}function er(e,t){return[t,e]}function tr(e){return function(){return!e.apply(this,arguments)}}function nr(e){return function(){return-e.apply(this,arguments)}}function rr(e){return"string"==typeof e?JSON.stringify(e):String(e)}function or(){return C(arguments)}function ar(e,t){return et?-1:0}function ir(e){if(e.size===1/0)return 0;var t=l(e),n=s(e),r=t?1:0;return sr(e.__iterate(n?t?function(e,t){r=31*r+ur(Oe(e),Oe(t))|0}:function(e,t){r=r+ur(Oe(e),Oe(t))|0}:t?function(e){r=31*r+Oe(e)|0}:function(e){r=r+Oe(e)|0}),r)}function sr(e,t){return t=Ae(t,3432918353),t=Ae(t<<15|t>>>-15,461845907),t=Ae(t<<13|t>>>-13,5),t=Ae((t=(t+3864292196|0)^e)^t>>>16,2246822507),t=ke((t=Ae(t^t>>>13,3266489909))^t>>>16)}function ur(e,t){return e^t+2654435769+(e<<6)+(e>>2)|0}return Xn[f]=!0,Xn[B]=Zn.entries,Xn.__toJS=Zn.toObject,Xn.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+rr(e)},Gn(o,{toKeyedSeq:function(){return new $t(this,!1)},filter:function(e,t){return mn(this,en(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return mn(this,Qt(this,!1))},slice:function(e,t){return mn(this,rn(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=T(e,e<0?this.count():this.size);var r=this.slice(0,e);return mn(this,1===n?r:r.concat(C(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return mn(this,un(this,e,!1))},get:function(e,t){return(e=k(this,e))<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find((function(t,n){return n===e}),void 0,t)},has:function(e){return(e=k(this,e))>=0&&(void 0!==this.size?this.size===1/0||e1)try{return decodeURIComponent(t[1])}catch(e){console.error(e)}return null}function Me(e){return t=e.replace(/\.[^./]*$/,""),Y()($()(t));var t}function Re(e,t,n,r,o){if(!t)return[];var a=[],i=t.get("nullable"),s=t.get("required"),u=t.get("maximum"),c=t.get("minimum"),l=t.get("type"),p=t.get("format"),f=t.get("maxLength"),h=t.get("minLength"),m=t.get("uniqueItems"),g=t.get("maxItems"),y=t.get("minItems"),b=t.get("pattern");if(i&&null===e)return[];if(l&&(n||s||void 0!==e||"array"===l)){var _="string"===l&&e,w="array"===l&&U()(e)&&e.length,x="array"===l&&W.a.List.isList(e)&&e.count(),E=[_,w,x,"array"===l&&"string"==typeof e&&e,"file"===l&&e instanceof se.a.File,"boolean"===l&&(e||!1===e),"number"===l&&(e||0===e),"integer"===l&&(e||0===e),"object"===l&&"object"===z()(e)&&null!==e,"object"===l&&"string"==typeof e&&e],S=v()(E).call(E,(function(e){return!!e}));if((n||s)&&!S&&!r)return a.push("Required field is not provided"),a;if("object"===l&&(null===o||"application/json"===o)){var C,A=e;if("string"==typeof e)try{A=JSON.parse(e)}catch(e){return a.push("Parameter string value must be valid JSON"),a}if(t&&t.has("required")&&Ce(s.isList)&&s.isList()&&T()(s).call(s,(function(e){void 0===A[e]&&a.push({propKey:e,error:"Required property not found"})})),t&&t.has("properties"))T()(C=t.get("properties")).call(C,(function(e,t){var 
n=Re(A[t],e,!1,r,o);a.push.apply(a,d()(B()(n).call(n,(function(e){return{propKey:t,error:e}}))))}))}if(b){var k=function(e,t){if(!new RegExp(t).test(e))return"Value must follow pattern "+t}(e,b);k&&a.push(k)}if(y&&"array"===l){var j=function(e,t){var n;if(!e&&t>=1||e&&e.lengtht)return P()(n="Array must not contain more then ".concat(t," item")).call(n,1===t?"":"s")}(e,g);I&&a.push({needRemove:!0,error:I})}if(m&&"array"===l){var N=function(e,t){if(e&&("true"===t||!0===t)){var n=Object(V.fromJS)(e),r=n.toSet();if(e.length>r.size){var o=Object(V.Set)();if(T()(n).call(n,(function(e,t){O()(n).call(n,(function(t){return Ce(t.equals)?t.equals(e):t===e})).size>1&&(o=o.add(t))})),0!==o.size)return B()(o).call(o,(function(e){return{index:e,error:"No duplicates allowed."}})).toArray()}}}(e,m);N&&a.push.apply(a,d()(N))}if(f||0===f){var M=function(e,t){var n;if(e.length>t)return P()(n="Value must be no longer than ".concat(t," character")).call(n,1!==t?"s":"")}(e,f);M&&a.push(M)}if(h){var R=function(e,t){var n;if(e.lengtht)return"Value must be less than ".concat(t)}(e,u);D&&a.push(D)}if(c||0===c){var L=function(e,t){if(e2&&void 0!==arguments[2]?arguments[2]:{},r=n.isOAS3,o=void 0!==r&&r,a=n.bypassRequiredCheck,i=void 0!==a&&a,s=e.get("required"),u=Object(le.a)(e,{isOAS3:o}),c=u.schema,l=u.parameterContentMediaType;return Re(t,c,s,i,l)},Le=function(e,t,n){if(e&&(!e.xml||!e.xml.name)){if(e.xml=e.xml||{},!e.$$ref)return e.type||e.items||e.properties||e.additionalProperties?'\n\x3c!-- XML example cannot be generated; root element name is undefined --\x3e':null;var r=e.$$ref.match(/\S*\/(\S+)$/);e.xml.name=r[1]}return Object(ie.memoizedCreateXMLExample)(e,t,n)},Be=[{when:/json/,shouldStringifyTypes:["string"]}],Fe=["object"],Ue=function(e,t,n,r){var o=Object(ie.memoizedSampleFromSchema)(e,t,r),a=z()(o),i=S()(Be).call(Be,(function(e,t){var r;return t.when.test(n)?P()(r=[]).call(r,d()(e),d()(t.shouldStringifyTypes)):e}),Fe);return te()(i,(function(e){return e===a}))?f()(o,null,2):o},qe=function(e,t,n,r){var o,a=Ue(e,t,n,r);try{"\n"===(o=ve.a.safeDump(ve.a.safeLoad(a),{lineWidth:-1}))[o.length-1]&&(o=y()(o).call(o,0,o.length-1))}catch(e){return console.error(e),"error: could not generate yaml example"}return o.replace(/\t/g," ")},ze=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:void 0;return e&&Ce(e.toJS)&&(e=e.toJS()),r&&Ce(r.toJS)&&(r=r.toJS()),/xml/.test(t)?Le(e,n,r):/(yaml|yml)/.test(t)?qe(e,n,t,r):Ue(e,n,t,r)},Ve=function(){var e={},t=se.a.location.search;if(!t)return{};if(""!=t){var n=t.substr(1).split("&");for(var r in n)n.hasOwnProperty(r)&&(r=n[r].split("="),e[decodeURIComponent(r[0])]=r[1]&&decodeURIComponent(r[1])||"")}return e},We=function(t){return(t instanceof e?t:e.from(t.toString(),"utf-8")).toString("base64")},He={operationsSorter:{alpha:function(e,t){return e.get("path").localeCompare(t.get("path"))},method:function(e,t){return e.get("method").localeCompare(t.get("method"))}},tagsSorter:{alpha:function(e,t){return e.localeCompare(t)}}},Je=function(e){var t=[];for(var n in e){var r=e[n];void 0!==r&&""!==r&&t.push([n,"=",encodeURIComponent(r).replace(/%20/g,"+")].join(""))}return t.join("&")},$e=function(e,t,n){return!!Q()(n,(function(n){return re()(e[n],t[n])}))};function Ke(e){return"string"!=typeof e||""===e?"":Object(H.sanitizeUrl)(e)}function Ye(e){return!(!e||l()(e).call(e,"localhost")>=0||l()(e).call(e,"127.0.0.1")>=0||"none"===e)}function 
[diff of a minified, machine-generated JavaScript asset omitted: the content identifies it as the vendored swagger-ui bundle and is not human-readable]

[recovered fragment of a Hugo layout diff that consumes the new cookiefirstScript site parameter; the file header and the line(s) between the two added lines were lost in extraction:]
+{{with and $isProduction .Site.Params.cookiefirstScript}}
+ {{end}}
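For context, the recovered fragment gates loading of the CookieFirst consent script on a production check. Below is a minimal sketch of how such a partial typically looks; everything outside the `with`/`end` lines is an assumption for illustration (in particular, the `$isProduction` definition and the `<script>` line are not recovered from the diff):

```go-html-template
{{/* hypothetical sketch; only the with/end lines are recovered from the diff above */}}
{{ $isProduction := eq hugo.Environment "production" }}
{{ with and $isProduction .Site.Params.cookiefirstScript }}
  {{/* `and` yields the last truthy operand, so inside this block
       "." is the configured consent script URL */}}
  <script src="{{ . }}"></script>
{{ end }}
```

With this pattern, the script tag is emitted only when Hugo runs in the production environment and `cookiefirstScript` is set in `config.toml`; local previews (e.g. `hugo server`) skip the consent banner entirely.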