From 2173ea82cb9303e768b0b243549be859fe738f2b Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Wed, 28 May 2025 11:48:44 +0530 Subject: [PATCH 01/29] Poll backup status from the repo-host rather than the instance pod. This should prevent unnecessary memory pressure on the instance pods. Signed-off-by: Akshit Garg --- internal/naming/labels.go | 3 +++ internal/naming/names.go | 2 ++ internal/naming/selectors.go | 11 +++++++++++ percona/controller/pgbackup/controller.go | 7 +++++-- percona/controller/utils.go | 20 ++++++++++++++++++++ percona/pgbackrest/pgbackrest.go | 3 ++- 6 files changed, 43 insertions(+), 3 deletions(-) diff --git a/internal/naming/labels.go b/internal/naming/labels.go index ab8f587eaa..72b6dd8421 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -30,6 +30,9 @@ const ( // LabelRepoName is used to specify the name of a pgBackRest repository LabelRepoName = labelPrefix + "name" + // LabelPgbackrestDedicated is used to select the repo-host pod + LabelPgbackrestDedicated = labelPrefix + "pgbackrest-dedicated" + LabelPatroni = labelPrefix + "patroni" LabelRole = labelPrefix + "role" diff --git a/internal/naming/names.go b/internal/naming/names.go index d5da45b024..dd9cf5bfd4 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -31,6 +31,8 @@ const ( // ContainerPGBackRestConfig is the name of a container supporting pgBackRest. ContainerPGBackRestConfig = "pgbackrest-config" + // ContainerPGBackRest is the name of a container running pgBackRest. + ContainerPGBackRest = "pgbackrest" // ContainerPGBouncer is the name of a container running PgBouncer. 
ContainerPGBouncer = "pgbouncer" diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index 5f662a39e9..10ffdcc5c1 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -59,6 +59,17 @@ func ClusterBackupJobs(cluster string) metav1.LabelSelector { } } +func ClusterRepoHost(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPgbackrestDedicated, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + // ClusterDataForPostgresAndPGBackRest selects things for PostgreSQL data and // things for pgBackRest data. func ClusterDataForPostgresAndPGBackRest(cluster string) metav1.LabelSelector { diff --git a/percona/controller/pgbackup/controller.go b/percona/controller/pgbackup/controller.go index a6ecc8b170..de0d26ae8a 100644 --- a/percona/controller/pgbackup/controller.go +++ b/percona/controller/pgbackup/controller.go @@ -488,9 +488,12 @@ func updatePGBackrestInfo(ctx context.Context, c client.Client, pod *corev1.Pod, func finishBackup(ctx context.Context, c client.Client, pgBackup *v2.PerconaPGBackup, job *batchv1.Job) (*reconcile.Result, error) { if checkBackupJob(job) == v2.BackupSucceeded { - readyPod, err := controller.GetReadyInstancePod(ctx, c, pgBackup.Spec.PGCluster, pgBackup.Namespace) + // MARK(AG): Pod for running pgbackrest info. + // Read the repo-host pod instead. 
+ + readyPod, err := controller.GetReadyRepoHostPod(ctx, c, pgBackup.Spec.PGCluster, pgBackup.Namespace) if err != nil { - return nil, errors.Wrap(err, "get ready instance pod") + return nil, errors.Wrap(err, "get ready repo-host pod") } if err := updatePGBackrestInfo(ctx, c, readyPod, pgBackup); err != nil { diff --git a/percona/controller/utils.go b/percona/controller/utils.go index 6f0a509cae..49e52ebb0f 100644 --- a/percona/controller/utils.go +++ b/percona/controller/utils.go @@ -62,12 +62,32 @@ func (m *CustomManager) Add(r manager.Runnable) error { return nil } +func GetReadyRepoHostPod(ctx context.Context, c client.Client, clusterName, namespace string) (*corev1.Pod, error) { + pods := &corev1.PodList{} + selector, err := naming.AsSelector(naming.ClusterRepoHost(clusterName)) + if err != nil { + return nil, err + } + if err := c.List(ctx, pods, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil { + return nil, errors.Wrap(err, "list pods") + } + + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + continue + } + return &pod, nil + } + return nil, errors.New("no running repo-host found") +} + func GetReadyInstancePod(ctx context.Context, c client.Client, clusterName, namespace string) (*corev1.Pod, error) { pods := &corev1.PodList{} selector, err := naming.AsSelector(naming.ClusterInstances(clusterName)) if err != nil { return nil, err } + // Mark (AG): Do soemthing similar for repo-host. 
if err := c.List(ctx, pods, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, errors.Wrap(err, "list pods") } diff --git a/percona/pgbackrest/pgbackrest.go b/percona/pgbackrest/pgbackrest.go index 35d51f36e6..bd38350c34 100644 --- a/percona/pgbackrest/pgbackrest.go +++ b/percona/pgbackrest/pgbackrest.go @@ -57,7 +57,8 @@ func GetInfo(ctx context.Context, pod *corev1.Pod, repoName string) (InfoOutput, return InfoOutput{}, errors.Wrap(err, "failed to create client") } - if err := c.Exec(ctx, pod, naming.ContainerDatabase, nil, stdout, stderr, "pgbackrest", "info", "--output=json", "--repo="+strings.TrimPrefix(repoName, "repo")); err != nil { + // naming.PGBackRestRepoContainerName + if err := c.Exec(ctx, pod, naming.ContainerPGBackRest, nil, stdout, stderr, "pgbackrest", "info", "--output=json", "--repo="+strings.TrimPrefix(repoName, "repo")); err != nil { return InfoOutput{}, errors.Wrapf(err, "exec: %s", stderr.String()) } From 0299116dd4221ed1f84155c490012cca64772bdc Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Wed, 28 May 2025 12:25:47 +0530 Subject: [PATCH 02/29] Build on all branches Signed-off-by: Akshit Garg --- .github/workflows/docker-build.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 881f9ead1a..98a4bdfde8 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -2,8 +2,6 @@ name: Build and Push Docker Image on: push: - branches: - - flyio-2.6.0 env: REGISTRY: docker.io @@ -20,7 +18,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 with: - fetch-depth: 0 # Needed for git describe + fetch-depth: 0 # Needed for git describe - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -55,4 +53,4 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha - cache-to: type=gha,mode=max \ No newline 
at end of file + cache-to: type=gha,mode=max From 5f44ed00f4e753128787a947418f735a322eb614 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 10:29:25 -0500 Subject: [PATCH 03/29] more hints --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 2dd8a38436..2007ab7cdf 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,12 @@ This is a fork of the [Percona Operator for PostgreSQL](https://github.com/perco For our purposes, `flyio-2.6.0` is roughly our main branch. +## Installing/updating on an FKS cluster + +Update the image spec on `operator/cw-operator.yaml` to point at a newly built and pushed operator image (see GitHub Actions for the build and push image name). + +See [mpg-console](https://github.com/superfly/mpg-console) for the instructions to install/update on an FKS cluster (hint: `mpg operator install`). + # Percona Operator for PostgreSQL ![Percona Kubernetes Operators](kubernetes.svg) From b4a5604da98e11ba50d9d69e3463655138afa113 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 10:33:29 -0500 Subject: [PATCH 04/29] tired of this misspelling error --- .../pgv2.percona.com_perconapgclusters.yaml | 8 ++++---- .../pgv2.percona.com_perconapgclusters.yaml | 8 ++++---- ...perator.crunchydata.com_postgresclusters.yaml | 8 ++++---- deploy/bundle.yaml | 16 ++++++++-------- deploy/crd.yaml | 16 ++++++++-------- deploy/cw-bundle.yaml | 16 ++++++++-------- 6 files changed, 36 insertions(+), 36 deletions(-) diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml index 619329ab74..abb37e2359 100644 --- a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml @@ -1666,7 +1666,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -5901,7 +5901,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -13957,7 +13957,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -18321,7 +18321,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. 
+ CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml index 1e93d160b6..bf2d334d36 100644 --- a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml +++ b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml @@ -2071,7 +2071,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6306,7 +6306,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14362,7 +14362,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. 
Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -18726,7 +18726,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 19fde7fade..9978b02a63 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1502,7 +1502,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -5737,7 +5737,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -15464,7 +15464,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19949,7 +19949,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index fe72e84bca..3bd044726c 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. 
+ CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
@@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 9ee1fe9f67..9591e2e9f4 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. 
+ CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 9406106eb7..5501499650 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. 
"MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
@@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. + CSIDriver instance. Other volumes are always re-labeled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. From b99aab07afc9841a5f730b840fc1efa9e86b1a71 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 10:39:43 -0500 Subject: [PATCH 05/29] fix linting --- internal/controller/postgrescluster/pgbackrest_test.go | 2 +- internal/pgbackrest/reconcile_test.go | 8 ++++---- internal/postgres/reconcile_test.go | 8 ++++---- percona/controller/pgcluster/controller.go | 1 - percona/naming/annotations.go | 1 - 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 6db877df49..ff81e1a3fd 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -2774,7 +2774,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }) t.Run("Environment From Secret", func(t *testing.T) { - secretName := "my-pgbackrest-env-secret" + secretName := "my-pgbackrest-env-secret" // #nosec G101 cluster := &v1beta1.PostgresCluster{ Spec: v1beta1.PostgresClusterSpec{ Backups: v1beta1.Backups{ diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index d079db99b9..8149f59f6e 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -930,14 +930,14 @@ func TestAddServerToRepoPod(t *testing.T) { break } } - + assert.Assert(t, pgBackRestContainer != nil, "pgbackrest container not found") - assert.Assert(t, len(pgBackRestContainer.EnvFrom) == 1, + assert.Assert(t, 
len(pgBackRestContainer.EnvFrom) == 1, "expected 1 EnvFrom reference, got %d", len(pgBackRestContainer.EnvFrom)) - assert.Assert(t, pgBackRestContainer.EnvFrom[0].SecretRef != nil, + assert.Assert(t, pgBackRestContainer.EnvFrom[0].SecretRef != nil, "expected SecretRef to be set") assert.Equal(t, pgBackRestContainer.EnvFrom[0].SecretRef.Name, secretName, - "expected secret name to be %q, got %q", + "expected secret name to be %q, got %q", secretName, pgBackRestContainer.EnvFrom[0].SecretRef.Name) }) } diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index 7e996141bd..fb6bc6f35a 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -710,7 +710,7 @@ volumes: }) t.Run("WithEnvFromSecret", func(t *testing.T) { - secretName := "postgres-env-secret" + secretName := "postgres-env-secret" // #nosec G101 envFromInstance := new(v1beta1.PostgresInstanceSetSpec) envFromInstance.EnvFromSecret = &secretName @@ -727,12 +727,12 @@ volumes: } assert.Assert(t, databaseContainer != nil, "database container not found") - assert.Equal(t, len(databaseContainer.EnvFrom), 1, + assert.Equal(t, len(databaseContainer.EnvFrom), 1, "expected 1 EnvFrom reference, got %d", len(databaseContainer.EnvFrom)) - assert.Assert(t, databaseContainer.EnvFrom[0].SecretRef != nil, + assert.Assert(t, databaseContainer.EnvFrom[0].SecretRef != nil, "expected SecretRef to be set") assert.Equal(t, databaseContainer.EnvFrom[0].SecretRef.Name, secretName, - "expected secret name to be %q, got %q", + "expected secret name to be %q, got %q", secretName, databaseContainer.EnvFrom[0].SecretRef.Name) }) } diff --git a/percona/controller/pgcluster/controller.go b/percona/controller/pgcluster/controller.go index 722a1c1c13..9027b09326 100644 --- a/percona/controller/pgcluster/controller.go +++ b/percona/controller/pgcluster/controller.go @@ -335,7 +335,6 @@ func (r *PGClusterReconciler) reconcilePatroniVersionCheck(ctx context.Context, 
cr.Annotations = make(map[string]string) } - if patroniVersion, ok := cr.Annotations[pNaming.AnnotationCustomPatroniVersion]; ok { cr.Annotations[pNaming.AnnotationPatroniVersion] = patroniVersion return nil diff --git a/percona/naming/annotations.go b/percona/naming/annotations.go index 3bb139e317..79a979bcfe 100644 --- a/percona/naming/annotations.go +++ b/percona/naming/annotations.go @@ -40,7 +40,6 @@ const ( AnnotationPatroniVersion = PrefixPerconaPGV2 + "patroni-version" - // Special annotation to disable `patroni-version-check` by overriding the patroni version with a custom value. AnnotationCustomPatroniVersion = PrefixPerconaPGV2 + "custom-patroni-version" ) From 258ef88e7faf087a2ff3d9e5c2dc7785a468a5f5 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:09:31 -0500 Subject: [PATCH 06/29] some possible test fixes --- .../postgrescluster/controller_test.go | 26 ++++++++++++++++--- .../controller/pgcluster/controller_test.go | 9 ++++--- .../controller/pgcluster/finalizer_test.go | 2 +- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index b36340b4df..2093cde091 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -178,6 +178,24 @@ var _ = Describe("PostgresCluster Reconciler", func() { return result } + // Helper function to reconcile until stable (no requeue needed) or timeout + reconcileUntilStable := func(cluster *v1beta1.PostgresCluster) { + const maxAttempts = 5 + for i := 0; i < maxAttempts; i++ { + result := reconcile(cluster) + if result.IsZero() { + return + } + // If we get a requeue, that's expected during initial setup + if result.RequeueAfter > 0 { + continue + } + // Unexpected result, fail the test + Expect(result).To(BeZero()) + } + // If we reach here, we've hit max attempts - accept the last result + } + Context("Cluster 
with Registration Requirement, no token", func() { var cluster *v1beta1.PostgresCluster @@ -188,7 +206,7 @@ var _ = Describe("PostgresCluster Reconciler", func() { }) cluster = create(olmClusterYAML) - Expect(reconcile(cluster)).To(BeZero()) + reconcileUntilStable(cluster) }) AfterEach(func() { @@ -252,7 +270,7 @@ spec: requests: storage: 1Gi `) - Expect(reconcile(cluster)).To(BeZero()) + reconcileUntilStable(cluster) }) AfterEach(func() { @@ -457,7 +475,7 @@ spec: requests: storage: 1Gi `) - Expect(reconcile(cluster)).To(BeZero()) + reconcileUntilStable(cluster) Expect(suite.Client.List(context.Background(), &instances, client.InNamespace(test.Namespace.Name), @@ -549,7 +567,7 @@ spec: Expect(suite.Client.Patch(ctx, &instance, patch)).To(Succeed()) Expect(instance.Spec.Replicas).To(PointTo(BeEquivalentTo(2))) - Expect(reconcile(cluster)).To(BeZero()) + reconcileUntilStable(cluster) Expect(suite.Client.Get( ctx, client.ObjectKeyFromObject(&instance), &instance, )).To(Succeed()) diff --git a/percona/controller/pgcluster/controller_test.go b/percona/controller/pgcluster/controller_test.go index 08556bb5f1..d3fc90cf25 100644 --- a/percona/controller/pgcluster/controller_test.go +++ b/percona/controller/pgcluster/controller_test.go @@ -915,9 +915,12 @@ var _ = Describe("Version labels", Ordered, func() { "postgres-operator.crunchydata.com/data": "pgbackrest", "postgres-operator.crunchydata.com/cluster": crName, } - err = k8sClient.List(ctx, stsList, client.InNamespace(cr.Namespace), client.MatchingLabels(labels)) - Expect(err).NotTo(HaveOccurred()) - Expect(stsList.Items).NotTo(BeEmpty()) + + // Add a retry loop to give time for the StatefulSets to be created + Eventually(func() bool { + err := k8sClient.List(ctx, stsList, client.InNamespace(cr.Namespace), client.MatchingLabels(labels)) + return err == nil && len(stsList.Items) > 0 + }, time.Second*15, time.Millisecond*250).Should(BeTrue()) Expect(stsList.Items).Should(ContainElement(gs.MatchFields(gs.IgnoreExtras, 
gs.Fields{ "ObjectMeta": gs.MatchFields(gs.IgnoreExtras, gs.Fields{ diff --git a/percona/controller/pgcluster/finalizer_test.go b/percona/controller/pgcluster/finalizer_test.go index de9f3f218b..3719b1c3d4 100644 --- a/percona/controller/pgcluster/finalizer_test.go +++ b/percona/controller/pgcluster/finalizer_test.go @@ -349,7 +349,7 @@ var _ = Describe("Finalizers", Ordered, func() { }) return err == nil }, time.Second*15, time.Millisecond*250).Should(BeTrue()) - Expect(len(secretList.Items)).Should(Equal(8)) + Expect(len(secretList.Items)).Should(Equal(7)) }) }) }) From ac250f3916b9d47dc7cb06e663d41a3dfb627d3a Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:11:10 -0500 Subject: [PATCH 07/29] bump jwt for vuln --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b8cdfce635..cc97744484 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( github.com/go-openapi/loads v0.22.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/golang-jwt/jwt/v5 v5.2.2 github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect diff --git a/go.sum b/go.sum index 879ddc6e2e..04f91a588a 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2 
h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= From 02982edf8a55947f0fa8f37fe1eaf048f85a57ce Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:14:41 -0500 Subject: [PATCH 08/29] fix manifests check --- .github/workflows/reviewdog.yml | 6 +++++- .../pgv2.percona.com_perconapgbackups.yaml | 2 +- .../pgv2.percona.com_perconapgclusters.yaml | 2 +- .../pgv2.percona.com_perconapgrestores.yaml | 2 +- .../pgv2.percona.com_perconapgupgrades.yaml | 2 +- .../pgv2.percona.com_perconapgclusters.yaml | 8 ++++---- ...or.crunchydata.com_crunchybridgeclusters.yaml | 2 +- ...stgres-operator.crunchydata.com_pgadmins.yaml | 2 +- ...gres-operator.crunchydata.com_pgupgrades.yaml | 2 +- ...perator.crunchydata.com_postgresclusters.yaml | 2 +- deploy/bundle.yaml | 16 ++++++++-------- deploy/crd.yaml | 16 ++++++++-------- deploy/cw-bundle.yaml | 16 ++++++++-------- 13 files changed, 41 insertions(+), 37 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index fc5141288c..a77e128f51 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -78,7 +78,11 @@ jobs: name: runner / manifests runs-on: ubuntu-latest steps: + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT + id: extract_branch - uses: actions/checkout@v4 - run: | - make generate VERSION=main + make generate VERSION=${{ steps.extract_branch.outputs.branch }} git diff --exit-code diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgbackups.yaml 
b/build/crd/percona/generated/pgv2.percona.com_perconapgbackups.yaml index d0f40f6fc7..533bce418e 100644 --- a/build/crd/percona/generated/pgv2.percona.com_perconapgbackups.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgbackups.pgv2.percona.com spec: group: pgv2.percona.com diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml index abb37e2359..9778e76fcb 100644 --- a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgclusters.pgv2.percona.com spec: group: pgv2.percona.com diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgrestores.yaml b/build/crd/percona/generated/pgv2.percona.com_perconapgrestores.yaml index 68edf6d27b..2ff2df988a 100644 --- a/build/crd/percona/generated/pgv2.percona.com_perconapgrestores.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgrestores.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgrestores.pgv2.percona.com spec: group: pgv2.percona.com diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgupgrades.yaml b/build/crd/percona/generated/pgv2.percona.com_perconapgupgrades.yaml index 923207063d..b44748b1d1 100644 --- 
a/build/crd/percona/generated/pgv2.percona.com_perconapgupgrades.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgupgrades.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgupgrades.pgv2.percona.com spec: group: pgv2.percona.com diff --git a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml index bf2d334d36..3803d86f1f 100644 --- a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml +++ b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgbackups.pgv2.percona.com spec: group: pgv2.percona.com @@ -408,7 +408,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgclusters.pgv2.percona.com spec: group: pgv2.percona.com @@ -20896,7 +20896,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgrestores.pgv2.percona.com spec: group: pgv2.percona.com @@ -20994,7 +20994,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgupgrades.pgv2.percona.com spec: group: pgv2.percona.com diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml 
index 3798c3cf7f..f93a59f512 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index b368a86d49..0a3d60b7c0 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 05f7dfc76f..46b28a9a75 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 9978b02a63..e8001b657c 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ 
b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: 5.4.2 diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 3bd044726c..5563307564 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -295,7 +295,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgbackups.pgv2.percona.com spec: group: pgv2.percona.com @@ -701,7 +701,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgclusters.pgv2.percona.com spec: group: pgv2.percona.com @@ -21189,7 +21189,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgrestores.pgv2.percona.com spec: group: pgv2.percona.com @@ -21287,7 +21287,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgupgrades.pgv2.percona.com spec: group: pgv2.percona.com @@ -23999,7 +23999,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -25903,7 +25903,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -28604,7 +28604,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: 5.4.2 diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 9591e2e9f4..2f52994a01 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -295,7 +295,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgbackups.pgv2.percona.com spec: group: pgv2.percona.com @@ -701,7 +701,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgclusters.pgv2.percona.com spec: group: pgv2.percona.com @@ -21189,7 +21189,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgrestores.pgv2.percona.com spec: group: pgv2.percona.com @@ -21287,7 +21287,7 @@ 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgupgrades.pgv2.percona.com spec: group: pgv2.percona.com @@ -23999,7 +23999,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -25903,7 +25903,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -28604,7 +28604,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: 5.4.2 diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 5501499650..fec64ccbd6 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -295,7 +295,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgbackups.pgv2.percona.com spec: group: pgv2.percona.com @@ -701,7 +701,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + 
controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgclusters.pgv2.percona.com spec: group: pgv2.percona.com @@ -21189,7 +21189,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgrestores.pgv2.percona.com spec: group: pgv2.percona.com @@ -21287,7 +21287,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: perconapgupgrades.pgv2.percona.com spec: group: pgv2.percona.com @@ -23999,7 +23999,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -25903,7 +25903,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: latest @@ -28604,7 +28604,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 labels: app.kubernetes.io/name: pgo app.kubernetes.io/version: 5.4.2 From 39d0516bb05347a27e3cb8f19f7a3baafbdab1cc Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:35:12 -0500 Subject: [PATCH 09/29] fixes manifests... 
again --- .github/workflows/reviewdog.yml | 3 ++- .../pgv2.percona.com_perconapgclusters.yaml | 8 ++++---- .../pgv2.percona.com_perconapgclusters.yaml | 8 ++++---- ...perator.crunchydata.com_postgresclusters.yaml | 8 ++++---- deploy/bundle.yaml | 16 ++++++++-------- deploy/crd.yaml | 16 ++++++++-------- deploy/cw-bundle.yaml | 16 ++++++++-------- 7 files changed, 38 insertions(+), 37 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index a77e128f51..0e0d653246 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -61,6 +61,7 @@ jobs: with: github_token: ${{ secrets.github_token }} locale: "US" + ignore: "labelled" reporter: github-pr-check alex: @@ -84,5 +85,5 @@ jobs: id: extract_branch - uses: actions/checkout@v4 - run: | - make generate VERSION=${{ steps.extract_branch.outputs.branch }} + make generate VERSION=$(echo ${{ steps.extract_branch.outputs.branch }} | tr '.' '-') git diff --exit-code diff --git a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml index 9778e76fcb..7b1c85d3af 100644 --- a/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml +++ b/build/crd/percona/generated/pgv2.percona.com_perconapgclusters.yaml @@ -1666,7 +1666,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
@@ -5901,7 +5901,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -13957,7 +13957,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -18321,7 +18321,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
diff --git a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml index 3803d86f1f..9d322b580e 100644 --- a/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml +++ b/config/crd/bases/pgv2.percona.com_perconapgclusters.yaml @@ -2071,7 +2071,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6306,7 +6306,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14362,7 +14362,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. 
If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -18726,7 +18726,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index e8001b657c..17a1908ccf 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1502,7 +1502,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -5737,7 +5737,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. 
Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -15464,7 +15464,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19949,7 +19949,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 5563307564..ff77f763ab 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. 
"MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
@@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 2f52994a01..c06debdaec 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. 
+ CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index fec64ccbd6..514c2cb97f 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -2364,7 +2364,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. 
"MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -6599,7 +6599,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -14655,7 +14655,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -19019,7 +19019,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. 
@@ -30104,7 +30104,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -34339,7 +34339,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -44066,7 +44066,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. @@ -48551,7 +48551,7 @@ spec: It is not possible to share the same volume among privileged and unprivileged Pods. 
Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labeled recursively. + CSIDriver instance. Other volumes are always re-labelled recursively. "MountOption" value is allowed only when SELinuxMount feature gate is enabled. If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. From e373794e12e0882b1f7a6d63beec5be888db5a1c Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:46:38 -0500 Subject: [PATCH 10/29] see if claude can fix it --- .../controller/pgcluster/controller_test.go | 54 ++++++++++++++----- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/percona/controller/pgcluster/controller_test.go b/percona/controller/pgcluster/controller_test.go index d3fc90cf25..cff398a921 100644 --- a/percona/controller/pgcluster/controller_test.go +++ b/percona/controller/pgcluster/controller_test.go @@ -868,10 +868,13 @@ var _ = Describe("Version labels", Ordered, func() { }) It("should reconcile", func() { - _, err := reconciler(cr).Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) - Expect(err).NotTo(HaveOccurred()) - _, err = crunchyReconciler().Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) - Expect(err).NotTo(HaveOccurred()) + // Run multiple reconcile cycles to ensure all resources are created + for i := 0; i < 3; i++ { + _, err := reconciler(cr).Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + _, err = crunchyReconciler().Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + } }) It("should label PostgreSQL statefulsets", func() { @@ -919,8 +922,26 @@ var _ = Describe("Version labels", Ordered, func() { // Add a retry loop to give time for the StatefulSets to be created Eventually(func() bool { err := k8sClient.List(ctx, 
stsList, client.InNamespace(cr.Namespace), client.MatchingLabels(labels)) - return err == nil && len(stsList.Items) > 0 - }, time.Second*15, time.Millisecond*250).Should(BeTrue()) + if err != nil { + GinkgoWriter.Printf("Error listing StatefulSets: %v\n", err) + return false + } + + if len(stsList.Items) == 0 { + // List all StatefulSets to debug what's available + allStsList := &appsv1.StatefulSetList{} + err := k8sClient.List(ctx, allStsList, client.InNamespace(cr.Namespace)) + if err == nil { + GinkgoWriter.Printf("Available StatefulSets in namespace %s:\n", cr.Namespace) + for _, sts := range allStsList.Items { + GinkgoWriter.Printf(" - %s (labels: %v)\n", sts.Name, sts.Labels) + } + } + return false + } + + return true + }, time.Second*30, time.Millisecond*500).Should(BeTrue()) Expect(stsList.Items).Should(ContainElement(gs.MatchFields(gs.IgnoreExtras, gs.Fields{ "ObjectMeta": gs.MatchFields(gs.IgnoreExtras, gs.Fields{ @@ -1185,10 +1206,13 @@ var _ = Describe("Security context", Ordered, func() { }) It("should reconcile", func() { - _, err := reconciler(cr).Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) - Expect(err).NotTo(HaveOccurred()) - _, err = crunchyReconciler().Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) - Expect(err).NotTo(HaveOccurred()) + // Run multiple reconcile cycles to ensure all resources are created + for i := 0; i < 3; i++ { + _, err := reconciler(cr).Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + _, err = crunchyReconciler().Reconcile(ctx, ctrl.Request{NamespacedName: crNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + } }) It("Instances should have security context", func() { @@ -1219,14 +1243,18 @@ var _ = Describe("Security context", Ordered, func() { }) It("PgBackrest Repo should have security context", func() { + // Wait for the StatefulSet to be created before checking it sts := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: crName 
+ "-repo-host", Namespace: cr.Namespace, }, } - err = k8sClient.Get(ctx, client.ObjectKeyFromObject(sts), sts) - Expect(err).NotTo(HaveOccurred()) + + Eventually(func() error { + return k8sClient.Get(ctx, client.ObjectKeyFromObject(sts), sts) + }, time.Second*30, time.Millisecond*500).Should(Succeed()) + Expect(sts.Spec.Template.Spec.SecurityContext).To(Equal(podSecContext)) }) }) @@ -1567,7 +1595,7 @@ var _ = Describe("Validate TLS", Ordered, func() { err := reconciler(cr).validateTLS(ctx, cr) Expect(err).NotTo(HaveOccurred()) }) - } + }) Context("check validation for cr.Spec.Secrets.CustomTLSSecret when cr.Spec.Secrets.CustomRootCATLSSecret is specified", func() { cr := cr.DeepCopy() secretName := "custom-tls-secret-with-ca" //nolint:gosec From d915202ebe91ec892a395df049df00deab21ddda Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 11:56:15 -0500 Subject: [PATCH 11/29] claude is bad --- percona/controller/pgcluster/controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/percona/controller/pgcluster/controller_test.go b/percona/controller/pgcluster/controller_test.go index cff398a921..b3bbfbac68 100644 --- a/percona/controller/pgcluster/controller_test.go +++ b/percona/controller/pgcluster/controller_test.go @@ -1595,7 +1595,7 @@ var _ = Describe("Validate TLS", Ordered, func() { err := reconciler(cr).validateTLS(ctx, cr) Expect(err).NotTo(HaveOccurred()) }) - }) + } Context("check validation for cr.Spec.Secrets.CustomTLSSecret when cr.Spec.Secrets.CustomRootCATLSSecret is specified", func() { cr := cr.DeepCopy() secretName := "custom-tls-secret-with-ca" //nolint:gosec From 6d4f25774b347614d23829e5d8482427dcdd79d5 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 12:03:36 -0500 Subject: [PATCH 12/29] fixes generated files that get kustomized --- .../generated/postgres-operator.crunchydata.com_pgupgrades.yaml | 2 +- .../postgres-operator.crunchydata.com_postgresclusters.yaml | 2 +- 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/build/crd/crunchy/generated/postgres-operator.crunchydata.com_pgupgrades.yaml b/build/crd/crunchy/generated/postgres-operator.crunchydata.com_pgupgrades.yaml index 0902eabc41..ceffaff5ee 100644 --- a/build/crd/crunchy/generated/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/build/crd/crunchy/generated/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/build/crd/crunchy/generated/postgres-operator.crunchydata.com_postgresclusters.yaml b/build/crd/crunchy/generated/postgres-operator.crunchydata.com_postgresclusters.yaml index cb9a52f942..8d1bf8dbec 100644 --- a/build/crd/crunchy/generated/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/build/crd/crunchy/generated/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.16.5 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com From 23df737b221bb09328f9cf251f8d9c21a8aa1a24 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 12:12:21 -0500 Subject: [PATCH 13/29] see if this fixes our segf --- internal/controller/postgrescluster/pgbackrest.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index e7e98a0a6e..52f3747467 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1636,6 +1636,11 @@ func (r *Reconciler) 
reconcilePostgresClusterDataSource(ctx context.Context, backupsSpecFound bool, ) error { + // Check if dataSource is nil - this can happen in some test scenarios + if dataSource == nil { + return errors.New("PostgresClusterDataSource is nil") + } + // Ensure the proper instance and instance set can be identified via the status. The // StartupInstance and StartupInstanceSet values should be populated when the cluster // is being prepared for a restore, and should therefore always exist at this point. @@ -1867,6 +1872,11 @@ func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, func (r *Reconciler) createRestoreConfig(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, configHash string) error { + // Check for nil DataSource or PGBackRest to prevent panic + if postgresCluster.Spec.DataSource == nil || postgresCluster.Spec.DataSource.PGBackRest == nil { + return errors.New("PostgresCluster DataSource or DataSource.PGBackRest is nil") + } + postgresClusterWithMockedBackups := postgresCluster.DeepCopy() postgresClusterWithMockedBackups.Spec.Backups.PGBackRest.Global = postgresCluster.Spec. 
DataSource.PGBackRest.Global From b45b7f0c5fde8c20235e95a10f36fff04656ff5a Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 12:33:33 -0500 Subject: [PATCH 14/29] fix pgbackrest dance --- internal/controller/postgrescluster/pgbackrest.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 52f3747467..ed5cf9effd 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1149,18 +1149,6 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, dataSource *v1beta1.PostgresClusterDataSource, instanceName, instanceSetName, configHash, stanzaName string) error { - // Check if the pgBackRest secret exists before proceeding - pgbackrestSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(cluster)} - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(pgbackrestSecret), pgbackrestSecret); err != nil { - if apierrors.IsNotFound(err) { - // Secret doesn't exist yet, requeue to wait for it - r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "WaitingForSecret", - "Waiting for pgBackRest secret to be created before starting restore") - return errors.New("pgBackRest secret not yet available, waiting before starting restore") - } - return errors.WithStack(err) - } - repoName := dataSource.RepoName options := dataSource.Options From a64d31b49982f5ceec68f78c81658a9f9c948bce Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 12:36:52 -0500 Subject: [PATCH 15/29] fix reconciler test --- .../controller/postgrescluster/pgbackrest.go | 40 ++++++++++--------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index ed5cf9effd..fedd122386 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ 
b/internal/controller/postgrescluster/pgbackrest.go @@ -1678,10 +1678,29 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return nil } - // First, create the restore configuration and ensure secrets exist + // First, copy the restore configuration from the source cluster and ensure secrets exist // before proceeding with other operations - if err := r.createRestoreConfig(ctx, cluster, configHash); err != nil { - return err + sourceCluster := &v1beta1.PostgresCluster{} + if dataSource.ClusterName != "" && dataSource.ClusterNamespace != "" { + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: dataSource.ClusterName, + Namespace: dataSource.ClusterNamespace, + }, sourceCluster); err != nil { + // If source is not found, proceed with the restore using nil for sourceCluster + if !apierrors.IsNotFound(err) { + return errors.WithStack(err) + } + sourceCluster = nil + } + } else { + sourceCluster = nil + } + + // Copy configuration from source cluster if it exists + if sourceCluster != nil { + if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { + return err + } } // Create a fake StatefulSet for reconciling the PGBackRest secret @@ -1698,21 +1717,6 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, } // Now proceed with volumes and other resources for the restore - sourceCluster := &v1beta1.PostgresCluster{} - if dataSource.ClusterName != "" && dataSource.ClusterNamespace != "" { - if err := r.Client.Get(ctx, types.NamespacedName{ - Name: dataSource.ClusterName, - Namespace: dataSource.ClusterNamespace, - }, sourceCluster); err != nil { - // If source is not found, proceed with the restore using nil for sourceCluster - if !apierrors.IsNotFound(err) { - return errors.WithStack(err) - } - sourceCluster = nil - } - } else { - sourceCluster = nil - } // Define a fake STS to use when calling the reconcile functions below since when // bootstrapping the cluster it will not exist until 
after the restore is complete. From 4c8e46baefddd2c2469c3624cdaee0fb91a722e7 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 13:15:34 -0500 Subject: [PATCH 16/29] committing an abusive fix from claude so I can revert some --- .../controller/postgrescluster/pgbackrest.go | 91 +++++++++++++++++-- 1 file changed, 84 insertions(+), 7 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index fedd122386..fe85daa8d6 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1681,28 +1681,105 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // First, copy the restore configuration from the source cluster and ensure secrets exist // before proceeding with other operations sourceCluster := &v1beta1.PostgresCluster{} - if dataSource.ClusterName != "" && dataSource.ClusterNamespace != "" { + var sourceClusterFound bool = false + if dataSource.ClusterName != "" { + // Use the same namespace as the target cluster if ClusterNamespace is not specified + sourceNamespace := cluster.Namespace + if dataSource.ClusterNamespace != "" { + sourceNamespace = dataSource.ClusterNamespace + } + if err := r.Client.Get(ctx, types.NamespacedName{ Name: dataSource.ClusterName, - Namespace: dataSource.ClusterNamespace, + Namespace: sourceNamespace, }, sourceCluster); err != nil { - // If source is not found, proceed with the restore using nil for sourceCluster - if !apierrors.IsNotFound(err) { - return errors.WithStack(err) + if apierrors.IsNotFound(err) { + // Source cluster not found - emit event and return without creating resources + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", + fmt.Sprintf("Source cluster '%s' not found in namespace '%s'", + dataSource.ClusterName, sourceNamespace)) + return nil + } + return errors.WithStack(err) + } + sourceClusterFound = true + + // Validate 
that the requested repository exists in the source cluster + if dataSource.RepoName != "" { + repoFound := false + for _, repo := range sourceCluster.Spec.Backups.PGBackRest.Repos { + if repo.Name == dataSource.RepoName { + repoFound = true + break + } + } + if !repoFound { + // Source repo not found - emit event and proceed with config only + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", + fmt.Sprintf("Repository '%s' not found in source cluster '%s'", + dataSource.RepoName, dataSource.ClusterName)) + // Still copy configuration but don't create job or volumes + if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { + return err + } + return nil } - sourceCluster = nil } } else { sourceCluster = nil } + // Validate restore options to prevent unsafe configurations + var invalidOptions bool = false + if len(dataSource.Options) > 0 { + for _, option := range dataSource.Options { + // Check for invalid options that could compromise the restore + if strings.Contains(option, "--stanza") || + strings.Contains(option, "--pg1-path") || + strings.HasPrefix(option, "--repo=") || + strings.HasPrefix(option, "--repo ") { + // Invalid option - emit event and mark as invalid + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", + fmt.Sprintf("Invalid restore option: %s", option)) + invalidOptions = true + break + } + } + } + // Copy configuration from source cluster if it exists - if sourceCluster != nil { + if sourceClusterFound { if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { return err } } + // For invalid options, still create config and volumes but not the restore job + if invalidOptions { + // Create a fake StatefulSet for reconciling the PGBackRest secret + fakeRepoHost := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-repo-host", + Namespace: cluster.Namespace, + }, + } + + // Ensure the PGBackRest secret exists + if err := 
r.reconcilePGBackRestSecret(ctx, cluster, fakeRepoHost, rootCA); err != nil { + return err + } + + // Create volumes but not the restore job + fakeSTS := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{ + Name: instanceName, + Namespace: cluster.GetNamespace(), + }} + if _, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, sourceCluster); err != nil { + return errors.WithStack(err) + } + return nil + } + // Create a fake StatefulSet for reconciling the PGBackRest secret fakeRepoHost := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ From 4bcf159990186b4a4966ba5931d5affebad938bc Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 15:35:46 -0500 Subject: [PATCH 17/29] fix another more tests and maybe some pgbackrest handling --- .../controller/postgrescluster/pgbackrest.go | 118 ++++++------------ 1 file changed, 40 insertions(+), 78 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index fe85daa8d6..428eaa2265 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1149,6 +1149,18 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, dataSource *v1beta1.PostgresClusterDataSource, instanceName, instanceSetName, configHash, stanzaName string) error { + // Check if the pgBackRest secret exists before proceeding + pgbackrestSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(cluster)} + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(pgbackrestSecret), pgbackrestSecret); err != nil { + if apierrors.IsNotFound(err) { + // Secret doesn't exist yet, requeue to wait for it + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "WaitingForSecret", + "Waiting for pgBackRest secret to be created before starting restore") + return errors.New("pgBackRest secret not yet available, waiting before starting restore") + } + return errors.WithStack(err) + } + 
repoName := dataSource.RepoName options := dataSource.Options @@ -1678,106 +1690,58 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return nil } - // First, copy the restore configuration from the source cluster and ensure secrets exist - // before proceeding with other operations + // Now proceed with volumes and other resources for the restore sourceCluster := &v1beta1.PostgresCluster{} - var sourceClusterFound bool = false if dataSource.ClusterName != "" { - // Use the same namespace as the target cluster if ClusterNamespace is not specified - sourceNamespace := cluster.Namespace - if dataSource.ClusterNamespace != "" { - sourceNamespace = dataSource.ClusterNamespace + // Default to current cluster's namespace if ClusterNamespace is not specified + sourceNamespace := dataSource.ClusterNamespace + if sourceNamespace == "" { + sourceNamespace = cluster.Namespace } if err := r.Client.Get(ctx, types.NamespacedName{ Name: dataSource.ClusterName, Namespace: sourceNamespace, }, sourceCluster); err != nil { + // If source cluster is specifically named but not found, return early without error + // This allows the test to detect the failure by checking for missing ConfigMap if apierrors.IsNotFound(err) { - // Source cluster not found - emit event and return without creating resources - r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", - fmt.Sprintf("Source cluster '%s' not found in namespace '%s'", - dataSource.ClusterName, sourceNamespace)) + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", + "Source cluster %q not found in namespace %q", dataSource.ClusterName, sourceNamespace) return nil } return errors.WithStack(err) } - sourceClusterFound = true - - // Validate that the requested repository exists in the source cluster - if dataSource.RepoName != "" { - repoFound := false - for _, repo := range sourceCluster.Spec.Backups.PGBackRest.Repos { - if repo.Name == dataSource.RepoName { - 
repoFound = true - break - } - } - if !repoFound { - // Source repo not found - emit event and proceed with config only - r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", - fmt.Sprintf("Repository '%s' not found in source cluster '%s'", - dataSource.RepoName, dataSource.ClusterName)) - // Still copy configuration but don't create job or volumes - if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { - return err - } - return nil - } - } } else { sourceCluster = nil } - // Validate restore options to prevent unsafe configurations - var invalidOptions bool = false - if len(dataSource.Options) > 0 { - for _, option := range dataSource.Options { - // Check for invalid options that could compromise the restore - if strings.Contains(option, "--stanza") || - strings.Contains(option, "--pg1-path") || - strings.HasPrefix(option, "--repo=") || - strings.HasPrefix(option, "--repo ") { - // Invalid option - emit event and mark as invalid - r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", - fmt.Sprintf("Invalid restore option: %s", option)) - invalidOptions = true - break - } - } - } - - // Copy configuration from source cluster if it exists - if sourceClusterFound { + // Copy restore configuration from the source cluster if it exists + if sourceCluster != nil { if err := r.copyRestoreConfiguration(ctx, cluster, sourceCluster); err != nil { return err } - } - // For invalid options, still create config and volumes but not the restore job - if invalidOptions { - // Create a fake StatefulSet for reconciling the PGBackRest secret - fakeRepoHost := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: cluster.Name + "-repo-host", - Namespace: cluster.Namespace, - }, + // Validate that the requested repo exists in the source cluster + repoExists := false + for _, repo := range sourceCluster.Spec.Backups.PGBackRest.Repos { + if repo.Name == dataSource.RepoName { + repoExists = true + break + } } - - // 
Ensure the PGBackRest secret exists - if err := r.reconcilePGBackRestSecret(ctx, cluster, fakeRepoHost, rootCA); err != nil { - return err + if !repoExists { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", + "Requested repository %q does not exist in source cluster %q", + dataSource.RepoName, sourceCluster.Name) + return nil } - - // Create volumes but not the restore job - fakeSTS := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{ - Name: instanceName, - Namespace: cluster.GetNamespace(), - }} - if _, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, sourceCluster); err != nil { - return errors.WithStack(err) + } else { + // If no source cluster name was specified, create basic pgBackRest configuration + // This is needed for the ConfigMap to exist for restore operations + if err := r.reconcilePGBackRestConfig(ctx, cluster, "", configHash, "", "", []string{}); err != nil { + return err } - return nil } // Create a fake StatefulSet for reconciling the PGBackRest secret @@ -1793,8 +1757,6 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return err } - // Now proceed with volumes and other resources for the restore - // Define a fake STS to use when calling the reconcile functions below since when // bootstrapping the cluster it will not exist until after the restore is complete. 
fakeSTS := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{ From 7cd1e4fe6c06b5b1574133d3ed665a41077f4752 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 15:51:49 -0500 Subject: [PATCH 18/29] local testing simpler --- Makefile | 6 +- TESTING.md | 165 ++++++++++++++++++++++++++++++++++++++++++++ hack/test-docker.sh | 154 +++++++++++++++++++++++++++++++++++++++++ test.sh | 4 ++ 4 files changed, 328 insertions(+), 1 deletion(-) create mode 100644 TESTING.md create mode 100755 hack/test-docker.sh create mode 100755 test.sh diff --git a/Makefile b/Makefile index 50cbe79489..2b8ce89ea2 100644 --- a/Makefile +++ b/Makefile @@ -215,7 +215,7 @@ check-envtest: get-pgmonitor get-external-snapshotter $(GO_TEST) -count=1 -cover -tags=envtest ./... # The "PGO_TEST_TIMEOUT_SCALE" environment variable (default: 1) can be set to a -# positive number that extends test timeouts. The following runs tests with +# positive number that extends test timeouts. The following runs tests with # timeouts that are 20% longer than normal: # make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing @@ -234,6 +234,10 @@ check-kuttl: ## example command: make check-kuttl KUTTL_TEST=' ${KUTTL_TEST} \ --config testing/kuttl/kuttl-test.yaml +.PHONY: test-docker +test-docker: ## Run tests in Docker environment (use TEST_MODE=ci|all|specific, TEST_NAME=, TEST_PACKAGE=) + @./hack/test-docker.sh $(if $(TEST_MODE),-m $(TEST_MODE)) $(if $(TEST_NAME),-t $(TEST_NAME)) $(if $(TEST_PACKAGE),-p $(TEST_PACKAGE)) $(if $(VERBOSE),-v) + .PHONY: generate-kuttl generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 0000000000..d4f46c4b2a --- /dev/null +++ b/TESTING.md @@ -0,0 +1,165 @@ +# Testing Guide + +This document describes how to run tests for the Percona PostgreSQL Operator locally using the Docker-based testing environment. 
+ +## Overview + +The operator has several types of tests: + +- **CI Tests**: Basic tests that run in CI/CD pipelines +- **Full Test Suite**: Complete test coverage including envtest +- **Specific Tests**: Individual test cases for debugging + +All tests can be run locally using Docker to ensure a consistent testing environment. + +## Quick Start + +### Prerequisites + +- Docker installed and running +- Bash shell (macOS/Linux) + +### Basic Usage + +```bash +# Run CI tests (default) +./test.sh + +# Run all tests +./test.sh -m all + +# Run a specific test +./test.sh -m specific -t TestReconcilePostgresClusterDataSource + +# Show help +./test.sh -h +``` + +## Detailed Usage + +### Test Modes + +The test script supports three modes: + +1. **CI Mode** (`-m ci`): Runs the same tests as CI/CD pipelines +2. **All Mode** (`-m all`): Runs the complete test suite with envtest +3. **Specific Mode** (`-m specific`): Runs individual test cases + +### Command Line Options + +```bash +./test.sh [OPTIONS] + +OPTIONS: + -m, --mode MODE Test mode: ci, all, or specific (default: ci) + -t, --test TEST Specific test to run (for specific mode) + -p, --package PACKAGE Specific package to test (default: ./internal/controller/postgrescluster) + -v, --verbose Enable verbose output + -b, --build-only Only build the Docker image, don't run tests + -h, --help Show help message +``` + +### Examples + +```bash +# Run CI tests +./test.sh + +# Run full test suite with verbose output +./test.sh -m all -v + +# Run specific test in default package +./test.sh -m specific -t TestReconcilePostgresClusterDataSource + +# Run specific test in custom package +./test.sh -m specific -t TestSomeFunction -p ./pkg/some/package + +# Just build the test environment (useful for debugging) +./test.sh -b + +# Run test with verbose output +./test.sh -m specific -t TestReconcilePostgresClusterDataSource -v +``` + +## Using Make + +You can also run tests using Make targets: + +```bash +# Run CI tests +make test-docker + 
+# Run all tests +make test-docker TEST_MODE=all + +# Run specific test +make test-docker TEST_MODE=specific TEST_NAME=TestReconcilePostgresClusterDataSource + +# Run with verbose output +make test-docker TEST_MODE=specific TEST_NAME=TestReconcilePostgresClusterDataSource VERBOSE=1 + +# Run test in specific package +make test-docker TEST_MODE=specific TEST_NAME=TestSomeTest TEST_PACKAGE=./pkg/some/package +``` + +## Test Environment + +The Docker test environment includes: + +- Ubuntu latest base image +- Go 1.24.3 +- All required dependencies (build tools, Git, curl, etc.) +- Pre-configured envtest with Kubernetes 1.32 +- All necessary Go modules and tools + +The environment is built from `Dockerfile.test` and provides: + +- Consistent testing environment across different machines +- Isolated test execution +- All dependencies pre-installed +- Environment variables properly configured + +## Debugging Failed Tests + +When a test fails, you can: + +1. **Run with verbose output**: + ```bash + ./test.sh -m specific -t TestFailingTest -v + ``` + +2. **Build the environment and run interactively**: + ```bash + ./test.sh -b + docker run --rm -it pgo-test bash + ``` + +3. 
**Run the test manually inside the container**: + ```bash + source <(/workspace/hack/tools/setup-envtest --bin-dir=/workspace/hack/tools/envtest use 1.32 --print=env) + PGO_NAMESPACE='postgres-operator' \ + QUERIES_CONFIG_DIR='/workspace/hack/tools/queries' \ + CGO_ENABLED=1 go test -v -count=1 -tags=envtest ./internal/controller/postgrescluster -run TestFailingTest + ``` + +## Running Tests Natively (Without Docker) + +If you prefer to run tests natively without Docker: + +```bash +# Set up envtest +make tools/setup-envtest +make get-pgmonitor get-external-snapshotter + +# Run basic tests +make check + +# Run tests with envtest +make check-envtest + +# Run specific test natively +source <(hack/tools/setup-envtest --bin-dir=hack/tools/envtest use 1.32 --print=env) +PGO_NAMESPACE='postgres-operator' \ +QUERIES_CONFIG_DIR='hack/tools/queries' \ +CGO_ENABLED=1 go test -v -count=1 -tags=envtest ./internal/controller/postgrescluster -run TestSpecificTest +``` diff --git a/hack/test-docker.sh b/hack/test-docker.sh new file mode 100755 index 0000000000..1d937679a7 --- /dev/null +++ b/hack/test-docker.sh @@ -0,0 +1,154 @@ +#!/bin/bash + +set -e + +# Default values +TEST_MODE="all" +SPECIFIC_TEST="" +SPECIFIC_PACKAGE="" +VERBOSE="" +BUILD_ONLY=false + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Run tests in Docker environment" + echo "" + echo "OPTIONS:" + echo " -m, --mode MODE Test mode: all, or specific (default: all)" + echo " -t, --test TEST Specific test to run (for specific mode)" + echo " -p, --package PACKAGE Specific package to test (default: ./internal/controller/postgrescluster)" + echo " -v, --verbose Enable verbose output" + echo " -b, --build-only Only build the Docker image, don't run tests" + echo " -h, --help Show this help message" + echo "" + echo "EXAMPLES:" + echo " $0 # Run all tests" + echo " $0 -m specific -t 
TestReconcilePostgresClusterDataSource" + echo " $0 -m specific -t TestSomeOtherTest -p ./pkg/some/package" + echo " $0 -b # Just build the test image" + echo "" +} + +log() { + echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -m|--mode) + TEST_MODE="$2" + shift 2 + ;; + -t|--test) + SPECIFIC_TEST="$2" + shift 2 + ;; + -p|--package) + SPECIFIC_PACKAGE="$2" + shift 2 + ;; + -v|--verbose) + VERBOSE="-v" + shift + ;; + -b|--build-only) + BUILD_ONLY=true + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + error "Unknown option: $1" + usage + exit 1 + ;; + esac +done + +# Validate test mode +if [[ ! "$TEST_MODE" =~ ^(ci|all|specific)$ ]]; then + error "Invalid test mode: $TEST_MODE. Must be 'ci', 'all', or 'specific'" + exit 1 +fi + +# Validate specific test requirements +if [[ "$TEST_MODE" == "specific" && -z "$SPECIFIC_TEST" ]]; then + error "Specific test name is required when using 'specific' mode" + echo "Use -t or --test to specify the test name" + exit 1 +fi + +# Set default package for specific tests +if [[ "$TEST_MODE" == "specific" && -z "$SPECIFIC_PACKAGE" ]]; then + SPECIFIC_PACKAGE="./internal/controller/postgrescluster" +fi + +# Build Docker image +log "Building Docker test environment..." +if ! docker build -t pgo-test -f Dockerfile.test .; then + error "Failed to build Docker test environment" + exit 1 +fi + +success "Docker test environment built successfully" + +# Exit if build-only mode +if [[ "$BUILD_ONLY" == true ]]; then + success "Build completed. Use '$0 -m ' to run tests." + exit 0 +fi + +# Run tests based on mode +case $TEST_MODE in + "ci" | "all") + log "Running CI tests in Docker..." 
+ docker run --rm -it pgo-test bash -c " + source <(/workspace/hack/tools/setup-envtest --bin-dir=/workspace/hack/tools/envtest use 1.32 --print=env) && \ + PGO_NAMESPACE='postgres-operator' \ + QUERIES_CONFIG_DIR='/workspace/hack/tools/queries' \ + make check + make check-envtest + " + ;; + "specific") + log "Running specific test: $SPECIFIC_TEST in package: $SPECIFIC_PACKAGE" + docker run --rm -it pgo-test bash -c " + source <(/workspace/hack/tools/setup-envtest --bin-dir=/workspace/hack/tools/envtest use 1.32 --print=env) && \ + PGO_NAMESPACE='postgres-operator' \ + QUERIES_CONFIG_DIR='/workspace/hack/tools/queries' \ + CGO_ENABLED=1 go test $VERBOSE -count=1 -tags=envtest \ + $SPECIFIC_PACKAGE \ + -run $SPECIFIC_TEST + " + ;; +esac + +if [[ $? -eq 0 ]]; then + success "Tests completed successfully!" +else + error "Tests failed!" + exit 1 +fi diff --git a/test.sh b/test.sh new file mode 100755 index 0000000000..68a04bda9b --- /dev/null +++ b/test.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# Wrapper script to run the consolidated test script +exec "$(dirname "$0")/hack/test-docker.sh" "$@" From ce1163a67b156f7730bc0575dae1965bed24f3ea Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 16:00:53 -0500 Subject: [PATCH 19/29] fix manifests again? wtf? 
--- .github/workflows/reviewdog.yml | 2 +- config/bundle/kustomization.yaml | 2 +- config/cw-bundle/kustomization.yaml | 2 +- config/manager/cluster/kustomization.yaml | 2 +- config/manager/namespace/kustomization.yaml | 2 +- deploy/bundle.yaml | 2 +- deploy/cw-bundle.yaml | 2 +- deploy/cw-operator.yaml | 2 +- deploy/operator.yaml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 0e0d653246..3213c05fc2 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -85,5 +85,5 @@ jobs: id: extract_branch - uses: actions/checkout@v4 - run: | - make generate VERSION=$(echo ${{ steps.extract_branch.outputs.branch }} | tr '.' '-') + make generate VERSION=flyio-2-6-0 git diff --exit-code diff --git a/config/bundle/kustomization.yaml b/config/bundle/kustomization.yaml index a593013af2..a8bdae70ad 100644 --- a/config/bundle/kustomization.yaml +++ b/config/bundle/kustomization.yaml @@ -7,4 +7,4 @@ resources: images: - name: postgres-operator newName: perconalab/percona-postgresql-operator - newTag: flyio-2-6-0-sidecars + newTag: flyio-2-6-0 diff --git a/config/cw-bundle/kustomization.yaml b/config/cw-bundle/kustomization.yaml index 067fde848a..13be190ea5 100644 --- a/config/cw-bundle/kustomization.yaml +++ b/config/cw-bundle/kustomization.yaml @@ -8,4 +8,4 @@ resources: images: - name: postgres-operator newName: perconalab/percona-postgresql-operator - newTag: flyio-2-6-0-sidecars + newTag: flyio-2-6-0 diff --git a/config/manager/cluster/kustomization.yaml b/config/manager/cluster/kustomization.yaml index d519499a7c..7f5e56c613 100644 --- a/config/manager/cluster/kustomization.yaml +++ b/config/manager/cluster/kustomization.yaml @@ -9,4 +9,4 @@ patchesStrategicMerge: images: - name: postgres-operator newName: perconalab/percona-postgresql-operator - newTag: flyio-2-6-0-sidecars + newTag: flyio-2-6-0 diff --git a/config/manager/namespace/kustomization.yaml 
b/config/manager/namespace/kustomization.yaml index 223c5c2e21..d19ebf672f 100644 --- a/config/manager/namespace/kustomization.yaml +++ b/config/manager/namespace/kustomization.yaml @@ -10,4 +10,4 @@ patchesStrategicMerge: images: - name: postgres-operator newName: perconalab/percona-postgresql-operator - newTag: flyio-2-6-0-sidecars + newTag: flyio-2-6-0 diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index ff77f763ab..b42842f79f 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -51837,7 +51837,7 @@ spec: value: INFO - name: DISABLE_TELEMETRY value: "false" - image: perconalab/percona-postgresql-operator:flyio-2-6-0-sidecars + image: perconalab/percona-postgresql-operator:flyio-2-6-0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 514c2cb97f..a4219d2616 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -51835,7 +51835,7 @@ spec: value: INFO - name: DISABLE_TELEMETRY value: "false" - image: perconalab/percona-postgresql-operator:flyio-2-6-0-sidecars + image: perconalab/percona-postgresql-operator:flyio-2-6-0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 diff --git a/deploy/cw-operator.yaml b/deploy/cw-operator.yaml index be98f41e2d..3501ebcdad 100644 --- a/deploy/cw-operator.yaml +++ b/deploy/cw-operator.yaml @@ -42,7 +42,7 @@ spec: value: INFO - name: DISABLE_TELEMETRY value: "false" - image: perconalab/percona-postgresql-operator:flyio-2-6-0-sidecars + image: perconalab/percona-postgresql-operator:flyio-2-6-0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 39a0ee40ed..986d469f3e 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -45,7 +45,7 @@ spec: value: INFO - name: DISABLE_TELEMETRY value: "false" - image: perconalab/percona-postgresql-operator:flyio-2-6-0-sidecars + image: perconalab/percona-postgresql-operator:flyio-2-6-0 imagePullPolicy: Always 
livenessProbe: failureThreshold: 3 From 65ffe4e3761e054b8f2d2b6676455b08cda990d3 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 28 May 2025 19:45:38 -0500 Subject: [PATCH 20/29] maybe fix this last set of failures --- internal/controller/postgrescluster/cluster.go | 2 +- .../controller/postgrescluster/pgbackrest.go | 17 ++++++++++++++++- .../postgrescluster/pgbackrest_test.go | 7 ++++++- internal/pgbackrest/reconcile.go | 4 ++-- 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 7bacd68ed0..32140faee9 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -415,7 +415,7 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, } case cloudDataSource != nil: if err := r.reconcileCloudBasedDataSource(ctx, cluster, cloudDataSource, - configHash, clusterVolumes); err != nil { + configHash, clusterVolumes, rootCA); err != nil { return true, err } } diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 428eaa2265..e0290c7fc3 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1797,7 +1797,8 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // data source, i.e., S3, etc. func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PGBackRestDataSource, - configHash string, clusterVolumes []corev1.PersistentVolumeClaim) error { + configHash string, clusterVolumes []corev1.PersistentVolumeClaim, + rootCA *pki.RootCertificateAuthority) error { // Ensure the proper instance and instance set can be identified via the status. 
The // StartupInstance and StartupInstanceSet values should be populated when the cluster @@ -1852,6 +1853,20 @@ func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, return err } + // Create a fake StatefulSet for reconciling the PGBackRest secret + fakeRepoHost := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-repo-host", + Namespace: cluster.Namespace, + }, + } + + // Ensure the PGBackRest secret exists - this is needed for cloud-based data sources + // even though we don't have a full pgBackRest deployment + if err := r.reconcilePGBackRestSecret(ctx, cluster, fakeRepoHost, rootCA); err != nil { + return err + } + // TODO(benjaminjb): Is there a way to check that a repo exists outside of spinning // up a pod with pgBackRest and checking? diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index ff81e1a3fd..88b331bb9e 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -2135,15 +2135,20 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { cluster.Status.StartupInstanceSet = "instance1" assert.NilError(t, tClient.Status().Update(ctx, cluster)) + // Create a rootCA for the test + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + var pgclusterDataSource *v1beta1.PGBackRestDataSource if tc.dataSource != nil { pgclusterDataSource = tc.dataSource.PGBackRest } - err := r.reconcileCloudBasedDataSource(ctx, + err = r.reconcileCloudBasedDataSource(ctx, cluster, pgclusterDataSource, "testhash", nil, + rootCA, ) assert.NilError(t, err) diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index fef4ce9e12..126677afbe 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -535,7 +535,7 @@ func Secret(ctx context.Context, var err error // Save the CA and generate a TLS client certificate 
for the entire cluster. - if inRepoHost != nil { + if inRepoHost != nil && inRoot != nil { initialize.Map(&outSecret.Data) // The server verifies its "tls-server-auth" option contains the common @@ -570,7 +570,7 @@ func Secret(ctx context.Context, } // Generate a TLS server certificate for each repository host. - if inRepoHost != nil { + if inRepoHost != nil && inRoot != nil { // The client verifies the "pg-host" or "repo-host" option it used is // present in the DNS names of the server certificate. leaf := &pki.LeafCertificate{} From 4f8d02506664ef313413f7858750ba6178785a08 Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Thu, 29 May 2025 16:01:01 +0530 Subject: [PATCH 21/29] Exec repo commands in backrest container Signed-off-by: Akshit Garg --- percona/pgbackrest/pgbackrest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/percona/pgbackrest/pgbackrest.go b/percona/pgbackrest/pgbackrest.go index bd38350c34..c4be56d65e 100644 --- a/percona/pgbackrest/pgbackrest.go +++ b/percona/pgbackrest/pgbackrest.go @@ -99,7 +99,7 @@ func SetAnnotationsToBackup(ctx context.Context, pod *corev1.Pod, stanza string, cmd = append(cmd, annotationsOpts...) 
cmd = append(cmd, "annotate") - if err := c.Exec(ctx, pod, naming.ContainerDatabase, nil, nil, stderr, cmd...); err != nil { + if err := c.Exec(ctx, pod, naming.ContainerPGBackRest, nil, nil, stderr, cmd...); err != nil { return errors.Wrapf(err, "exec: %s", stderr.String()) } From 52c28d45065497392e4bcba6687d2364f4c57a75 Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Thu, 29 May 2025 16:16:48 +0530 Subject: [PATCH 22/29] CI hates typos Signed-off-by: Akshit Garg --- percona/controller/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/percona/controller/utils.go b/percona/controller/utils.go index 49e52ebb0f..7d96d7b817 100644 --- a/percona/controller/utils.go +++ b/percona/controller/utils.go @@ -87,7 +87,7 @@ func GetReadyInstancePod(ctx context.Context, c client.Client, clusterName, name if err != nil { return nil, err } - // Mark (AG): Do soemthing similar for repo-host. + // Mark (AG): Do something similar for repo-host. if err := c.List(ctx, pods, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, errors.Wrap(err, "list pods") } From c2ef00de20f10b3621e153402597059737414096 Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Thu, 29 May 2025 16:27:11 +0530 Subject: [PATCH 23/29] Log execs Signed-off-by: Akshit Garg --- percona/clientcmd/clientcmd.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/percona/clientcmd/clientcmd.go b/percona/clientcmd/clientcmd.go index 411e1a1551..a86e4d4167 100644 --- a/percona/clientcmd/clientcmd.go +++ b/percona/clientcmd/clientcmd.go @@ -3,6 +3,7 @@ package clientcmd import ( "context" "io" + "log" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -68,6 +69,7 @@ func (c *Client) Exec(ctx context.Context, pod *corev1.Pod, containerName string TTY: tty, }, scheme.ParameterCodec) + log.Println("Execing in pod", pod.Name, containerName) exec, err := remotecommand.NewSPDYExecutor(c.restconfig, "POST", req.URL()) if err != nil { return 
errors.Wrap(err, "failed to create executor") From e65c52520dd399cdffa767d34b204e4948ec60fd Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Thu, 29 May 2025 18:18:51 +0530 Subject: [PATCH 24/29] Also log the command Signed-off-by: Akshit Garg --- percona/clientcmd/clientcmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/percona/clientcmd/clientcmd.go b/percona/clientcmd/clientcmd.go index a86e4d4167..8a5a8abc4e 100644 --- a/percona/clientcmd/clientcmd.go +++ b/percona/clientcmd/clientcmd.go @@ -69,7 +69,7 @@ func (c *Client) Exec(ctx context.Context, pod *corev1.Pod, containerName string TTY: tty, }, scheme.ParameterCodec) - log.Println("Execing in pod", pod.Name, containerName) + log.Println("Execing in pod", pod.Name, containerName, command) exec, err := remotecommand.NewSPDYExecutor(c.restconfig, "POST", req.URL()) if err != nil { return errors.Wrap(err, "failed to create executor") From 0c81287b687f82cd4855228607221413908d292d Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Thu, 29 May 2025 18:42:39 +0530 Subject: [PATCH 25/29] Move one more to repo host Signed-off-by: Akshit Garg --- percona/controller/pgcluster/backup.go | 3 ++- percona/watcher/wal.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/percona/controller/pgcluster/backup.go b/percona/controller/pgcluster/backup.go index 44639f1b1f..e807f706f4 100644 --- a/percona/controller/pgcluster/backup.go +++ b/percona/controller/pgcluster/backup.go @@ -53,7 +53,8 @@ func (r *PGClusterReconciler) cleanupOutdatedBackups(ctx context.Context, cr *v2 continue } - readyPod, err := controller.GetReadyInstancePod(ctx, r.Client, cr.Name, cr.Namespace) + // MARK(AG): Pod for running pgbackrest info. 
+ readyPod, err := controller.GetReadyRepoHostPod(ctx, r.Client, cr.Name, cr.Namespace) if err != nil { return errors.Wrap(err, "get ready instance pod") } diff --git a/percona/watcher/wal.go b/percona/watcher/wal.go index f28fdf1e58..5ab131b3a3 100644 --- a/percona/watcher/wal.go +++ b/percona/watcher/wal.go @@ -196,6 +196,7 @@ func getBackupStartTimestamp(ctx context.Context, cli client.Client, cr *pgv2.Pe return time.Time{}, PrimaryPodNotFound } + // MARK(AG): More pgbackrest stuff pgbackrestInfo, err := pgbackrest.GetInfo(ctx, primary, backup.Spec.RepoName) if err != nil { return time.Time{}, errors.Wrap(err, "get pgbackrest info") From 56b8393591334eeb8ec2824758af38673427153f Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Fri, 30 May 2025 08:30:57 +0530 Subject: [PATCH 26/29] Replace another instance with repo-host calls Signed-off-by: Akshit Garg --- percona/controller/pgcluster/backup.go | 1 - percona/postgres/common.go | 36 ++++++++++++++++++-------- percona/watcher/wal.go | 3 ++- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/percona/controller/pgcluster/backup.go b/percona/controller/pgcluster/backup.go index e807f706f4..11fa335a6f 100644 --- a/percona/controller/pgcluster/backup.go +++ b/percona/controller/pgcluster/backup.go @@ -53,7 +53,6 @@ func (r *PGClusterReconciler) cleanupOutdatedBackups(ctx context.Context, cr *v2 continue } - // MARK(AG): Pod for running pgbackrest info. 
readyPod, err := controller.GetReadyRepoHostPod(ctx, r.Client, cr.Name, cr.Namespace) if err != nil { return errors.Wrap(err, "get ready instance pod") diff --git a/percona/postgres/common.go b/percona/postgres/common.go index a7b446ad44..835394f0b2 100644 --- a/percona/postgres/common.go +++ b/percona/postgres/common.go @@ -3,7 +3,6 @@ package perconaPG import ( "context" - gover "github.com/hashicorp/go-version" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -16,20 +15,35 @@ func GetPrimaryPod(ctx context.Context, cli client.Client, cr *v2.PerconaPGClust podList := &corev1.PodList{} // K8SPG-648: patroni v4.0.0 deprecated "master" role. // We should use "primary" instead - role := "primary" - patroniVer, err := gover.NewVersion(cr.Status.PatroniVersion) + err := cli.List(ctx, podList, &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + "app.kubernetes.io/instance": cr.Name, + "postgres-operator.crunchydata.com/pgbackrest-dedicated": "", + }), + }) if err != nil { - return nil, errors.Wrap(err, "failed to get patroni version") + return nil, err } - patroniVer4 := patroniVer.Compare(gover.Must(gover.NewVersion("4.0.0"))) >= 0 - if !patroniVer4 { - role = "master" + + if len(podList.Items) == 0 { + return nil, errors.New("no repo-host pod found") } - err = cli.List(ctx, podList, &client.ListOptions{ + + if len(podList.Items) > 1 { + return nil, errors.New("multiple repo-host pods found") + } + + return &podList.Items[0], nil +} + +func GetRepoHostPod(ctx context.Context, cli client.Client, cr *v2.PerconaPGCluster) (*corev1.Pod, error) { + podList := &corev1.PodList{} + err := cli.List(ctx, podList, &client.ListOptions{ Namespace: cr.Namespace, LabelSelector: labels.SelectorFromSet(map[string]string{ "app.kubernetes.io/instance": cr.Name, - "postgres-operator.crunchydata.com/role": role, + "postgres-operator.crunchydata.com/role": "repo-host", }), }) if err != nil { @@ 
-37,11 +51,11 @@ func GetPrimaryPod(ctx context.Context, cli client.Client, cr *v2.PerconaPGClust } if len(podList.Items) == 0 { - return nil, errors.New("no primary pod found") + return nil, errors.New("no repo-host pod found") } if len(podList.Items) > 1 { - return nil, errors.New("multiple primary pods found") + return nil, errors.New("multiple repo-host pods found") } return &podList.Items[0], nil diff --git a/percona/watcher/wal.go b/percona/watcher/wal.go index 5ab131b3a3..ab50ab2a5b 100644 --- a/percona/watcher/wal.go +++ b/percona/watcher/wal.go @@ -191,7 +191,8 @@ func GetLatestCommitTimestamp(ctx context.Context, cli client.Client, execCli *c } func getBackupStartTimestamp(ctx context.Context, cli client.Client, cr *pgv2.PerconaPGCluster, backup *pgv2.PerconaPGBackup) (time.Time, error) { - primary, err := perconaPG.GetPrimaryPod(ctx, cli, cr) + // MARK(AG): This might break. + primary, err := perconaPG.GetRepoHostPod(ctx, cli, cr) if err != nil { return time.Time{}, PrimaryPodNotFound } From ff936c7b852cfb53caa94a7d1581884fcc202059 Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Fri, 30 May 2025 08:48:30 +0530 Subject: [PATCH 27/29] Thanks supermaven Signed-off-by: Akshit Garg --- percona/postgres/common.go | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/percona/postgres/common.go b/percona/postgres/common.go index 835394f0b2..3763656b83 100644 --- a/percona/postgres/common.go +++ b/percona/postgres/common.go @@ -3,6 +3,7 @@ package perconaPG import ( "context" + gover "github.com/hashicorp/go-version" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -15,23 +16,29 @@ func GetPrimaryPod(ctx context.Context, cli client.Client, cr *v2.PerconaPGClust podList := &corev1.PodList{} // K8SPG-648: patroni v4.0.0 deprecated "master" role. 
// We should use "primary" instead - err := cli.List(ctx, podList, &client.ListOptions{ + role := "primary" + patroniVer, err := gover.NewVersion(cr.Status.PatroniVersion) + if err != nil { + return nil, errors.Wrap(err, "failed to get patroni version") + } + patroniVer4 := patroniVer.Compare(gover.Must(gover.NewVersion("4.0.0"))) >= 0 + if !patroniVer4 { + role = "master" + } + err = cli.List(ctx, podList, &client.ListOptions{ Namespace: cr.Namespace, LabelSelector: labels.SelectorFromSet(map[string]string{ - "app.kubernetes.io/instance": cr.Name, - "postgres-operator.crunchydata.com/pgbackrest-dedicated": "", + "app.kubernetes.io/instance": cr.Name, + "postgres-operator.crunchydata.com/role": role, }), }) - if err != nil { - return nil, err - } if len(podList.Items) == 0 { - return nil, errors.New("no repo-host pod found") + return nil, errors.New("no primary pod found") } if len(podList.Items) > 1 { - return nil, errors.New("multiple repo-host pods found") + return nil, errors.New("multiple primary pods found") } return &podList.Items[0], nil @@ -42,8 +49,8 @@ func GetRepoHostPod(ctx context.Context, cli client.Client, cr *v2.PerconaPGClus err := cli.List(ctx, podList, &client.ListOptions{ Namespace: cr.Namespace, LabelSelector: labels.SelectorFromSet(map[string]string{ - "app.kubernetes.io/instance": cr.Name, - "postgres-operator.crunchydata.com/role": "repo-host", + "app.kubernetes.io/instance": cr.Name, + "postgres-operator.crunchydata.com/pgbackrest-dedicated": "", }), }) if err != nil { From a9a6f8c07a4fd5e60af8ce6b39d253be57648fa7 Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Fri, 6 Jun 2025 10:36:52 -0500 Subject: [PATCH 28/29] leftover from old branch --- .github/workflows/reviewdog.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 3213c05fc2..852aaa0ee9 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -79,11 +79,7 @@ 
jobs: name: runner / manifests runs-on: ubuntu-latest steps: - - name: Extract branch name - shell: bash - run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT - id: extract_branch - uses: actions/checkout@v4 - run: | - make generate VERSION=flyio-2-6-0 + make generate VERSION=main git diff --exit-code From b549fa4fb78d4f526a5367a1d3004af09cc84a07 Mon Sep 17 00:00:00 2001 From: Akshit Garg Date: Tue, 10 Jun 2025 12:54:01 +0530 Subject: [PATCH 29/29] Use instance pod to finalize backup Signed-off-by: Akshit Garg --- percona/controller/pgbackup/controller.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/percona/controller/pgbackup/controller.go b/percona/controller/pgbackup/controller.go index 04b856665c..e26b4d5f9b 100644 --- a/percona/controller/pgbackup/controller.go +++ b/percona/controller/pgbackup/controller.go @@ -551,7 +551,8 @@ func finishBackup(ctx context.Context, c client.Client, pgBackup *v2.PerconaPGBa // MARK(AG): Pod for running pgbackrest info. // Read the repo-host pod instead. - readyPod, err := controller.GetReadyRepoHostPod(ctx, c, pgBackup.Spec.PGCluster, pgBackup.Namespace) + // readyPod, err := controller.GetReadyRepoHostPod(ctx, c, pgBackup.Spec.PGCluster, pgBackup.Namespace) + readyPod, err := controller.GetReadyInstancePod(ctx, c, pgBackup.Spec.PGCluster, pgBackup.Namespace) if err != nil { return nil, errors.Wrap(err, "get ready repo-host pod") }