From 410fbef64a42c12b4c20f5a68586f5d6d439ebe1 Mon Sep 17 00:00:00 2001 From: "neo.mxn0" Date: Fri, 29 Sep 2023 22:25:43 +0700 Subject: [PATCH 1/4] add proxy & monitoring, also disable Debug + auto update --- kustomize/install/manager/kustomization.yaml | 17 +++++ kustomize/monitoring/my-ingress.yaml | 26 +++++++ kustomize/postgres/postgres.yaml | 77 +++++++++++++++++++- 3 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 kustomize/monitoring/my-ingress.yaml diff --git a/kustomize/install/manager/kustomization.yaml b/kustomize/install/manager/kustomization.yaml index 5c5f0b84..df67892d 100644 --- a/kustomize/install/manager/kustomization.yaml +++ b/kustomize/install/manager/kustomization.yaml @@ -1,2 +1,19 @@ resources: - manager.yaml + +patchesStrategicMerge: +- |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: pgo + spec: + template: + spec: + containers: + - name: operator + env: + - name: CRUNCHY_DEBUG + value: "false" + - name: CHECK_FOR_UPGRADES + value: "false" diff --git a/kustomize/monitoring/my-ingress.yaml b/kustomize/monitoring/my-ingress.yaml new file mode 100644 index 00000000..4664a5c3 --- /dev/null +++ b/kustomize/monitoring/my-ingress.yaml @@ -0,0 +1,26 @@ +# +# This file was made by me, not from the crunchy repos +# And will not be included in the kustomize. 
+# The purpose is to access grafana dashboard via nginx-ingress +# +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 10m + name: mon-pgo-ingress + namespace: postgres-operator +spec: + ingressClassName: nginx + rules: + - host: mon-pgo.mxn0.store + http: + paths: + # note all services must be alive, otherwise might affect another + - path: / + pathType: Prefix + backend: + service: + name: crunchy-grafana + port: + number: 3000 diff --git a/kustomize/postgres/postgres.yaml b/kustomize/postgres/postgres.yaml index 32144b11..4daf25e4 100644 --- a/kustomize/postgres/postgres.yaml +++ b/kustomize/postgres/postgres.yaml @@ -6,13 +6,45 @@ spec: image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0 postgresVersion: 15 instances: - - name: instance1 + # name `db` will create pod name like: hippo-db1-lct8-0 + - name: db1 + replicas: 2 dataVolumeClaimSpec: accessModes: - "ReadWriteOnce" resources: requests: storage: 1Gi + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + postgres-operator.crunchydata.com/instance-set: db1 + + # What kind of replicas this + - name: db2 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + postgres-operator.crunchydata.com/instance-set: db2 backups: pgbackrest: image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0 @@ -25,3 +57,46 @@ spec: resources: requests: storage: 1Gi + + # connection pool + proxy: + pgBouncer: + image: 
registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.19-4 + replicas: 2 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: hippo + postgres-operator.crunchydata.com/role: pgbouncer + + # spec.proxy.pgBouncer.service + # expose to the world => add this will failed to create pods, plz check + # For more detail on definition: See the kustomize/install/crd/base/postgres-operator.crunchydata.com_postgresclusters.yaml + # But it still not work as expected, no 35432 port service created :( + # Comment this custom service, let as default: + # We'll got service: `hippo-pgbouncer ClusterIP pgbouncer:5432►0` + # + # service: + # metadata: + # labels: + # app: f2e-api + # type: NodePort + # nodePort: 35432 + + # But it still not work as expected, no 35432 port service created :( + # service: + # metadata: + # labels: + # app: f2e-api + # type: NodePort + # nodePort: 35432 + + monitoring: + pgmonitor: + exporter: + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.4.2-0 From 63655d54e2d6a8bc14efcad52959a6900964ae68 Mon Sep 17 00:00:00 2001 From: "neo.mxn0" Date: Tue, 17 Oct 2023 18:06:27 +0700 Subject: [PATCH 2/4] add user & db management --- kustomize/postgres/postgres.yaml | 36 +++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/kustomize/postgres/postgres.yaml b/kustomize/postgres/postgres.yaml index 4daf25e4..d93a39e5 100644 --- a/kustomize/postgres/postgres.yaml +++ b/kustomize/postgres/postgres.yaml @@ -45,6 +45,18 @@ spec: matchLabels: postgres-operator.crunchydata.com/cluster: hippo postgres-operator.crunchydata.com/instance-set: db2 + + # add new user rhino, hippo still exist + # db name here is postgres db name, not db1, db2 instance's name above + users: + - name: rhino + databases: + - 
payment + - finance + - name: f2e + databases: + - f2e-local-pgo + backups: pgbackrest: image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-0 @@ -81,20 +93,20 @@ spec: # Comment this custom service, let as default: # We'll got service: `hippo-pgbouncer ClusterIP pgbouncer:5432►0` # - # service: - # metadata: - # labels: - # app: f2e-api - # type: NodePort - # nodePort: 35432 + service: + metadata: + labels: + app: f2e-api + type: NodePort + nodePort: 30001 # But it still not work as expected, no 35432 port service created :( - # service: - # metadata: - # labels: - # app: f2e-api - # type: NodePort - # nodePort: 35432 + service: + metadata: + labels: + app: f2e-api + type: NodePort + nodePort: 30000 monitoring: pgmonitor: From 947993d38e84254f840534187c0c2a7c663d3c0a Mon Sep 17 00:00:00 2001 From: "neo.mxn0" Date: Tue, 17 Oct 2023 21:09:14 +0700 Subject: [PATCH 3/4] change ingress to traefik, add comment to config --- kustomize/monitoring/my-ingress.yaml | 6 +++++- kustomize/postgres/postgres.yaml | 29 +++++++++++++--------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/kustomize/monitoring/my-ingress.yaml b/kustomize/monitoring/my-ingress.yaml index 4664a5c3..d6dbe589 100644 --- a/kustomize/monitoring/my-ingress.yaml +++ b/kustomize/monitoring/my-ingress.yaml @@ -11,7 +11,11 @@ metadata: name: mon-pgo-ingress namespace: postgres-operator spec: - ingressClassName: nginx + # How to get class name? 
+ # Depend on setup of ingress controller + # kubectl get ingressClass --all-namespaces + ingressClassName: traefik + # ingressClassName: nginx rules: - host: mon-pgo.mxn0.store http: diff --git a/kustomize/postgres/postgres.yaml b/kustomize/postgres/postgres.yaml index d93a39e5..20d49858 100644 --- a/kustomize/postgres/postgres.yaml +++ b/kustomize/postgres/postgres.yaml @@ -5,10 +5,16 @@ metadata: spec: image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-0 postgresVersion: 15 + + # HA: + # https://access.crunchydata.com/documentation/postgres-operator/5.0.1/tutorial/high-availability/ + # https://stackoverflow.com/questions/77309504/crunchy-postgres-operator-whats-the-different-and-use-cases-between-spec-insta + # instances: # name `db` will create pod name like: hippo-db1-lct8-0 - - name: db1 - replicas: 2 + - name: i1 + replicas: 1 + # replicas: 2 dataVolumeClaimSpec: accessModes: - "ReadWriteOnce" @@ -24,10 +30,8 @@ spec: labelSelector: matchLabels: postgres-operator.crunchydata.com/cluster: hippo - postgres-operator.crunchydata.com/instance-set: db1 - - # What kind of replicas this - - name: db2 + postgres-operator.crunchydata.com/instance-set: i1 + - name: i2 replicas: 1 dataVolumeClaimSpec: accessModes: @@ -44,7 +48,7 @@ spec: labelSelector: matchLabels: postgres-operator.crunchydata.com/cluster: hippo - postgres-operator.crunchydata.com/instance-set: db2 + postgres-operator.crunchydata.com/instance-set: i2 # add new user rhino, hippo still exist # db name here is postgres db name, not db1, db2 instance's name above @@ -85,14 +89,7 @@ spec: matchLabels: postgres-operator.crunchydata.com/cluster: hippo postgres-operator.crunchydata.com/role: pgbouncer - - # spec.proxy.pgBouncer.service - # expose to the world => add this will failed to create pods, plz check - # For more detail on definition: See the kustomize/install/crd/base/postgres-operator.crunchydata.com_postgresclusters.yaml - # But it still not work as expected, no 35432 
port service created :( - # Comment this custom service, let as default: - # We'll got service: `hippo-pgbouncer ClusterIP pgbouncer:5432►0` - # + # pgbouncer service service: metadata: labels: @@ -100,7 +97,7 @@ spec: type: NodePort nodePort: 30001 - # But it still not work as expected, no 35432 port service created :( + # hippo-ha service service: metadata: labels: From 4b91543a0cf0d6396438d32e55424cddae96a262 Mon Sep 17 00:00:00 2001 From: "neo.mxn0" Date: Thu, 19 Oct 2023 17:17:25 +0700 Subject: [PATCH 4/4] add merge-kubeconfig --- merge-kubeconfig | 68 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100755 merge-kubeconfig diff --git a/merge-kubeconfig b/merge-kubeconfig new file mode 100755 index 00000000..0d1323ee --- /dev/null +++ b/merge-kubeconfig @@ -0,0 +1,68 @@ +#!/bin/bash + +# Initialize variables to store option values +show_help=0 +do_reset=0 + +# Check the first argument +if [ $# -ge 1 ]; then + if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then + show_help=1 + shift + elif [ "$1" == "-r" ] || [ "$1" == "--reset" ]; then + do_reset=1 + shift + fi +fi + + +if [ "$show_help" -eq 1 ]; then + echo "Usage: merge-kubeconfig [-h] [-r] [file1] [file2] [...] 
[file n]"
+    echo "Options:"
+    echo "  -h, --help    Show help"
+    echo "  -r, --reset   override(replace) the current config rather than merge with it"
+    echo ""
+    echo "Eg:"
+    echo ""
+    echo "  1) Append"
+    echo "  "
+    echo "     merge-kubeconfig ~/.kube/k3s-fi.conf aws-k8s.conf"
+    echo "  "
+    echo "     would append k3s-fi and aws-k8s config into current config, stored at ~/.kube/config"
+    echo "  "
+    echo "  2) Replace"
+    echo ""
+    echo "     merge-kubeconfig -r ~/.kube/k3s-fi.conf aws-k8s.conf"
+    echo ""
+    echo "     would empty the current config, then store k3s-fi and aws-k8s config into current config, stored at ~/.kube/config"
+
+    exit 0
+fi
+
+
+KUBECONFIG=""
+for arg in "$@"; do
+    KUBECONFIG="${KUBECONFIG}:${arg}"
+done
+# Remove the first `:` char
+KUBECONFIG="${KUBECONFIG#:}"
+if [ "$do_reset" -ne 1 ]; then
+    KUBECONFIG=~/.kube/config:$KUBECONFIG
+fi
+
+echo "---"
+echo "KUBECONFIG=$KUBECONFIG"
+echo "---"
+
+export KUBECONFIG
+kubectl config view --flatten > all-in-one-kubeconfig.yaml
+mv all-in-one-kubeconfig.yaml ~/.kube/config
+
+if [ "$do_reset" -eq 1 ]; then
+    echo "OK: replace current config with $@"
+    exit 0
+else
+    echo "OK: merged current config with $@"
+    exit 0
+fi