diff --git a/.ansible-lint b/.ansible-lint deleted file mode 100644 index 1fd43bbd7..000000000 --- a/.ansible-lint +++ /dev/null @@ -1,7 +0,0 @@ -skip_list: - - 'yaml' - - 'role-name' - - 'ignore-errors' - - 'package-latest' - - 'risky-file-permissions' # TODO - - 'no-log-password' # We do not display passwords diff --git a/.config/.flake8 b/.config/.flake8 new file mode 100644 index 000000000..dc5b41d2b --- /dev/null +++ b/.config/.flake8 @@ -0,0 +1,10 @@ +[flake8] +ignore = E501, W503, E402 +exclude = + .git, + __pycache__, + docs/source/conf.py, + old, + build, + dist, + .venv diff --git a/.config/.yamllint b/.config/.yamllint new file mode 100644 index 000000000..fea88d72d --- /dev/null +++ b/.config/.yamllint @@ -0,0 +1,21 @@ +--- +extends: default + +rules: + line-length: + max: 160 + comments-indentation: disable + comments: + min-spaces-from-content: 1 + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + new-lines: + type: unix + empty-lines: + max: 1 + indentation: false + +ignore: | + .github/ + .venv diff --git a/.config/ansible-lint.yml b/.config/ansible-lint.yml new file mode 100644 index 000000000..51b3b83ef --- /dev/null +++ b/.config/ansible-lint.yml @@ -0,0 +1,26 @@ +--- +skip_list: + - command-instead-of-module + - command-instead-of-shell + - experimental + - ignore-errors + - no-changed-when + - no-handler + - no-relative-paths + - package-latest + - key-order[task] + - var-naming[no-role-prefix] + - yaml[indentation] + - yaml[line-length] + - name[missing] + - name[template] + - name[casing] # TODO: All names should start with an uppercase letter. + - risky-file-permissions # TODO: File permissions unset or incorrect. + - role-name # TODO: Avoid using paths when importing roles. Role name XXX does not match ``^*$`` pattern. + - schema[playbook] # TODO: Use FQCN for `become_method`. + - schema[tasks] # TODO: Use FQCN for `become_method`. 
+ - galaxy[no-changelog] + +exclude_paths: + - ../.venv + - ../.github diff --git a/.config/gitpod/Dockerfile b/.config/gitpod/Dockerfile new file mode 100644 index 000000000..d47538592 --- /dev/null +++ b/.config/gitpod/Dockerfile @@ -0,0 +1,88 @@ +# First stage: Install build-time dependencies +FROM ubuntu:noble as builder + +# hadolint ignore=DL3002 +USER root + +# Copy Python version config file +COPY .config/python_version.config /tmp/ + +# Set a variable for the npm version +ARG NPM_VERSION=11.2.0 +# Set a variable for the ungit version +ARG UNGIT_VERSION=1.5.28 +# Set a variable for the prettier version +ARG PRETTIER_VERSION=3.5.3 +# Set a variable for the editorconfig-checker version +ARG EDITORCONFIG_CHECKER_VERSION=3.2.0 + +# Update system and install packages +# hadolint ignore=DL3008,DL3013,DL3009 +RUN PYTHON_VERSION=$(cut -d '=' -f 2 /tmp/python_version.config) \ + && apt-get update \ + && apt-get upgrade -y \ + && apt-get install -y --no-install-recommends \ + bash-completion \ + ca-certificates \ + curl \ + git \ + git-lfs \ + gnupg \ + htop \ + iproute2 \ + lsb-release \ + make \ + nano \ + python3-pip \ + "python${PYTHON_VERSION}" \ + "python${PYTHON_VERSION}-venv" \ + sudo \ + tree \ + vim \ + wget \ + && python3 -m pip install --no-cache-dir virtualenv --break-system-packages + +# Install Docker +FROM builder as docker-installer + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# hadolint ignore=DL3008,DL3009 +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update \ + && apt-get install -y --no-install-recommends docker-ce docker-ce-cli containerd.io + +# Install npm, ungit, prettier, editorconfig-checker, and sql-formatter +FROM docker-installer as npm-installer + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# Re-declare the version args in this stage: ARG values do not cross FROM stage boundaries +ARG NPM_VERSION +ARG UNGIT_VERSION +ARG PRETTIER_VERSION +ARG EDITORCONFIG_CHECKER_VERSION +# hadolint ignore=DL3008 +RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ + && npm install -g "npm@${NPM_VERSION}" \ + && npm install -g \ + "ungit@${UNGIT_VERSION}" \ + "prettier@${PRETTIER_VERSION}" \ + "editorconfig-checker@${EDITORCONFIG_CHECKER_VERSION}" \ + "sql-formatter@latest" + +# Cleanup +FROM npm-installer as cleanup + +RUN apt-get clean && rm -rf /var/lib/apt/lists/* tmp/* + +# Final stage: Setup final image and user +FROM scratch + +COPY --from=cleanup / / + +# Create the gitpod user. UID must be 33333. 
+RUN useradd -l -u 33333 -G sudo -md /home/gitpod -s /bin/bash -p gitpod gitpod + +USER gitpod diff --git a/.config/make/docker.mak b/.config/make/docker.mak new file mode 100644 index 000000000..06aa77248 --- /dev/null +++ b/.config/make/docker.mak @@ -0,0 +1,92 @@ +## —— Docker ————————————————————————————————————————————————————————————————————————————————————— +TAG ?= local +DOCKER_REGISTRY ?= autobase + +.PHONY: docker-lint docker-lint-automation docker-lint-console-ui docker-lint-console-api docker-lint-console-db docker-lint-console +docker-lint: docker-lint-automation docker-lint-console-ui docker-lint-console-api docker-lint-console-db docker-lint-console ## Lint all Dockerfiles + +docker-lint-automation: ## Lint automation Dockerfile + @echo "Lint automation container Dockerfile" + docker run --rm -i -v $(PWD)/automation/Dockerfile:/Dockerfile \ + hadolint/hadolint hadolint --ignore DL3002 --ignore DL3008 --ignore DL3059 /Dockerfile + +docker-lint-console-ui: ## Lint console ui Dockerfile + @echo "Lint console ui container Dockerfile" + docker run --rm -i -v $(PWD)/console/ui/Dockerfile:/Dockerfile \ + hadolint/hadolint hadolint --ignore DL3002 --ignore DL3008 --ignore DL3059 /Dockerfile + +docker-lint-console-api: ## Lint console api Dockerfile + @echo "Lint console api container Dockerfile" + docker run --rm -i -v $(PWD)/console/service/Dockerfile:/Dockerfile \ + hadolint/hadolint hadolint --ignore DL3002 --ignore DL3008 --ignore DL3059 /Dockerfile + +docker-lint-console-db: ## Lint console db Dockerfile + @echo "Lint console db container Dockerfile" + docker run --rm -i -v $(PWD)/console/db/Dockerfile:/Dockerfile \ + hadolint/hadolint hadolint --ignore DL3002 --ignore DL3008 --ignore DL3059 --ignore DL4001 /Dockerfile + +docker-lint-console: ## Lint console Dockerfile (all services) + @echo "Lint console container Dockerfile" + docker run --rm -i -v $(PWD)/console/Dockerfile:/Dockerfile \ + hadolint/hadolint hadolint --ignore DL3002 --ignore DL3008 --ignore DL3059 --ignore DL4001 /Dockerfile + +.PHONY: docker-build docker-build-automation docker-build-console-ui docker-build-console-api docker-build-console-db docker-build-console +docker-build: docker-build-automation docker-build-console-ui docker-build-console-api docker-build-console-db docker-build-console ## Build all Docker images + +docker-build-automation: ## Build automation image + @echo "Build automation docker image with tag $(TAG)" + docker build --no-cache --platform linux/amd64 --tag automation:$(TAG) --file automation/Dockerfile . + +docker-build-console-ui: ## Build console ui image + @echo "Build console ui docker image with tag $(TAG)" + docker build --no-cache --platform linux/amd64 --tag console_ui:$(TAG) --file console/ui/Dockerfile . + +docker-build-console-api: ## Build console api image + @echo "Build console api docker image with tag $(TAG)" + docker build --no-cache --platform linux/amd64 --tag console_api:$(TAG) --file console/service/Dockerfile . + +docker-build-console-db: ## Build console db image + @echo "Build console db docker image with tag $(TAG)" + docker build --no-cache --platform linux/amd64 --tag console_db:$(TAG) --file console/db/Dockerfile . + +docker-build-console: ## Build console image (all services) + @echo "Build console docker image with tag $(TAG)" + docker build --no-cache --platform linux/amd64 --tag console:$(TAG) --file console/Dockerfile . 
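A minimal sketch of how the lint and build targets above are typically driven from a local checkout (assuming Docker and GNU make are installed; `TAG` falls back to `local` as declared at the top of this file):

```bash
# Lint every Dockerfile with hadolint, then build all images under a custom tag
make docker-lint
make docker-build TAG=v1.0.0
```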
+ +.PHONY: docker-push docker-push-automation docker-push-console-ui docker-push-console-api docker-push-console-db docker-push-console +docker-push: docker-push-automation docker-push-console-ui docker-push-console-api docker-push-console-db docker-push-console ## Push all images to Dockerhub (example: make docker-push TAG=my_tag DOCKER_REGISTRY=my_repo DOCKER_REGISTRY_USER="my_username" DOCKER_REGISTRY_PASSWORD="my_password") + +docker-push-automation: ## Push automation image to Dockerhub + @echo "Push automation docker image with tag $(TAG)" + echo "$(DOCKER_REGISTRY_PASSWORD)" | docker login --username "$(DOCKER_REGISTRY_USER)" --password-stdin + docker tag automation:$(TAG) $(DOCKER_REGISTRY)/automation:$(TAG) + docker push $(DOCKER_REGISTRY)/automation:$(TAG) + +docker-push-console-ui: ## Push console ui image to Dockerhub + @echo "Push console ui docker image with tag $(TAG)" + echo "$(DOCKER_REGISTRY_PASSWORD)" | docker login --username "$(DOCKER_REGISTRY_USER)" --password-stdin + docker tag console_ui:$(TAG) $(DOCKER_REGISTRY)/console_ui:$(TAG) + docker push $(DOCKER_REGISTRY)/console_ui:$(TAG) + +docker-push-console-api: ## Push console api image to Dockerhub + @echo "Push console api docker image with tag $(TAG)" + echo "$(DOCKER_REGISTRY_PASSWORD)" | docker login --username "$(DOCKER_REGISTRY_USER)" --password-stdin + docker tag console_api:$(TAG) $(DOCKER_REGISTRY)/console_api:$(TAG) + docker push $(DOCKER_REGISTRY)/console_api:$(TAG) + +docker-push-console-db: ## Push console db image to Dockerhub + @echo "Push console db docker image with tag $(TAG)" + echo "$(DOCKER_REGISTRY_PASSWORD)" | docker login --username "$(DOCKER_REGISTRY_USER)" --password-stdin + docker tag console_db:$(TAG) $(DOCKER_REGISTRY)/console_db:$(TAG) + docker push $(DOCKER_REGISTRY)/console_db:$(TAG) + +docker-push-console: ## Push console image to Dockerhub (all services) + @echo "Push console docker image with tag $(TAG)" + echo "$(DOCKER_REGISTRY_PASSWORD)" | docker login --username "$(DOCKER_REGISTRY_USER)" --password-stdin + docker tag console:$(TAG) $(DOCKER_REGISTRY)/console:$(TAG) + docker push $(DOCKER_REGISTRY)/console:$(TAG) + +.PHONY: docker-tests +docker-tests: ## Run tests for docker + $(MAKE) docker-lint + $(MAKE) docker-build diff --git a/.config/make/formatting.mak b/.config/make/formatting.mak new file mode 100644 index 000000000..b98b7da86 --- /dev/null +++ b/.config/make/formatting.mak @@ -0,0 +1,20 @@ +## —— Formatting ——————————————————————————————————————————————————————————————————————————————— + +.PHONY: prettier +prettier: ## Run Prettier formatting + npx prettier --write . + +.PHONY: prettier-check +prettier-check: ## Check formatting with Prettier (without modifying files) + npx prettier --check . + +.PHONY: sql-format +sql-format: ## Format all SQL files using sql-formatter + find . -name "*.sql" -print0 | xargs -0 -n1 sql-formatter --fix + +# https://hub.docker.com/r/backplane/pgformatter +.PHONY: pg-format +pg-format: ## Format all SQL files using pgFormatter (PostgreSQL SQL queries and PL/PGSQL code beautifier) + find . 
-name "*.sql" -print0 | xargs -0 -I{} \ + docker run --rm -v "$(shell pwd):/work" -u $(shell id -u):$(shell id -g) \ + backplane/pgformatter -u 1 -U 1 -f 1 -s 2 -W 0 -w 160 -i "{}" diff --git a/.config/make/help.mak b/.config/make/help.mak new file mode 100644 index 000000000..a71f2c7ca --- /dev/null +++ b/.config/make/help.mak @@ -0,0 +1,5 @@ +## —— Help ——————————————————————————————————————————————————————————————————————————————————————— +.PHONY: help +help: ## Help command + echo -e "$$HEADER" + grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/' diff --git a/.config/make/linters.mak b/.config/make/linters.mak new file mode 100644 index 000000000..ada449820 --- /dev/null +++ b/.config/make/linters.mak @@ -0,0 +1,32 @@ +## —— Linter ————————————————————————————————————————————————————————————————————————————————————— + +# Activate virtual environment +ACTIVATE_VENV = source .venv/bin/activate + +# Configuration files +YAMLLINT_CONFIG = .config/.yamllint +FLAKE8_CONFIG = .config/.flake8 + +.PHONY: linter-yamllint +linter-yamllint: ## Lint YAML files using yamllint + echo "yamllint #############################################################" + $(ACTIVATE_VENV) && \ + yamllint --strict -c $(YAMLLINT_CONFIG) . + +.PHONY: linter-ansible-lint +linter-ansible-lint: ## Lint Ansible files using ansible-lint + echo "ansible-lint #########################################################" + $(ACTIVATE_VENV) && \ + ansible-lint --force-color --parseable ./automation + +.PHONY: linter-flake8 +linter-flake8: ## Lint Python files using flake8 + echo "flake8 ###############################################################" + $(ACTIVATE_VENV) && \ + flake8 --config $(FLAKE8_CONFIG) + +.PHONY: lint +lint: ## Run all linters + $(MAKE) linter-yamllint + $(MAKE) linter-ansible-lint + $(MAKE) linter-flake8 diff --git a/.config/make/molecule.mak b/.config/make/molecule.mak new file mode 100644 index 000000000..61572edfa --- /dev/null +++ b/.config/make/molecule.mak @@ -0,0 +1,56 @@ +# Activate virtual environment +ACTIVATE_VENV = . 
.venv/bin/activate + +## —— Molecule ——————————————————————————————————————————————————————————————————————————————————— + +.PHONY: molecule-test +molecule-test: ## Run test sequence for default scenario + $(ACTIVATE_VENV) && cd automation && molecule test + +.PHONY: molecule-destroy +molecule-destroy: ## Run destroy sequence for default scenario + $(ACTIVATE_VENV) && cd automation && molecule destroy + +.PHONY: molecule-converge +molecule-converge: ## Run converge sequence for default scenario + $(ACTIVATE_VENV) && cd automation && molecule converge + +.PHONY: molecule-reconverge +molecule-reconverge: ## Run destroy and converge sequence for default scenario + $(ACTIVATE_VENV) && cd automation && molecule destroy && molecule converge + +.PHONY: molecule-test-all +molecule-test-all: ## Run test sequence for all scenarios + $(ACTIVATE_VENV) && cd automation && molecule test --all + +.PHONY: molecule-destroy-all +molecule-destroy-all: ## Run destroy sequence for all scenarios + $(ACTIVATE_VENV) && cd automation && molecule destroy --all + +.PHONY: molecule-test-scenario +molecule-test-scenario: ## Run molecule test with specific scenario (example: make molecule-test-scenario MOLECULE_SCENARIO="scenario_name") + $(ACTIVATE_VENV) && cd automation && molecule test --scenario-name $(MOLECULE_SCENARIO) + +.PHONY: molecule-destroy-scenario +molecule-destroy-scenario: ## Run molecule destroy with specific scenario (example: make molecule-destroy-scenario MOLECULE_SCENARIO="scenario_name") + $(ACTIVATE_VENV) && cd automation && molecule destroy --scenario-name $(MOLECULE_SCENARIO) + +.PHONY: molecule-converge-scenario +molecule-converge-scenario: ## Run molecule converge with specific scenario (example: make molecule-converge-scenario MOLECULE_SCENARIO="scenario_name") + $(ACTIVATE_VENV) && cd automation && molecule converge --scenario-name $(MOLECULE_SCENARIO) + +.PHONY: molecule-dependency +molecule-dependency: ## Run dependency sequence + $(ACTIVATE_VENV) && cd automation && molecule dependency + +.PHONY: molecule-verify +molecule-verify: ## Run verify sequence + $(ACTIVATE_VENV) && cd automation && molecule verify + +.PHONY: molecule-login +molecule-login: ## Log in to one instance using custom host IP (example: make molecule-login MOLECULE_HOST="10.172.0.20") + $(ACTIVATE_VENV) && cd automation && molecule login --host $(MOLECULE_HOST) + +.PHONY: molecule-login-scenario +molecule-login-scenario: ## Log in to one instance using custom host IP and scenario name (example: make molecule-login-scenario MOLECULE_HOST="10.172.1.20" MOLECULE_SCENARIO="scenario_name") + $(ACTIVATE_VENV) && cd automation && molecule login --host $(MOLECULE_HOST) --scenario-name $(MOLECULE_SCENARIO) diff --git a/.config/make/python.mak b/.config/make/python.mak new file mode 100644 index 000000000..805f93e2a --- /dev/null +++ b/.config/make/python.mak @@ -0,0 +1,72 @@ +# Python default launcher +python_launcher := python$(shell cat .config/python_version.config | cut -d '=' -f 2) +python_requirements_file ?= automation/requirements.txt +python_requirements_dev_file ?= .config/python/dev/requirements.txt + +# Activate virtual environment +ACTIVATE_VENV = . 
.venv/bin/activate + +## —— Python ————————————————————————————————————————————————————————————————————————————————————— +.PHONY: python-bootstrap +python-bootstrap: ## Bootstrap python + $(MAKE) python-venv-init + $(MAKE) python-venv-upgrade + $(MAKE) python-venv-requirements + +.PHONY: python-bootstrap-dev +python-bootstrap-dev: ## Bootstrap python for dev env + $(MAKE) python-venv-requirements-dev + $(MAKE) python-venv-linters-install + +# =============================================================================================== +# .venv +# =============================================================================================== +.PHONY: python-venv-init +python-venv-init: ## Create venv ".venv/" if not exist + @echo "Checking if .venv directory exists..."; \ + if [ ! -d .venv ]; then echo "Creating virtual environment using $(python_launcher)..."; $(python_launcher) -m venv .venv; else echo ".venv directory already exists. Skipping creation."; fi + +.PHONY: python-venv-upgrade +python-venv-upgrade: ## Upgrade venv with pip, setuptools and wheel + @echo "Upgrading virtual environment..." + $(ACTIVATE_VENV) && pip install --upgrade pip setuptools wheel + +.PHONY: python-venv-requirements +python-venv-requirements: ## Install or upgrade from $(python_requirements_file) + @echo "Installing or upgrading requirements from $(python_requirements_file)..." + $(ACTIVATE_VENV) && pip install --upgrade --requirement $(python_requirements_file) + +.PHONY: python-venv-requirements-dev +python-venv-requirements-dev: ## Install or upgrade from $(python_requirements_dev_file) + @echo "Installing or upgrading dev requirements from $(python_requirements_dev_file)..." + $(ACTIVATE_VENV) && pip install --upgrade --requirement $(python_requirements_dev_file) + +.PHONY: python-venv-linters-install +python-venv-linters-install: ## Install or upgrade linters + @echo "Installing or upgrading linters..." + $(ACTIVATE_VENV) && pip install --upgrade flake8 + +.PHONY: python-venv-purge +python-venv-purge: ## Remove venv ".venv/" folder + @echo "Removing .venv directory..." + @rm -rf .venv + +# =============================================================================================== +# Utils +# =============================================================================================== +.PHONY: python-purge-cache +python-purge-cache: ## Purge cache to avoid used cached files + @echo "Purging pip cache..." 
+ @if [ -d .venv ] ; then $(ACTIVATE_VENV) && pip cache purge; fi + +.PHONY: python-version +python-version: ## Displays the python version used for the .venv + $(ACTIVATE_VENV) && $(python_launcher) --version + +.PHONY: python-flake8 +python-flake8: ## Run flake8 linter for python + $(ACTIVATE_VENV) && flake8 --config .config/.flake8 + +.PHONY: python-pytest +python-pytest: ## Run pytest to test python scripts + $(ACTIVATE_VENV) && cd scripts/ && $(python_launcher) -m pytest diff --git a/.config/molecule/config.yml b/.config/molecule/config.yml new file mode 100644 index 000000000..cfdcdf156 --- /dev/null +++ b/.config/molecule/config.yml @@ -0,0 +1,43 @@ +--- +dependency: + name: galaxy + enabled: false +driver: + name: docker + +provisioner: + name: ansible + config_options: + defaults: + display_skipped_hosts: false + remote_tmp: "~/.ansible/tmp" + allow_world_readable_tmpfiles: false + timeout: 60 + playbooks: + prepare: prepare.yml + +scenario: + create_sequence: + - prepare + - create + converge_sequence: + - prepare + - create + - converge + destroy_sequence: + - cleanup + - destroy + test_sequence: + - cleanup + - destroy + - syntax + - prepare + - create + - converge + # - idempotence # >> role:patroni,task:"data directory check result" + - verify + - cleanup + - destroy + +verifier: + name: ansible diff --git a/.config/python/dev/requirements.txt b/.config/python/dev/requirements.txt new file mode 100644 index 000000000..3fa0994ee --- /dev/null +++ b/.config/python/dev/requirements.txt @@ -0,0 +1,53 @@ +ansible==9.13.0 +ansible-compat==25.1.4 +ansible-core==2.16.14 +ansible-lint==25.1.3 +attrs==25.3.0 +black==25.1.0 +bracex==2.5.post1 +certifi==2025.1.31 +cffi==1.17.1 +charset-normalizer==3.4.1 +click==8.1.8 +click-help-colors==0.9.4 +cryptography==44.0.2 +docker==7.1.0 +enrich==1.2.7 +filelock==3.18.0 +flake8==7.1.2 +idna==3.10 +importlib-metadata==8.6.1 +jinja2==3.1.6 +jsonschema==4.23.0 +jsonschema-specifications==2024.10.1 +markdown-it-py==3.0.0 +markupsafe==3.0.2 +mccabe==0.7.0 +mdurl==0.1.2 +molecule==25.3.1 +molecule-plugins==23.7.0 +mypy-extensions==1.0.0 +packaging==24.2 +pathspec==0.12.1 +platformdirs==4.3.7 +pluggy==1.5.0 +pycodestyle==2.12.1 +pycparser==2.22 +pyflakes==3.2.0 +pygments==2.19.1 +pyyaml==6.0.2 +referencing==0.36.2 +requests==2.32.3 +resolvelib==1.0.1 +rich==13.9.4 +rpds-py==0.23.1 +ruamel-yaml==0.18.10 +ruamel-yaml-clib==0.2.12 +subprocess-tee==0.4.2 +tomli==2.2.1 +typing-extensions==4.12.2 +urllib3==2.3.0 +wcmatch==10.0 +wheel==0.45.1 +yamllint==1.37.0 +zipp==3.21.0 diff --git a/.config/python_version.config b/.config/python_version.config new file mode 100644 index 000000000..030b8224f --- /dev/null +++ b/.config/python_version.config @@ -0,0 +1 @@ +PYTHON_VERSION=3.12 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..602f542fe --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +root = true + +[*] +indent_style = space +indent_size = 2 +tab_width = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.go] +indent_style = tab + +[*.mak] +indent_style = tab + +[Makefile] +indent_style = tab + +[*.conf] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false +indent_size = 1 diff --git a/.editorconfig-checker.json b/.editorconfig-checker.json new file mode 100644 index 000000000..5c2bedd93 --- /dev/null +++ b/.editorconfig-checker.json @@ -0,0 +1,3 @@ +{ + "Exclude": [".env", ".j2", "console/", "settings.json", ".sql", "go.mod", "go.sum", "patroni/library/"] +} diff 
--git a/.github/FUNDING.yml b/.github/FUNDING.yml index d67816996..08227f7bd 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,3 @@ --- -github: # TODO +github: vitabaks patreon: vitabaks diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 000000000..e0f7aa6f0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,74 @@ +--- +name: "Bug report" +description: "Submit a report and help us improve Autobase" +title: "[Bug] " +labels: ["bug", "needs triage"] +body: + - type: markdown + attributes: + value: "### Thank you for contributing to Autobase!" + - type: markdown + attributes: + value: | + Before submitting, we'd appreciate it if you: + - Verify that your issue is not already reported on GitHub. + - Ensure your description is detailed to help us reproduce the problem. + - Attach logs or error messages, if applicable, to help us diagnose the problem. + - Check the Autobase [documentation](https://autobase.tech/docs). + - Ensure you have an active Autobase [subscription](https://autobase.tech/docs/support) for prioritized support. + - type: textarea + id: bug-description + attributes: + label: Bug description + description: Briefly describe the issue you're experiencing. + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: Describe what you expected to happen. + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: Steps to reproduce + description: Describe the steps to reproduce the bug. Provide specific details for better understanding. + value: | + 1. + 2. + 3. + ... + validations: + required: false + - type: dropdown + id: install-method + attributes: + label: Installation method + description: | + Select the installation method you used. + If "other", provide details in the "Additional info" section. + options: + - "Console (UI)" + - "Command line" + - "GitOps" + - "Other" + validations: + required: true + - type: textarea + id: system-info + attributes: + label: System info + description: | + Provide information about your system. Include OS version, Autobase version, and any relevant configuration details. + validations: + required: true + - type: textarea + id: additional-info + attributes: + label: Additional info + description: | + Any additional information related to the issue, such as error messages, or ansible and system logs. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/FEAT_REQUEST.yml b/.github/ISSUE_TEMPLATE/FEAT_REQUEST.yml new file mode 100644 index 000000000..c8355f581 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/FEAT_REQUEST.yml @@ -0,0 +1,52 @@ +--- +name: "Feature request" +description: "Submit a feature request and help us improve Autobase" +title: "[Feat] " +labels: ["feature request", "needs triage"] +body: + - type: markdown + attributes: + value: "### Thank you for contributing to Autobase!" + - type: markdown + attributes: + value: | + Before submitting, please: + - Verify that your feature request is not already reported on GitHub. + - Ensure you have an active Autobase [subscription](https://autobase.tech/docs/support) for prioritized support. + - type: textarea + id: problem + attributes: + label: Problem + description: | + Describe the problem your feature request is intended to solve. How does the lack of this feature affect you? 
+ validations: + required: true + - type: textarea + id: description + attributes: + label: Description + description: | + Clearly describe the feature you want and how it would improve Autobase. + validations: + required: true + - type: dropdown + id: importance + attributes: + label: Importance + description: | + How important is this feature for you? Select "blocker" if its absence prevents you from using Autobase. + options: + - "nice to have" + - "really want" + - "must have" + - "blocker" + validations: + required: true + - type: textarea + id: proposed-implementation + attributes: + label: Proposed implementation + description: | + If you have any ideas, share how this feature could be implemented. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..fdbb3fa12 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,9 @@ +--- +blank_issues_enabled: false +contact_links: + - name: "Question" + url: https://github.com/vitabaks/autobase/discussions/new?category=q-a + about: Ask a question about Autobase + - name: "Commercial support" + url: https://autobase.tech/docs/support + about: Find out more about the available Autobase packages diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml index bf3434428..766ea1815 100644 --- a/.github/workflows/ansible-lint.yml +++ b/.github/workflows/ansible-lint.yml @@ -1,7 +1,5 @@ --- -# yamllint disable rule:truthy - -name: Ansible-lint +name: "Ansible-lint" on: push: @@ -16,51 +14,19 @@ jobs: runs-on: ubuntu-latest steps: - - name: Git clone repo postgresql_cluster - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV - - name: Lint playbook with Ansible v2.10 - uses: iranzo/ansible-lint-action@v4.1.1 - with: - targets: | - deploy_pgcluster.yml - add_pgnode.yml - add_balancer.yml - override-deps: ansible==2.10.7 - args: "" + - name: Checkout + uses: actions/checkout@v3 - - name: Lint playbook with Ansible v2.9 - uses: iranzo/ansible-lint-action@v4.1.1 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - targets: | - deploy_pgcluster.yml - add_pgnode.yml - add_balancer.yml - override-deps: ansible==2.9.22 - args: "" + python-version: "3.12" - - name: Lint playbook with Ansible v2.8 - uses: iranzo/ansible-lint-action@v4.1.1 - with: - targets: | - deploy_pgcluster.yml - add_pgnode.yml - add_balancer.yml - override-deps: | - ansible==2.8.20 - ansible-lint==4.3.7 - args: "-x 106,208,403" - - - name: Lint playbook with Ansible v2.7 - uses: iranzo/ansible-lint-action@v4.1.1 - with: - targets: | - deploy_pgcluster.yml - add_pgnode.yml - add_balancer.yml - override-deps: | - ansible==2.7.18 - ansible-lint==4.2.0 - args: "-x 106,208,403" + - name: Install dependencies + run: make bootstrap-dev -... 
+ - name: Run Ansible-lint + run: make linter-ansible-lint diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 000000000..0a6ab3baa --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,65 @@ +--- +name: "Docker" + +on: + push: + branches: + - master + tags: + - "*" + pull_request: + branches: + - master + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Extract branch or tag name + run: | + if [[ -n "${GITHUB_HEAD_REF}" ]]; then + # This is a PR, use the source branch name + echo "REF_NAME=${GITHUB_HEAD_REF}" >> $GITHUB_ENV + else + # This is a push, use the branch or tag name from GITHUB_REF + echo "REF_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV + fi + + - name: Set TAG + run: | + if [[ "${{ env.REF_NAME }}" == "master" ]]; then + echo "TAG=latest" >> $GITHUB_ENV + else + echo "TAG=${{ env.REF_NAME }}" >> $GITHUB_ENV + fi + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Docker lint + run: make docker-lint + + - name: Run Docker build + run: make docker-build + env: + TAG: ${{ env.TAG }} + + - name: Run Docker push + if: ${{ env.DOCKER_REGISTRY_USER != '' && env.DOCKER_REGISTRY_PASSWORD != '' }} + run: make docker-push + env: + TAG: ${{ env.TAG }} + DOCKER_REGISTRY_USER: ${{ secrets.DOCKER_USERNAME }} + DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} diff --git a/.github/workflows/flake8.yml b/.github/workflows/flake8.yml new file mode 100644 index 000000000..c6d8b460b --- /dev/null +++ b/.github/workflows/flake8.yml @@ -0,0 +1,32 @@ +--- +name: "Flake8" + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Flake8 + run: make linter-flake8 diff --git a/.github/workflows/molecule.yml b/.github/workflows/molecule.yml index 0e2be1c6a..4b96c4d58 100644 --- a/.github/workflows/molecule.yml +++ b/.github/workflows/molecule.yml @@ -1,7 +1,4 @@ --- -# yamllint disable rule:truthy -# yamllint disable rule:line-length - name: Molecule on: @@ -18,32 +15,61 @@ jobs: strategy: fail-fast: false matrix: - distro: - - debian11 - - debian10 - - ubuntu2004 - - ubuntu1804 - - centos8 - - centos7 - - rockylinux8 + config: + - distro: debian12 + tag: latest + namespace: geerlingguy + - distro: debian11 + tag: latest + namespace: geerlingguy + - distro: ubuntu2404 + tag: latest + namespace: geerlingguy + - distro: ubuntu2204 + tag: latest + namespace: geerlingguy + - distro: rockylinux8 + tag: latest + namespace: geerlingguy + - distro: rockylinux9 + tag: latest + namespace: geerlingguy + - distro: almalinux8 + tag: latest + namespace: glillico + - distro: almalinux9 + tag: latest + namespace: glillico + - distro: oraclelinux8 + tag: latest + namespace: glillico + - distro: oraclelinux9 + tag: latest + namespace: glillico + - distro: centosstream9 + tag: latest + namespace: glillico steps: - - name: checkout - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - 
name: Checkout + uses: actions/checkout@v3 - - name: Set up Python 3 - uses: actions/setup-python@v2 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: "3.12" - name: Install dependencies - run: pip3 install molecule[docker] ansible + run: make bootstrap-dev - name: Run Molecule tests - run: molecule test + run: make molecule-test env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: ${{ matrix.distro }} - -... + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: ${{ matrix.config.distro }} + IMAGE_TAG: ${{ matrix.config.tag }} + IMAGE_NAMESPACE: ${{ matrix.config.namespace }} diff --git a/.github/workflows/molecule_pg_upgrade.yml b/.github/workflows/molecule_pg_upgrade.yml new file mode 100644 index 000000000..37b22ddf4 --- /dev/null +++ b/.github/workflows/molecule_pg_upgrade.yml @@ -0,0 +1,72 @@ +--- +name: Molecule pg_upgrade + +on: + schedule: + - cron: "0 0 * * 6" + +jobs: + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + config: + - distro: debian12 + tag: latest + namespace: geerlingguy + - distro: debian11 + tag: latest + namespace: geerlingguy + - distro: ubuntu2404 + tag: latest + namespace: geerlingguy + - distro: ubuntu2204 + tag: latest + namespace: geerlingguy + - distro: rockylinux8 + tag: latest + namespace: geerlingguy + - distro: rockylinux9 + tag: latest + namespace: geerlingguy + - distro: almalinux8 + tag: latest + namespace: glillico + - distro: almalinux9 + tag: latest + namespace: glillico + - distro: oraclelinux8 + tag: latest + namespace: glillico + - distro: oraclelinux9 + tag: latest + namespace: glillico + - distro: centosstream9 + tag: latest + namespace: glillico + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests for pg_upgrade + run: make molecule-test-scenario + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: ${{ matrix.config.distro }} + IMAGE_TAG: ${{ matrix.config.tag }} + IMAGE_NAMESPACE: ${{ matrix.config.namespace }} + MOLECULE_SCENARIO: "pg_upgrade" diff --git a/.github/workflows/molecule_pgpro.yml b/.github/workflows/molecule_pgpro.yml index 2832b874d..8f3ee9fa5 100644 --- a/.github/workflows/molecule_pgpro.yml +++ b/.github/workflows/molecule_pgpro.yml @@ -1,11 +1,9 @@ --- -# yamllint disable rule:truthy - name: Molecule PostgresPro on: schedule: - - cron: '0 0 * * 0' + - cron: "0 0 * * 0" jobs: test: @@ -15,29 +13,30 @@ jobs: matrix: distro: - centos8 - - centos7 - debian10 - - debian9 - - ubuntu1804 + - debian11 - ubuntu2004 + - ubuntu2204 steps: - - name: checkout - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 - - name: Set up Python 3 - uses: actions/setup-python@v2 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: "3.12" - name: Install dependencies - run: pip3 install molecule[docker] ansible + run: make bootstrap-dev - name: Run Molecule tests for PostgresPro - run: molecule test --scenario-name postgrespro + run: make molecule-test-scenario env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: ${{ matrix.distro }} - -... 
+ PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: ${{ matrix.distro }} + MOLECULE_SCENARIO: "postgrespro" diff --git a/.github/workflows/schedule_pg_almalinux8.yml b/.github/workflows/schedule_pg_almalinux8.yml new file mode 100644 index 000000000..727c6c08c --- /dev/null +++ b/.github/workflows/schedule_pg_almalinux8.yml @@ -0,0 +1,33 @@ +--- +name: scheduled PostgreSQL (AlmaLinux 8) + +on: + schedule: + - cron: "15 1 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: almalinux8 + IMAGE_NAMESPACE: glillico diff --git a/.github/workflows/schedule_pg_almalinux9.yml b/.github/workflows/schedule_pg_almalinux9.yml new file mode 100644 index 000000000..0f7aa43d6 --- /dev/null +++ b/.github/workflows/schedule_pg_almalinux9.yml @@ -0,0 +1,33 @@ +--- +name: scheduled PostgreSQL (AlmaLinux 9) + +on: + schedule: + - cron: "15 1 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: almalinux9 + IMAGE_NAMESPACE: glillico diff --git a/.github/workflows/schedule_pg_centos7.yml b/.github/workflows/schedule_pg_centos7.yml deleted file mode 100644 index b39f09f34..000000000 --- a/.github/workflows/schedule_pg_centos7.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (CentOS 7) - -on: - schedule: - - cron: '0 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: centos7 - -... diff --git a/.github/workflows/schedule_pg_centos8.yml b/.github/workflows/schedule_pg_centos8.yml deleted file mode 100644 index 3417d03ab..000000000 --- a/.github/workflows/schedule_pg_centos8.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (CentOS 8) - -on: - schedule: - - cron: '0 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: centos8 - -... 
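The replacement workflows in this diff all share one shape: set `TERM`, check out the repository, install Python 3.12, run `make bootstrap-dev`, then delegate to a Makefile target with the image selected through `IMAGE_*` environment variables. A rough local equivalent of the nightly AlmaLinux 8 job above (a sketch, assuming the bootstrap completes cleanly on your machine):

```bash
# Reproduce a scheduled distro job from a local checkout
export PY_COLORS=1 ANSIBLE_FORCE_COLOR=1
make bootstrap-dev
IMAGE_DISTRO=almalinux8 IMAGE_NAMESPACE=glillico make molecule-test
```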
diff --git a/.github/workflows/schedule_pg_centosstream9.yml b/.github/workflows/schedule_pg_centosstream9.yml new file mode 100644 index 000000000..df9923be6 --- /dev/null +++ b/.github/workflows/schedule_pg_centosstream9.yml @@ -0,0 +1,33 @@ +--- +name: scheduled PostgreSQL (CentOS Stream 9) + +on: + schedule: + - cron: "0 0 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: centosstream9 + IMAGE_NAMESPACE: glillico diff --git a/.github/workflows/schedule_pg_debian10.yml b/.github/workflows/schedule_pg_debian10.yml deleted file mode 100644 index d6f974cca..000000000 --- a/.github/workflows/schedule_pg_debian10.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (Debian 10) - -on: - schedule: - - cron: '15 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: debian10 - -... diff --git a/.github/workflows/schedule_pg_debian11.yml b/.github/workflows/schedule_pg_debian11.yml index 3aced10a0..26cd397a4 100644 --- a/.github/workflows/schedule_pg_debian11.yml +++ b/.github/workflows/schedule_pg_debian11.yml @@ -1,33 +1,32 @@ --- -# yamllint disable rule:truthy - name: scheduled PostgreSQL (Debian 11) on: schedule: - - cron: '15 0 * * *' + - cron: "15 0 * * *" jobs: test: runs-on: ubuntu-latest steps: - - name: checkout - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 - - name: Set up Python 3 - uses: actions/setup-python@v2 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: "3.12" - name: Install dependencies - run: pip3 install molecule[docker] ansible + run: make bootstrap-dev - name: Run Molecule tests - run: molecule test + run: make molecule-test env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: debian11 - -... 
+ PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: debian11 diff --git a/.github/workflows/schedule_pg_debian12.yml b/.github/workflows/schedule_pg_debian12.yml new file mode 100644 index 000000000..6dde7c9d7 --- /dev/null +++ b/.github/workflows/schedule_pg_debian12.yml @@ -0,0 +1,32 @@ +--- +name: scheduled PostgreSQL (Debian 12) + +on: + schedule: + - cron: "15 0 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: debian12 diff --git a/.github/workflows/schedule_pg_debian9.yml b/.github/workflows/schedule_pg_debian9.yml deleted file mode 100644 index 9d95e5a1f..000000000 --- a/.github/workflows/schedule_pg_debian9.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (Debian 9) - -on: - schedule: - - cron: '15 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: debian9 - -... diff --git a/.github/workflows/schedule_pg_oracle_linux7.yml b/.github/workflows/schedule_pg_oracle_linux7.yml deleted file mode 100644 index 13f7de950..000000000 --- a/.github/workflows/schedule_pg_oracle_linux7.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (OracleLinux 7) - -on: - schedule: - - cron: '45 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test --scenario-name oraclelinux - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO_TAG: 7 - -... diff --git a/.github/workflows/schedule_pg_oracle_linux8.yml b/.github/workflows/schedule_pg_oracle_linux8.yml index 1f94b6c48..7b5e1ffbc 100644 --- a/.github/workflows/schedule_pg_oracle_linux8.yml +++ b/.github/workflows/schedule_pg_oracle_linux8.yml @@ -1,33 +1,33 @@ --- -# yamllint disable rule:truthy - name: scheduled PostgreSQL (OracleLinux 8) on: schedule: - - cron: '0 1 * * *' + - cron: "0 1 * * *" jobs: test: runs-on: ubuntu-latest steps: - - name: checkout - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 - - name: Set up Python 3 - uses: actions/setup-python@v2 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: "3.12" - name: Install dependencies - run: pip3 install molecule[docker] ansible + run: make bootstrap-dev - name: Run Molecule tests - run: molecule test --scenario-name oraclelinux + run: make molecule-test env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO_TAG: latest - -... 
+ PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: oraclelinux8 + IMAGE_NAMESPACE: glillico diff --git a/.github/workflows/schedule_pg_oracle_linux9.yml b/.github/workflows/schedule_pg_oracle_linux9.yml new file mode 100644 index 000000000..0eac05d0d --- /dev/null +++ b/.github/workflows/schedule_pg_oracle_linux9.yml @@ -0,0 +1,33 @@ +--- +name: scheduled PostgreSQL (OracleLinux 9) + +on: + schedule: + - cron: "0 1 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: oraclelinux9 + IMAGE_NAMESPACE: glillico diff --git a/.github/workflows/schedule_pg_rockylinux8.yml b/.github/workflows/schedule_pg_rockylinux8.yml index 6625f4fde..549d5ac0b 100644 --- a/.github/workflows/schedule_pg_rockylinux8.yml +++ b/.github/workflows/schedule_pg_rockylinux8.yml @@ -1,33 +1,32 @@ --- -# yamllint disable rule:truthy - name: scheduled PostgreSQL (RockyLinux 8) on: schedule: - - cron: '15 1 * * *' + - cron: "15 1 * * *" jobs: test: runs-on: ubuntu-latest steps: - - name: checkout - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 - - name: Set up Python 3 - uses: actions/setup-python@v2 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - python-version: '3.x' + python-version: "3.12" - name: Install dependencies - run: pip3 install molecule[docker] ansible + run: make bootstrap-dev - name: Run Molecule tests - run: molecule test + run: make molecule-test env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: rockylinux8 - -... 
+ PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: rockylinux8 diff --git a/.github/workflows/schedule_pg_rockylinux9.yml b/.github/workflows/schedule_pg_rockylinux9.yml new file mode 100644 index 000000000..90fc2e93d --- /dev/null +++ b/.github/workflows/schedule_pg_rockylinux9.yml @@ -0,0 +1,32 @@ +--- +name: scheduled PostgreSQL (RockyLinux 9) + +on: + schedule: + - cron: "15 1 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: rockylinux9 diff --git a/.github/workflows/schedule_pg_ubuntu1804.yml b/.github/workflows/schedule_pg_ubuntu1804.yml deleted file mode 100644 index 49ac23071..000000000 --- a/.github/workflows/schedule_pg_ubuntu1804.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (Ubuntu 18.04) - -on: - schedule: - - cron: '30 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: ubuntu1804 - -... diff --git a/.github/workflows/schedule_pg_ubuntu2004.yml b/.github/workflows/schedule_pg_ubuntu2004.yml deleted file mode 100644 index 54e2d7e2d..000000000 --- a/.github/workflows/schedule_pg_ubuntu2004.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:truthy - -name: scheduled PostgreSQL (Ubuntu 20.04) - -on: - schedule: - - cron: '30 0 * * *' - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - name: checkout - uses: actions/checkout@v2 - - - name: Set up Python 3 - uses: actions/setup-python@v2 - with: - python-version: '3.x' - - - name: Install dependencies - run: pip3 install molecule[docker] ansible - - - name: Run Molecule tests - run: molecule test - env: - PY_COLORS: '1' - ANSIBLE_FORCE_COLOR: '1' - MOLECULE_DISTRO: ubuntu2004 - -... 
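Scenario-specific schedules, such as the pg_upgrade and PostgresPro workflows earlier in this diff, differ only in routing through `molecule-test-scenario` with a `MOLECULE_SCENARIO` variable. A local sketch using values taken from those workflows:

```bash
# Run one named Molecule scenario against a single distro image
MOLECULE_SCENARIO=postgrespro IMAGE_DISTRO=ubuntu2204 make molecule-test-scenario
```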
diff --git a/.github/workflows/schedule_pg_ubuntu2204.yml b/.github/workflows/schedule_pg_ubuntu2204.yml new file mode 100644 index 000000000..e2dc113c3 --- /dev/null +++ b/.github/workflows/schedule_pg_ubuntu2204.yml @@ -0,0 +1,32 @@ +--- +name: scheduled PostgreSQL (Ubuntu 22.04) + +on: + schedule: + - cron: "30 0 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: ubuntu2204 diff --git a/.github/workflows/schedule_pg_ubuntu2404.yml b/.github/workflows/schedule_pg_ubuntu2404.yml new file mode 100644 index 000000000..20098b07c --- /dev/null +++ b/.github/workflows/schedule_pg_ubuntu2404.yml @@ -0,0 +1,32 @@ +--- +name: scheduled PostgreSQL (Ubuntu 24.04) + +on: + schedule: + - cron: "30 0 * * *" + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev + + - name: Run Molecule tests + run: make molecule-test + env: + PY_COLORS: "1" + ANSIBLE_FORCE_COLOR: "1" + IMAGE_DISTRO: ubuntu2404 diff --git a/.github/workflows/yamllint.yml b/.github/workflows/yamllint.yml index 39d42db1f..5c5c5d979 100644 --- a/.github/workflows/yamllint.yml +++ b/.github/workflows/yamllint.yml @@ -1,7 +1,5 @@ --- -# yamllint disable rule:truthy - -name: 'Yamllint' +name: "Yamllint" on: push: @@ -16,16 +14,19 @@ jobs: runs-on: ubuntu-latest steps: - - name: 'Checkout' - uses: actions/checkout@v2 + - name: Set TERM environment variable + run: echo "TERM=xterm" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v4 - - name: 'Run yamllint' - uses: karancode/yamllint-github-action@master + - name: Set up Python 3.12 + uses: actions/setup-python@v4 with: - yamllint_file_or_dir: '.' - yamllint_strict: true - yamllint_comment: true - env: - GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + python-version: "3.12" + + - name: Install dependencies + run: make bootstrap-dev -... 
+ - name: Run Yamllint + run: make linter-yamllint diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..a254d4912 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.idea +.venv +.vscode/settings.json +__pycache__/ +*.log +molecule/**/tmp +.ansible/lock diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 000000000..6a932579a --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,6 @@ +--- +image: + file: .config/gitpod/Dockerfile + +tasks: + - init: make bootstrap-dev diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 000000000..8f7b399ce --- /dev/null +++ b/.prettierignore @@ -0,0 +1,3 @@ +console/service +console/ui +*.sql diff --git a/.prettierrc.json b/.prettierrc.json new file mode 100644 index 000000000..cd0cfbf71 --- /dev/null +++ b/.prettierrc.json @@ -0,0 +1,9 @@ +{ + "semi": true, + "trailingComma": "all", + "singleQuote": false, + "printWidth": 160, + "tabWidth": 2, + "endOfLine": "lf", + "useTabs": false +} diff --git a/.sql-formatter.json b/.sql-formatter.json new file mode 100644 index 000000000..801a72ef3 --- /dev/null +++ b/.sql-formatter.json @@ -0,0 +1,14 @@ +{ + "language": "postgresql", + "keywordCase": "lower", + "dataTypeCase": "lower", + "functionCase": "lower", + "identifierCase": "lower", + "tabWidth": 2, + "useTabs": false, + "linesBetweenQueries": 1, + "logicalOperatorNewline": true, + "expressionWidth": 160, + "denseOperators": false, + "newlineBeforeSemicolon": false +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..b1197511f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,71 @@ +# Contributing Guide + +Help improve this project by: + +- [Creating an issue](https://help.github.com/articles/creating-an-issue/) (Check for [known issues](https://github.com/search?q=user%3Avitabaks+is%3Aissue+state%3Aopen) first) +- [Submitting a pull request](https://docs.github.com/fr/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) to fix a problem or add a feature + +Your contributions are appreciated and will be taken seriously. + +## How to Contribute + +### 1. Create an issue + +Report problems or suggest improvements by [creating an issue](https://github.com/vitabaks/autobase/issues). + +### 2. Fork the project + +[Fork the repository](https://github.com/vitabaks/autobase) to your GitHub account. + +### 3. Make changes + +Clone your fork locally and make the necessary changes: + +```bash +git clone git@github.com:YOURNAMESPACE/autobase.git +``` + +### 4. Test your changes + +#### 4.1 Desktop + +Install [make](https://www.gnu.org/software/make/), [Python3.12](https://www.python.org/), [venv](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/), and [docker](https://docs.docker.com/engine/install/ubuntu/). + +Run `make` for Makefile help. + +1. Initialize virtualenv and install dependencies with: `make bootstrap-dev` +2. Run Prettier formatting: `make prettier` +3. Lint your code with `make lint` +4. Test your changes with: `make tests` or `make molecule-converge` + +To test a specific distribution, set `distro`, `tag`, and `namespace`: + +```bash +IMAGE_NAMESPACE=geerlingguy IMAGE_DISTRO=debian12 make molecule-converge +``` + +#### 4.2 Gitpod + +Use Gitpod for a cloud-based development environment: + +1. Sign up for Gitpod: https://gitpod.io +2. Fork the `autobase` repository +3. Open your fork in Gitpod: `https://gitpod.io/#https://github.com/username/autobase` +4. 
Create a new branch: `git checkout -b my-feature-branch` +5. Make your changes and commit: `git add .` and `git commit -m "Description of changes"` +6. Run Prettier formatting: `make prettier` +7. Test with linters: `make lint` +8. Test with Molecule: `make tests` or `make molecule-converge` +9. Push your changes: `git push origin my-feature-branch` + +Keep your Gitpod workspace synced with the main repository. + +### 5. Submit a pull request + +[Create a pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) and [refer to the issue number](https://help.github.com/en/github/writing-on-github/autolinked-references-and-urls) using #123, where 123 is the issue number. + +### 6. Wait + +Your pull request will be reviewed, and you’ll receive feedback. Thanks for contributing! + +Consider sponsoring the maintainer via [GitHub](https://github.com/sponsors/vitabaks) or [Patreon](https://patreon.com/vitabaks). diff --git a/LICENSE b/LICENSE index f03af3f1d..aeb3ad166 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2020 Vitaliy Kukharik +Copyright (c) 2019-2025 Vitaliy Kukharik Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..e4010937e --- /dev/null +++ b/Makefile @@ -0,0 +1,86 @@ +.PHONY: all SHELL + +# Makefile global config +.DEFAULT_GOAL:=help +.EXPORT_ALL_VARIABLES: +.ONESHELL: +.SILENT: +MAKEFLAGS += "-j$(NUM_CORES) -l$(NUM_CORES)" +MAKEFLAGS += --silent +SHELL:=/bin/bash +.SHELLFLAGS = -eu -o pipefail -c + +# Makefile colors config +bold=$(shell tput bold) +normal=$(shell tput sgr0) +errorTitle=$(shell tput setab 1 && tput bold && echo '\n') +recommendation=$(shell tput setab 4) +underline=$(shell tput smul) +reset=$(shell tput -Txterm sgr0) +black=$(shell tput setaf 0) +red=$(shell tput setaf 1) +green=$(shell tput setaf 2) +yellow=$(shell tput setaf 3) +blue=$(shell tput setaf 4) +magenta=$(shell tput setaf 5) +cyan=$(shell tput setaf 6) +white=$(shell tput setaf 7) + +define HEADER +How to use me: + make && make bootstrap + make ${cyan}${reset} + +endef +export HEADER + +python_launcher := python$(shell cat .config/python_version.config | cut -d '=' -f 2) + +-include $(addsuffix /*.mak, $(shell find .config/make -type d)) + +## —— Bootstrap —————————————————————————————————————————————————————————————————————————————————— +.PHONY: bootstrap +bootstrap: ## Bootstrap Ansible collection + $(MAKE) python-bootstrap + +.PHONY: bootstrap-dev +bootstrap-dev: ## Bootstrap Ansible collection for development + $(MAKE) bootstrap + $(MAKE) python-bootstrap-dev + +## —— Virtualenv ————————————————————————————————————————————————————————————————————————————————— +.PHONY: reinitialization +reinitialization: ## Return to initial state of Bootstrap Ansible collection + $(MAKE) clean + $(MAKE) bootstrap + +.PHONY: reinitialization-dev +reinitialization-dev: ## Return to initial state of Bootstrap Ansible collection for development + $(MAKE) reinitialization + $(MAKE) bootstrap-dev + +## —— Tests —————————————————————————————————————————————————————————————————————————————————————— +.PHONY: tests +tests: ## tests Ansible + $(MAKE) docker-tests + $(MAKE) lint + $(MAKE) molecule-test-all + +.PHONY: tests-fast +tests-fast: ## tests Ansible quickly + $(MAKE) lint + $(MAKE) molecule-converge + +## —— Clean 
——————————————————————————————————————————————————————————————————————————————————————
+.PHONY: clean
+clean: ## Clean
+	rm -rf .venv/
+	rm -rf vendor/
+	rm -f *.mak
+	rm -rf .pytest_cache/
+	rm -rf scripts/.pytest_cache/
+	rm -rf scripts/tests/__pycache__/
+	rm -rf scripts/modules/__pycache__/
+	rm -rf scripts/modules/services/__pycache__/
+	rm -rf scripts/modules/utils/__pycache__/
diff --git a/README.md b/README.md
index b0a66a821..9626386cb 100644
--- a/README.md
+++ b/README.md
@@ -1,414 +1,281 @@
-# PostgreSQL High-Availability Cluster :elephant: :sparkling_heart:
+


-[](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3AAnsible-lint) [](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3AYamllint) [](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3AMolecule) [![GitHub license](https://img.shields.io/github/license/vitabaks/postgresql_cluster)](https://github.com/vitabaks/postgresql_cluster/blob/master/LICENSE) ![GitHub stars](https://img.shields.io/github/stars/vitabaks/postgresql_cluster) -[![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dvitabaks%26type%3Dpatrons&style=social)](https://patreon.com/vitabaks) +# Autobase for PostgreSQL® :elephant: :sparkling_heart: -### Deploy a Production Ready PostgreSQL High-Availability Cluster (based on "Patroni" and "DCS(etcd)"). Automating with Ansible. +[![Ansible-lint](https://github.com/vitabaks/autobase/actions/workflows/ansible-lint.yml/badge.svg)](https://github.com/vitabaks/autobase/actions/workflows/ansible-lint.yml) +[![Yamllint](https://github.com/vitabaks/autobase/actions/workflows/yamllint.yml/badge.svg)](https://github.com/vitabaks/autobase/actions/workflows/yamllint.yml) +[![Flake8](https://github.com/vitabaks/autobase/actions/workflows/flake8.yml/badge.svg)](https://github.com/vitabaks/autobase/actions/workflows/flake8.yml) +[![Molecule](https://github.com/vitabaks/autobase/actions/workflows/molecule.yml/badge.svg)](https://github.com/vitabaks/autobase/actions/workflows/molecule.yml) +[![GitHub license](https://img.shields.io/github/license/vitabaks/autobase)](https://github.com/vitabaks/autobase/blob/master/LICENSE) +![GitHub stars](https://img.shields.io/github/stars/vitabaks/autobase) -This Ansible playbook is designed for deploying a PostgreSQL high availability cluster on dedicated physical servers for a production environment. -Сluster can be deployed in virtual machines for test environments and small projects. +**Autobase for PostgreSQL®** is an open-source alternative to cloud-managed databases (DBaaS) such as Amazon RDS, Google Cloud SQL, Azure Database, and more. -This playbook support the deployment of cluster over already existing and running PostgreSQL. You must specify the variable `postgresql_exists='true'` in the inventory file. -**Attention!** Your PostgreSQL will be stopped before running in cluster mode. You must planing downtime of existing databases. +This automated database platform enables you to create and manage production-ready, highly available PostgreSQL clusters. It simplifies the deployment process, reduces operational costs, and makes database management accessible—even for teams without specialized expertise. -> :heavy_exclamation_mark: Please test it in your test enviroment before using in a production. +**Automate deployment, failover, backups, restore, upgrades, scaling, and more with ease.** +You can find a version of this documentation that is searchable and also easier to navigate at [autobase.tech](https://autobase.tech) -You have two options available for deployment ("Type A" and "Type B"): - -### [Type A] PostgreSQL High-Availability with Load Balancing -![TypeA](https://github.com/vitabaks/postgresql_cluster/blob/master/TypeA.png) - -> To use this scheme, specify `with_haproxy_load_balancing: true` in variable file vars/main.yml - -This scheme provides the ability to distribute the load on reading. This also allows us to scale out the cluster (with read-only replicas). 
-
-- port 5000 (read / write) master
-
-- port 5001 (read only) all replicas
-
-###### if variable "synchronous_mode" is 'true' (vars/main.yml):
-- port 5002 (read only) synchronous replica only
-- port 5003 (read only) asynchronous replicas only
-
-> :heavy_exclamation_mark: Your application must have support sending read requests to a custom port (ex 5001), and write requests (ex 5000).
+---
-##### Components of high availability:
-[**Patroni**](https://github.com/zalando/patroni) is a template for you to create your own customized, high-availability solution using Python and - for maximum accessibility - a distributed configuration store like ZooKeeper, etcd, Consul or Kubernetes. Used for automate the management of PostgreSQL instances and auto failover.
+### Project Status
-[**etcd**](https://github.com/etcd-io/etcd) is a distributed reliable key-value store for the most critical data of a distributed system. etcd is written in Go and uses the [Raft](https://raft.github.io/) consensus algorithm to manage a highly-available replicated log. It is used by Patroni to store information about the status of the cluster and PostgreSQL configuration parameters.
+Autobase has been actively developed for over 5 years (since 2019) and is trusted by companies worldwide, including in production environments with high loads and demanding reliability requirements. Our mission is to provide an open-source DBaaS that delivers reliability, flexibility, and cost-efficiency.
-[What is Distributed Consensus?](http://thesecretlivesofdata.com/raft/)
+The project will remain open-source forever, but to ensure its continuous growth and development, we rely on [sponsorship](https://autobase.tech/docs/sponsor). By subscribing to [Autobase packages](https://autobase.tech/docs/support), you gain access to personalized support from the project authors and PostgreSQL experts, ensuring the reliability of your database infrastructure.
-##### Components of load balancing:
-[**HAProxy**](http://www.haproxy.org/) is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications.
+---
-[**confd**](https://github.com/kelseyhightower/confd) manage local application configuration files using templates and data from etcd or consul. Used to automate HAProxy configuration file management.
+### Supported setups of Postgres Cluster
-[**Keepalived**](https://github.com/acassen/keepalived) provides a virtual high-available IP address (VIP) and single entry point for databases access.
-Implementing VRRP (Virtual Router Redundancy Protocol) for Linux.
-In our configuration keepalived checks the status of the HAProxy service and in case of a failure delegates the VIP to another server in the cluster.
+![pg_cluster_scheme](images/pg_cluster_scheme.png#gh-light-mode-only)
+![pg_cluster_scheme](images/pg_cluster_scheme.dark_mode.png#gh-dark-mode-only)
-[**PgBouncer**](https://pgbouncer.github.io/features.html) is a connection pooler for PostgreSQL.
+You have three schemes available for deployment:
+
+#### 1. PostgreSQL High-Availability only
+
+This is a simple scheme without load balancing.
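As a quick illustration (a hand-written sketch, not an excerpt from the project docs), this scheme amounts to a couple of extra variables on top of the default deployment. It assumes the collection-based invocation described in the Getting Started section below, and the VIP address is a placeholder:

```sh
# HA-only scheme: HAProxy stays disabled (the default).
# Defining cluster_vip enables vip-manager; pgbouncer_install adds the optional pooler.
ansible-playbook vitabaks.autobase.deploy_pgcluster \
  -e "cluster_vip=10.0.1.50" \
  -e "pgbouncer_install=true"
```

The variable names are the same ones referenced in the components list below.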
-### [Type B] PostgreSQL High-Availability only
-![TypeB](https://github.com/vitabaks/postgresql_cluster/blob/master/TypeB.png)
+##### Components:
-This is simple scheme without load balancing `Used by default`
+- [**Patroni**](https://github.com/zalando/patroni) is a template for you to create your own customized, high-availability solution using Python and - for maximum accessibility - a distributed configuration store like ZooKeeper, etcd, Consul or Kubernetes. Used to automate the management of PostgreSQL instances and to handle automatic failover.
-To provide a single entry point (VIP) for databases access is used "vip-manager".
+- [**etcd**](https://github.com/etcd-io/etcd) is a distributed reliable key-value store for the most critical data of a distributed system. etcd is written in Go and uses the [Raft](https://raft.github.io/) consensus algorithm to manage a highly-available replicated log. It is used by Patroni to store information about the status of the cluster and PostgreSQL configuration parameters. [What is Distributed Consensus?](https://thesecretlivesofdata.com/raft/)
-[**vip-manager**](https://github.com/cybertec-postgresql/vip-manager) is a service that gets started on all cluster nodes and connects to the DCS. If the local node owns the leader-key, vip-manager starts the configured VIP. In case of a failover, vip-manager removes the VIP on the old leader and the corresponding service on the new leader starts it there. \
-Written in Go. Cybertec Schönig & Schönig GmbH https://www.cybertec-postgresql.com
+- [**vip-manager**](https://github.com/cybertec-postgresql/vip-manager) (_optional, if the `cluster_vip` variable is specified_) is a service that gets started on all cluster nodes and connects to the DCS. If the local node owns the leader-key, vip-manager starts the configured VIP. In case of a failover, vip-manager removes the VIP on the old leader and the corresponding service on the new leader starts it there. Used to provide a single entry point (VIP) for database access.
+- [**PgBouncer**](https://pgbouncer.github.io/features.html) (_optional, if the `pgbouncer_install` variable is `true`_) is a connection pooler for PostgreSQL.
+
+#### 2. PostgreSQL High-Availability with Load Balancing
---
-## Compatibility
-RedHat and Debian based distros (x86_64)
+This scheme enables load distribution for read operations and also allows for scaling out the cluster with read-only replicas.
-###### Supported Linux Distributions:
-- **Debian**: 9, 10, 11
-- **Ubuntu**: 18.04, 20.04
-- **CentOS**: 7, 8
-- **Oracle Linux**: 7, 8
-- **Rocky Linux**: 8
+When deploying to cloud providers such as AWS, GCP, Azure, DigitalOcean, and Hetzner Cloud, a cloud load balancer is automatically created by default to provide a single entry point to the database (controlled by the `cloud_load_balancer` variable).
-###### PostgreSQL versions:
-all supported PostgreSQL versions
+For non-cloud environments, such as when deploying on Your Own Machines, the HAProxy load balancer is available for use. To enable it, set the `with_haproxy_load_balancing: true` variable.
-:white_check_mark: tested, works fine: `PostgreSQL 9.6, 10, 11, 12, 13, 14`
+> [!NOTE]
+> Your application must support sending read requests to port 5001 and write requests to port 5000.
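Once such a cluster is up, the routing is easy to verify from any client host: `pg_is_in_recovery()` returns `f` on the primary and `t` on a replica (the host name and user below are placeholders):

```sh
# Port 5000 routes to the primary
psql -h haproxy.example.com -p 5000 -U postgres -tAc "SELECT pg_is_in_recovery();"  # expect: f

# Port 5001 routes to replicas
psql -h haproxy.example.com -p 5001 -U postgres -tAc "SELECT pg_is_in_recovery();"  # expect: t
```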
-_Table of results of daily automated testing of cluster deployment:_ -| Distribution | Test result | -|--------------|:----------:| -| Debian 9 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(Debian%209))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28Debian+9%29%22) | -| Debian 10 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(Debian%2010))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28Debian+10%29%22) | -| Debian 11 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(Debian%2011))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28Debian+11%29%22) | -| Ubuntu 18.04 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(Ubuntu%2018.04))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28Ubuntu+18.04%29%22) | -| Ubuntu 20.04 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(Ubuntu%2020.04))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28Ubuntu+20.04%29%22) | -| CentOS 7 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(CentOS%207))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28CentOS+7%29%22) | -| CentOS 8 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(CentOS%208))](https://github.com/vitabaks/postgresql_cluster/actions?query=workflow%3A%22scheduled+PostgreSQL+%28CentOS+8%29%22) | -| Oracle Linux 7 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(OracleLinux%207))](https://github.com/vitabaks/postgresql_cluster/actions/workflows/schedule_pg_oracle_linux7.yml) | -| Oracle Linux 8 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(OracleLinux%208))](https://github.com/vitabaks/postgresql_cluster/actions/workflows/schedule_pg_oracle_linux8.yml) | -| Rocky Linux 8 | [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/vitabaks/postgresql_cluster/scheduled%20PostgreSQL%20(RockyLinux%208))](https://github.com/vitabaks/postgresql_cluster/actions/workflows/schedule_pg_rockylinux8.yml) | +List of ports when using HAProxy: +- port 5000 (read / write) master +- port 5001 (read only) all replicas +- port 5002 (read only) synchronous replica only +- port 5003 (read only) asynchronous replicas only -###### Ansible version -This has been tested on Ansible 2.7, 2.8, 2.9, 2.10, 2.11 - -## Requirements -This playbook requires root privileges or sudo. - -Ansible ([What is Ansible](https://www.ansible.com/resources/videos/quick-start-video)?) 
- -## Recommendations -- **linux (Operation System)**: +##### Components of HAProxy load balancing: -Update your operating system on your target servers before deploying; +- [**HAProxy**](https://www.haproxy.org/) is a free, very fast and reliable solution offering high availability, load balancing, and proxying for TCP and HTTP-based applications. -Make sure you have time synchronization is configured (NTP). -Specify `ntp_enabled:'true'` and `ntp_servers` if you want to install and configure the ntp service. +- [**confd**](https://github.com/kelseyhightower/confd) manage local application configuration files using templates and data from etcd or consul. Used to automate HAProxy configuration file management. -- **DCS (Distributed Configuration Store)**: +- [**Keepalived**](https://github.com/acassen/keepalived) (_optional, if the `cluster_vip` variable is specified_) provides a virtual high-available IP address (VIP) and single entry point for databases access. + Implementing VRRP (Virtual Router Redundancy Protocol) for Linux. In our configuration keepalived checks the status of the HAProxy service and in case of a failure delegates the VIP to another server in the cluster. -Fast drives and a reliable network are the most important factors for the performance and stability of an etcd cluster. +#### 3. PostgreSQL High-Availability with Consul Service Discovery -Avoid storing etcd data on the same drive along with other processes (such as the database) that are intensively using the resources of the disk subsystem! -Store the etcd and postgresql data on **different** disks (see `etcd_data_dir` variable), use ssd drives if possible. -See [hardware recommendations](https://etcd.io/docs/v3.3.12/op-guide/hardware/) and [tuning](https://etcd.io/docs/v3.3.12/tuning/) guides. +To use this scheme, specify `dcs_type: consul` variable. -Overloaded (highload) database clusters may require the installation of the etcd cluster on dedicated servers, separate from the database servers. +This scheme is suitable for master-only access and for load balancing (using DNS) for reading across replicas. Consul [Service Discovery](https://developer.hashicorp.com/consul/docs/concepts/service-discovery) with [DNS resolving ](https://developer.hashicorp.com/consul/docs/discovery/dns) is used as a client access point to the database. -- **Placement of cluster members in different data centers**: +Client access point (example): -If you’d prefer a cross-data center setup, where the replicating databases are located in different data centers, etcd member placement becomes critical. +- `master.postgres-cluster.service.consul` +- `replica.postgres-cluster.service.consul` -There are quite a lot of things to consider if you want to create a really robust etcd cluster, but there is one rule: *do not placing all etcd members in your primary data center*. See some [examples](https://www.cybertec-postgresql.com/en/introduction-and-how-to-etcd-clusters-for-patroni/). +Besides, it can be useful for a distributed cluster across different data centers. We can specify in advance which data center the database server is located in and then use this for applications running in the same data center. 
+Example: `replica.postgres-cluster.service.dc1.consul`, `replica.postgres-cluster.service.dc2.consul`
-- **How to prevent data loss in case of autofailover (synchronous_modes and pg_rewind)**:
+It requires installing Consul in client mode on each application server for service DNS resolution (or use [forward DNS](https://developer.hashicorp.com/consul/tutorials/networking/dns-forwarding?utm_source=docs) to a remote Consul server instead of installing a local Consul client).
-Due to performance reasons, a synchronous replication is disabled by default.
+## Compatibility
-To minimize the risk of losing data on autofailover, you can configure settings in the following way:
-- synchronous_mode: 'true'
-- synchronous_mode_strict: 'true'
-- synchronous_commit: 'on' (or 'remote_write'/'remote_apply')
-- use_pg_rewind: 'false' (enabled by default)
+RedHat and Debian based distros (x86_64)
---
+###### Supported Linux Distributions:
-## Deployment: quick start
-0. [Install Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on one control node (which could easily be a laptop)
-###### Example: install latest release using [pip](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip)
-`sudo apt update && sudo apt install python3-pip sshpass git -y` \
-`sudo pip3 install ansible`
+- **Debian**: 11, 12
+- **Ubuntu**: 22.04, 24.04
+- **CentOS Stream**: 9
+- **Oracle Linux**: 8, 9
+- **Rocky Linux**: 8, 9
+- **AlmaLinux**: 8, 9
-1. Download or clone this repository
+###### PostgreSQL versions:
-`git clone https://github.com/vitabaks/postgresql_cluster.git`
+all supported PostgreSQL versions
-2. Go to the playbook directory
+:white_check_mark: tested, works fine: PostgreSQL 10, 11, 12, 13, 14, 15, 16, 17
-`cd postgresql_cluster/`
+_Table of results of daily automated testing of cluster deployment:_
+| Distribution | Test result |
+|--------------|:----------:|
+| Debian 11 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_debian11.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_debian11.yml) |
+| Debian 12 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_debian12.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_debian12.yml) |
+| Ubuntu 22.04 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_ubuntu2204.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_ubuntu2204.yml) |
+| Ubuntu 24.04 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_ubuntu2404.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_ubuntu2404.yml) |
+| CentOS Stream 9 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_centosstream9.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_centosstream9.yml) |
+| Oracle Linux 8 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_oracle_linux8.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_oracle_linux8.yml) |
+| Oracle Linux 9 | [![GitHub Workflow
Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_oracle_linux9.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_oracle_linux9.yml) | +| Rocky Linux 8 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_rockylinux8.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_rockylinux8.yml) | +| Rocky Linux 9 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_rockylinux9.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_rockylinux9.yml) | +| AlmaLinux 8 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_almalinux8.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_almalinux8.yml) | +| AlmaLinux 9 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/vitabaks/autobase/schedule_pg_almalinux9.yml?branch=master)](https://github.com/vitabaks/autobase/actions/workflows/schedule_pg_almalinux9.yml) | -3. Edit the inventory file +## Getting Started -###### Specify the ip addresses and connection settings (`ansible_user`, `ansible_ssh_pass` ...) for your environment +You have the option to deploy Postgres clusters using the Console (UI), command line, or GitOps. -`vim inventory` +> [!TIP] +> 📩 Contact us at info@autobase.tech, and our team will help you implement Autobase into your infrastructure. -4. Edit the variable file vars/[main.yml](./vars/main.yml) +### Console (UI) -`vim vars/main.yml` +The Autobase Console (UI) is the recommended method for most users. It is designed to be user-friendly, minimizing the risk of errors and making it easier than ever to set up your PostgreSQL clusters. This method is suitable for both beginners and those who prefer a visual interface for managing their PostgreSQL clusters. -###### Minimum set of variables: -- `proxy_env` # if required (*for download packages*) +To run the autobase console, execute the following command: -example: ``` -proxy_env: - http_proxy: http://proxy_server_ip:port - https_proxy: http://proxy_server_ip:port +docker run -d --name autobase-console \ + --publish 80:80 \ + --publish 8080:8080 \ + --env PG_CONSOLE_API_URL=http://localhost:8080/api/v1 \ + --env PG_CONSOLE_AUTHORIZATION_TOKEN=secret_token \ + --env PG_CONSOLE_DOCKER_IMAGE=autobase/automation:latest \ + --volume console_postgres:/var/lib/postgresql \ + --volume /var/run/docker.sock:/var/run/docker.sock \ + --volume /tmp/ansible:/tmp/ansible \ + --restart=unless-stopped \ + autobase/console:latest ``` -- `cluster_vip` # for client access to databases in the cluster (optional) -- `patroni_cluster_name` -- `with_haproxy_load_balancing` `'true'` (Type A) or `'false'`/default (Type B) -- `postgresql_version` -- `postgresql_data_dir` - - -5. Run playbook: - -`ansible-playbook deploy_pgcluster.yml` -[![asciicast](https://asciinema.org/a/251019.svg)](https://asciinema.org/a/251019?speed=5) +> [!NOTE] +> If you are running the console on a dedicated server (rather than on your laptop), replace `localhost` with the server’s IP address in the `PG_CONSOLE_API_URL` variable. ---- - -## Variables -See the vars/[main.yml](./vars/main.yml), [system.yml](./vars/system.yml) and ([Debian.yml](./vars/Debian.yml) or [RedHat.yml](./vars/RedHat.yml)) files for more details. 
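If the console UI does not come up, the usual Docker checks are a good first step (generic Docker CLI, nothing Autobase-specific):

```sh
docker ps --filter name=autobase-console   # the container should be listed as "Up"
docker logs --tail 50 autobase-console     # otherwise, inspect the startup output
```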
- - -## Cluster Scaling -Add new postgresql node to existing cluster -
-<details><summary>Click here to expand...</summary>

- -After you successfully deployed your PostgreSQL HA cluster, you may need to scale it further. \ -Use the `add_pgnode.yml` playbook for this. - -> :grey_exclamation: This playbook does not scaling the etcd cluster and haproxy balancers. - -During the run this playbook, the new nodes will be prepared in the same way as when first deployment the cluster. But unlike the initial deployment, all the necessary **configuration files will be copied from the master server**. - -###### Preparation: - -1. Add a new node (*or subnet*) to the `pg_hba.conf` file on all nodes in your cluster -2. Apply pg_hba.conf for all PostgreSQL (see `patronictl reload --help`) - -###### Steps to add a new node: - -3. Go to the playbook directory -4. Edit the inventory file - -Specify the ip address of one of the nodes of the cluster in the [master] group, and the new node (which you want to add) in the [replica] group. - -5. Edit the variable files - -Variables that should be the same on all cluster nodes: \ -`with_haproxy_load_balancing`,` postgresql_version`, `postgresql_data_dir`,` postgresql_conf_dir`. - -6. Run playbook: - -`ansible-playbook add_pgnode.yml` - -

- -Add new haproxy balancer node -
-<details><summary>Click here to expand...</summary>

- -Use the `add_balancer.yml` playbook for this. +> [!TIP] +> It is recommended to run the console in the same network as your database servers to enable monitoring of the cluster status. -During the run this playbook, the new balancer node will be prepared in the same way as when first deployment the cluster. But unlike the initial deployment, **all necessary configuration files will be copied from the server specified in the [master] group**. +**Open the Console UI**: -> :heavy_exclamation_mark: Please test it in your test enviroment before using in a production. +Go to http://localhost:80 (or the address of your server) and use `secret_token` for authorization. -###### Steps to add a new banlancer node: +![Cluster creation demo](images/autobase_create_cluster_demo.gif) -1. Go to the playbook directory +Refer to the [Deployment](https://autobase.tech/docs/category/deployment) section to learn more about the different deployment methods. -2. Edit the inventory file +### Command line -Specify the ip address of one of the existing balancer nodes in the [master] group, and the new balancer node (which you want to add) in the [balancers] group. +

+<details><summary>Click here to expand... if you prefer the command line.</summary>

-> :heavy_exclamation_mark: Attention! The list of Firewall ports is determined dynamically based on the group in which the host is specified. \ -If you adding a new haproxy balancer node to one of the existing nodes from the [etcd_cluster] or [master]/[replica] groups, you can rewrite the iptables rules! \ -See firewall_allowed_tcp_ports_for.balancers variable in the system.yml file. +The command line mode is suitable for advanced users who require greater flexibility and control over the deployment and management of their PostgreSQL clusters. +While the Console (UI) is designed for ease of use and is suitable for most users, the command line provides powerful options for those experienced in automation and configuration. -3. Edit the `main.yml` variable file +0. [Install Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on one control node (which could easily be a laptop) -Specify `with_haproxy_load_balancing: true` +```sh +sudo apt update && sudo apt install -y python3-pip sshpass git +pip3 install ansible +``` -4. Run playbook: +1. Install the Autobase Collection -`ansible-playbook add_balancer.yml` +```sh +# from Ansible Galaxy +ansible-galaxy collection install vitabaks.autobase +``` -

+Or reference it in a `requirements.yml`: +```yml +# from Ansible Galaxy +collections: + - name: vitabaks.autobase + version: 2.2.0 +``` -## Restore and Cloning -Create new clusters from your existing backups with [pgBackRest](https://github.com/pgbackrest/pgbackrest) or [WAL-G](https://github.com/wal-g/wal-g) \ -Point-In-Time-Recovery +2. Prepare the inventory -
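With the requirements file in place, the standard Galaxy CLI installs everything it lists:

```sh
ansible-galaxy collection install -r requirements.yml
```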
-<details><summary>Click here to expand...</summary>

+See example of [inventory](./automation/inventory.example) file. -##### Create cluster with pgBackRest: -1. Edit the `main.yml` variable file -``` -patroni_cluster_bootstrap_method: "pgbackrest" - -patroni_create_replica_methods: - - pgbackrest - - basebackup - -postgresql_restore_command: "pgbackrest --stanza={{ pgbackrest_stanza }} archive-get %f %p" - -pgbackrest_install: true -pgbackrest_stanza: "stanza_name" # specify your --stanza -pgbackrest_repo_type: "posix" # or "s3" -pgbackrest_repo_host: "ip-address" # dedicated repository host (if repo_type: "posix") -pgbackrest_repo_user: "postgres" # if "repo_host" is set -pgbackrest_conf: # see more options https://pgbackrest.org/configuration.html - global: # [global] section - - {option: "xxxxxxx", value: "xxxxxxx"} - ... - stanza: # [stanza_name] section - - {option: "xxxxxxx", value: "xxxxxxx"} - ... - -pgbackrest_patroni_cluster_restore_command: - '/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --type=time "--target=2020-06-01 11:00:00+03" --delta restore' -``` -example for S3 https://github.com/vitabaks/postgresql_cluster/pull/40#issuecomment-647146432 +Specify (non-public) IP addresses and connection settings (`ansible_user`, `ansible_ssh_pass` or `ansible_ssh_private_key_file` for your environment -2. Run playbook: +3. Prepare variables -`ansible-playbook deploy_pgcluster.yml` +See the [main.yml](./automation/roles/common/defaults/main.yml), [system.yml](./automation/roles/common/defaults/system.yml) and ([Debian.yml](./automation/roles/common/defaults/Debian.yml) or [RedHat.yml](./automation/roles/common/defaults/RedHat.yml)) variable files for more details. +4. Test host connectivity -##### Create cluster with WAL-G: -1. Edit the `main.yml` variable file +```sh +ansible all -m ping ``` -patroni_cluster_bootstrap_method: "wal-g" -patroni_create_replica_methods: - - wal_g - - basebackup +5. Create playbook to execute the playbooks within the collection: -postgresql_restore_command: "wal-g wal-fetch %f %p" +```yaml +- name: Playbook + hosts: -wal_g_install: true -wal_g_ver: "v0.2.15" # version to install -wal_g_json: # see more options https://github.com/wal-g/wal-g#configuration - - {option: "xxxxxxx", value: "xxxxxxx"} - - {option: "xxxxxxx", value: "xxxxxxx"} - ... + tasks: + # Start with the 'deploy' playbook, change to 'config' afterwards + - name: Run playbook + ansible.builtin.include_playbook: vitabaks.autobase.deploy_pgcluster ``` -2. Run playbook: -`ansible-playbook deploy_pgcluster.yml` +#### How to start from scratch +If you need to start from the very beginning, you can use the `remove_cluster` playbook. -##### Point-In-Time-Recovery: -You can run automatic restore of your existing patroni cluster \ -for PITR, specify the required parameters in the main.yml variable file and run the playbook with the tag: -``` -ansible-playbook deploy_pgcluster.yml --tags point_in_time_recovery -``` -Recovery steps with pgBackRest: -``` -1. Stop patroni service on the Replica servers (if running); -2. Stop patroni service on the Master server; -3. Remove patroni cluster "xxxxxxx" from DCS (if exist); -4. Run "/usr/bin/pgbackrest --stanza=xxxxxxx --delta restore" on Master; -5. Run "/usr/bin/pgbackrest --stanza=xxxxxxx --delta restore" on Replica (if patroni_create_replica_methods: "pgbackrest"); -6. Waiting for restore from backup (timeout 24 hours); -7. Start PostgreSQL for Recovery (master and replicas); -8. Waiting for PostgreSQL Recovery to complete (WAL apply); -9. Stop PostgreSQL instance (if running); -10. 
Disable PostgreSQL archive_command (if enabled); -11. Start patroni service on the Master server; -12. Check PostgreSQL is started and accepting connections on Master; -13. Make sure the postgresql users (superuser and replication) are present, and password does not differ from the specified in vars/main.yml; -14. Update postgresql authentication parameter in patroni.yml (if superuser or replication users is changed); -15. Reload patroni service (if patroni.yml is updated); -16. Start patroni service on Replica servers; -17. Check that the patroni is healthy on the replica server (timeout 10 hours); -18. Check postgresql cluster health (finish). -``` +Available variables: -**Why disable archive_command?** +- `remove_postgres`: stop the PostgreSQL service and remove data. +- `remove_etcd`: stop the ETCD service and remove data. +- `remove_consul`: stop the Consul service and remove data. -This is necessary to avoid conflicts in the archived log storage when archiving WALs. When multiple clusters try to send WALs to the same storage. \ -For example, when you make multiple clones of a cluster from one backup. +:warning: **Caution:** be careful when running this command in a production environment. -You can change this parameter using `patronictl edit-config` after restore. \ -Or set `disable_archive_command: false` to not disable archive_command after restore.
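For illustration, a hedged sketch of invoking the `remove_cluster` playbook described above from the installed collection (ansible-core 2.11+ can run a collection playbook by its fully qualified name); enable only the flags whose data you truly intend to destroy:

```sh
# Destructive: stops services and removes PostgreSQL and etcd data on the inventory hosts
ansible-playbook vitabaks.autobase.remove_cluster \
  -e "remove_postgres=true" \
  -e "remove_etcd=true"
```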

+## Star us -## Maintenance -Please note that the original design goal of this playbook was more concerned with the initial deploiment of a PostgreSQL HA Cluster and so it does not currently concern itself with performing ongoing maintenance of a cluster. - -You should learn each component of the cluster for its further maintenance. +If you find our project helpful, consider giving it a star on GitHub! Your support helps us grow and motivates us to keep improving. Starring the project is a simple yet effective way to show your appreciation and help others discover it. -- [Tutorial: Management of High-Availability PostgreSQL clusters with Patroni](https://pgconf.ru/en/2018/108567) -- [Patroni documentation](https://patroni.readthedocs.io/en/latest/) -- [etcd operations guide](https://etcd.io/docs/v3.3.12/op-guide/) + + + + + Star History Chart + + -## Disaster Recovery +## Sponsor this project -A high availability cluster provides an automatic failover mechanism, and does not cover all disaster recovery scenarios. -You must take care of backing up your data yourself. -##### etcd -> Patroni nodes are dumping the state of the DCS options to disk upon for every change of the configuration into the file patroni.dynamic.json located in the Postgres data directory. The master (patroni leader) is allowed to restore these options from the on-disk dump if these are completely absent from the DCS or if they are invalid. +By sponsoring our project, you directly contribute to its continuous improvement and innovation. As a sponsor, you'll receive exclusive benefits, including personalized support, early access to new features, and the opportunity to influence the project's direction. Your sponsorship is invaluable to us and helps ensure the project's sustainability and progress. -However, I recommend that you read the disaster recovery guide for the etcd cluster: -- [etcd disaster recovery](https://etcd.io/docs/v3.3.12/op-guide/recovery) +Become a sponsor today and help us take this project to the next level! -##### PostgreSQL (databases) -I can recommend the following backup and restore tools: -* [pgbackrest](https://github.com/pgbackrest/pgbackrest) -* [pg_probackup](https://github.com/postgrespro/pg_probackup) -* [wal-g](https://github.com/wal-g/wal-g) +Support our work through [GitHub Sponsors](https://github.com/sponsors/vitabaks) -Do not forget to validate your backups (for example [pgbackrest auto](https://github.com/vitabaks/pgbackrest_auto)). +[![GitHub Sponsors](https://img.shields.io/github/sponsors/vitabaks?style=for-the-badge)](https://github.com/sponsors/vitabaks) -## How to start from scratch -Should you need to start from very beginning, use the following to clean up: -- on all nodes, stop Patroni and remove PGDATA: - ```shell - sudo systemctl stop patroni - sudo rm -rf /var/lib/postgresql/ # be careful with this if there are other PG clusters - ``` -- then delete etcd entry (can be run on any node): - ```shell - etcdctl rm --dir --recursive /service/postgres-cluster # adjust if you changed the cluster's name - ``` +Support our work through [Patreon](https://www.patreon.com/vitabaks) ---- +[![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dvitabaks%26type%3Dpatrons&style=for-the-badge)](https://patreon.com/vitabaks) ## License + Licensed under the MIT License. See the [LICENSE](./LICENSE) file for details. 
## Author -Vitaliy Kukharik (PostgreSQL DBA) \ -vitabaks@gmail.com -### Sponsor this project -[![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dvitabaks%26type%3Dpatrons&style=for-the-badge)](https://patreon.com/vitabaks) +Vitaliy Kukharik (PostgreSQL DBA) \ +vitaliy@autobase.tech ## Feedback, bug-reports, requests, ... -Are [welcome](https://github.com/vitabaks/postgresql_cluster/issues)! + +Are [welcome](https://github.com/vitabaks/autobase/issues)! diff --git a/add_balancer.yml b/add_balancer.yml deleted file mode 100644 index a3d6ab700..000000000 --- a/add_balancer.yml +++ /dev/null @@ -1,84 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Add haproxy balancer node - hosts: balancers - become: true - become_method: sudo - any_errors_fatal: true - gather_facts: true - vars_files: - - vars/main.yml - - vars/system.yml - vars: - add_balancer: true - - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - - - name: Checking Linux distribution - fail: - msg: "{{ ansible_distribution }} is not supported" - when: ansible_distribution not in os_valid_distributions - - - name: Checking version of OS Linux - fail: - msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" - when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') - - - name: Update apt cache - apt: - update_cache: true - cache_valid_time: 3600 - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - - - name: Make sure the gnupg and apt-transport-https packages are present - apt: - pkg: - - gnupg - - apt-transport-https - state: present - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - - - name: Build a firewall_ports_dynamic_var - set_fact: - firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var |default([]) }} + {{ firewall_allowed_tcp_ports_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - name: Build a firewall_rules_dynamic_var - set_fact: - firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var |default([]) }} + {{ firewall_additional_rules_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - roles: - - role: ansible-role-firewall - environment: "{{ proxy_env | default({}) }}" - vars: - firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var|list | unique }}" - firewall_additional_rules: "{{ firewall_rules_dynamic_var|list | unique }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - role: hostname - - role: resolv_conf - - role: sysctl - - - role: haproxy - when: with_haproxy_load_balancing|bool - - - role: confd - when: dcs_type == "etcd" and - with_haproxy_load_balancing|bool - - - role: keepalived - when: cluster_vip is defined and cluster_vip | length > 0 and - with_haproxy_load_balancing|bool - -... 
diff --git a/add_pgnode.yml b/add_pgnode.yml deleted file mode 100644 index ded4068e5..000000000 --- a/add_pgnode.yml +++ /dev/null @@ -1,130 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: PostgreSQL High-Availability Cluster Scaling (add replica node) - hosts: replica - become: true - become_method: sudo - any_errors_fatal: true - gather_facts: true - vars_files: - - vars/main.yml - - vars/system.yml - - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - - - name: Checking Linux distribution - fail: - msg: "{{ ansible_distribution }} is not supported" - when: ansible_distribution not in os_valid_distributions - - - name: Checking version of OS Linux - fail: - msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" - when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') - - - name: Update apt cache - apt: - update_cache: true - cache_valid_time: 3600 - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - - - name: Make sure the gnupg and apt-transport-https packages are present - apt: - pkg: - - gnupg - - apt-transport-https - state: present - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - - - name: Build a firewall_ports_dynamic_var - set_fact: - firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var |default([]) }} + {{ firewall_allowed_tcp_ports_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - name: Build a firewall_rules_dynamic_var - set_fact: - firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var |default([]) }} + {{ firewall_additional_rules_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - roles: - - role: ansible-role-firewall - environment: "{{ proxy_env | default({}) }}" - vars: - firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var|list | unique }}" - firewall_additional_rules: "{{ firewall_rules_dynamic_var|list | unique }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - role: hostname - - role: resolv_conf - - role: etc_hosts - - role: add-repository - - role: packages - - role: sudo - - role: sysctl - - role: transparent_huge_pages - - role: pam_limits - - role: io-scheduler - - role: locales - - role: timezone - - role: ntp - - role: ssh-keys - -- hosts: pgbackrest:postgres_cluster - become: true - become_method: sudo - gather_facts: true - any_errors_fatal: true - vars_files: - - vars/main.yml - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - roles: - - role: pgbackrest - when: pgbackrest_install|bool - -- hosts: replica - become: true - become_method: sudo - gather_facts: true - any_errors_fatal: true - vars_files: - - vars/main.yml - - vars/system.yml - vars: - existing_pgcluster: true - - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - - roles: - - role: wal-g - when: wal_g_install|bool - - - role: pgbouncer - when: pgbouncer_install|bool - - - role: patroni - - - role: vip-manager - when: not with_haproxy_load_balancing|bool and - (cluster_vip is defined and cluster_vip | length > 0) - - - role: netdata - when: netdata_install is defined and netdata_install|bool - - # finish (info) - - 
role: deploy-finish - -... diff --git a/ansible.cfg b/ansible.cfg deleted file mode 100644 index 7b66b7db6..000000000 --- a/ansible.cfg +++ /dev/null @@ -1,16 +0,0 @@ -[defaults] -inventory = ./inventory -display_skipped_hosts = false -remote_tmp = /tmp/${USER}/ansible -allow_world_readable_tmpfiles = false # or "true" if the temporary directory on the remote host is mounted with POSIX acls disabled or the remote machines use ZFS. -host_key_checking = false -timeout=60 -deprecation_warnings=False - -[persistent_connection] -retries = 3 -connect_timeout = 60 -command_timeout = 30 - - -# https://raw.githubusercontent.com/ansible/ansible/devel/examples/ansible.cfg \ No newline at end of file diff --git a/automation/.dockerignore b/automation/.dockerignore new file mode 100644 index 000000000..21d0b898f --- /dev/null +++ b/automation/.dockerignore @@ -0,0 +1 @@ +.venv/ diff --git a/automation/Dockerfile b/automation/Dockerfile new file mode 100644 index 000000000..25b6a70ed --- /dev/null +++ b/automation/Dockerfile @@ -0,0 +1,39 @@ +FROM debian:bookworm-slim +LABEL maintainer="Vitaliy Kukharik vitabaks@gmail.com" + +USER root + +# Set SHELL to Bash to ensure pipefail is supported +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Copy autobase repository +COPY automation /autobase/automation + +# Install required packages, Python dependencies, Ansible requirements, and perform cleanup +RUN apt-get clean && rm -rf /var/lib/apt/lists/partial \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + ca-certificates gnupg git python3 python3-dev python3-pip keychain ssh-client sshpass\ + gcc g++ cmake make libssl-dev curl apt-transport-https lsb-release gnupg \ + && pip3 install --break-system-packages --no-cache-dir -r \ + /autobase/automation/requirements.txt \ + && ansible-galaxy install --force -r \ + /autobase/automation/requirements.yml \ + && ansible-galaxy collection list \ + && pip3 install --break-system-packages --no-cache-dir -r \ + /root/.ansible/collections/ansible_collections/azure/azcollection/requirements.txt \ + && curl -sLS https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/microsoft.asc.gpg > /dev/null \ + && echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/azure-cli.list \ + && apt-get update && apt-get install --no-install-recommends -y azure-cli \ + && apt-get autoremove -y --purge gnupg git python3-dev gcc g++ cmake make libssl-dev \ + && apt-get clean -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* \ + && chmod +x /autobase/automation/entrypoint.sh + +# Set environment variable for Ansible collections paths +ENV ANSIBLE_COLLECTIONS_PATH=/root/.ansible/collections/ansible_collections:/usr/local/lib/python3.11/dist-packages/ansible_collections +ENV USER=root + +WORKDIR /autobase/automation + +ENTRYPOINT ["./entrypoint.sh"] diff --git a/automation/README.md b/automation/README.md new file mode 100644 index 000000000..4e33d7f19 --- /dev/null +++ b/automation/README.md @@ -0,0 +1,76 @@ +# Autobase: Automation + +**Autobase for PostgreSQL®** automates the deployment and management of highly available PostgreSQL clusters in production environments. This solution is tailored for use on dedicated physical servers, virtual machines, and within both on-premises and cloud-based infrastructures. 
+
+For a detailed description of the cluster components, visit the [Architecture](https://autobase.tech/docs/overview/architecture) page.
+
+## Getting Started
+
+1. [Install Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) on one control node (which could easily be a laptop)
+
+```sh
+sudo apt update && sudo apt install -y python3-pip sshpass git
+pip3 install ansible
+```
+
+2. Install the Autobase Collection
+
+```sh
+# from Ansible Galaxy
+ansible-galaxy collection install vitabaks.autobase
+```
+
+Or reference it in a `requirements.yml`:
+
+```yml
+# from Ansible Galaxy
+collections:
+  - name: vitabaks.autobase
+    version: 2.2.0
+```
+
+3. Prepare inventory
+
+See the example [inventory](./inventory.example) file.
+
+Specify (non-public) IP addresses and connection settings (`ansible_user`, `ansible_ssh_pass` or `ansible_ssh_private_key_file`) for your environment.
+
+4. Prepare variables
+
+See the [main.yml](./roles/common/defaults/main.yml), [system.yml](./roles/common/defaults/system.yml) and ([Debian.yml](./roles/common/defaults/Debian.yml) or [RedHat.yml](./roles/common/defaults/RedHat.yml)) variable files for more details.
+
+5. Test host connectivity
+
+```sh
+ansible all -m ping
+```
+
+6. Create a playbook to execute the playbooks within the collection:
+
+```yaml
+# Start with the 'deploy' playbook, change to 'config' afterwards
+- name: Run playbook
+  ansible.builtin.import_playbook: vitabaks.autobase.deploy_pgcluster
+```
+
+#### How to start from scratch
+
+If you need to start from the very beginning, you can use the `remove_cluster` playbook.
+
+Available variables:
+
+- `remove_postgres`: stop the PostgreSQL service and remove data.
+- `remove_etcd`: stop the ETCD service and remove data.
+- `remove_consul`: stop the Consul service and remove data.
+
+:warning: **Caution:** be careful when running this command in a production environment.
+
+## Support
+
+We provide personalized support and expert assistance, so you can focus on building your project with confidence, knowing that a reliable partner is always available when you need help.
+ +Choose the support plan that fits your needs: https://autobase.tech/docs/support diff --git a/automation/add_balancer.yml b/automation/add_balancer.yml new file mode 100644 index 000000000..230dedd9f --- /dev/null +++ b/automation/add_balancer.yml @@ -0,0 +1,158 @@ +--- +- name: add_balancer.yml | Add HAProxy balancer node + hosts: balancers + become: true + become_method: sudo + any_errors_fatal: true + gather_facts: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: "[Pre-Check] Checking Linux distribution" + ansible.builtin.fail: + msg: "{{ ansible_distribution }} is not supported" + when: ansible_distribution not in os_valid_distributions + + - name: "[Pre-Check] Checking version of OS Linux" + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" + when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') + + - name: "[Pre-Check] Check if there is a node with new_node set to true" + ansible.builtin.set_fact: + new_nodes: "{{ new_nodes | default([]) + [item] }}" + when: hostvars[item]['new_node'] | default(false) | bool + loop: "{{ groups['balancers'] }}" + tags: always + + # Stop, if no nodes found with new_node variable + - name: "Pre-Check error. No nodes found with new_node set to true" + run_once: true # noqa run-once + ansible.builtin.fail: + msg: "Please specify the new_node=true variable for the new balancer server to add it to the existing cluster." 
+ when: new_nodes | default([]) | length < 1 + + - name: Print a list of new balancer nodes + run_once: true # noqa run-once + ansible.builtin.debug: + var: new_nodes + tags: always + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - new_node | default(false) | bool + - ansible_os_family == "Debian" + - installation_method == "repo" + + - name: Make sure the gnupg and apt-transport-https packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - new_node | default(false) | bool + - ansible_os_family == "Debian" + - installation_method == "repo" + + - name: Build a firewall_ports_dynamic_var + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + - name: Build a firewall_rules_dynamic_var + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + roles: + - role: firewall + environment: "{{ proxy_env | default({}) }}" + vars: + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + - role: sysctl + when: + - new_node | default(false) | bool + + tasks: + - name: Add host to group new_balancer (in-memory inventory) + ansible.builtin.add_host: + name: "{{ item }}" + groups: new_balancer + loop: "{{ new_nodes }}" + changed_when: false + tags: always + +- name: add_balancer.yml | Add load balancer + hosts: new_balancer + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + vars: + add_balancer: true + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + roles: + - role: hostname + - role: resolv_conf + + - role: haproxy + when: with_haproxy_load_balancing|bool + + - role: confd + when: dcs_type == "etcd" and + with_haproxy_load_balancing|bool + + - role: keepalived + when: cluster_vip is defined and cluster_vip | length > 0 and + with_haproxy_load_balancing|bool diff --git a/automation/add_pgnode.yml b/automation/add_pgnode.yml new file mode 100644 index 000000000..312fee3f3 --- /dev/null +++ b/automation/add_pgnode.yml @@ -0,0 +1,259 @@ +--- +- name: add_pgnode.yml | PostgreSQL HA Cluster Scaling (add a replica node) + hosts: postgres_cluster + become: true + become_method: sudo + any_errors_fatal: true + gather_facts: true + handlers: + - 
ansible.builtin.import_tasks: roles/patroni/handlers/main.yml + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: Set maintenance variable + ansible.builtin.set_fact: + postgresql_cluster_maintenance: true + + - name: "[Pre-Check] Checking Linux distribution" + ansible.builtin.fail: + msg: "{{ ansible_distribution }} is not supported" + when: ansible_distribution not in os_valid_distributions + + - name: "[Pre-Check] Checking version of OS Linux" + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" + when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') + + - name: "[Pre-Check] Check if there is a node with new_node set to true" + ansible.builtin.set_fact: + new_nodes: "{{ new_nodes | default([]) + [item] }}" + when: hostvars[item]['new_node'] | default(false) | bool + loop: "{{ groups['replica'] }}" + tags: always + + # Stop, if no nodes found with new_node variable + - name: "Pre-Check error. No nodes found with new_node set to true" + run_once: true # noqa run-once + ansible.builtin.fail: + msg: "Please specify the new_node=true variable for the new server to add it to the existing cluster." + when: new_nodes | default([]) | length < 1 + + - name: Print a list of new nodes + run_once: true # noqa run-once + ansible.builtin.debug: + var: new_nodes + tags: always + + - name: Add a new node to pg_hba.conf on existing cluster nodes + ansible.builtin.include_role: + name: patroni/config + tasks_from: pg_hba + when: not new_node | default(false) | bool + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - new_node | default(false) | bool + - ansible_os_family == "Debian" + - installation_method == "repo" + + - name: Make sure the gnupg and apt-transport-https packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - new_node | default(false) | bool + - ansible_os_family == "Debian" + - installation_method == "repo" + + - name: Build a firewall_ports_dynamic_var + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + - name: Build a firewall_rules_dynamic_var + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + roles: + - role: firewall + environment: "{{ proxy_env | default({}) }}" + vars: + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | 
unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" + when: + - new_node | default(false) | bool + - firewall_enabled_at_boot | bool + tags: firewall + + - role: pre_checks + vars: + minimal_ansible_version: 2.16.0 + timescale_minimal_pg_version: 12 # if enable_timescale is defined + tags: always + + - role: sysctl + when: + - new_node | default(false) | bool + + - role: ssh_keys + when: + - enable_ssh_key_based_authentication | default(false) | bool + + tasks: + - name: Add host to group new_replica (in-memory inventory) + ansible.builtin.add_host: + name: "{{ item }}" + groups: new_replica + loop: "{{ new_nodes }}" + changed_when: false + tags: always + +- name: add_pgnode.yml | Configure new PostgreSQL node + hosts: new_replica + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + # if 'dcs_type' is 'consul' + - name: Add a nameserver entry poining to localhost for dnsmasq + ansible.builtin.set_fact: + nameservers: "{{ ['127.0.0.1'] + (nameservers | default([])) }}" + when: dcs_type == "consul" and consul_dnsmasq_enable | bool and ('127.0.0.1' not in (nameservers | default([]))) + + roles: + - role: hostname + - role: resolv_conf + - role: etc_hosts + - role: add_repository + - role: packages + - role: sudo + - role: swap + - role: transparent_huge_pages + - role: pam_limits + - role: io_scheduler + - role: locales + - role: timezone + - role: ntp + - role: copy + - role: cron + +- name: add_pgnode.yml | Configure pgBackRest + hosts: pgbackrest:postgres_cluster + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + roles: + - role: pgbackrest + when: pgbackrest_install|bool + +- name: add_pgnode.yml | Import Consul playbook + ansible.builtin.import_playbook: consul.yml + when: dcs_type == "consul" + tags: consul + +- name: add_pgnode.yml | Add new PostgreSQL replica to the cluster + hosts: new_replica + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + vars: + existing_pgcluster: true + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + roles: + - role: wal_g + when: wal_g_install|bool + + - role: pg_probackup + when: pg_probackup_install|bool + + - role: tls_certificate/copy + when: tls_cert_generate|bool + + - role: pgbouncer + when: pgbouncer_install|bool + + - role: pgpass + + - role: patroni + + - role: vip_manager + when: not with_haproxy_load_balancing|bool and + (cluster_vip is defined and cluster_vip | length > 0) + + - role: netdata + + # finish (info) + - 
role: deploy_finish diff --git a/automation/ansible.cfg b/automation/ansible.cfg new file mode 100644 index 000000000..334116498 --- /dev/null +++ b/automation/ansible.cfg @@ -0,0 +1,28 @@ +[defaults] +forks = 10 +inventory = ./inventory +use_persistent_connections = True +remote_tmp = /tmp/${USER}/ansible +allow_world_readable_tmpfiles = false # or "true" if the temporary directory on the remote host is mounted with POSIX acls disabled or the remote machines use ZFS. +host_key_checking = False +timeout = 60 +deprecation_warnings = False +display_skipped_hosts = False +localhost_warning = False +stdout_callback = default +# Define the directory for custom callback plugins +callback_plugins = ./plugins/callback +# Enable JSON logging if 'ANSIBLE_JSON_LOG_FILE' environment variable is defined (example: ANSIBLE_JSON_LOG_FILE=./ansible_log.json) +callbacks_enabled = json_log + +[ssh_connection] +ssh_args = -o Ciphers=aes128-ctr -o MACs=hmac-sha2-256 -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=3 -o ServerAliveInterval=5 -o ServerAliveCountMax=10 +pipelining = True + +[persistent_connection] +retries = 3 +connect_timeout = 60 +command_timeout = 30 + +# https://github.com/ansible/ansible/blob/stable-2.11/examples/ansible.cfg +# https://docs.ansible.com/ansible/latest/reference_appendices/config.html diff --git a/balancers.yml b/automation/balancers.yml similarity index 61% rename from balancers.yml rename to automation/balancers.yml index d824c781b..d385d1fc2 100644 --- a/balancers.yml +++ b/automation/balancers.yml @@ -1,67 +1,80 @@ --- -# yamllint disable rule:line-length - -- hosts: balancers +- name: balancers.yml | Configure HAProxy load balancers + hosts: balancers become: true become_method: sudo any_errors_fatal: true gather_facts: true - vars_files: - - vars/main.yml - - vars/system.yml vars: - vip_manager_disable: false # or 'true' for disable vip-manager service (if installed) + vip_manager_disable: false # or 'true' to disable the vip-manager service (if installed) pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always - name: Checking Linux distribution - fail: + ansible.builtin.fail: msg: "{{ ansible_distribution }} is not supported" when: ansible_distribution not in os_valid_distributions - name: Checking version of OS Linux - fail: + ansible.builtin.fail: msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') - name: Update apt cache - apt: + ansible.builtin.apt: update_cache: true cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" when: ansible_os_family == "Debian" and installation_method == "repo" - name: Make sure the gnupg and apt-transport-https packages are present - apt: + ansible.builtin.apt: pkg: - gnupg - apt-transport-https state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" when: ansible_os_family == "Debian" and installation_method == "repo" - name: Build a
firewall_ports_dynamic_var - set_fact: - firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var |default([]) }} + {{ firewall_allowed_tcp_ports_for[item] }}" + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool + when: firewall_enabled_at_boot | bool tags: firewall - name: Build a firewall_rules_dynamic_var - set_fact: - firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var |default([]) }} + {{ firewall_additional_rules_for[item] }}" + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool + when: firewall_enabled_at_boot | bool tags: firewall roles: - - role: ansible-role-firewall + - role: firewall environment: "{{ proxy_env | default({}) }}" vars: - firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var|list | unique }}" - firewall_additional_rules: "{{ firewall_rules_dynamic_var|list | unique }}" + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" when: firewall_enabled_at_boot|bool tags: firewall @@ -74,10 +87,8 @@ - role: confd when: dcs_type == "etcd" - - role: vip-manager/disable + - role: vip_manager/disable when: vip_manager_disable|bool - role: keepalived when: cluster_vip is defined and cluster_vip | length > 0 - -... diff --git a/automation/changelog.yaml b/automation/changelog.yaml new file mode 100644 index 000000000..a4d952ee1 --- /dev/null +++ b/automation/changelog.yaml @@ -0,0 +1,3 @@ +# changelog.yaml +--- +releases: {} diff --git a/automation/config_pgcluster.yml b/automation/config_pgcluster.yml new file mode 100644 index 000000000..ebf322dab --- /dev/null +++ b/automation/config_pgcluster.yml @@ -0,0 +1,367 @@ +--- +- name: config_pgcluster.yml | Configure PostgreSQL HA Cluster (based on "Patroni") + hosts: localhost + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + # Note: Applicable only for "aws", "gcp", "azure", because: + # "digitalocean" - requires the Spaces access keys ("AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY" variables) + # "hetzner" - requires the S3 credentials ("hetzner_object_storage_access_key" and "hetzner_object_storage_secret_key" variables).
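For the providers excluded by the note above, backups can still be enabled by supplying the credentials yourself. A minimal sketch (the variable names come from the note; the values and the extra-vars file are illustrative assumptions, not defaults of this collection):

```yaml
# backups.yml (hypothetical extra-vars file), e.g.:
#   ansible-playbook config_pgcluster.yml -e @backups.yml
pgbackrest_install: true
hetzner_object_storage_access_key: "<your-access-key>" # placeholder
hetzner_object_storage_secret_key: "<your-secret-key>" # placeholder
```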
+ - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + roles: + - role: cloud_resources + when: cloud_provider | default('') | length > 0 + vars: + postgresql_cluster_maintenance: true + tags: always + +- name: config_pgcluster.yml | Check the PostgreSQL cluster state and perform pre-checks + hosts: postgres_cluster + become: true + become_method: sudo + gather_facts: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + + - name: "[Prepare] Set maintenance variable" + ansible.builtin.set_fact: + postgresql_cluster_maintenance: true + + - name: "[Prepare] Get Patroni Cluster Leader Node" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + changed_when: false + failed_when: false + check_mode: false + environment: + no_proxy: "{{ inventory_hostname }}" + + # Stop, if Patroni is unavailable + - name: The Patroni cluster is unhealthy + ansible.builtin.fail: + msg: "Patroni is unavailable on {{ ansible_hostname }}. Please check the cluster status." + changed_when: false + when: patroni_leader_result is undefined or patroni_leader_result.status == -1 + + roles: + - role: pre_checks + vars: + minimal_ansible_version: 2.16.0 + timescale_minimal_pg_version: 12 # if enable_timescale is defined + + tasks: + - name: '[Prepare] Add host to group "primary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: primary + postgresql_exists: true + when: hostvars[item]['patroni_leader_result']['status'] == 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + check_mode: false + + - name: '[Prepare] Add hosts to group "secondary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: secondary + postgresql_exists: true + when: hostvars[item]['patroni_leader_result']['status'] != 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + check_mode: false + + - name: "Print Patroni Cluster info" + ansible.builtin.debug: + msg: + - "Cluster Name: {{ patroni_cluster_name }}" + - "Cluster Leader: {{ ansible_hostname }}" + when: inventory_hostname in groups['primary'] + + # if 'cloud_provider' is 'aws', 'gcp', or 'azure' + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: + - always + +- name: config_pgcluster.yml | Configure PostgreSQL Cluster + hosts: "primary:secondary" + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + environment: "{{ proxy_env | default({}) 
}}" + handlers: + - ansible.builtin.import_tasks: roles/patroni/handlers/main.yml + - ansible.builtin.import_tasks: roles/pgbouncer/handlers/main.yml + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: Build a firewall_ports_dynamic_var + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: config_firewall, firewall + + - name: Build a firewall_rules_dynamic_var + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + # if 'dcs_type' is 'consul' + - name: Add a nameserver entry poining to localhost for dnsmasq + ansible.builtin.set_fact: + nameservers: "{{ ['127.0.0.1'] + (nameservers | default([])) }}" + when: dcs_type == "consul" and consul_dnsmasq_enable | bool and ('127.0.0.1' not in (nameservers | default([]))) + + roles: + - role: firewall + vars: + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + - role: hostname + - role: resolv_conf + - role: etc_hosts + - role: timezone + - role: add_repository + - role: packages + - role: sudo + - role: swap + - role: sysctl + - role: transparent_huge_pages + - role: pam_limits + - role: io_scheduler + - role: locales + - role: ntp + - role: ssh_keys + - role: copy + - role: pgpass + - role: cron + + - role: patroni/config + + - role: pgbouncer/config + when: pgbouncer_install | bool + + - role: vip_manager + when: not with_haproxy_load_balancing | bool and + (cluster_vip is defined and cluster_vip | length > 0) + + - role: postgresql_users + when: inventory_hostname in groups['primary'] + + - role: postgresql_databases + when: inventory_hostname in groups['primary'] + + - role: postgresql_schemas + when: inventory_hostname in groups['primary'] + + - role: postgresql_privs + when: inventory_hostname in groups['primary'] + + - role: wal_g + when: wal_g_install | bool + +- name: config_pgcluster | Configure pgBackRest + hosts: pgbackrest:postgres_cluster + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + # if 'cloud_provider' is 'aws', 'gcp', or 'azure' + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | 
lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + roles: + - role: pgbackrest + when: pgbackrest_install | bool + +- name: config_pgcluster.yml | Restart Patroni on secondary after config settings if needed + hosts: secondary + serial: 1 # restart replicas one by one + gather_facts: false + become: true + become_method: sudo + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + tasks: + - name: Stop read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: stop_traffic + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Stop Services + ansible.builtin.include_role: + name: update + tasks_from: stop_services + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Start Services + ansible.builtin.include_role: + name: update + tasks_from: start_services + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Start read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: start_traffic + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + tags: + - patroni_conf + +- name: config_pgcluster.yml | Restart Patroni on master after config settings if needed + hosts: primary + gather_facts: false + become: true + become_method: sudo + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + tasks: + - name: Switchover Patroni leader role + ansible.builtin.include_role: + name: update + tasks_from: switchover + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Stop read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: stop_traffic + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Stop Services + ansible.builtin.include_role: + name: update + tasks_from: stop_services + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Start Services + ansible.builtin.include_role: + name: update + tasks_from: start_services + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + + - name: Start read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: start_traffic + when: + - pending_restart | bool + - pg_pending_restart_settings | length > 0 + tags: + - patroni_conf + +- name: config_pgcluster.yml | Configure PostgreSQL Cluster and display info + hosts: primary + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + roles: + - role:
postgresql_extensions + + # finish (info) + - role: deploy_finish diff --git a/automation/consul.yml b/automation/consul.yml new file mode 100644 index 000000000..c6ece93cb --- /dev/null +++ b/automation/consul.yml @@ -0,0 +1,153 @@ +--- +- name: consul.yml | Configure Consul instances + hosts: consul_instances + become: true + become_method: sudo + any_errors_fatal: true + gather_facts: true + environment: "{{ proxy_env | default({}) }}" + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: Check if the consul role requirements (ansible.utils) are installed + ansible.builtin.command: ansible-galaxy collection list ansible.utils + changed_when: false + failed_when: false + register: ansible_utils_result + delegate_to: localhost + run_once: true # noqa run-once + + - name: Consul role requirements + ansible.builtin.fail: + msg: + - "Please install consul role requirements (ansible.utils)" + - "ansible-galaxy install -r roles/consul/requirements.yml" + delegate_to: localhost + run_once: true # noqa run-once + when: + - ansible_utils_result.stderr is defined + - ansible_utils_result.stderr is search("unable to find") + + - name: Make sure the unzip package is present on the control host + ansible.builtin.package: + name: unzip + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + delegate_to: localhost + run_once: true # noqa run-once + when: ansible_distribution != "MacOSX" + + - name: Make sure the python3-pip package is present on the control host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + delegate_to: localhost + run_once: true # noqa run-once + when: ansible_distribution != "MacOSX" + + - name: Install netaddr dependency on the control host + ansible.builtin.pip: + name: netaddr + executable: pip3 + become: false + environment: + PIP_BREAK_SYSTEM_PACKAGES: "1" + delegate_to: localhost + run_once: true # noqa run-once + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and installation_method == "repo" + + - name: Make sure the gnupg and apt-transport-https packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and installation_method == "repo" + + - name: Make sure the python3-pip package is present + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + + - name: Build a firewall_ports_dynamic_var + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + - name: Build a firewall_rules_dynamic_var + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{
firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + - name: Add a nameserver entry pointing to localhost for dnsmasq + ansible.builtin.set_fact: + nameservers: "{{ ['127.0.0.1'] + (nameservers | default([])) }}" + when: dcs_type == "consul" and consul_dnsmasq_enable | bool and ('127.0.0.1' not in (nameservers | default([]))) + + - name: Redefine the consul_dnsmasq_servers variable + ansible.builtin.set_fact: + consul_dnsmasq_servers: "{{ consul_dnsmasq_servers | reject('equalto', '127.0.0.1') | list }}" + when: dcs_type == "consul" and consul_dnsmasq_enable | bool and ('127.0.0.1' in (consul_dnsmasq_servers | default([]))) + + # Setting variables for Consul during cloud deployment + - name: Redefine the consul_node_role and consul_bootstrap_expect variables + ansible.builtin.set_fact: + consul_node_role: "{{ 'server' if not dcs_exists else 'client' }}" + consul_bootstrap_expect: "{{ not dcs_exists }}" + consul_datacenter: "{{ server_location | default('dc1') }}" + when: cloud_provider | default('') | length > 0 + + roles: + - role: firewall + vars: + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" + when: firewall_enabled_at_boot|bool + tags: firewall + + - role: hostname + - role: resolv_conf + - role: etc_hosts + - role: timezone + - role: ntp + + - role: consul diff --git a/automation/deploy_pgcluster.yml b/automation/deploy_pgcluster.yml new file mode 100644 index 000000000..1ca8282a6 --- /dev/null +++ b/automation/deploy_pgcluster.yml @@ -0,0 +1,464 @@ +--- +- name: Deploy PostgreSQL HA Cluster (based on "Patroni") + hosts: localhost + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + # Note: Applicable only for "aws", "gcp", "azure", because: + # "digitalocean" - requires the Spaces access keys ("AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY" variables) + # "hetzner" - requires the S3 credentials ("hetzner_object_storage_access_key" and "hetzner_object_storage_secret_key" variables).
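The dnsmasq guard that recurs throughout these playbooks (seen in consul.yml just above) relies on a prepend-once pattern. A standalone illustration of only that pattern, as a hypothetical test play that is not part of the collection:

```yaml
---
- name: Illustrate the guarded nameserver prepend
  hosts: localhost
  gather_facts: false
  vars:
    nameservers: ["8.8.8.8", "9.9.9.9"]
  tasks:
    # Prepend 127.0.0.1 only when it is absent, so repeated runs stay idempotent
    - name: Add localhost as the first nameserver
      ansible.builtin.set_fact:
        nameservers: "{{ ['127.0.0.1'] + (nameservers | default([])) }}"
      when: "'127.0.0.1' not in (nameservers | default([]))"

    - name: Show the result # prints ['127.0.0.1', '8.8.8.8', '9.9.9.9']
      ansible.builtin.debug:
        var: nameservers
```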
+ - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + roles: + - role: cloud_resources + when: cloud_provider | default('') | length > 0 + tags: always + +- name: deploy_pgcluster.yml | Perform pre-checks + hosts: all + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: System info + ansible.builtin.debug: + msg: + server_name: "{{ hostname | default(ansible_hostname) }}" + server_location: "{{ server_location | default(omit) }}" + ip_address: "{{ inventory_hostname | default('N/A') }}" + os: "{{ ansible_distribution | default('N/A') }} {{ ansible_distribution_version | default('N/A') }}" + kernel: "{{ ansible_kernel | default('N/A') }}" + cpu: + model: "{{ ansible_processor[2] | default('N/A') }}" + count: "{{ ansible_processor_count | default('N/A') }}" + cores: "{{ ansible_processor_cores | default('N/A') }}" + memory: "{{ (ansible_memtotal_mb / 1024) | round(2) if ansible_memtotal_mb is defined else 'N/A' }} GB" + disk_space_total: >- + {{ + (ansible_mounts + | map(attribute='size_total') + | map('int') + | sum / 1024 / 1024 / 1024 + ) + | round(2) if ansible_mounts is defined else 'N/A' + }} GB + architecture: "{{ ansible_architecture | default('N/A') }}" + virtualization_type: "{{ ansible_virtualization_type | default('N/A') }}" + product_name: "{{ ansible_product_name | default('N/A') }}" + tags: always + + # if 'cloud_provider' is 'aws', 'gcp', or 'azure' + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" + + - name: Make sure the gnupg and apt-transport-https packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" + + # Ansible requires the iproute package for network facts to be populated + - name: Make sure that the iproute is installed + ansible.builtin.package: + name: iproute + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 
+ when: ansible_os_family == "RedHat" + + - name: Make sure that the iproute is installed + ansible.builtin.apt: + name: iproute2 + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" + + # (optional) Command or script to be executed before the Postgres cluster deployment. + - block: + - name: Print pre-deploy command + ansible.builtin.debug: + var: pre_deploy_command + when: pre_deploy_command_print | default(false) | bool + + - name: Run pre-deploy command + ansible.builtin.shell: "{{ pre_deploy_command }} > {{ pre_deploy_command_log | default('/var/tmp/pre_deploy_command.log') }} 2>&1" + args: + executable: /bin/bash + register: pre_deploy_result + delegate_to: "{{ item }}" + loop: "{{ pre_deploy_command_hosts.split(',') | map('extract', groups) | list | flatten }}" + async: "{{ pre_deploy_command_timeout }}" # run the command asynchronously + poll: 0 + + - name: Wait for the pre-deploy command to complete + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + register: pre_deploy_job_result + delegate_to: "{{ item.item }}" + loop: "{{ pre_deploy_result.results }}" + loop_control: + label: "{{ item.item }}" + until: pre_deploy_job_result.finished + retries: "{{ (pre_deploy_command_timeout | int) // 10 }}" + delay: 10 + ignore_errors: true # allows collecting logs before stopping execution (in case of failure) + when: + - pre_deploy_result.results is defined + - item.ansible_job_id is defined + + - name: Get pre-deploy command log + ansible.builtin.command: "cat {{ pre_deploy_command_log | default('/var/tmp/pre_deploy_command.log') }}" + register: pre_deploy_log_content + delegate_to: "{{ item }}" + loop: "{{ pre_deploy_command_hosts.split(',') | map('extract', groups) | list | flatten }}" + changed_when: false + when: pre_deploy_command_print_result | default(false) | bool + + - name: Print pre-deploy command result + ansible.builtin.debug: + msg: "{{ item.stdout_lines }}" + loop: "{{ pre_deploy_log_content.results }}" + loop_control: + label: "{{ item.item }}" + when: + - pre_deploy_log_content.results is defined + - item.stdout_lines is defined + + - name: Stop if pre-deploy command failed + ansible.builtin.fail: + msg: "Pre-deploy command failed. See log for details." + when: pre_deploy_job_result.results | json_query('[?failed]') | length > 0 + run_once: true # noqa run-once + when: pre_deploy_command | default('') | length > 0 + tags: pre_deploy, pre_deploy_command + + roles: + # (optional) if 'ssh_public_keys' is defined + - role: authorized_keys + tags: ssh_public_keys + + - role: pre_checks + vars: + minimal_ansible_version: 2.16.0 + timescale_minimal_pg_version: 12 # if enable_timescale is defined + tags: always + + - role: hostname + + - role: tls_certificate/generate + vars: + tls_group_name: "postgres_cluster" + tls_cert_regenerate: "{{ patroni_tls_cert_regenerate | default(false) }}" # Do not generate new certificates if they already exist.
+ when: tls_cert_generate | bool + +- name: deploy_pgcluster.yml | Deploy etcd cluster + ansible.builtin.import_playbook: etcd_cluster.yml + when: not dcs_exists|bool and dcs_type == "etcd" + tags: etcd + +- name: deploy_pgcluster.yml | Deploy Consul + ansible.builtin.import_playbook: consul.yml + when: dcs_type == "consul" + tags: consul + +- name: deploy_pgcluster.yml | Postgres Cluster Configuration + hosts: postgres_cluster + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + - name: Build a firewall_ports_dynamic_var + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + - name: Build a firewall_rules_dynamic_var + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" + loop: "{{ hostvars[inventory_hostname].group_names }}" + when: firewall_enabled_at_boot | bool + tags: firewall + + # if 'dcs_type' is 'consul' + - name: Add a nameserver entry pointing to localhost for dnsmasq + ansible.builtin.set_fact: + nameservers: "{{ ['127.0.0.1'] + (nameservers | default([])) }}" + when: dcs_type == "consul" and consul_dnsmasq_enable | bool and ('127.0.0.1' not in (nameservers | default([]))) + + roles: + - role: firewall + vars: + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" + when: firewall_enabled_at_boot|bool + tags: firewall + + - role: resolv_conf + - role: etc_hosts + - role: add_repository + - role: packages + - role: sudo + - role: mount + - role: swap + - role: sysctl + - role: transparent_huge_pages + - role: pam_limits + - role: io_scheduler + - role: locales + - role: timezone + - role: ntp + - role: ssh_keys + - role: copy + +- name: deploy_pgcluster.yml | Deploy balancers + ansible.builtin.import_playbook: balancers.yml + when: with_haproxy_load_balancing|bool + tags: load_balancing, haproxy + +- name: deploy_pgcluster.yml | Install and configure pgBackRest + hosts: pgbackrest:postgres_cluster + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + # if 'cloud_provider' is 'aws', 'gcp', or 'azure' + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure']
+ - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + roles: + - role: pgbackrest + when: pgbackrest_install | bool + +- name: deploy_pgcluster.yml | PostgreSQL Cluster Deployment + hosts: postgres_cluster + become: true + become_method: sudo + gather_facts: true + any_errors_fatal: true + handlers: + - ansible.builtin.import_tasks: roles/pgbouncer/handlers/main.yml + + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always + + # if 'cloud_provider' is 'aws', 'gcp', or 'azure' + # set_fact: 'pgbackrest_install' to configure Postgres backups (TODO: Add the ability to configure backups in the UI) + - name: "Set variable: 'pgbackrest_install' to configure Postgres backups" + ansible.builtin.set_fact: + pgbackrest_install: true + when: + - not (pgbackrest_install | bool or wal_g_install | bool) + - cloud_provider | default('') | lower in ['aws', 'gcp', 'azure'] + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: always + + roles: + - role: wal_g + when: wal_g_install|bool + + - role: pg_probackup + when: pg_probackup_install|bool + + - role: cron + + - role: tls_certificate/copy + when: tls_cert_generate|bool + + - role: pgbouncer + when: pgbouncer_install|bool + + - role: pgpass + + - role: patroni + + - role: pgbackrest/stanza-create + when: pgbackrest_install | bool + + - role: vip_manager + when: not with_haproxy_load_balancing|bool and + (cluster_vip is defined and cluster_vip | length > 0) + + # optional + - role: postgresql_users + when: inventory_hostname == groups['master'][0] + + - role: postgresql_databases + when: inventory_hostname == groups['master'][0] + + - role: postgresql_schemas + when: inventory_hostname == groups['master'][0] + + - role: postgresql_privs + when: inventory_hostname == groups['master'][0] + + - role: postgresql_extensions + when: inventory_hostname == groups['master'][0] + + - role: pgbouncer/config + when: pgbouncer_install|bool + + - role: netdata + + # finish (info) + - role: deploy_finish + + tasks: + # (optional) Command or script to be executed after the Postgres cluster deployment. 
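The block that follows implements this hook. As a hedged usage sketch, these are the variables one might set to run a command on the cluster after deployment; all of the names appear in the tasks below, while the command and values are illustrative only:

```yaml
# Illustrative extra vars for the post-deploy hook below
post_deploy_command: "psql -tAXc 'SELECT version()'" # example command, not a default
post_deploy_command_hosts: "postgres_cluster" # comma-separated inventory group names
post_deploy_command_timeout: 300 # seconds; also bounds the async wait below
post_deploy_command_print: true # print the command before running it
post_deploy_command_print_result: true # print the collected log afterwards
```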
+ - block: + - name: Print post-deploy command + ansible.builtin.debug: + var: post_deploy_command + when: post_deploy_command_print | default(false) | bool + + - name: Run post-deploy command + ansible.builtin.shell: "{{ post_deploy_command }} > {{ post_deploy_command_log | default('/var/tmp/post_deploy_command.log') }} 2>&1" + args: + executable: /bin/bash + register: post_deploy_result + delegate_to: "{{ item }}" + loop: "{{ post_deploy_command_hosts.split(',') | map('extract', groups) | list | flatten }}" + async: "{{ post_deploy_command_timeout }}" # run the command asynchronously + poll: 0 + + - name: Wait for the post-deploy command to complete + ansible.builtin.async_status: + jid: "{{ item.ansible_job_id }}" + register: post_deploy_job_result + delegate_to: "{{ item.item }}" + loop: "{{ post_deploy_result.results }}" + loop_control: + label: "{{ item.item }}" + until: post_deploy_job_result.finished + retries: "{{ (post_deploy_command_timeout | int) // 10 }}" + delay: 10 + ignore_errors: true # allows collecting logs before stopping execution (in case of failure) + when: + - post_deploy_result.results is defined + - item.ansible_job_id is defined + + - name: Get post-deploy command log + ansible.builtin.command: "cat {{ post_deploy_command_log | default('/var/tmp/post_deploy_command.log') }}" + register: post_deploy_log_content + delegate_to: "{{ item }}" + loop: "{{ post_deploy_command_hosts.split(',') | map('extract', groups) | list | flatten }}" + changed_when: false + when: post_deploy_command_print_result | default(false) | bool + + - name: Print post-deploy command result + ansible.builtin.debug: + msg: "{{ item.stdout_lines }}" + loop: "{{ post_deploy_log_content.results }}" + loop_control: + label: "{{ item.item }}" + when: + - post_deploy_log_content.results is defined + - item.stdout_lines is defined + + - name: Stop if post-deploy command failed + ansible.builtin.fail: + msg: "Post-deploy command failed. See log for details."
+ when: post_deploy_job_result.results | json_query('[?failed]') | length > 0 + run_once: true # noqa run-once + when: post_deploy_command | default('') | length > 0 + tags: post_deploy, post_deploy_command diff --git a/automation/entrypoint.sh b/automation/entrypoint.sh new file mode 100644 index 000000000..9068df6da --- /dev/null +++ b/automation/entrypoint.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +is_base64() { + # Check if input is base64 encoded + if [[ "$1" =~ ^[A-Za-z0-9+/=]+$ ]]; then + return 0 + else + return 1 + fi +} + +# Check if ANSIBLE_INVENTORY_JSON is set and create inventory.json if it is +if [[ -n "${ANSIBLE_INVENTORY_JSON}" ]]; then + if is_base64 "${ANSIBLE_INVENTORY_JSON}"; then + echo "Creating inventory.json with the (base64 decoded) content of ANSIBLE_INVENTORY_JSON" + echo "${ANSIBLE_INVENTORY_JSON}" | base64 -d > /autobase/inventory.json + else + echo "Creating inventory.json with the content of ANSIBLE_INVENTORY_JSON" + echo "${ANSIBLE_INVENTORY_JSON}" > /autobase/inventory.json + fi + # Set ANSIBLE_INVENTORY environment variable + export ANSIBLE_INVENTORY=/autobase/inventory.json + # Set ANSIBLE_SSH_ARGS environment variable + export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no" +fi + +# Check if SSH_PRIVATE_KEY_CONTENT is set and create the SSH private key file if it is +if [[ -n "${SSH_PRIVATE_KEY_CONTENT}" ]]; then + mkdir -p /root/.ssh + if is_base64 "${SSH_PRIVATE_KEY_CONTENT}"; then + echo "Creating SSH private key file with the (base64 decoded) content of SSH_PRIVATE_KEY_CONTENT" + echo "${SSH_PRIVATE_KEY_CONTENT}" | base64 -d > /root/.ssh/id_rsa + else + echo "Creating SSH private key file with the content of SSH_PRIVATE_KEY_CONTENT" + echo "${SSH_PRIVATE_KEY_CONTENT}" > /root/.ssh/id_rsa + fi + + chmod 600 /root/.ssh/id_rsa + + # Ensure the key file ends with a newline + sed -i -e '$a\' /root/.ssh/id_rsa + + echo "Checking SSH private key with ssh-keygen" + ssh-keygen -y -f /root/.ssh/id_rsa > /dev/null + + # Set ANSIBLE_PRIVATE_KEY_FILE environment variable + export ANSIBLE_PRIVATE_KEY_FILE=/root/.ssh/id_rsa +fi + +# Execute the passed command +exec "$@" diff --git a/etcd_cluster.yml b/automation/etcd_cluster.yml similarity index 56% rename from etcd_cluster.yml rename to automation/etcd_cluster.yml index 706614956..da97776d5 100644 --- a/etcd_cluster.yml +++ b/automation/etcd_cluster.yml @@ -1,64 +1,75 @@ --- -# yamllint disable rule:line-length - -- hosts: etcd_cluster +- name: etcd_cluster.yml | Deploy etcd Cluster + hosts: etcd_cluster become: true become_method: sudo any_errors_fatal: true gather_facts: true - vars_files: - - vars/main.yml - - vars/system.yml pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + + - name: Include system variables + ansible.builtin.include_vars: "roles/common/defaults/system.yml" + tags: always + - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tags: always - name: Update apt cache - apt: + ansible.builtin.apt: update_cache: true cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" when: ansible_os_family == "Debian" and installation_method == "repo" - name: Make sure the gnupg and apt-transport-https packages are present - apt: + ansible.builtin.apt: pkg: - gnupg - apt-transport-https state: present + register: apt_status + until: 
apt_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" when: ansible_os_family == "Debian" and installation_method == "repo" - name: Build a firewall_ports_dynamic_var - set_fact: - firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var |default([]) }} + {{ firewall_allowed_tcp_ports_for[item] }}" + ansible.builtin.set_fact: + firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var | default([]) + (firewall_allowed_tcp_ports_for[item] | default([])) }}" loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool + when: firewall_enabled_at_boot | bool tags: firewall - name: Build a firewall_rules_dynamic_var - set_fact: - firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var |default([]) }} + {{ firewall_additional_rules_for[item] }}" + ansible.builtin.set_fact: + firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var | default([]) + (firewall_additional_rules_for[item] | default([])) }}" loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool + when: firewall_enabled_at_boot | bool tags: firewall roles: - - role: ansible-role-firewall + - role: firewall environment: "{{ proxy_env | default({}) }}" vars: - firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var|list | unique }}" - firewall_additional_rules: "{{ firewall_rules_dynamic_var|list | unique }}" + firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var | default([]) | unique }}" + firewall_additional_rules: "{{ firewall_rules_dynamic_var | default([]) | unique }}" when: firewall_enabled_at_boot|bool tags: firewall - role: hostname - role: resolv_conf - - role: sysctl + - role: etc_hosts - role: timezone - role: ntp - role: etcd - -... diff --git a/files/requirements.txt b/automation/files/requirements.txt similarity index 71% rename from files/requirements.txt rename to automation/files/requirements.txt index 4776a3ad0..a67121a52 100644 --- a/files/requirements.txt +++ b/automation/files/requirements.txt @@ -1,5 +1,5 @@ urllib3>=1.25.9 -boto +boto3 PyYAML requests>=2.25.1 six>=1.7 @@ -10,5 +10,6 @@ prettytable>=0.7 python-dateutil psutil>=2.0.0 ydiff>=1.2.0 -pexpect>=4.8.0 -ruamel.yaml>=0.16.10 +pexpect==4.9.0 +ruamel.yaml==0.17.40 +ruamel.yaml.clib==0.2.8 diff --git a/automation/galaxy.yml b/automation/galaxy.yml new file mode 100644 index 000000000..4354cbc23 --- /dev/null +++ b/automation/galaxy.yml @@ -0,0 +1,53 @@ +--- +namespace: vitabaks +name: autobase +version: 2.2.0 + +readme: README.md + +authors: + - Vitaliy Kukharik + +description: > + Autobase for PostgreSQL® is an open-source alternative to cloud-managed databases (DBaaS) such as Amazon RDS, + Google Cloud SQL, Azure Database, and more. This automated database platform enables you to create and manage + production-ready, highly available PostgreSQL clusters. It simplifies the deployment process, reduces operational + costs, and makes database management accessible—even for teams without specialized expertise. + + Automate deployment, failover, backups, restore, upgrades, scaling, and more with ease. 
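Given this metadata, one plausible way to consume the collection is a standard requirements file (a sketch; whether this exact version is published to Ansible Galaxy is an assumption):

```yaml
# requirements.yml (illustrative); install with:
#   ansible-galaxy collection install -r requirements.yml
collections:
  - name: vitabaks.autobase
    version: 2.2.0 # matches the version declared in galaxy.yml above
```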
+ +license: + - MIT + +tags: + - high_availability + - cluster + - postgresql + - postgres + - database + - automation + - dbaas + - autobase + +dependencies: + amazon.aws: "==9.3.0" + google.cloud: "==1.5.1" + azure.azcollection: "==3.3.1" + community.digitalocean: "==1.27.0" + hetzner.hcloud: "==4.3.0" + community.postgresql: "==3.12.0" + community.docker: "==4.4.0" + community.general: "==10.4.0" + ansible.posix: "==1.6.2" + ansible.utils: "==5.1.2" + +repository: https://github.com/vitabaks/autobase +documentation: https://autobase.tech/docs +homepage: https://autobase.tech +issues: https://github.com/vitabaks/autobase/issues + +build_ignore: + - molecule + - .dockerignore + - Dockerfile + - entrypoint.sh diff --git a/automation/inventory.example b/automation/inventory.example new file mode 100644 index 000000000..fe22c4d3f --- /dev/null +++ b/automation/inventory.example @@ -0,0 +1,64 @@ +# Please specify the IP addresses and connection settings for your environment. +# The cluster components will listen on the specified IP addresses. +# Attention! Specify private IP addresses so that the cluster does not listen on public IP addresses. +# For deploying via public IPs, add 'ansible_host=public_ip_address' variable for each node. +# +# "postgresql_exists=true" if PostgreSQL already exists and is running +# "hostname=" variable is optional (used to change the server name) +# "new_node=true" to add a new server to an existing cluster using the add_pgnode.yml playbook +# balancer_tags="key=value" the Balancer tags for the /replica, /sync, /async endpoints. Must match 'patroni_tags'. +# patroni_tags="key=value" the Patroni tags in "key=value" format separated by commas. +# patroni_replicatefrom="" the Patroni node to replicate from (cascading replication).
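Ansible also accepts inventories in YAML form; purely as an illustration of the host variables described above (the address and hostnames are placeholders mirroring the commented INI entries below), a cascading replica could equivalently be declared as:

```yaml
# Hypothetical YAML-inventory rendering of one commented example below
replica:
  hosts:
    10.128.64.144:
      hostname: pgnode04
      postgresql_exists: false
      patroni_tags: "datacenter=dc2"
      patroni_replicatefrom: "pgnode03"
```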
+ +# if dcs_exists: false and dcs_type: "etcd" +[etcd_cluster] # recommendation: 3, or 5-7 nodes +#10.128.64.140 +#10.128.64.142 +#10.128.64.143 + +# if dcs_exists: false and dcs_type: "consul" +[consul_instances] # recommendation: 3 or 5-7 nodes +#10.128.64.140 consul_node_role=server consul_bootstrap_expect=true consul_datacenter=dc1 +#10.128.64.142 consul_node_role=server consul_bootstrap_expect=true consul_datacenter=dc1 +#10.128.64.143 consul_node_role=server consul_bootstrap_expect=true consul_datacenter=dc1 +#10.128.64.144 consul_node_role=client consul_datacenter=dc2 +#10.128.64.145 consul_node_role=client consul_datacenter=dc2 + +# if with_haproxy_load_balancing: true +[balancers] +#10.128.64.140 # balancer_tags="datacenter=dc1" +#10.128.64.142 # balancer_tags="datacenter=dc1" +#10.128.64.143 # balancer_tags="datacenter=dc1" +#10.128.64.144 # balancer_tags="datacenter=dc2" +#10.128.64.145 # balancer_tags="datacenter=dc2" new_node=true + +# PostgreSQL nodes +[master] +#10.128.64.140 hostname=pgnode01 postgresql_exists=false # patroni_tags="datacenter=dc1" + +[replica] +#10.128.64.142 hostname=pgnode02 postgresql_exists=false # patroni_tags="datacenter=dc1" +#10.128.64.143 hostname=pgnode03 postgresql_exists=false # patroni_tags="datacenter=dc1" +#10.128.64.144 hostname=pgnode04 postgresql_exists=false # patroni_tags="datacenter=dc2" patroni_replicatefrom="pgnode03" +#10.128.64.145 hostname=pgnode05 postgresql_exists=false # patroni_tags="datacenter=dc2" new_node=true + +[postgres_cluster:children] +master +replica + +# if pgbackrest_install: true and "repo_host" is set +[pgbackrest] # optional (Dedicated Repository Host) +#10.128.64.110 + +[pgbackrest:vars] +#ansible_user='postgres' +#ansible_ssh_pass='secretpassword' + +# Connection settings +[all:vars] +ansible_connection='ssh' +ansible_ssh_port='22' +#ansible_user='root' +#ansible_ssh_pass='secretpassword' # the "sshpass" package is required to use "ansible_ssh_pass" +#ansible_ssh_private_key_file= +#ansible_python_interpreter='/usr/bin/python3' diff --git a/automation/meta/runtime.yml b/automation/meta/runtime.yml new file mode 100644 index 000000000..109f032ed --- /dev/null +++ b/automation/meta/runtime.yml @@ -0,0 +1,51 @@ +--- +# Collections must specify a minimum required ansible version to upload +# to galaxy +requires_ansible: ">=2.16.0" +# Content that Ansible needs to load from another location or that has +# been deprecated/removed +# plugin_routing: +# action: +# redirected_plugin_name: +# redirect: ns.col.new_location +# deprecated_plugin_name: +# deprecation: +# removal_version: "4.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead. +# removed_plugin_name: +# tombstone: +# removal_version: "2.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead.
+# become: +# cache: +# callback: +# cliconf: +# connection: +# doc_fragments: +# filter: +# httpapi: +# inventory: +# lookup: +# module_utils: +# modules: +# netconf: +# shell: +# strategy: +# terminal: +# test: +# vars: + +# Python import statements that Ansible needs to load from another location +# import_redirection: +# ansible_collections.ns.col.plugins.module_utils.old_location: +# redirect: ansible_collections.ns.col.plugins.module_utils.new_location + +# Groups of actions/modules that take a common set of options +# action_groups: +# group_name: +# - module1 +# - module2 diff --git a/automation/molecule/default/cleanup.yml b/automation/molecule/default/cleanup.yml new file mode 100644 index 000000000..8dd64f684 --- /dev/null +++ b/automation/molecule/default/cleanup.yml @@ -0,0 +1,10 @@ +--- +- name: Molecule.default.cleanup + hosts: localhost + gather_facts: false + + tasks: + - name: Delete dcs_type.yml file + ansible.builtin.file: + path: "../../dcs_type.yml" + state: absent diff --git a/automation/molecule/default/converge.yml b/automation/molecule/default/converge.yml new file mode 100644 index 000000000..85f97c893 --- /dev/null +++ b/automation/molecule/default/converge.yml @@ -0,0 +1,125 @@ +--- +- name: Converge + hosts: all + gather_facts: true + + tasks: + - name: Set variables for PostgreSQL Cluster deployment test + ansible.builtin.set_fact: + ansible_become_method: su # Override become_method + firewall_enabled_at_boot: false + firewall_enable_ipv6: false # Added to prevent test failures in CI. + swap_file_create: false # Added to prevent test failures in CI. + sysctl_set: false # Added to prevent test failures in CI. + nameservers: ["8.8.8.8", "9.9.9.9"] + timezone: "Etc/UTC" + with_haproxy_load_balancing: "{{ [true, false] | random }}" + dcs_type: "{{ ['etcd', 'consul'] | random }}" # Set 'dcs_type' to either 'etcd' or 'consul' randomly + consul_node_role: server # if dcs_type: "consul" + consul_bootstrap_expect: true # if dcs_type: "consul" + postgresql_version: 17 # to test custom WAL dir + pgbouncer_processes: 2 # Test multiple pgbouncer processes (so_reuseport) + patroni_tags: "datacenter=dc1,key1=value1" + balancer_tags: "datacenter=dc1" + cacheable: true + delegate_to: localhost + run_once: true # noqa run-once + + - name: Write dcs_type.yml file (for Molecule verify) + ansible.builtin.copy: + content: | + --- + dcs_type: "{{ dcs_type }}" + dest: "../../dcs_type.yml" + delegate_to: localhost + run_once: true # noqa run-once + + - name: Set variables for vip-manager test + ansible.builtin.set_fact: + cluster_vip: "10.172.0.200" + vip_manager_dcs_type: "{{ [dcs_type, 'patroni'] | random }}" # Randomly choose between dcs_type value or 'patroni' + delegate_to: localhost + run_once: true # noqa run-once + when: not with_haproxy_load_balancing | bool + + # Consul package for OracleLinux missing in HashiCorp repository + # Only the installation of a binary file is supported + - name: "Set variables: 'consul_install_from_repo: false' and 'patroni_installation_method: pip'" + ansible.builtin.set_fact: + consul_install_from_repo: false # whether to install consul from repository as opposed to installing the binary directly + patroni_installation_method: "pip" # the "rpm" method is supported only for consul installed from the repository + when: + - dcs_type == "consul" + - ansible_distribution == "OracleLinux" + + - name: Set variables for custom PostgreSQL data and WAL directory test + ansible.builtin.set_fact: + postgresql_data_dir: "/pgdata/{{ postgresql_version 
}}/main" + postgresql_wal_dir: "/pgwal/{{ postgresql_version }}/pg_wal" + + - name: Set variables for Extensions test + ansible.builtin.set_fact: + enable_timescale: true + enable_pg_repack: true + enable_pg_cron: true + enable_pgaudit: true + enable_pgvector: true + enable_postgis: true + enable_pgrouting: true + enable_pg_stat_kcache: true + enable_pg_wait_sampling: true + enable_pg_partman: true + enable_citus: "{{ 'false' if ansible_distribution_release == 'noble' else 'true' }}" # no packages for Ubuntu 24.04 (TODO) + enable_paradedb: "{{ 'false' if ansible_distribution_release == 'bullseye' else 'true' }}" # pg_search and pg_analytics (no packages for debian 11) + enable_pgvectorscale: "{{ 'true' if ansible_distribution_release in ['bookworm', 'jammy', 'noble'] else 'false' }}" # only deb packages are available + pgvectorscale_version: "0.6.0" # New versions’ packages aren’t available immediately. + # create extension + postgresql_schemas: + - { schema: "paradedb", db: "postgres", owner: "postgres" } # pg_search must be installed in the paradedb schema. + postgresql_extensions: + - { ext: "vector", db: "postgres" } + - { ext: "vectorscale", db: "postgres" } + - { ext: "pg_search", db: "postgres", schema: "paradedb" } + - { ext: "pg_analytics", db: "postgres" } + # - { ext: "", db: "" } + + - name: Set variables for PostgreSQL Cluster update test + ansible.builtin.set_fact: + target: system # includes updates to Postgres, Patroni, and all system packages + + - name: Add repository GPG key + ansible.builtin.command: "rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{{ ansible_distribution_major_version }}" + when: ansible_distribution == "AlmaLinux" + + - name: Update all system packages + ansible.builtin.include_role: + name: ../../roles/update + tasks_from: system + + - name: Install openssh-server package + become: true + ansible.builtin.package: + name: openssh-server + state: present + + - name: Start ssh service + become: true + ansible.builtin.systemd: + name: "{{ 'ssh' if ansible_os_family == 'Debian' else 'sshd' }}" + state: started + enabled: true + + - name: Delete "/run/nologin" file (if exists) + become: true + ansible.builtin.file: + path: /run/nologin + state: absent + +- name: Deploy PostgreSQL Cluster test + ansible.builtin.import_playbook: ../../deploy_pgcluster.yml + +- name: Config PostgreSQL Cluster test + ansible.builtin.import_playbook: ../../config_pgcluster.yml + +- name: Update PostgreSQL Cluster test + ansible.builtin.import_playbook: ../../update_pgcluster.yml diff --git a/automation/molecule/default/molecule.yml b/automation/molecule/default/molecule.yml new file mode 100644 index 000000000..dcdca9ce9 --- /dev/null +++ b/automation/molecule/default/molecule.yml @@ -0,0 +1,99 @@ +--- +platforms: + - name: 10.172.0.20 + hostname: pgnode01 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + # docker_networks: # TODO github.com/ansible-community/molecule/pull/2696 + # - name: test_docker_network + # ipam_config: + # - subnet: 10.172.0.0/24 + # gateway: 10.172.0.1 + networks: + - name: test_docker_network + ipv4_address: 10.172.0.20 + exposed_ports: + - 22/tcp + - 2379/tcp # if dcs_type: "etcd" + - 2380/tcp # if dcs_type: "etcd" + - 8300/tcp # if dcs_type: "consul" + - 8301/tcp # if dcs_type: "consul" + - 8302/tcp # if dcs_type: "consul" + - 8500/tcp # if dcs_type: "consul" + - 8600/tcp # if dcs_type: "consul" + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - 
/sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster # if dcs_type: "etcd" + - consul_instances # if dcs_type: "consul" + - master + - postgres_cluster + - balancers + + - name: 10.172.0.21 + hostname: pgnode02 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: test_docker_network + ipv4_address: 10.172.0.21 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers + + - name: 10.172.0.22 + hostname: pgnode03 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: test_docker_network + ipv4_address: 10.172.0.22 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers diff --git a/molecule/default/prepare.yml b/automation/molecule/default/prepare.yml similarity index 61% rename from molecule/default/prepare.yml rename to automation/molecule/default/prepare.yml index b7ab340d6..0c9716d3d 100644 --- a/molecule/default/prepare.yml +++ b/automation/molecule/default/prepare.yml @@ -5,9 +5,11 @@ become: false tasks: - name: "Create docker network: test_docker_network" - docker_network: + community.docker.docker_network: name: test_docker_network driver: bridge + driver_options: + com.docker.network.driver.mtu: 1440 enable_ipv6: false internal: false ipam_config: @@ -18,4 +20,8 @@ labels: owner: molecule -... 
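+    # 💡 The MTU of 1440 on test_docker_network keeps packets small enough for
+    # CI runners whose own network adds encapsulation overhead (an assumption,
+    # not something this diff states). A quick way to check that the option
+    # took effect, sketched here as a commented-out task:
+    # - name: "Verify MTU option on test_docker_network"
+    #   ansible.builtin.command: docker network inspect test_docker_network
+    #   register: network_inspect
+    #   changed_when: false
+    #   failed_when: "'1440' not in network_inspect.stdout"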
+ - name: "Install netaddr dependency on controlling host" + ansible.builtin.pip: + name: netaddr + environment: + PIP_BREAK_SYSTEM_PACKAGES: "1" diff --git a/automation/molecule/default/verify.yml b/automation/molecule/default/verify.yml new file mode 100644 index 000000000..ee2f7125f --- /dev/null +++ b/automation/molecule/default/verify.yml @@ -0,0 +1,41 @@ +--- +# 🚀 This playbook is designed to verify the default Molecule configuration +# 🎯 The objective is to ensure that all tasks and roles are correctly set up and functioning as expected + +- name: Molecule.default.verify + hosts: all + gather_facts: true + + tasks: + # 📝 Including the main variables file for the Molecule default configuration + # This file contains all the necessary variables for the playbook + - name: Molecule.default.verify | Include Main Variables + ansible.builtin.include_vars: + file: ../../roles/common/defaults/main.yml + + # Including the dcs_type variable redefined in converge.yml + - name: Molecule.default.verify | Include dcs_type Variable + ansible.builtin.include_vars: + file: ../../dcs_type.yml + + # 🔄 Including OS-specific variables + # These variables are specific to the operating system on which the playbook is running + - name: Molecule.default.verify | Include OS-Specific Variables + ansible.builtin.include_vars: + file: "../../roles/common/defaults/{{ ansible_os_family }}.yml" + + # 🔄 Including all tests for the Molecule default configuration + # These tests ensure that all components of the configuration are functioning correctly + - name: Molecule.default.verify | Include All Tests + ansible.builtin.include_tasks: "{{ item }}" + with_fileglob: + - ../tests/variables/main.yml + - ../tests/etcd/*.yml + - ../tests/patroni/*.yml + - ../tests/postgres/*.yml + - ../tests/roles/confd/main.yml + - ../tests/roles/deploy_finish/main.yml + - ../tests/roles/haproxy/main.yml + - ../tests/roles/patroni/main.yml + - ../tests/roles/pre_checks/main.yml + - ../tests/roles/swap/main.yml diff --git a/automation/molecule/pg_upgrade/converge.yml b/automation/molecule/pg_upgrade/converge.yml new file mode 100644 index 000000000..55f2949dd --- /dev/null +++ b/automation/molecule/pg_upgrade/converge.yml @@ -0,0 +1,82 @@ +--- +- name: Converge + hosts: all + gather_facts: true + + tasks: + - name: Set variables for Patroni cluster deployment test + ansible.builtin.set_fact: + ansible_become_method: su # Override become_method + firewall_enabled_at_boot: false + firewall_enable_ipv6: false # Added to prevent test failures in CI. + swap_file_create: false # Added to prevent test failures in CI. + sysctl_set: false # Added to prevent test failures in CI. 
+ nameservers: ["8.8.8.8", "9.9.9.9"] + timezone: "Etc/UTC" + with_haproxy_load_balancing: true + dcs_type: "{{ ['etcd', 'consul'] | random }}" # Set 'dcs_type' to either 'etcd' or 'consul' randomly + consul_node_role: server # if dcs_type: "consul" + consul_bootstrap_expect: true # if dcs_type: "consul" + postgresql_version: 16 # redefine the version to install for the upgrade test + pgbouncer_processes: 4 # Test multiple pgbouncer processes (so_reuseport) + cacheable: true + delegate_to: localhost + run_once: true # noqa run-once + + # Consul package for OracleLinux missing in HashiCorp repository + # Only the installation of a binary file is supported + - name: "Set variables: 'consul_install_from_repo: false' and 'patroni_installation_method: pip'" + ansible.builtin.set_fact: + consul_install_from_repo: false # whether to install consul from repository as opposed to installing the binary directly + patroni_installation_method: "pip" # the "rpm" method is supported only for consul installed from the repository + when: + - dcs_type == "consul" + - ansible_distribution == "OracleLinux" + + - name: Set variables for custom PostgreSQL data and WAL directory test + ansible.builtin.set_fact: + postgresql_data_dir: "/pgdata/{{ postgresql_version }}/main" + postgresql_wal_dir: "/pgwal/{{ postgresql_version }}/pg_wal" + + - name: Set variables for TimescaleDB cluster deployment test + ansible.builtin.set_fact: + enable_timescale: true + + - name: Set variables for PostgreSQL upgrade test + ansible.builtin.set_fact: + pg_old_version: 16 + pg_new_version: 17 + + - name: Add repository GPG key + ansible.builtin.command: "rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{{ ansible_distribution_major_version }}" + when: ansible_distribution == "AlmaLinux" + + - name: Update all system packages + ansible.builtin.include_role: + name: ../../roles/update + tasks_from: system + + - name: Install openssh-server package + become: true + ansible.builtin.package: + name: openssh-server + state: present + + - name: Start ssh service + become: true + ansible.builtin.systemd: + name: "{{ 'ssh' if ansible_os_family == 'Debian' else 'sshd' }}" + state: started + enabled: true + + - name: Delete "/run/nologin" file (if exists) + become: true + ansible.builtin.file: + path: /run/nologin + state: absent + +- name: Deploy PostgreSQL Cluster + ansible.builtin.import_playbook: ../../deploy_pgcluster.yml + +- name: PostgreSQL upgrade test + ansible.builtin.import_playbook: ../../pg_upgrade.yml diff --git a/automation/molecule/pg_upgrade/molecule.yml b/automation/molecule/pg_upgrade/molecule.yml new file mode 100644 index 000000000..22b008efa --- /dev/null +++ b/automation/molecule/pg_upgrade/molecule.yml @@ -0,0 +1,94 @@ +--- +platforms: + - name: 10.172.2.20 + hostname: pgnode01 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: upgrade_test_docker_network + ipv4_address: 10.172.2.20 + exposed_ports: + - 22/tcp + - 2379/tcp # if dcs_type: "etcd" + - 2380/tcp # if dcs_type: "etcd" + - 8300/tcp # if dcs_type: "consul" + - 8301/tcp # if dcs_type: "consul" + - 8302/tcp # if dcs_type: "consul" + - 8500/tcp # if dcs_type: "consul" + - 8600/tcp # if dcs_type: "consul" + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster # if dcs_type: "etcd" + - consul_instances # if dcs_type: "consul" + - 
master + - postgres_cluster + - balancers + + - name: 10.172.2.21 + hostname: pgnode02 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: upgrade_test_docker_network + ipv4_address: 10.172.2.21 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers + + - name: 10.172.2.22 + hostname: pgnode03 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: upgrade_test_docker_network + ipv4_address: 10.172.2.22 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers diff --git a/automation/molecule/pg_upgrade/prepare.yml b/automation/molecule/pg_upgrade/prepare.yml new file mode 100644 index 000000000..0c7a02416 --- /dev/null +++ b/automation/molecule/pg_upgrade/prepare.yml @@ -0,0 +1,28 @@ +--- +- name: "Update docker network(s)" + hosts: localhost + gather_facts: false + become: false + tasks: + - name: "Create docker network: upgrade_test_docker_network" + community.docker.docker_network: + name: upgrade_test_docker_network + driver: bridge + driver_options: + com.docker.network.driver.mtu: 1440 + enable_ipv6: false + internal: false + ipam_config: + - subnet: 10.172.2.0/24 + gateway: 10.172.2.1 + force: true + state: present + labels: + owner: molecule + + - name: "Install netaddr dependency on controlling host" + ansible.builtin.pip: + name: netaddr + become: false + environment: + PIP_BREAK_SYSTEM_PACKAGES: "1" diff --git a/automation/molecule/postgrespro/converge.yml b/automation/molecule/postgrespro/converge.yml new file mode 100644 index 000000000..747618fb6 --- /dev/null +++ b/automation/molecule/postgrespro/converge.yml @@ -0,0 +1,26 @@ +--- +- name: Converge + hosts: all + gather_facts: true + + tasks: + - name: Set variables for molecule + ansible.builtin.set_fact: + ansible_become_method: su # Override become_method + firewall_enable_ipv6: false # Added to prevent test failures in CI. + swap_file_create: false # Added to prevent test failures in CI. + sysctl_set: false # Added to prevent test failures in CI. 
+ nameservers: ["8.8.8.8", "9.9.9.9"] + with_haproxy_load_balancing: true + cacheable: true + + - name: Update all system packages + ansible.builtin.include_role: + name: ../../roles/update + tasks_from: system + + - name: Include postgrespro vars + ansible.builtin.include_vars: vars/postgrespro_vars.yml + +- name: Deploy PostgreSQL Cluster + ansible.builtin.import_playbook: ../../deploy_pgcluster.yml diff --git a/automation/molecule/postgrespro/molecule.yml b/automation/molecule/postgrespro/molecule.yml new file mode 100644 index 000000000..fe41b8dfc --- /dev/null +++ b/automation/molecule/postgrespro/molecule.yml @@ -0,0 +1,99 @@ +--- +platforms: + - name: 10.172.1.20 + hostname: pgnode01 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + # docker_networks: # TODO github.com/ansible-community/molecule/pull/2696 + # - name: test_docker_network + # ipam_config: + # - subnet: 10.172.0.0/24 + # gateway: 10.172.0.1 + networks: + - name: test_docker_network + ipv4_address: 10.172.1.20 + exposed_ports: + - 22/tcp + - 2379/tcp # if dcs_type: "etcd" + - 2380/tcp # if dcs_type: "etcd" + - 8300/tcp # if dcs_type: "consul" + - 8301/tcp # if dcs_type: "consul" + - 8302/tcp # if dcs_type: "consul" + - 8500/tcp # if dcs_type: "consul" + - 8600/tcp # if dcs_type: "consul" + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster # if dcs_type: "etcd" + - consul_instances # if dcs_type: "consul" + - master + - postgres_cluster + - balancers + + - name: 10.172.1.21 + hostname: pgnode02 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: test_docker_network + ipv4_address: 10.172.1.21 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers + + - name: 10.172.1.22 + hostname: pgnode03 + image: "${IMAGE_NAMESPACE:-geerlingguy}/docker-${IMAGE_DISTRO:-ubuntu2204}-ansible:${IMAGE_TAG:-latest}" + networks: + - name: test_docker_network + ipv4_address: 10.172.1.22 + exposed_ports: + - 22/tcp + - 2379/tcp + - 2380/tcp + - 8300/tcp + - 8301/tcp + - 8302/tcp + - 8500/tcp + - 8600/tcp + - 8008/tcp + - 5432/tcp + - 6432/tcp + command: "" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true + pre_build_image: true + groups: + - etcd_cluster + - consul_instances + - replica + - postgres_cluster + - balancers diff --git a/molecule/postgrespro/prepare.yml b/automation/molecule/postgrespro/prepare.yml similarity index 80% rename from molecule/postgrespro/prepare.yml rename to automation/molecule/postgrespro/prepare.yml index 32f70f378..ff475bd2d 100644 --- a/molecule/postgrespro/prepare.yml +++ b/automation/molecule/postgrespro/prepare.yml @@ -5,9 +5,11 @@ become: false tasks: - name: "Create docker network: test_docker_network" - docker_network: + community.docker.docker_network: name: test_docker_network driver: bridge + driver_options: + com.docker.network.driver.mtu: 1440 enable_ipv6: false internal: false ipam_config: @@ -17,5 +19,3 @@ state: present labels: owner: molecule - -... 
diff --git a/molecule/postgrespro/postgrespro_vars.yml b/automation/molecule/postgrespro/vars/postgrespro_vars.yml similarity index 84% rename from molecule/postgrespro/postgrespro_vars.yml rename to automation/molecule/postgrespro/vars/postgrespro_vars.yml index c47618c8e..f8e5b7a90 100644 --- a/molecule/postgrespro/postgrespro_vars.yml +++ b/automation/molecule/postgrespro/vars/postgrespro_vars.yml @@ -1,8 +1,6 @@ --- -# yamllint disable rule:line-length - # PostgreSQL variables -postgresql_version: "12" # major version +postgresql_version: "12" # major version postgrespro_minor_version: "{{ postgresql_version }}.4.1" postgresql_data_dir: "/var/lib/pgpro/std-{{ postgresql_version }}/data" @@ -11,7 +9,7 @@ postgresql_bin_dir: "/opt/pgpro/std-{{ postgresql_version }}/bin" postgresql_unix_socket_dir: "/tmp" # YUM Repository -_baseurl: +_baseurl: # yamllint disable rule:line-length CentOS: "https://repo.postgrespro.ru//pgpro-archive/pgpro-%7B%7B%20postgrespro_minor_version%20%7D%7D/centos/%7B%7B%20ansible_distribution_major_version%20%7D%7D/os/x86_64/rpms/" RedHat: "https://repo.postgrespro.ru//pgpro-archive/pgpro-%7B%7B%20postgrespro_minor_version%20%7D%7D/rhel/%7B%7B%20ansible_distribution_major_version%20%7D%7DServer/os/x86_64/rpms/" OracleLinux: "https://repo.postgrespro.ru//pgpro-archive/pgpro-%7B%7B%20postgrespro_minor_version%20%7D%7D/oraclelinux/%7B%7B%20ansible_distribution_major_version%20%7D%7DServer/os/x86_64/rpms/" @@ -26,7 +24,10 @@ yum_repository: apt_repository_keys: - key: "https://repo.postgrespro.ru/keys/GPG-KEY-POSTGRESPRO" apt_repository: - - repo: "deb https://repo.postgrespro.ru//pgpro-archive/pgpro-{{ postgrespro_minor_version }}/{{ ansible_distribution |lower }}/ {{ ansible_distribution_release }} main" + - repo: >- + {{ 'deb https://repo.postgrespro.ru//pgpro-archive/pgpro-' ~ + postgrespro_minor_version ~ '/' ~ (ansible_distribution | lower) ~ '/' ~ + ' ' ~ ansible_distribution_release ~ ' main' }} install_postgresql_repo: false @@ -41,5 +42,3 @@ postgresql_packages: - postgrespro-std-{{ postgresql_version }}-client - postgrespro-std-{{ postgresql_version }}-contrib - "{{ os_specific_pgpro_packages[ansible_os_family] }}" - -... 
diff --git a/automation/molecule/tests/etcd/etcd.yml b/automation/molecule/tests/etcd/etcd.yml
new file mode 100644
index 000000000..356243dc8
--- /dev/null
+++ b/automation/molecule/tests/etcd/etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Check etcd health
+  ansible.builtin.uri:
+    url: "{{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2379/health"
+    method: GET
+    return_content: true
+    validate_certs: "{{ tls_cert_generate | default(false) | bool }}"
+    ca_path: "{{ ca_cert_path if tls_cert_generate | default(false) | bool else omit }}"
+    client_cert: "{{ cert_path if tls_cert_generate | default(false) | bool else omit }}"
+    client_key: "{{ key_path if tls_cert_generate | default(false) | bool else omit }}"
+  register: etcd_health_status
+  failed_when: "(etcd_health_status.content | from_json).health != 'true'"
+  vars:
+    ca_cert_path: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_ca_crt | default('ca.crt') }}"
+    cert_path: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_crt | default('server.crt') }}"
+    key_path: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_key | default('server.key') }}"
+  when: dcs_type == "etcd"
diff --git a/automation/molecule/tests/patroni/patroni.yml b/automation/molecule/tests/patroni/patroni.yml
new file mode 100644
index 000000000..0af690219
--- /dev/null
+++ b/automation/molecule/tests/patroni/patroni.yml
@@ -0,0 +1,7 @@
+---
+- name: Check Patroni status
+  ansible.builtin.uri:
+    url: "http://{{ inventory_hostname }}:8008/patroni"
+    return_content: true
+  register: patroni_status
+  failed_when: "'running' not in patroni_status.content"
diff --git a/automation/molecule/tests/postgres/postgres.yml b/automation/molecule/tests/postgres/postgres.yml
new file mode 100644
index 000000000..d728d9e85
--- /dev/null
+++ b/automation/molecule/tests/postgres/postgres.yml
@@ -0,0 +1,19 @@
+---
+- name: Check if PostgreSQL process is running
+  ansible.builtin.command: pgrep -u postgres
+  register: result
+  failed_when: result.rc != 0
+
+- name: Check if PostgreSQL is listening on the default port
+  ansible.builtin.wait_for:
+    port: 5432
+    timeout: 5
+  register: is_listening
+  failed_when: is_listening is failed
+
+- name: Try to connect to PostgreSQL
+  community.postgresql.postgresql_ping:
+    login_unix_socket: "{{ postgresql_unix_socket_dir }}"
+    login_port: "{{ postgresql_port }}"
+    login_user: "{{ patroni_superuser_username }}"
+    login_db: template1
diff --git a/automation/molecule/tests/postgres/replication.yml b/automation/molecule/tests/postgres/replication.yml
new file mode 100644
index 000000000..2e3bb09f9
--- /dev/null
+++ b/automation/molecule/tests/postgres/replication.yml
@@ -0,0 +1,10 @@
+---
+- name: Check PostgreSQL replication status
+  community.postgresql.postgresql_query:
+    query: "SELECT pg_is_in_recovery(), count(*) FROM pg_stat_wal_receiver;"
+    login_unix_socket: "{{ postgresql_unix_socket_dir }}"
+    login_port: "{{ postgresql_port }}"
+    login_user: "{{ patroni_superuser_username }}"
+    login_db: template1
+  register: pg_replication_status
+  failed_when: "pg_replication_status.query_result[0].pg_is_in_recovery and pg_replication_status.query_result[0].count == 0"
diff --git a/automation/molecule/tests/roles/confd/main.yml b/automation/molecule/tests/roles/confd/main.yml
new file mode 100644
index 000000000..5a3b3b6ed
--- /dev/null
+++ b/automation/molecule/tests/roles/confd/main.yml
@@ -0,0 +1,13 @@
+---
+# 🚀 This task is designed to include variable tests for the main role in the Confd molecule tests
+# 🎯 The goal is to ensure that all variable
tests are executed in a systematic and organised manner + +# 🔄 Including variable tests for the main role in the Confd molecule tests +# We use a loop to include all YAML files in the 'variables' directory +# Each file is included as a task, ensuring that all variable tests are executed +- name: Molecule.tests.roles.confd.main | Include Variable Tests + run_once: true + ansible.builtin.include_tasks: "{{ molecule_tests_roles_confd_main_file }}" + loop: "{{ lookup('fileglob', 'variables/*.yml', wantlist=True) }}" + loop_control: + loop_var: molecule_tests_roles_confd_main_file diff --git a/automation/molecule/tests/roles/confd/variables/haproxy.tmpl.yml b/automation/molecule/tests/roles/confd/variables/haproxy.tmpl.yml new file mode 100644 index 000000000..d2731670e --- /dev/null +++ b/automation/molecule/tests/roles/confd/variables/haproxy.tmpl.yml @@ -0,0 +1,162 @@ +--- +# 🚀 These tasks aim to test the "ansible.builtin.lineinfile" task +# 🎯 The objective is to ensure that the lines are correctly replaced + +# 📂 Ensure tmp directory exists +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Ensure tmp directory exists + run_once: true + delegate_to: localhost + ansible.builtin.file: + path: "./tmp" + state: directory + +# 🔄 Define a dummy template file +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Set template file test data + ansible.builtin.set_fact: + haproxy_listen_port: + master: 5000 + replicas: 5001 + replicas_sync: 5002 + replicas_async: 5003 + stats: 7000 + inventory_hostname: una.name + cluster_vip: fake.vip.url.com + +# =============================== +# 💻 Case cluster_vip is defined +# =============================== + +# 📝 Establishing test data for haproxy.cluster_vip.defined.tmpl +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Establish haproxy.tmpl Test Data + run_once: true + delegate_to: localhost + ansible.builtin.copy: + dest: "./tmp/haproxy.cluster_vip.defined.tmpl" + content: | + bind *:{{ haproxy_listen_port.stats }} + bind *:{{ haproxy_listen_port.master }} + bind *:{{ haproxy_listen_port.replicas }} + bind *:{{ haproxy_listen_port.replicas_sync }} + bind *:{{ haproxy_listen_port.replicas_async }} + +# 🚀 Execute the main task here +# This task updates the 'haproxy.tmpl' file +# replacing lines that start with 'bind' and include specific ports +# The new lines will either bind to the inventory_hostname or the cluster_vip, depending on the specific port. 
+- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Update haproxy.tmpl (replace "bind") + run_once: true + delegate_to: localhost + ansible.builtin.lineinfile: + path: ./tmp/haproxy.cluster_vip.defined.tmpl + regexp: "{{ bind_config_with_vip_item.regexp }}" + line: "{{ bind_config_with_vip_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: bind_config_with_vip_item + label: "{{ bind_config_with_vip_item.line }}" + +# 🖨️ Debugging the established haproxy.tmpl +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Debug haproxy.tmpl + run_once: true + delegate_to: localhost + ansible.builtin.command: + cmd: cat ./tmp/haproxy.cluster_vip.defined.tmpl + register: output +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Debug haproxy.tmpl content + run_once: true + ansible.builtin.debug: + var: output.stdout_lines + +# ✅ Verifying the correctness of the established haproxy.tmpl +# If the lines are not replaced correctly, the test fails and an error message is displayed +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Validate updated haproxy.tmpl + run_once: true + ansible.builtin.assert: + that: + - "output.stdout_lines[0] == ' bind una.name:7000'" + - "output.stdout_lines[1] == ' bind fake.vip.url.com:5000'" + - "output.stdout_lines[2] == ' bind fake.vip.url.com:5001'" + - "output.stdout_lines[3] == ' bind fake.vip.url.com:5002'" + - "output.stdout_lines[4] == ' bind fake.vip.url.com:5003'" + fail_msg: "Test failed: Lines are not replaced correctly in haproxy.tmpl." + success_msg: "Test passed: Lines are replaced correctly in haproxy.tmpl." + +# =================================== +# 💻 Case cluster_vip is not defined +# =================================== + +# 📝 Establishing test data for haproxy.cluster_vip.not.defined.tmpl +- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Establish haproxy.tmpl Test Data - 2nd round + run_once: true + delegate_to: localhost + ansible.builtin.copy: + dest: "./tmp/haproxy.cluster_vip.not.defined.tmpl" + content: | + bind *:{{ haproxy_listen_port.stats }} + bind *:{{ haproxy_listen_port.master }} + bind *:{{ haproxy_listen_port.replicas }} + bind *:{{ haproxy_listen_port.replicas_sync }} + bind *:{{ haproxy_listen_port.replicas_async }} + +# 🚀 Execute the new task here +# This task updates the 'haproxy.tmpl' file again, this time replacing lines that start with 'bind' and include specific ports +# The new lines will bind to the inventory_hostname. 
+- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Prepare haproxy.tmpl template file (replace "bind" - 2nd round)
+  run_once: true
+  delegate_to: localhost
+  ansible.builtin.lineinfile:
+    path: ./tmp/haproxy.cluster_vip.not.defined.tmpl
+    regexp: "{{ bind_config_without_vip_item.regexp }}"
+    line: "{{ bind_config_without_vip_item.line }}"
+    backrefs: true
+  loop:
+    - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$"
+      line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }}"
+    - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$"
+      line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.master }}"
+    - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$"
+      line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas }}"
+    - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$"
+      line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}"
+    - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$"
+      line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}"
+  loop_control:
+    loop_var: bind_config_without_vip_item
+    label: "{{ bind_config_without_vip_item.line }}"
+
+# 🖨️ Debugging the established haproxy.tmpl - 2nd round
+- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Debug haproxy.tmpl - 2nd round
+  run_once: true
+  delegate_to: localhost
+  ansible.builtin.command:
+    cmd: cat ./tmp/haproxy.cluster_vip.not.defined.tmpl
+  register: output_2
+- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Debug haproxy.tmpl content - 2nd round
+  run_once: true
+  ansible.builtin.debug:
+    var: output_2.stdout_lines
+
+# ✅ Verifying the correctness of the established haproxy.tmpl - 2nd round
+# If the lines are not replaced correctly, the test fails and an error message is displayed
+- name: Molecule.tests.roles.confd.variables.haproxy.tmpl | Verify haproxy.tmpl - 2nd round
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - "output_2.stdout_lines[0] == ' bind una.name:7000'"
+      - "output_2.stdout_lines[1] == ' bind una.name:5000'"
+      - "output_2.stdout_lines[2] == ' bind una.name:5001'"
+      - "output_2.stdout_lines[3] == ' bind una.name:5002'"
+      - "output_2.stdout_lines[4] == ' bind una.name:5003'"
+    fail_msg: "Test failed: Lines are not replaced correctly in haproxy.tmpl - 2nd round."
+    success_msg: "Test passed: Lines are replaced correctly in haproxy.tmpl - 2nd round."
diff --git a/automation/molecule/tests/roles/deploy_finish/main.yml b/automation/molecule/tests/roles/deploy_finish/main.yml
new file mode 100644
index 000000000..1ed8f3020
--- /dev/null
+++ b/automation/molecule/tests/roles/deploy_finish/main.yml
@@ -0,0 +1,13 @@
+---
+# 🚀 This task is designed to include and execute a series of variable tests for the 'deploy_finish' role in our Molecule test suite.
+# 🎯 The objective is to ensure that all variable tests are run in a systematic and efficient manner, thus ensuring the integrity of our deployment process.
+
+# 🔄 Including and executing variable tests for the 'deploy_finish' role
+# We use a loop to iterate over all the YAML files in the 'variables' directory, and for each file, we include its tasks in the current playbook.
+# This allows us to run a comprehensive set of variable tests in an automated and efficient manner.
+- name: Molecule.tests.roles.deploy_finish.main | Include and Execute Variable Tests
+  run_once: true
+  ansible.builtin.include_tasks: "{{ molecule_tests_roles_deploy_finish_main_file }}"
+  loop: "{{ lookup('fileglob', 'variables/*.yml', wantlist=True) }}"
+  loop_control:
+    loop_var: molecule_tests_roles_deploy_finish_main_file
diff --git a/automation/molecule/tests/roles/deploy_finish/variables/haproxy_nodes.yml b/automation/molecule/tests/roles/deploy_finish/variables/haproxy_nodes.yml
new file mode 100644
index 000000000..fff77e631
--- /dev/null
+++ b/automation/molecule/tests/roles/deploy_finish/variables/haproxy_nodes.yml
@@ -0,0 +1,71 @@
+---
+# 🚀 These tasks aim to test the haproxy_nodes variable
+# 🎯 The objective is to guarantee that the list of nodes is correctly set
+
+# ============================================
+# 💻 Start haproxy_nodes Operations and Tests
+# ============================================
+
+# 📝 Establishing test data for haproxy_nodes
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Establish haproxy_nodes Test Data
+  run_once: true
+  ansible.builtin.set_fact:
+    haproxy_nodes: >-
+      {{
+        groups['balancers']
+        | default([])
+        | map('extract', hostvars, 'inventory_hostname')
+        | join(',')
+      }}
+
+# 🖨️ Debugging the established haproxy_nodes
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Debug haproxy_nodes
+  run_once: true
+  ansible.builtin.debug:
+    var: haproxy_nodes
+
+# ✅ Verifying the correctness of the established haproxy_nodes
+# If the haproxy_nodes is not set, the test fails and an error message is displayed
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Verify haproxy_nodes
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - "haproxy_nodes is not none"
+      - "haproxy_nodes != 'N/A'"
+      - "haproxy_nodes == 'una.name,10.172.0.21,10.172.0.22'"
+    fail_msg: "Test failed: haproxy_nodes is not set correctly."
+    success_msg: "Test passed: haproxy_nodes is set correctly."
+
+# =====================================================
+# 💻 Start postgres_cluster_nodes Operations and Tests
+# =====================================================
+
+# 📝 Establishing test data for postgres_cluster_nodes
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Establish postgres_cluster_nodes Test Data
+  run_once: true
+  ansible.builtin.set_fact:
+    postgres_cluster_nodes: >-
+      {{
+        groups['postgres_cluster']
+        | default([])
+        | map('extract', hostvars, 'inventory_hostname')
+        | join(',')
+      }}
+
+# 🖨️ Debugging the established postgres_cluster_nodes
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Debug postgres_cluster_nodes
+  run_once: true
+  ansible.builtin.debug:
+    var: postgres_cluster_nodes
+
+# ✅ Verifying the correctness of the established postgres_cluster_nodes
+# If the postgres_cluster_nodes is not set, the test fails and an error message is displayed
+- name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Verify postgres_cluster_nodes
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - "postgres_cluster_nodes is not none"
+      - "postgres_cluster_nodes != 'N/A'"
+      - "postgres_cluster_nodes == 'una.name,10.172.0.21,10.172.0.22'"
+    fail_msg: "Test failed: postgres_cluster_nodes is not set correctly."
+    success_msg: "Test passed: postgres_cluster_nodes is set correctly."
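+
+# 💡 The extract/map chain reads each group member's hostvars, which is why the
+# first entry above is 'una.name' (the inventory_hostname fact overridden by an
+# earlier test) rather than the raw group name. The same pattern can pull any
+# other hostvar; a commented-out sketch (the 'ansible_host' key is an
+# assumption, not something these tests rely on):
+# - name: Molecule.tests.roles.deploy_finish.variables.haproxy_nodes | Example: join connection addresses
+#   run_once: true
+#   ansible.builtin.debug:
+#     msg: >-
+#       {{ groups['balancers'] | default([])
+#          | map('extract', hostvars, 'ansible_host') | join(',') }}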
diff --git a/automation/molecule/tests/roles/haproxy/main.yml b/automation/molecule/tests/roles/haproxy/main.yml new file mode 100644 index 000000000..0eb59b4d0 --- /dev/null +++ b/automation/molecule/tests/roles/haproxy/main.yml @@ -0,0 +1,13 @@ +--- +# 🚀 The purpose of this task is to incorporate a series of variable tests for HAProxy, a reliable, high performance TCP/HTTP load balancer +# 🎯 The objective is to ensure that the variables used in the HAProxy configuration are correctly defined and functional + +# 🔄 Including variable tests for HAProxy +# This task iterates over all the YAML files in the 'variables' directory, and includes each file's tasks in the current playbook +# If a variable test fails, it will be immediately apparent, aiding in debugging and ensuring the robustness of the HAProxy configuration +- name: Molecule.tests.roles.haproxy.main | Incorporate Variable Tests + run_once: true + ansible.builtin.include_tasks: "{{ molecule_tests_roles_haproxy_main_file }}" + loop: "{{ lookup('fileglob', 'variables/*.yml', wantlist=True) }}" + loop_control: + loop_var: molecule_tests_roles_haproxy_main_file diff --git a/automation/molecule/tests/roles/haproxy/variables/haproxy.cfg.yml b/automation/molecule/tests/roles/haproxy/variables/haproxy.cfg.yml new file mode 100644 index 000000000..d6f676eb0 --- /dev/null +++ b/automation/molecule/tests/roles/haproxy/variables/haproxy.cfg.yml @@ -0,0 +1,163 @@ +--- +# 🚀 These tasks aim to test the "ansible.builtin.lineinfile" task +# 🎯 The objective is to ensure that the lines are correctly replaced + +# 📂 Ensure tmp directory exists +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Ensure tmp directory exists + run_once: true + delegate_to: localhost + ansible.builtin.file: + path: "./tmp" + state: directory + +# 🔄 Define a dummy template file +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Set template file test data + run_once: true + ansible.builtin.set_fact: + haproxy_listen_port: + master: 5000 + replicas: 5001 + replicas_sync: 5002 + replicas_async: 5003 + stats: 7000 + inventory_hostname: una.name + cluster_vip: fake.vip.url.com + +# =============================== +# 💻 Case cluster_vip is defined +# =============================== + +# 📝 Establishing test data for haproxy.cluster_vip.defined.cfg +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Establish haproxy.cfg Test Data + run_once: true + delegate_to: localhost + ansible.builtin.copy: + dest: "./tmp/haproxy.cluster_vip.defined.cfg" + content: | + bind *:{{ haproxy_listen_port.stats }} + bind *:{{ haproxy_listen_port.master }} + bind *:{{ haproxy_listen_port.replicas }} + bind *:{{ haproxy_listen_port.replicas_sync }} + bind *:{{ haproxy_listen_port.replicas_async }} + +# 🚀 Execute the main task here +# This task updates the 'haproxy.cfg' file +# replacing lines that start with 'bind' and include specific ports +# The new lines will either bind to the inventory_hostname or the cluster_vip, depending on the specific port. 
+- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Update haproxy.cfg (replace "bind") + run_once: true + delegate_to: localhost + ansible.builtin.lineinfile: + path: ./tmp/haproxy.cluster_vip.defined.cfg + regexp: "{{ bind_config_with_vip_item.regexp }}" + line: "{{ bind_config_with_vip_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: bind_config_with_vip_item + label: "{{ bind_config_with_vip_item.line }}" + +# 🖨️ Debugging the established haproxy.cfg +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Debug haproxy.cfg + run_once: true + delegate_to: localhost + ansible.builtin.command: + cmd: cat ./tmp/haproxy.cluster_vip.defined.cfg + register: output +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Debug haproxy.cfg content + run_once: true + ansible.builtin.debug: + var: output.stdout_lines + +# ✅ Verifying the correctness of the established haproxy.cfg +# If the lines are not replaced correctly, the test fails and an error message is displayed +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Validate updated haproxy.cfg + run_once: true + ansible.builtin.assert: + that: + - "output.stdout_lines[0] == ' bind una.name:7000'" + - "output.stdout_lines[1] == ' bind fake.vip.url.com:5000'" + - "output.stdout_lines[2] == ' bind fake.vip.url.com:5001'" + - "output.stdout_lines[3] == ' bind fake.vip.url.com:5002'" + - "output.stdout_lines[4] == ' bind fake.vip.url.com:5003'" + fail_msg: "Test failed: Lines are not replaced correctly in haproxy.cfg." + success_msg: "Test passed: Lines are replaced correctly in haproxy.cfg." + +# =================================== +# 💻 Case cluster_vip is not defined +# =================================== + +# 📝 Establishing test data for haproxy.cluster_vip.not.defined.cfg +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Establish haproxy.cfg Test Data - 2nd round + run_once: true + delegate_to: localhost + ansible.builtin.copy: + dest: "./tmp/haproxy.cluster_vip.not.defined.cfg" + content: | + bind *:{{ haproxy_listen_port.stats }} + bind *:{{ haproxy_listen_port.master }} + bind *:{{ haproxy_listen_port.replicas }} + bind *:{{ haproxy_listen_port.replicas_sync }} + bind *:{{ haproxy_listen_port.replicas_async }} + +# 🚀 Execute the new task here +# This task updates the 'haproxy.cfg' file again, this time replacing lines that start with 'bind' and include specific ports +# The new lines will bind to the inventory_hostname. 
+- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Prepare haproxy.cfg template file (replace "bind" - 2nd round) + run_once: true + delegate_to: localhost + ansible.builtin.lineinfile: + path: ./tmp/haproxy.cluster_vip.not.defined.cfg + regexp: "{{ bind_config_without_vip_item.regexp }}" + line: "{{ bind_config_without_vip_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: bind_config_without_vip_item + label: "{{ bind_config_without_vip_item.line }}" + +# 🖨️ Debugging the established haproxy.cfg - 2nd round +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Debug haproxy.cfg - 2nd round + run_once: true + delegate_to: localhost + ansible.builtin.command: + cmd: cat ./tmp/haproxy.cluster_vip.not.defined.cfg + register: output_2 +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Debug haproxy.cfg content - 2nd round + run_once: true + ansible.builtin.debug: + var: output_2.stdout_lines + +# ✅ Verifying the correctness of the established haproxy.cfg - 2nd round +# If the lines are not replaced correctly, the test fails and an error message is displayed +- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Verify haproxy.cfg - 2nd round + run_once: true + ansible.builtin.assert: + that: + - "output_2.stdout_lines[0] == ' bind una.name:7000'" + - "output_2.stdout_lines[1] == ' bind una.name:5000'" + - "output_2.stdout_lines[2] == ' bind una.name:5001'" + - "output_2.stdout_lines[3] == ' bind una.name:5002'" + - "output_2.stdout_lines[4] == ' bind una.name:5003'" + fail_msg: "Test failed: Lines are not replaced correctly in haproxy.cfg - 2nd round." + success_msg: "Test passed: Lines are replaced correctly in haproxy.cfg - 2nd round." 
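+
+# 💡 With `backrefs: true`, lineinfile is a pure in-place substitution: lines
+# are only rewritten when the regexp matches, and nothing is ever appended.
+# A re-run over the already-updated file is therefore a no-op, which can be
+# asserted directly. This is a supplementary sketch, not part of the original
+# test set:
+- name: Molecule.tests.roles.confd.variables.haproxy.cfg | Verify idempotency of "bind" replacement
+  run_once: true
+  delegate_to: localhost
+  ansible.builtin.lineinfile:
+    path: ./tmp/haproxy.cluster_vip.not.defined.cfg
+    regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$"
+    line: " bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }}"
+    backrefs: true
+  register: recheck
+  failed_when: recheck is changed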
diff --git a/automation/molecule/tests/roles/patroni/main.yml b/automation/molecule/tests/roles/patroni/main.yml new file mode 100644 index 000000000..3b5cca5e6 --- /dev/null +++ b/automation/molecule/tests/roles/patroni/main.yml @@ -0,0 +1,13 @@ +--- +# 🚀 The objective of this task is to include and execute variable tests for the main Patroni role +# 🎯 This ensures that all variable tests are run, providing comprehensive coverage and validation of the role's variables + +# 🔄 Including and executing variable tests for the main Patroni role +# We use a loop to iterate over all .yml files in the 'variables' directory +# Each file is included and its tasks are executed +- name: Molecule.tests.roles.patroni.main | Include and Execute Variable Tests + run_once: true + ansible.builtin.include_tasks: "{{ molecule_tests_roles_patroni_main_file }}" + loop: "{{ lookup('fileglob', 'variables/*.yml', wantlist=True) }}" + loop_control: + loop_var: molecule_tests_roles_patroni_main_file diff --git a/automation/molecule/tests/roles/patroni/variables/custom_wal_dir.yml b/automation/molecule/tests/roles/patroni/variables/custom_wal_dir.yml new file mode 100644 index 000000000..24fae8e12 --- /dev/null +++ b/automation/molecule/tests/roles/patroni/variables/custom_wal_dir.yml @@ -0,0 +1,119 @@ +--- +# 🚀 These tasks aim to validate the "custom_wal_dir" task +# 🎯 The goal is to verify the correct display of renaming based on PostgreSQL version + +# 🔄 Set postgresql_version for the first test scenario +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Define PG version as 11 for scenario 1 + run_once: true + ansible.builtin.set_fact: + postgresql_version: 11 + +# ====================================== +# 💻 Scenario: PostgreSQL version >= 10 +# ====================================== + +# 🔄 Determine base pg_wal_dir name +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Set pg_wal_dir based on postgresql_version + run_once: true + ansible.builtin.set_fact: + pg_wal_dir: "{{ 'pg_wal' if postgresql_version | int >= 10 else 'pg_xlog' }}" + +# 🔄 Determine the name based on postgresql_version +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Determine name for scenario 1 + run_once: true + ansible.builtin.set_fact: + name_postgresql_version_11: "Rename {{ pg_wal_dir }} to {{ pg_wal_dir }}_old" + +# 🔄 Determine the mv command based on postgresql_version +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Determine mv command for 'pg_wal' or 'pg_xlog' for scenario 1 + run_once: true + ansible.builtin.set_fact: + mv_command_postgresql_version_11: "mv {{ postgresql_data_dir }}/{{ pg_wal_dir }} {{ postgresql_data_dir }}/{{ pg_wal_dir }}_old" + +# 🚀 Display the name +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Display name for scenario 1 + run_once: true + ansible.builtin.debug: + var: name_postgresql_version_11 + +# 🚀 Display the command +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Display command for scenario 1 + run_once: true + ansible.builtin.debug: + var: mv_command_postgresql_version_11 + +# ✅ Verify if the name has been determined correctly +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Validate name for scenario 1 + run_once: true + ansible.builtin.assert: + that: + - name_postgresql_version_11 == 'Rename pg_wal to pg_wal_old' + fail_msg: "Test failed: The name has not been determined correctly." + success_msg: "Test passed: The name has been determined correctly." 
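+
+# 💡 For context, the role itself would presumably consume these two facts in a
+# single rename step; a hypothetical, commented-out sketch (task and variable
+# names are illustrative only):
+# - name: "{{ name_postgresql_version_11 }}"
+#   ansible.builtin.command: "{{ mv_command_postgresql_version_11 }}"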
+ +# ✅ Verify if the command has been determined correctly +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Validate command for scenario 1 + run_once: true + ansible.builtin.assert: + that: + - mv_command_postgresql_version_11 == 'mv /var/lib/pgsql/11/data/pg_wal /var/lib/pgsql/11/data/pg_wal_old' + fail_msg: "Test failed: The command has not been determined correctly." + success_msg: "Test passed: The command has been determined correctly." + +# ===================================== +# 💻 Scenario: PostgreSQL version < 10 +# ===================================== + +# 🔄 Set postgresql_version for the second test scenario +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Define PG version as 9.6 for scenario 2 + run_once: true + ansible.builtin.set_fact: + postgresql_version: 9.6 + +# 🔄 Determine base pg_wal_dir name +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Set pg_wal_dir based on postgresql_version + run_once: true + ansible.builtin.set_fact: + pg_wal_dir: "{{ 'pg_wal' if postgresql_version | int >= 10 else 'pg_xlog' }}" + +# 🔄 Determine the name based on postgresql_version +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Determine name for scenario 2 + run_once: true + ansible.builtin.set_fact: + name_postgresql_version_9: "Rename {{ pg_wal_dir }} to {{ pg_wal_dir }}_old" + +# 🔄 Determine the mv command based on postgresql_version +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Determine mv command for scenario 2 + run_once: true + ansible.builtin.set_fact: + mv_command_postgresql_version_9: "mv {{ postgresql_data_dir }}/{{ pg_wal_dir }} {{ postgresql_data_dir }}/{{ pg_wal_dir }}_old" + +# 🚀 Display the name +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Display name for scenario 2 + run_once: true + ansible.builtin.debug: + var: name_postgresql_version_9 + +# 🚀 Display the command +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Display command for scenario 2 + run_once: true + ansible.builtin.debug: + var: mv_command_postgresql_version_9 + +# ✅ Verify if the name has been determined correctly +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Validate name for scenario 2 + run_once: true + ansible.builtin.assert: + that: + - name_postgresql_version_9 == 'Rename pg_xlog to pg_xlog_old' + fail_msg: "Test failed: The name has not been determined correctly in scenario 2." + success_msg: "Test passed: The name has been determined correctly in scenario 2." + +# ✅ Verify if the command has been determined correctly +- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Validate command for scenario 2 + run_once: true + ansible.builtin.assert: + that: + - mv_command_postgresql_version_9 == 'mv /var/lib/pgsql/9.6/data/pg_xlog /var/lib/pgsql/9.6/data/pg_xlog_old' + fail_msg: "Test failed: The command has not been determined correctly." + success_msg: "Test passed: The command has been determined correctly." 
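+
+# 💡 Both scenarios hinge on the '| int' cast: Jinja truncates "9.6" to 9, so
+# every pre-10 release falls back to pg_xlog. A minimal self-contained check of
+# that behaviour (an illustrative addition, not part of the original tests):
+- name: Molecule.tests.roles.patroni.variables.custom_wal_dir | Validate integer cast of version strings
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - ("9.6" | int == 9)
+      - ("11" | int >= 10)
+    fail_msg: "Test failed: the integer cast of the version does not behave as expected."
+    success_msg: "Test passed: the integer cast of the version behaves as expected."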
diff --git a/automation/molecule/tests/roles/pre_checks/main.yml b/automation/molecule/tests/roles/pre_checks/main.yml
new file mode 100644
index 000000000..722ea3dfe
--- /dev/null
+++ b/automation/molecule/tests/roles/pre_checks/main.yml
@@ -0,0 +1,13 @@
+---
+# 🚀 This task is designed to include variable tests in the main pre-checks for Molecule tests
+# 🎯 The objective is to ensure that all variable tests are properly included and executed
+
+# 🔄 Including variable tests in the main pre-checks for Molecule tests
+# For each YAML file in the 'variables' directory, we include its tasks in the main pre-checks
+# If a file does not exist or cannot be read, the task will fail and an error message will be displayed
+- name: Molecule.tests.roles.pre_checks.main | Include Variable Tests in Main Pre-checks
+  run_once: true
+  ansible.builtin.include_tasks: "{{ molecule_tests_roles_pre_checks_main_file }}"
+  loop: "{{ lookup('fileglob', 'variables/*.yml', wantlist=True) }}"
+  loop_control:
+    loop_var: molecule_tests_roles_pre_checks_main_file
diff --git a/automation/molecule/tests/roles/pre_checks/variables/pgbouncer.yml b/automation/molecule/tests/roles/pre_checks/variables/pgbouncer.yml
new file mode 100644
index 000000000..199b2eb93
--- /dev/null
+++ b/automation/molecule/tests/roles/pre_checks/variables/pgbouncer.yml
@@ -0,0 +1,153 @@
+---
+# 🚀 These tasks aim to compute the overall pool size for PgBouncer, a PostgreSQL connection pooler
+# 🎯 The objective is to guarantee that each database's pool size is precisely defined and calculated
+
+# 📝 Establishing test data for PgBouncer pools. Two databases are specified, each with a pool size indicated in pool_parameters
+- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Establish PgBouncer Pools Test Data
+  run_once: true
+  ansible.builtin.set_fact:
+    pgbouncer_pools:
+      - dbname: db1
+        pool_parameters: "pool_size=10"
+      - dbname: db2
+        pool_parameters: "pool_size=20"
+
+# ==================================================
+# 💾 Start pgbouncer_pool_size operations and tests
+# ==================================================
+
+# 🔄 Calculating the overall pool size (pgbouncer_pool_size)
+# For each database in pgbouncer_pools, we extract the pool size from pool_parameters and add it to the total
+# If pool size is undefined, we use the default pool size (pgbouncer_default_pool_size), or 0 if that's also undefined
+- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Compute Overall Pool Size
+  run_once: true
+  ansible.builtin.set_fact:
+    pgbouncer_pool_size: "{{
+      (pgbouncer_pool_size | default(0) | int)
+      +
+      (pool_item.pool_parameters
+      | regex_search('pool_size=(\\d+)', multiline=False)
+      | regex_replace('[^0-9]', '')
+      | default(pgbouncer_default_pool_size | default(0), true)
+      | int)
+      }}"
+  loop: "{{ pgbouncer_pools | default([]) }}"
+  loop_control:
+    loop_var: pool_item
+
+# 🖨️ Debugging the calculated overall pool size
+- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Debug Overall Pool Size
+  run_once: true
+  ansible.builtin.debug:
+    var: pgbouncer_pool_size
+
+# ✅ Verifying the correctness of the calculated overall pool size
+# The expected overall pool size is 30 (10 from db1 and 20 from db2)
+# If the calculated overall pool size is not 30, the test fails and an error message is displayed
+- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Verify Pool Size Calculation
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - pgbouncer_pool_size | int == 30
+    fail_msg: "Test failed:
pgbouncer_pool_size is not equal to 30." + success_msg: "Test passed: pgbouncer_pool_size is equal to 30." + +# ======================================================== +# 🚀 Start pgbouncer_total_pool_size operations and tests +# ======================================================== + +# 🖨️ Debugging test variables +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Debug Test Variables + run_once: true + ansible.builtin.debug: + var: "{{ debug_item }}" + loop: + - postgresql_databases + - pgbouncer_pools + - pgbouncer_default_pool_size + loop_control: + loop_var: debug_item + +# 📝 Establishing test data for PostgreSQL databases +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Establish PostgreSQL Databases Test Data + run_once: true + ansible.builtin.set_fact: + postgresql_databases: + - db: db1 + - db: db2 + - db: db3 + +# 🔄 Calculating the overall pool size (pgbouncer_total_pool_size) across all databases +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Compute Total Pool Size + run_once: true + ansible.builtin.set_fact: + pgbouncer_total_pool_size: >- + {{ + (pgbouncer_pool_size | int) + + + (postgresql_databases + | default([]) + | rejectattr('db', 'in', pgbouncer_pools | map(attribute='dbname') | list) + | length + ) * (pgbouncer_default_pool_size | default(0) | int) + }} + when: pgbouncer_pool_size is defined + +# 🖨️ Debugging the calculated overall pool size +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Debug Total Pool Size + run_once: true + ansible.builtin.debug: + var: pgbouncer_total_pool_size + +# ✅ Verifying the correctness of the calculated overall pool size +# The expected overall pool size is 130 +# 10 from db1 +# 20 from db2 +# 100 from db3 as db3 is not defined in pgbouncer_pools and hence, its pool size is pgbouncer_default_pool_size which is 100 +# If the calculated overall pool size is not 130, the test fails and an error message is displayed +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Verify Total Pool Size Calculation + run_once: true + ansible.builtin.assert: + that: + - pgbouncer_total_pool_size | int == 130 + fail_msg: "Test failed: pgbouncer_total_pool_size is not equal to 130." + success_msg: "Test passed: pgbouncer_total_pool_size is equal to 130." + +# ====================================================================== +# 📊 Start pgbouncer_total_pool_size (postgresql_databases not defined) +# ====================================================================== + +# 📝 Overriding postgresql_databases to an empty list for the next test +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Override postgresql_databases + run_once: true + ansible.builtin.set_fact: + postgresql_databases: [] + +# 🔄 Compute the aggregate pool size (pgbouncer_total_pool_size) across all databases with postgresql_databases not defined. +- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Compute Total Pool Size (postgresql_databases not defined) + run_once: true + ansible.builtin.set_fact: + pgbouncer_total_pool_size: >- + {{ + (pgbouncer_pool_size | int) + + + (postgresql_databases + | default([]) + | rejectattr('db', 'in', pgbouncer_pools | map(attribute='dbname') | list) + | length + ) * (pgbouncer_default_pool_size | default(0) | int) + }} + when: pgbouncer_pool_size is defined + +# ✅ Test if the computed aggregate pool size is correct with postgresql_databases not defined. 
+# In this case, we expect the aggregate pool size to be 30
+# 10 from db1
+# 20 from db2
+# 0 from db3 as postgresql_databases is not defined
+- name: Molecule.tests.roles.pre_checks.variables.pgbouncer | Verify Total Pool Size Calculation (postgresql_databases not defined)
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - pgbouncer_total_pool_size | int == 30
+    fail_msg: "Test failed: pgbouncer_total_pool_size is not equal to 30."
+    success_msg: "Test passed: pgbouncer_total_pool_size is equal to 30."
diff --git a/automation/molecule/tests/roles/pre_checks/variables/timescaledb.yml b/automation/molecule/tests/roles/pre_checks/variables/timescaledb.yml
new file mode 100644
index 000000000..e492c49cb
--- /dev/null
+++ b/automation/molecule/tests/roles/pre_checks/variables/timescaledb.yml
@@ -0,0 +1,64 @@
+---
+# 📝 These tasks aim to ensure that 'timescaledb' is included in the 'shared_preload_libraries' of PostgreSQL parameters
+# 🎯 The objective is to guarantee that TimescaleDB, a time-series database built on PostgreSQL, is properly loaded and available
+
+# 🔄 Ensuring 'timescaledb' is included in 'shared_preload_libraries'
+# If 'timescaledb' is not already in 'shared_preload_libraries', it is added
+- name: Molecule.tests.roles.pre_checks.variables.timescaledb | Ensure 'timescaledb' is in 'shared_preload_libraries'
+  run_once: true
+  ansible.builtin.set_fact:
+    postgresql_parameters: >-
+      {{ postgresql_parameters | rejectattr('option', 'equalto', 'shared_preload_libraries') | list
+      + [{'option': 'shared_preload_libraries', 'value': new_value}] }}
+  vars:
+    shared_preload_libraries_item: >-
+      {{
+        postgresql_parameters
+        | selectattr('option', 'equalto', 'shared_preload_libraries')
+        | list | last | default({'value': ''})
+      }}
+    new_value: >-
+      {{
+        (shared_preload_libraries_item.value ~ (',' if shared_preload_libraries_item.value else '')
+        if 'timescaledb' not in shared_preload_libraries_item.value.split(',') else shared_preload_libraries_item.value)
+        ~ ('timescaledb' if 'timescaledb' not in shared_preload_libraries_item.value.split(',') else '')
+      }}
+
+# 📝 Setting 'shared_preload_libraries_item' as a fact for further use
+- name: Molecule.tests.roles.pre_checks.variables.timescaledb | Set 'shared_preload_libraries_item' as a fact
+  run_once: true
+  ansible.builtin.set_fact:
+    shared_preload_libraries_item: >-
+      {{
+        postgresql_parameters
+        | selectattr('option', 'equalto', 'shared_preload_libraries')
+        | list | last | default({'value': ''})
+      }}
+
+# ✅ Verifying that 'timescaledb' is included in 'shared_preload_libraries'
+# If 'timescaledb' is not included, the test fails and an error message is displayed
+- name: Molecule.tests.roles.pre_checks.variables.timescaledb | Assert that 'timescaledb' is in 'shared_preload_libraries'
+  run_once: true
+  ansible.builtin.assert:
+    that:
+      - "'timescaledb' in shared_preload_libraries_item.value.split(',')"
+    fail_msg: "'timescaledb' is not in 'shared_preload_libraries'"
+    success_msg: "'timescaledb' is in 'shared_preload_libraries'"
+
+# 📝 Setting 'origin_shared_preload_libraries_item' as a fact for further use
+- name: Molecule.tests.roles.pre_checks.variables.timescaledb | Set 'origin_shared_preload_libraries_item' as a fact
+  run_once: true
+  ansible.builtin.set_fact: # yamllint disable rule:line-length
+    origin_shared_preload_libraries_item: "{{ postgresql_parameters | selectattr('option', 'equalto', 'shared_preload_libraries') | list | last | default({'value': ''}) }}"
+
+# ✅ Verifying that the new 'shared_preload_libraries_item'
equals 'origin_shared_preload_libraries_item' +# If they are not equal, the test fails and an error message is displayed +- name: Molecule.tests.roles.pre_checks.variables.timescaledb | Assert that new 'shared_preload_libraries_item' equals 'origin_shared_preload_libraries_item' + run_once: true + ansible.builtin.assert: + that: + - shared_preload_libraries_item == origin_shared_preload_libraries_item + fail_msg: > + Assertion failed: shared_preload_libraries_item is "{{ shared_preload_libraries_item }}", + but expected "{{ origin_shared_preload_libraries_item }}" + success_msg: "shared_preload_libraries_item is correct" diff --git a/automation/molecule/tests/roles/swap/conditions/create.yml b/automation/molecule/tests/roles/swap/conditions/create.yml new file mode 100644 index 000000000..0a7ba9863 --- /dev/null +++ b/automation/molecule/tests/roles/swap/conditions/create.yml @@ -0,0 +1,32 @@ +--- +# 🚀 These tasks aim to set up the conditions for creating a swap file +# 🎯 The objective is to ensure that a swap file is created only if it does not already exist or if its size is not as expected + +# 📝 Loading system variables that may be needed for the swap file creation process +- name: Molecule.tests.roles.swap.conditions.create | Load System Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../../roles/common/defaults/system.yml + +# 🔄 Setting up a test condition where no swap file exists +# We initialize swap_exists with an empty stdout and stdout_lines +- name: Molecule.tests.roles.swap.conditions.create | Establish Swap File Non-Existence Test Condition + run_once: true + ansible.builtin.set_fact: + swap_exists: + stdout: "" + stdout_lines: [] + +# ✅ Verifying the condition for creating a swap file +# A swap file should be created if either of the following is true: +# 1. No swap file exists (indicated by an empty stdout in swap_exists) +# 2. The total size of all swap files (calculated from stdout_lines in swap_exists) is not equal to the expected swap file size (swap_file_size_mb) +- name: Molecule.tests.roles.swap.conditions.create | Verify Swap File Creation Condition + run_once: true + ansible.builtin.assert: + that: > + (swap_exists.stdout is defined and swap_exists.stdout | length < 1) or + (swap_exists.stdout_lines is defined and + (swap_exists.stdout_lines | map('trim') | map('int') | sum / 1024 / 1024) | round | int != swap_file_size_mb|int) + fail_msg: "Test failed: Condition for creating swap file is false." + success_msg: "Test passed: Condition for creating swap file is true." 
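The create and delete conditions above hinge on the same arithmetic: the per-file byte counts in swap_exists.stdout_lines are summed, divided by 1024 twice to get megabytes, and compared with swap_file_size_mb. A minimal Python sketch of that check, using hypothetical values in place of the command output and the role variable:

# Sketch of the swap-size condition evaluated by the asserts above.
# 'swap_file_size_mb' and the byte counts are assumed stand-ins for the
# role variable and the 'swap_exists' command result, respectively.
swap_file_size_mb = 2048                     # desired size in MB (assumed)
stdout_lines = ["1073741824", "1073741824"]  # two 1 GiB swap files, in bytes

total_mb = round(sum(int(line.strip()) for line in stdout_lines) / 1024 / 1024)

# create.yml condition: no swap file reported, or total size differs
create_needed = len(stdout_lines) == 0 or total_mb != swap_file_size_mb
print(total_mb, create_needed)  # -> 2048 False (size matches, nothing to create)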
diff --git a/automation/molecule/tests/roles/swap/conditions/delete.yml b/automation/molecule/tests/roles/swap/conditions/delete.yml new file mode 100644 index 000000000..362577fee --- /dev/null +++ b/automation/molecule/tests/roles/swap/conditions/delete.yml @@ -0,0 +1,30 @@ +--- +# 🚀 These tasks aim to test the conditions for deleting a swap file +# 🎯 The objective is to verify that the swap file exists and its size is not equal to the desired size + +# 📝 Loading system variables that might be used in the following tasks +- name: Molecule.tests.roles.swap.conditions.delete | Load System Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../../roles/common/defaults/system.yml + +# 📝 Setting up test data for swap file existence and size +# Here, we assume that a 1MB swap file exists +- name: Molecule.tests.roles.swap.conditions.delete | Set Swap File Test Data + run_once: true + ansible.builtin.set_fact: + swap_exists: + stdout: "1048576" # 1 MB in bytes + stdout_lines: + - "1048576" + +# ✅ Verifying the condition for deleting the swap file +# The swap file should be deleted if it exists and its size is not equal to the desired size (swap_file_size_mb) +# If the condition is not met, the test fails and an error message is displayed +- name: Molecule.tests.roles.swap.conditions.delete | Verify Condition for Deleting Swap File + run_once: true + ansible.builtin.assert: + that: (swap_exists.stdout is defined and swap_exists.stdout | length > 1) and + ((swap_exists.stdout_lines|map('trim')|map('int')|sum / 1024 / 1024)|round|int != swap_file_size_mb|int) + fail_msg: "Test failed: Condition for deleting swap file is false." + success_msg: "Test passed: Condition for deleting swap file is true." diff --git a/automation/molecule/tests/roles/swap/main.yml b/automation/molecule/tests/roles/swap/main.yml new file mode 100644 index 000000000..726c5cbd8 --- /dev/null +++ b/automation/molecule/tests/roles/swap/main.yml @@ -0,0 +1,13 @@ +--- +# 🚀 This task aims to include variable tests for the main swap role in Molecule +# 🎯 The objective is to ensure all conditions are tested by looping through each YAML file in the 'conditions' directory + +# 🔄 Including variable tests for the main swap role in Molecule +# For each YAML file in the 'conditions' directory, we include its tasks in the current playbook +# If a YAML file is not found or cannot be read, the playbook execution will fail at this point +- name: Molecule.tests.roles.swap.main | Include Variable Tests from Conditions Directory + run_once: true + ansible.builtin.include_tasks: "{{ molecule_tests_roles_swap_main_file }}" + loop: "{{ lookup('fileglob', 'conditions/*.yml', wantlist=True) }}" + loop_control: + loop_var: molecule_tests_roles_swap_main_file diff --git a/automation/molecule/tests/variables/asserts/apt_repository.yml b/automation/molecule/tests/variables/asserts/apt_repository.yml new file mode 100644 index 000000000..b9cae4b42 --- /dev/null +++ b/automation/molecule/tests/variables/asserts/apt_repository.yml @@ -0,0 +1,48 @@ +--- +# 🚀 These tasks aim to validate the apt_repository variable +# 🎯 The objective is to ensure that the repository URLs are correctly defined + +# ======================================== +# 💻 Start Repository Operations and Tests +# ======================================== + +# 🔄 Setting up a valid repository for testing +# We define a minor version of PostgreSQL Pro and the distribution details of the system +- name: Molecule.tests.variables.asserts.apt_repository | Set Valid
Repository Test Data + run_once: true + ansible.builtin.set_fact: + postgrespro_minor_version: 12.4.1 + ansible_distribution: Debian + ansible_distribution_release: "stretch" + +# 📝 Constructing the repository URL for testing +# The URL is constructed based on the minor version of PostgreSQL Pro and the system's distribution details +- name: Molecule.tests.variables.asserts.apt_repository | Construct Repository URL Test Data + run_once: true + ansible.builtin.set_fact: + apt_repository: + repo: >- + {{ 'deb https://repo.postgrespro.ru//pgpro-archive/pgpro-' ~ + postgrespro_minor_version ~ '/' ~ (ansible_distribution | lower) ~ '/' ~ + ' ' ~ ansible_distribution_release ~ ' main' }} + +# 🖨️ Debugging the constructed repository URL +# The constructed URL is printed for debugging purposes +- name: Molecule.tests.variables.asserts.apt_repository | Debug Constructed Repository URL + run_once: true + ansible.builtin.debug: + var: apt_repository['repo'] + +# ✅ Verifying the correctness of the constructed repository URL +# The constructed URL is compared with the expected URL +# If the URLs do not match, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.apt_repository | Verify Constructed Repository URL + run_once: true + ansible.builtin.assert: + that: + - "'repo' in apt_repository" + - "apt_repository['repo'] is not none" + - "apt_repository['repo'] != 'N/A'" + - "apt_repository['repo'] == 'deb https://repo.postgrespro.ru//pgpro-archive/pgpro-12.4.1/debian/ stretch main'" + fail_msg: "Test failed: The constructed repository URL does not match the expected URL." + success_msg: "Test passed: The constructed repository URL matches the expected URL." diff --git a/automation/molecule/tests/variables/asserts/baseurl.yml b/automation/molecule/tests/variables/asserts/baseurl.yml new file mode 100644 index 000000000..e03f10bd2 --- /dev/null +++ b/automation/molecule/tests/variables/asserts/baseurl.yml @@ -0,0 +1,43 @@ +--- +# 🚀 These tasks aim to validate the _baseurl variable for OracleLinux +# 🎯 The objective is to ensure that the URLs for OracleLinux are correctly set in _baseurl + +# ========================================================= +# 💻 Start OracleLinux URL Validation Operations and Tests +# ========================================================= + +# 🔄 Setting up a valid OracleLinux version and distribution for the test +# We define a specific minor version of PostgreSQL Pro and a major version of the Linux distribution +- name: Molecule.tests.variables.asserts.baseurl | Set OracleLinux Test Data (valid) + run_once: true + ansible.builtin.set_fact: + postgrespro_minor_version: 12.4.1 + ansible_distribution_major_version: 9 + +# 📝 Constructing the expected _baseurl for OracleLinux +# The URL is built based on the defined PostgreSQL Pro minor version and Linux distribution major version +- name: Molecule.tests.variables.asserts.baseurl | Construct Expected OracleLinux URL + run_once: true + ansible.builtin.set_fact: + _baseurl: # yamllint disable rule:line-length + OracleLinux: "https://repo.postgrespro.ru//pgpro-archive/pgpro-%7B%7B%20postgrespro_minor_version%20%7D%7D/oraclelinux/%7B%7B%20ansible_distribution_major_version%20%7D%7DServer/os/x86_64/rpms/" + +# 🖨️ Displaying the constructed OracleLinux URL for debugging purposes +- name: Molecule.tests.variables.asserts.baseurl | Debug Constructed OracleLinux URL + run_once: true + ansible.builtin.debug: + var: _baseurl['OracleLinux'] + +# ✅ Verifying the correctness of the constructed 
OracleLinux URL +# We check that the URL is correctly set in _baseurl and matches the expected URL +# If any of these conditions is not met, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.baseurl | Verify OracleLinux URL in _baseurl + run_once: true + ansible.builtin.assert: + that: + - "'OracleLinux' in _baseurl" + - "_baseurl['OracleLinux'] is not none" + - "_baseurl['OracleLinux'] != 'N/A'" + - "_baseurl['OracleLinux'] == 'https://repo.postgrespro.ru//pgpro-archive/pgpro-12.4.1/oraclelinux/9Server/os/x86_64/rpms/'" + fail_msg: "Test failed: OracleLinux URL is not set correctly in _baseurl." + success_msg: "Test passed: OracleLinux URL is set correctly in _baseurl." diff --git a/automation/molecule/tests/variables/asserts/pg_probackup.yml b/automation/molecule/tests/variables/asserts/pg_probackup.yml new file mode 100644 index 000000000..c0c65544d --- /dev/null +++ b/automation/molecule/tests/variables/asserts/pg_probackup.yml @@ -0,0 +1,51 @@ +--- +# 🚀 These tasks aim to validate the values of pg_probackup related variables +# 🎯 The objective is to ensure that the values of these variables are correctly set + +# 📝 Setting the expected value for pg_probackup[0].value +- name: Molecule.tests.variables.asserts.pg_probackup | Establish Expected Value for pg_probackup[0].value + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_pg_probackup_command_value: "pg_probackup-{{ pg_probackup_version }} restore -B {{ pg_probackup_dir }} --instance {{ pg_probackup_instance }} -j {{ pg_probackup_threads }} {{ pg_probackup_add_keys }}" + +# 🖨️ Debugging the actual value of pg_probackup[0].value +- name: Molecule.tests.variables.asserts.pg_probackup | Debug Actual Value of pg_probackup[0].value + run_once: true + ansible.builtin.debug: + var: pg_probackup[0].value + +# ✅ Verifying the correctness of pg_probackup[0].value +# If the actual value is not equal to the expected value, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.pg_probackup | Verify Value of pg_probackup[0].value + run_once: true + ansible.builtin.assert: + that: + - pg_probackup[0].value == origin_pg_probackup_command_value + fail_msg: > + Assertion failed: pg_probackup[0].value is "{{ pg_probackup[0].value }}", + but expected "{{ origin_pg_probackup_command_value }}" + success_msg: "pg_probackup[0].value is correct" + +# 📝 Setting the expected value for pg_probackup_patroni_cluster_bootstrap_command +- name: Molecule.tests.variables.asserts.pg_probackup | Establish Expected Value for pg_probackup_patroni_cluster_bootstrap_command + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_pg_probackup_patroni_cluster_bootstrap_command: "pg_probackup-{{ pg_probackup_version }} restore -B {{ pg_probackup_dir }} --instance {{ pg_probackup_instance }} -j {{ pg_probackup_threads }} {{ pg_probackup_add_keys }}" + +# 🖨️ Debugging the actual value of pg_probackup_patroni_cluster_bootstrap_command +- name: Molecule.tests.variables.asserts.pg_probackup | Debug Actual Value of pg_probackup_patroni_cluster_bootstrap_command + run_once: true + ansible.builtin.debug: + var: pg_probackup_patroni_cluster_bootstrap_command + +# ✅ Verifying the correctness of pg_probackup_patroni_cluster_bootstrap_command +# If the actual value is not equal to the expected value, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.pg_probackup | Verify Value of
pg_probackup_patroni_cluster_bootstrap_command + run_once: true + ansible.builtin.assert: + that: + - pg_probackup_patroni_cluster_bootstrap_command == origin_pg_probackup_patroni_cluster_bootstrap_command + fail_msg: > + Assertion failed: pg_probackup_patroni_cluster_bootstrap_command is "{{ pg_probackup_patroni_cluster_bootstrap_command }}", + but expected "{{ origin_pg_probackup_patroni_cluster_bootstrap_command }}" + success_msg: "pg_probackup_patroni_cluster_bootstrap_command is correct" diff --git a/automation/molecule/tests/variables/asserts/system_info.yml b/automation/molecule/tests/variables/asserts/system_info.yml new file mode 100644 index 000000000..7660156da --- /dev/null +++ b/automation/molecule/tests/variables/asserts/system_info.yml @@ -0,0 +1,92 @@ +--- +# 🚀 These tasks aim to test the system_info variable +# 🎯 The objective is to guarantee that the system information is correctly set + +# ======================================== +# 💻 Start CPU model Operations and Tests +# ======================================== + +# 🔄 Define a valid CPU model +- name: Molecule.tests.variables.asserts.system_info | Set CPU Model Test Data (valid) + run_once: true + ansible.builtin.set_fact: + ansible_processor: ["GenuineIntel", "Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz", "Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz"] + ansible_processor_count: 6 + ansible_processor_cores: 12 + +# 📝 Establishing test data for CPU model +- name: Molecule.tests.variables.asserts.system_info | Establish CPU Model Test Data + run_once: true + ansible.builtin.set_fact: + system_info: + CPU model: >- + {{ ansible_processor[2] | default('N/A') }}, + count: {{ ansible_processor_count | default('N/A') }}, + cores: {{ ansible_processor_cores | default('N/A') }} + +# 🖨️ Debugging the established CPU model +- name: Molecule.tests.variables.asserts.system_info | Debug CPU Model + run_once: true + ansible.builtin.debug: + var: system_info['CPU model'] + +# ✅ Verifying the correctness of the established CPU model +# If the CPU model is not set, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.system_info | Verify CPU Model + run_once: true + ansible.builtin.assert: + that: + - "'CPU model' in system_info" + - "system_info['CPU model'] is not none" + - "system_info['CPU model'] != 'N/A'" + - "system_info['CPU model'] == 'Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz, count: 6, cores: 12'" + fail_msg: "Test failed: CPU model is not set correctly in system_info." + success_msg: "Test passed: CPU model is set correctly in system_info."
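The assert above works because the folded scalar ('>-') collapses the three template lines of the 'CPU model' fact into one space-joined string. A small Python sketch of the equivalent formatting, reusing the test data from the tasks above:

# Sketch: how the 'CPU model' fact collapses into a single line.
ansible_processor = [
    "GenuineIntel",
    "Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz",
    "Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz",
]
ansible_processor_count = 6
ansible_processor_cores = 12

cpu_model = (
    f"{ansible_processor[2]}, "
    f"count: {ansible_processor_count}, "
    f"cores: {ansible_processor_cores}"
)
assert cpu_model == "Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz, count: 6, cores: 12"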
+ +# ================================================== +# 💾 Start Disk space total Operations and Tests +# ================================================== + +# 🔄 Define a valid Disk space total +- name: Molecule.tests.variables.asserts.system_info | Set Disk space total Test Data (valid) + run_once: true + ansible.builtin.set_fact: + ansible_mounts: + - mount: "/" + size_total: "53687091200" # 50 GB + - mount: "/home" + size_total: "107374182400" # 100 GB + +# 📝 Establishing test data for Disk space total +- name: Molecule.tests.variables.asserts.system_info | Establish Disk space total Test Data + run_once: true + ansible.builtin.set_fact: + system_info: + Disk space total: >- + {{ + (ansible_mounts + | map(attribute='size_total') + | map('int') + | sum / 1024 / 1024 / 1024 + ) + | round(2) if ansible_mounts is defined else 'N/A' + }} GB + +# 🖨️ Debugging the established Disk space total +- name: Molecule.tests.variables.asserts.system_info | Debug Disk space total + run_once: true + ansible.builtin.debug: + var: system_info['Disk space total'] + +# ✅ Verifying the correctness of the established Disk space total +# If the Disk space total is not set, the test fails and an error message is displayed +- name: Molecule.tests.variables.asserts.system_info | Verify Disk space total + run_once: true + ansible.builtin.assert: + that: + - "'Disk space total' in system_info" + - "system_info['Disk space total'] is not none" + - "system_info['Disk space total'] != 'N/A'" + - "system_info['Disk space total'] == '150.0 GB'" + fail_msg: "Test failed: Disk space total is not set correctly in system_info." + success_msg: "Test passed: Disk space total is set correctly in system_info." diff --git a/automation/molecule/tests/variables/asserts/vip_manager_package_repo.yml b/automation/molecule/tests/variables/asserts/vip_manager_package_repo.yml new file mode 100644 index 000000000..1276a97c7 --- /dev/null +++ b/automation/molecule/tests/variables/asserts/vip_manager_package_repo.yml @@ -0,0 +1,93 @@ +--- +# 🚀 These tasks aim to define and verify 'vip_manager_package_repo' for different OS +# 🎯 The objective is to ensure that the package repository URL is accurate + +# 🔄 Define the vip_manager_version +# This is the version of the VIP Manager that we will be using in our tests +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Common | Setting vip_manager_version + run_once: true + ansible.builtin.set_fact: + vip_manager_version: "3.0.0" + +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Common | Setting vip_manager_architecture_map + ansible.builtin.set_fact: + vip_manager_architecture_map: + amd64: x86_64 + x86_64: x86_64 + aarch64: arm64 + arm64: arm64 + when: vip_manager_architecture_map is not defined + +# =============================================== +# 💾 Start Debian-specific operations and tests +# =============================================== + +# 📁 Incorporate Debian-specific variables +# This task includes variables that are specific to Debian OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Debian | Incorporate Debian-specific Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../roles/common/defaults/Debian.yml + +# 🔄 Define the origin_vip_manager_package_repo for Debian +# This task defines the expected URL for the VIP Manager package for Debian OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Debian | Define origin_vip_manager_package_repo + run_once: true + ansible.builtin.set_fact: # yamllint disable
rule:line-length + origin_vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D_Linux_%7B%7B%20vip_manager_architecture_map[ansible_architecture]%20%7D%7D.deb" + +# 🖨️ Debug the vip_manager_package_repo for Debian +# This task prints the actual URL for the VIP Manager package for Debian OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Debian | Debug vip_manager_package_repo + run_once: true + ansible.builtin.debug: + var: vip_manager_package_repo + +# ✅ Validate the vip_manager_package_repo for Debian +# This task checks if the actual URL matches the expected URL for the VIP Manager package for Debian OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | Debian | Validate vip_manager_package_repo + run_once: true + ansible.builtin.assert: + that: + - vip_manager_package_repo == origin_vip_manager_package_repo + fail_msg: > + Assertion failed: vip_manager_package_repo is "{{ vip_manager_package_repo }}", + but expected "{{ origin_vip_manager_package_repo }}" + success_msg: "vip_manager_package_repo is correct" + +# =============================================== +# 🚀 Start RedHat-specific operations and tests +# =============================================== + +# 📁 Incorporate RedHat-specific variables +# This task includes variables that are specific to RedHat OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | RedHat | Incorporate RedHat-specific Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../roles/common/defaults/RedHat.yml + +# 🔄 Define the origin_vip_manager_package_repo for RedHat +# This task defines the expected URL for the VIP Manager package for RedHat OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | RedHat | Define origin_vip_manager_package_repo + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D_Linux_%7B%7B%20vip_manager_architecture_map[ansible_architecture]%20%7D%7D.rpm" + +# 🖨️ Debug the vip_manager_package_repo for RedHat +# This task prints the actual URL for the VIP Manager package for RedHat OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | RedHat | Debug vip_manager_package_repo + run_once: true + ansible.builtin.debug: + var: vip_manager_package_repo + +# ✅ Validate the vip_manager_package_repo for RedHat +# This task checks if the actual URL matches the expected URL for the VIP Manager package for RedHat OS +- name: Molecule.tests.variables.asserts.vip_manager_package_repo | RedHat | Validate vip_manager_package_repo + run_once: true + ansible.builtin.assert: + that: + - vip_manager_package_repo == origin_vip_manager_package_repo + fail_msg: > + Assertion failed: vip_manager_package_repo is "{{ vip_manager_package_repo }}", + but expected "{{ origin_vip_manager_package_repo }}" + success_msg: "vip_manager_package_repo is correct" diff --git a/automation/molecule/tests/variables/asserts/wal_g_cron_jobs.yml b/automation/molecule/tests/variables/asserts/wal_g_cron_jobs.yml new file mode 100644 index 000000000..78bd7eebd --- /dev/null +++ b/automation/molecule/tests/variables/asserts/wal_g_cron_jobs.yml @@ -0,0 +1,136 @@ +--- +# 🚀 These tasks aim to define and verify
'wal_g_cron_jobs' for different OS +# 🎯 The objective is to ensure accurate setup and validation of wal-g cron jobs + +# 📝 Validate the structure and content of wal_g_cron_jobs. We expect two jobs, each with specific attributes. +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Validate Structure and Content of wal_g_cron_jobs + run_once: true + ansible.builtin.assert: + that: + - wal_g_cron_jobs | length == 2 + - wal_g_cron_jobs[0].name == "WAL-G: Create daily backup" + - wal_g_cron_jobs[0].user == "postgres" + - wal_g_cron_jobs[0].file == "/etc/cron.d/walg" + - wal_g_cron_jobs[0].job == wal_g_backup_command | join('') + - wal_g_cron_jobs[1].name == "WAL-G: Delete old backups" + - wal_g_cron_jobs[1].user == "postgres" + - wal_g_cron_jobs[1].file == "/etc/cron.d/walg" + - wal_g_cron_jobs[1].job == wal_g_delete_command | join('') + fail_msg: "Test failed: wal_g_cron_jobs does not have the expected structure or content." + success_msg: "Test passed: wal_g_cron_jobs has the expected structure and content." + +# 📝 Validate the content of wal_g_backup_command and wal_g_delete_command. We expect specific commands for each job. +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Validate Content of wal_g_backup_command and wal_g_delete_command + run_once: true + ansible.builtin.assert: + that: + - wal_g_backup_command[0] == "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200" + - wal_g_backup_command[1] == " && {{ wal_g_path }} backup-push {{ postgresql_data_dir }} > {{ postgresql_log_dir }}/walg_backup.log 2>&1" + - wal_g_delete_command[0] == "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200" + - wal_g_delete_command[1] == " && {{ wal_g_path }} delete retain FULL 4 --confirm > {{ postgresql_log_dir }}/walg_delete.log 2>&1" + fail_msg: "Test failed: wal_g_backup_command or wal_g_delete_command do not have the expected content." + success_msg: "Test passed: wal_g_backup_command and wal_g_delete_command have the expected content." + +# ================================================== +# 💾 Start Debian-specific operations and tests +# ================================================== + +# 📁 Load Debian-specific variables for the tests +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Load Debian-specific Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../roles/common/defaults/Debian.yml + +# 🔄 Define the expected first wal_g_cron job for Debian +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Define Expected First wal_g_cron Job + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_wal_g_cron_jobs_create_job: "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200 && {{ wal_g_path }} backup-push {{ postgresql_data_dir }} > {{ postgresql_log_dir }}/walg_backup.log 2>&1" + +# 🖨️ Display the first wal_g_cron job for Debian for debugging purposes +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Debug First wal_g_cron Job + run_once: true + ansible.builtin.debug: + var: wal_g_cron_jobs[0].job + +# ✅ Verify that the first wal_g_cron job for Debian matches the expected job +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Verify First wal_g_cron Job + run_once: true + ansible.builtin.assert: + that: + - wal_g_cron_jobs[0].job == origin_wal_g_cron_jobs_create_job + fail_msg: "Test failed: wal_g_cron_jobs[0].job is not as expected." 
+ success_msg: "Test passed: wal_g_cron_jobs[0].job is as expected." + +# 🔄 Define the expected second wal_g_cron job for Debian +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Define Expected Second wal_g_cron Job + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_wal_g_cron_jobs_delete_job: "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200 && {{ wal_g_path }} delete retain FULL 4 --confirm > {{ postgresql_log_dir }}/walg_delete.log 2>&1" + +# 🖨️ Display the second wal_g_cron job for Debian for debugging purposes +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Debug Second wal_g_cron Job + run_once: true + ansible.builtin.debug: + var: wal_g_cron_jobs[1].job + +# ✅ Verify that the second wal_g_cron job for Debian matches the expected job +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | Debian | Verify Second wal_g_cron Job + run_once: true + ansible.builtin.assert: + that: + - wal_g_cron_jobs[1].job == origin_wal_g_cron_jobs_delete_job + fail_msg: "Test failed: wal_g_cron_jobs[1].job is not as expected." + success_msg: "Test passed: wal_g_cron_jobs[1].job is as expected." + +# ================================================== +# 🚀 Start RedHat-specific operations and tests +# ================================================== + +# 📁 Load RedHat-specific variables for the tests +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Load RedHat-specific Variables + run_once: true + ansible.builtin.include_vars: + file: ../../../../roles/common/defaults/RedHat.yml + +# 🔄 Define the expected first wal_g_cron job for RedHat +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Define Expected First wal_g_cron Job + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_wal_g_cron_jobs_create_job: "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200 && {{ wal_g_path }} backup-push {{ postgresql_data_dir }} > {{ postgresql_log_dir }}/walg_backup.log 2>&1" + +# 🖨️ Display the first wal_g_cron job for RedHat for debugging purposes +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Debug First wal_g_cron Job + run_once: true + ansible.builtin.debug: + var: wal_g_cron_jobs[0].job + +# ✅ Verify that the first wal_g_cron job for RedHat matches the expected job +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Verify First wal_g_cron Job + run_once: true + ansible.builtin.assert: + that: + - wal_g_cron_jobs[0].job == origin_wal_g_cron_jobs_create_job + fail_msg: "Test failed: wal_g_cron_jobs[0].job is not as expected." + success_msg: "Test passed: wal_g_cron_jobs[0].job is as expected." 
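Each job string is asserted to equal its command list joined with an empty separator (join('') in the tasks above), so the curl health check and the wal-g invocation become a single cron entry that only acts on the Patroni leader. A Python sketch of that concatenation; host, port, and paths are illustrative placeholders, not the role defaults:

# Sketch: wal_g_cron_jobs[0].job == ''.join(wal_g_backup_command).
# All values below are placeholders for the templated variables.
inventory_hostname = "10.0.0.1"
patroni_restapi_port = 8008
wal_g_path = "wal-g"
postgresql_data_dir = "/var/lib/postgresql/data"
postgresql_log_dir = "/var/log/postgresql"

wal_g_backup_command = [
    f"curl -I -s http://{inventory_hostname}:{patroni_restapi_port} | grep 200",
    f" && {wal_g_path} backup-push {postgresql_data_dir}"
    f" > {postgresql_log_dir}/walg_backup.log 2>&1",
]

job = "".join(wal_g_backup_command)  # the leader check gates the backup command
print(job)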
+ +# 🔄 Define the expected second wal_g_cron job for RedHat +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Define Expected Second wal_g_cron Job + run_once: true + ansible.builtin.set_fact: # yamllint disable rule:line-length + origin_wal_g_cron_jobs_delete_job: "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200 && {{ wal_g_path }} delete retain FULL 4 --confirm > {{ postgresql_log_dir }}/walg_delete.log 2>&1" + +# 🖨️ Display the second wal_g_cron job for RedHat for debugging purposes +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Debug Second wal_g_cron Job + run_once: true + ansible.builtin.debug: + var: wal_g_cron_jobs[1].job + +# ✅ Verify that the second wal_g_cron job for RedHat matches the expected job +- name: Molecule.tests.variables.asserts.wal_g_cron_jobs | RedHat | Verify Second wal_g_cron Job + run_once: true + ansible.builtin.assert: + that: + - wal_g_cron_jobs[1].job == origin_wal_g_cron_jobs_delete_job + fail_msg: "Test failed: wal_g_cron_jobs[1].job is not as expected." + success_msg: "Test passed: wal_g_cron_jobs[1].job is as expected." diff --git a/automation/molecule/tests/variables/main.yml b/automation/molecule/tests/variables/main.yml new file mode 100644 index 000000000..03982bb26 --- /dev/null +++ b/automation/molecule/tests/variables/main.yml @@ -0,0 +1,13 @@ +--- +# 🚀 This task aims to include all assert tasks for the main variables in Molecule tests +# 🎯 The objective is to ensure that all assert tasks are executed for comprehensive testing + +# 🔄 Including all assert tasks found in the 'asserts' directory +# For each .yml file in the 'asserts' directory, we include the tasks defined in the file +# This allows us to modularize our tests and keep our codebase organized +- name: Molecule.tests.variables.main | Include All Assert Tasks for Comprehensive Testing + run_once: true + ansible.builtin.include_tasks: "{{ molecule_tests_variables_main_file }}" + loop: "{{ lookup('fileglob', 'asserts/*.yml', wantlist=True) }}" + loop_control: + loop_var: molecule_tests_variables_main_file diff --git a/automation/pg_upgrade.yml b/automation/pg_upgrade.yml new file mode 100644 index 000000000..a96749017 --- /dev/null +++ b/automation/pg_upgrade.yml @@ -0,0 +1,269 @@ +--- +# TODO: +# - Citus support +- name: "pg_upgrade.yml | Upgrade PostgreSQL {{ pg_old_version }} to the new version {{ pg_new_version }}" + hosts: postgres_cluster + gather_facts: true + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + + - name: "[Prepare] Get Patroni Cluster Leader Node" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + changed_when: false + failed_when: false + environment: + no_proxy: "{{ inventory_hostname }}" + + # Stop, if Patroni is unavailable + - name: The Patroni cluster is unhealthy + ansible.builtin.fail: + msg: "Patroni is unavailable on {{ ansible_hostname }}. Please check the cluster status." 
+ changed_when: false + when: patroni_leader_result is undefined or patroni_leader_result.status == -1 + + - name: '[Prepare] Add host to group "primary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: primary + when: hostvars[item]['patroni_leader_result']['status'] == 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + + - name: '[Prepare] Add hosts to group "secondary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: secondary + when: hostvars[item]['patroni_leader_result']['status'] != 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + + - name: "Print Patroni Cluster info" + ansible.builtin.debug: + msg: + - "Cluster Name: {{ patroni_cluster_name }}" + - "Cluster Leader: {{ ansible_hostname }}" + when: inventory_hostname in groups['primary'] + tags: + - always + +- name: "(1/6) PRE-UPGRADE: Perform Pre-Checks" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Running Pre-Checks + ansible.builtin.include_role: + name: upgrade + tasks_from: pre_checks + tags: + - upgrade + - pre-checks + +- name: "(2/6) PRE-UPGRADE: Install new PostgreSQL packages" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: root + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Install packages + ansible.builtin.include_role: + name: upgrade + tasks_from: packages + tags: + - upgrade + - upgrade-check + - packages + +- name: "(3/6) PRE-UPGRADE: Initialize new db, schema compatibility check, and pg_upgrade --check" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Create Data directory and initdb + ansible.builtin.include_role: + name: upgrade + tasks_from: initdb + + # (optional) copy files specified in variable: + # 'copy_files_to_all_server' + - name: Copy files + ansible.builtin.include_role: + name: copy + + - name: Check Schema Compatibility + ansible.builtin.import_role: + name: upgrade + tasks_from: schema_compatibility + when: schema_compatibility_check | bool + + - name: Check pg_upgrade + ansible.builtin.import_role: + name: upgrade + tasks_from: upgrade_check + tags: + - upgrade + - upgrade-check + - schema-compatibility-check + +- name: "(4/6) PRE-UPGRADE: Prepare the Patroni configuration" + hosts: "primary:secondary" 
+ gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Patroni config + ansible.builtin.include_role: + name: upgrade + tasks_from: update_config + tags: + - upgrade + - update-config + +- name: "(5/6) UPGRADE: Upgrade PostgreSQL" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Enable maintenance mode + ansible.builtin.include_role: + name: upgrade + tasks_from: maintenance_enable + + - name: Stop Services + ansible.builtin.include_role: + name: upgrade + tasks_from: stop_services + + - name: Check 'Latest checkpoint location' + ansible.builtin.include_role: + name: upgrade + tasks_from: checkpoint_location + + - name: Upgrade Primary + ansible.builtin.include_role: + name: upgrade + tasks_from: upgrade_primary + + - name: Upgrade Secondary + ansible.builtin.include_role: + name: upgrade + tasks_from: upgrade_secondary + + # if pg_new_wal_dir is defined + - name: Create WAL dir symlink + ansible.builtin.include_role: + name: upgrade + tasks_from: custom_wal_dir + when: pg_new_wal_dir | length > 0 + + - name: Remove old cluster from DCS + ansible.builtin.include_role: + name: upgrade + tasks_from: dcs_remove_cluster + + - name: Start Services + ansible.builtin.include_role: + name: upgrade + tasks_from: start_services + + - name: Disable maintenance mode + ansible.builtin.include_role: + name: upgrade + tasks_from: maintenance_disable + tags: + - upgrade + +- name: "(6/6) POST-UPGRADE: Analyze a PostgreSQL database (update optimizer statistics) and Post-Upgrade tasks" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Analyze database + ansible.builtin.include_role: + name: upgrade + tasks_from: statistics + tags: analyze, statistics + + - name: Update extensions + ansible.builtin.include_role: + name: upgrade + tasks_from: extensions + when: update_extensions | bool + tags: update_extensions + + - name: Running Post-Checks + ansible.builtin.include_role: + name: upgrade + tasks_from: post_checks + + - name: Running Post-Upgrade tasks + ansible.builtin.include_role: + name: upgrade + tasks_from: post_upgrade + tags: + - upgrade + - post-upgrade diff --git a/automation/pg_upgrade_rollback.yml b/automation/pg_upgrade_rollback.yml new file mode 100644 index 000000000..dba57a8c0 --- /dev/null +++ b/automation/pg_upgrade_rollback.yml @@ -0,0 +1,55 @@ +--- +# This playbook performs a 
rollback of a PostgreSQL upgrade. +# It's designed to be used when a PostgreSQL upgrade hasn't been fully completed and the new version hasn't been started. +# The rollback operation is performed by starting the Patroni cluster with the old version of PostgreSQL using the same PGDATA. +# The playbook first checks the health of the current cluster, verifies the version of PostgreSQL, and ensures the new PostgreSQL is not running. +# If these checks pass, the playbook switches back to the old PostgreSQL paths and restarts the Patroni service. + +- name: "pg_upgrade_rollback.yml | Rollback the PostgreSQL upgrade" + hosts: postgres_cluster + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: '[Prepare] Add host to group "primary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: primary + # As Primary we specify the host in the 'master' group in the inventory file. + loop: "{{ groups['master'] }}" + changed_when: false + + - name: '[Prepare] Add hosts to group "secondary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: secondary + # As Secondary we specify the hosts in the 'replica' group in the inventory file. + loop: "{{ groups['replica'] }}" + changed_when: false + tags: + - always + +- name: "Perform Rollback" + hosts: "primary:secondary" + gather_facts: false + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + - name: Include upgrade variables + ansible.builtin.include_vars: "roles/common/defaults/upgrade.yml" + tasks: + - name: Running rollback.yml + ansible.builtin.include_role: + name: upgrade + tasks_from: rollback + tags: + - rollback diff --git a/automation/plugins/callback/json_log.py b/automation/plugins/callback/json_log.py new file mode 100644 index 000000000..a8e1b8d04 --- /dev/null +++ b/automation/plugins/callback/json_log.py @@ -0,0 +1,125 @@ +import json +import os +from datetime import datetime +from ansible.plugins.callback import CallbackBase + + +# This Ansible callback plugin logs playbook results in JSON format. +# The log file path can be specified using the environment variable ANSIBLE_JSON_LOG_FILE. +# The log level can be controlled via the environment variable ANSIBLE_JSON_LOG_LEVEL. +# Available log levels: INFO (default), DETAIL, and DEBUG. + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 1.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'json_log' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self): + super(CallbackModule, self).__init__() + self.log_file_path = os.getenv('ANSIBLE_JSON_LOG_FILE') + self.log_level = os.getenv('ANSIBLE_JSON_LOG_LEVEL', 'INFO').upper() + self.results_started = False + + if self.log_file_path: + self._display.display(f"JSON Log callback plugin initialized. 
Log file: {self.log_file_path}") + # Initialize the log file + with open(self.log_file_path, 'w') as log_file: + log_file.write('[\n') + + def _record_task_result(self, result): + if not self.log_file_path: + return + + # Build the basic result structure with task, host, and timestamp + base_result = { + 'time': datetime.now().isoformat(), + 'task': result._task.get_name(), + 'host': result._host.get_name() + } + + # Add item information if available + if '_ansible_item_label' in result._result: + base_result['item'] = result._result['_ansible_item_label'] + elif 'item' in result._result: + base_result['item'] = result._result['item'] + + # Extend the result based on the log level + if self.log_level == 'DEBUG': + full_result = {**base_result, **result._result} + self._write_result_to_file(full_result) + elif self.log_level == 'DETAIL': + detailed_result = { + 'changed': result._result.get('changed', False), + 'failed': result._result.get('failed', False), + 'msg': result._result.get('msg', ''), + 'stdout': result._result.get('stdout', ''), + 'stderr': result._result.get('stderr', '') + } + self._write_result_to_file({**base_result, **detailed_result}) + else: + basic_result = { + 'changed': result._result.get('changed', False), + 'failed': result._result.get('failed', False), + 'msg': result._result.get('msg', '') + } + self._write_result_to_file({**base_result, **basic_result}) + + def _write_result_to_file(self, result): + try: + with open(self.log_file_path, 'a') as log_file: + if self.results_started: + log_file.write(',\n') + self.results_started = True + json.dump(result, log_file, indent=4) + except IOError as e: + self._display.warning(f"Failed to write to log file {self.log_file_path}: {e}") + + def v2_runner_item_on_ok(self, result): + # Records the result of a successfully executed task item. + self._record_task_result(result) + + def v2_runner_item_on_failed(self, result): + # Records the result of a failed task item. + self._record_task_result(result) + + def v2_runner_item_on_skipped(self, result): + # Do not record the result of a skipped task item. + pass + + def v2_runner_on_ok(self, result): + # Records the result of a successfully executed task. + self._record_task_result(result) + + def v2_runner_on_failed(self, result, ignore_errors=False): + # Records the result of a failed task. + self._record_task_result(result) + + def v2_runner_on_unreachable(self, result): + # Records the result of a task that failed because the host was unreachable. + self._record_task_result(result) + + def v2_playbook_on_stats(self, stats): + # Closes the JSON array in the log file when the playbook execution is complete. 
+ if not self.log_file_path: + return + + summary = { + 'time': datetime.now().isoformat(), + 'summary': {}, + 'status': 'success' + } + + for host in stats.processed.keys(): + host_summary = stats.summarize(host) + summary['summary'][host] = host_summary + if host_summary['failures'] > 0 or host_summary['unreachable'] > 0: + summary['status'] = 'failed' + + try: + with open(self.log_file_path, 'a') as log_file: + if self.results_started: # avoid a leading comma if no task results were logged + log_file.write(',\n') + json.dump(summary, log_file, indent=4) + log_file.write('\n]\n') + except IOError as e: + self._display.warning(f"Failed to write to log file {self.log_file_path}: {e}") diff --git a/automation/remove_cluster.yml b/automation/remove_cluster.yml new file mode 100644 index 000000000..8dee6a740 --- /dev/null +++ b/automation/remove_cluster.yml @@ -0,0 +1,78 @@ +--- +- name: remove_cluster.yml | Remove PostgreSQL HA Cluster + hosts: postgres_cluster + become: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tasks: + - block: + - name: Stop Patroni service + ansible.builtin.service: + name: patroni + state: stopped + enabled: false + - name: Delete PostgreSQL database content + ansible.builtin.file: + path: "{{ postgresql_data_dir }}" + state: absent + when: remove_postgres | default(false) | bool + ignore_errors: true + - block: + - name: Delete PgBackRest repository + ansible.builtin.file: + # path: pgbackrest_conf global repo1-path + path: /var/lib/pgbackrest + state: absent + - name: Delete PgBackRest cron + ansible.builtin.file: + path: /etc/cron.d/pgbackrest + state: absent + when: pgbackrest_install | default(false) | bool + ignore_errors: true + +- name: remove_cluster.yml | Consul Cluster Play + hosts: consul_instances + become: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tasks: + - block: + - name: Stop Consul service + ansible.builtin.service: + name: consul + state: stopped + enabled: false + - name: Delete Consul content + ansible.builtin.file: + path: "{{ consul_data_path }}" + state: absent + - name: Delete Consul service + ansible.builtin.file: + path: /lib/systemd/system/consul.service + state: absent + when: remove_consul | default(false) | bool + ignore_errors: true + +- name: remove_cluster.yml | Etcd Cluster Play + hosts: etcd_cluster + become: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tasks: + - block: + - name: Stop Etcd service + ansible.builtin.service: + name: etcd + state: stopped + enabled: false + - name: Delete Etcd content + ansible.builtin.file: + path: "{{ etcd_data_dir }}/member" + state: absent + when: remove_etcd | default(false) | bool + ignore_errors: true diff --git a/automation/requirements.txt b/automation/requirements.txt new file mode 100644 index 000000000..d4cef3836 --- /dev/null +++ b/automation/requirements.txt @@ -0,0 +1,5 @@ +ansible==9.13.0 +boto3==1.37.18 +dopy==0.3.7 +google-auth==2.38.0 +hcloud==2.4.0 diff --git a/automation/requirements.yml b/automation/requirements.yml new file mode 100644 index 000000000..7553c0f4e --- /dev/null +++ b/automation/requirements.yml @@ -0,0 +1,22 @@ +--- +collections: + - name: amazon.aws + version: ">=9.3.0" + - name: google.cloud + version: ">=1.5.1" + - name: azure.azcollection + version: ">=3.3.1" + - name: community.digitalocean +
version: ">=1.27.0" + - name: hetzner.hcloud + version: ">=4.3.0" + - name: community.postgresql + version: ">=3.12.0" + - name: community.docker + version: ">=4.4.0" + - name: community.general + version: ">=10.4.0" + - name: ansible.posix + version: ">=1.6.2" + - name: ansible.utils + version: ">=5.1.2" diff --git a/automation/roles/add_repository/README.md b/automation/roles/add_repository/README.md new file mode 100644 index 000000000..21a88513b --- /dev/null +++ b/automation/roles/add_repository/README.md @@ -0,0 +1 @@ +# Ansible Role: add_repository diff --git a/automation/roles/add_repository/tasks/extensions.yml b/automation/roles/add_repository/tasks/extensions.yml new file mode 100644 index 000000000..f1edc3b47 --- /dev/null +++ b/automation/roles/add_repository/tasks/extensions.yml @@ -0,0 +1,61 @@ +--- +# Extension Auto-Setup: repository + +# TimescaleDB (if 'enable_timescale' is 'true') +- block: + # Debian based + - name: Add TimescaleDB repository + ansible.builtin.deb822_repository: + name: "timescaledb" + types: [deb] + uris: "https://packagecloud.io/timescale/timescaledb/%7B%7B%20ansible_distribution%20|%20lower%20%7D%7D" # amd64 and arm64 are supported + signed_by: "https://packagecloud.io/timescale/timescaledb/gpgkey" + suites: "{{ ansible_distribution_release }}" + components: [main] + state: present + enabled: true + when: ansible_os_family == "Debian" + + # RedHat based + - name: Add TimescaleDB repository + ansible.builtin.yum_repository: + name: "timescaledb" + description: "TimescaleDB Repository" + baseurl: "https://packagecloud.io/timescale/timescaledb/el/%7B%7B%20ansible_distribution_major_version%20%7D%7D/x86_64" # no arm64 support + gpgkey: "https://packagecloud.io/timescale/timescaledb/gpgkey" + gpgcheck: "no" + when: ansible_os_family == "RedHat" and ansible_architecture in ["x86_64", "amd64"] + environment: "{{ proxy_env | default({}) }}" + when: (enable_timescale | default(false) | bool) or (enable_timescaledb | default(false) | bool) + tags: add_repo, timescaledb, timescale + +# Citus (if 'enable_citus' is 'true') +- block: + # Debian based + - name: Add Citus repository + ansible.builtin.deb822_repository: + name: "citusdata" + types: [deb] + uris: "https://repos.citusdata.com/community/%7B%7B%20ansible_distribution%20|%20lower%20%7D%7D/" # no arm64 support (TODO) + suites: "{{ ansible_distribution_release }}" + components: [main] + signed_by: "https://repos.citusdata.com/community/gpgkey" + state: present + enabled: true + when: ansible_os_family == "Debian" and ansible_architecture in ["x86_64", "amd64"] + + # RedHat based + # TODO: Tests have shown that distributions such as Rocky Linux, AlmaLinux, Oracle Linux, and CentOS Stream are not yet supported. 
+ # - name: Add Citus repository + # ansible.builtin.yum_repository: + # name: "citusdata" + # description: "Citus Repository" + # baseurl: "https://repos.citusdata.com/community/yum/%7B%7B%20ansible_distribution_major_version%20%7D%7D/x86_64" + # gpgkey: "https://repos.citusdata.com/community/gpgkey" + # gpgcheck: "no" + # when: ansible_os_family == "RedHat" + environment: "{{ proxy_env | default({}) }}" + when: + - enable_citus | default(false) | bool + - postgresql_version | int >= 11 + tags: add_repo, citus diff --git a/automation/roles/add_repository/tasks/main.yml b/automation/roles/add_repository/tasks/main.yml new file mode 100644 index 000000000..ade841fc2 --- /dev/null +++ b/automation/roles/add_repository/tasks/main.yml @@ -0,0 +1,167 @@ +--- +- block: # Debian/Ubuntu + - name: Make sure the python3-debian package is present + ansible.builtin.apt: + pkg: + - python3-debian + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + + - name: Add repository + ansible.builtin.deb822_repository: + name: "{{ item.name | default(item.repo.split('//')[1].split('/')[0] | replace('.', '-')) }}" + types: "{{ item.types | default(['deb']) }}" + uris: "{{ item.uris | default(item.repo.split(' ')[1]) }}" + signed_by: "{{ item.signed_by | default(item.key | default(omit)) }}" + suites: "{{ item.suites | default(item.repo.split(' ')[2]) }}" + components: "{{ item.components | default(item.repo.split(' ')[3]) }}" + enabled: "{{ item.enabled | default(true) }}" + state: present + loop: "{{ apt_repository }}" + when: apt_repository | length > 0 + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: installation_method == "repo" and ansible_os_family == "Debian" + tags: add_repo + +- block: # RedHat/CentOS + - name: Add repository GPG key + ansible.builtin.command: "rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{{ ansible_distribution_major_version }}" + when: ansible_distribution == "AlmaLinux" + + - name: Add repository + ansible.builtin.yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + gpgkey: "{{ item.gpgkey | default(omit) }}" + gpgcheck: "{{ item.gpgcheck | default(true) }}" + enabled: "{{ item.enabled | default(true) }}" + loop: "{{ yum_repository | flatten(1) }}" + when: yum_repository | length > 0 + + # Install Epel Repository + - name: Get epel-release-latest rpm package + ansible.builtin.get_url: + url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-%7B%7B%20ansible_distribution_major_version%20%7D%7D.noarch.rpm" + dest: /tmp/ + timeout: 30 + validate_certs: false + when: install_epel_repo|bool + tags: install_epel_repo + + - name: Install EPEL repository + ansible.builtin.package: + name: "/tmp/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm" + state: present + disable_gpg_check: true + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: install_epel_repo|bool + tags: install_epel_repo + + # Add repository to install dependencies for postgresql-devel package + - block: + # PowerTools repository + - name: Enable PowerTools repository + ansible.builtin.command: dnf config-manager --set-enabled "[Pp]ower[Tt]ools" + when: + - ansible_distribution_major_version is version("8", "==") + - ansible_distribution != 
"OracleLinux" + - ansible_distribution != "RedHat" + + # CodeReady Linux Builder (crb) repository + - name: Enable CodeReady Linux Builder (crb) repository + ansible.builtin.command: dnf config-manager --set-enabled crb + when: + - ansible_distribution_major_version is version("9", ">=") + - ansible_distribution != "OracleLinux" + + # CodeReady Builder repository for OracleLinux + - name: Enable CodeReady Builder repository + ansible.builtin.command: dnf config-manager --enable ol{{ ansible_distribution_major_version }}_codeready_builder + when: + - ansible_distribution == "OracleLinux" + - ansible_distribution_major_version is version("8", ">=") + vars: + pg_devel_package: "postgresql{{ postgresql_version | string | replace('.', '') }}-devel" + when: + - pg_devel_package in postgresql_packages + + # Install PostgreSQL Repository + - name: Get pgdg-redhat-repo-latest.noarch.rpm + ansible.builtin.get_url: # yamllint disable rule:line-length + url: "https://download.postgresql.org/pub/repos/yum/reporpms/EL-%7B%7B%20ansible_distribution_major_version%20%7D%7D-%7B%7B%20pgdg_architecture_map[ansible_architecture]%20%7D%7D/pgdg-redhat-repo-latest.noarch.rpm" + dest: /tmp/ + timeout: 30 + validate_certs: false + when: install_postgresql_repo|bool + tags: install_postgresql_repo + + - name: Install PostgreSQL repository + ansible.builtin.package: + name: /tmp/pgdg-redhat-repo-latest.noarch.rpm + state: present + disable_gpg_check: true + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: install_postgresql_repo|bool + tags: install_postgresql_repo + + # Enable Debuginfo repository + - block: + - name: Enable PostgreSQL debuginfo repository + ansible.builtin.shell: | + set -o pipefail; + sed -i '/\[pgdg[0-9]*-debuginfo\]/,/^$/ s/enabled=0/enabled=1/' {{ pgdg_redhat_repo_path }} + sed -i '/\[pgdg[0-9]*-debuginfo\]/,/^$/ s/gpgcheck=1/gpgcheck=0/' {{ pgdg_redhat_repo_path }} + vars: + pgdg_redhat_repo_path: "/etc/yum.repos.d/pgdg-redhat-all.repo" + when: ansible_architecture in ["x86_64", "amd64"] + + # Check if the repository entry exists in the file + - name: Check if pgdg{{ postgresql_version }}-debuginfo exists in repo file + ansible.builtin.lineinfile: + path: "{{ pgdg_redhat_repo_path }}" + regexp: '^\[pgdg{{ postgresql_version }}-debuginfo\]' + state: absent + check_mode: true + changed_when: false + register: repo_check + vars: + pgdg_redhat_repo_path: "/etc/yum.repos.d/pgdg-redhat-all.repo" + + # If the entry does not exist, try to add the repository + - name: Add pgdg{{ postgresql_version }}-debuginfo repo if not present + ansible.builtin.yum_repository: # yamllint disable rule:line-length + name: "pgdg{{ postgresql_version }}-debuginfo" + description: "PostgreSQL {{ postgresql_version }} for RHEL {{ ansible_distribution_major_version }} - {{ pgdg_architecture_map[ansible_architecture] }} - Debuginfo" + baseurl: "https://download.postgresql.org/pub/repos/yum/debug/%7B%7B%20postgresql_version%20%7D%7D/redhat/rhel-%7B%7B%20ansible_distribution_major_version%20%7D%7D-%7B%7B%20pgdg_architecture_map[ansible_architecture]%20%7D%7D/" + gpgcheck: false + enabled: true + when: repo_check.found == 0 + when: debuginfo_package in postgresql_packages + vars: + debuginfo_package: "postgresql{{ postgresql_version }}-debuginfo" + tags: install_postgresql_repo, debuginfo + environment: "{{ proxy_env | default({}) }}" + when: installation_method == "repo" and ansible_os_family == "RedHat" + tags: add_repo + +- name: Extensions repository + 
ansible.builtin.import_tasks: extensions.yml + when: installation_method == "repo" diff --git a/automation/roles/authorized_keys/README.md b/automation/roles/authorized_keys/README.md new file mode 100644 index 000000000..ef8a23034 --- /dev/null +++ b/automation/roles/authorized_keys/README.md @@ -0,0 +1 @@ +# Ansible Role: authorized_keys diff --git a/automation/roles/authorized_keys/defaults/main.yml b/automation/roles/authorized_keys/defaults/main.yml new file mode 100644 index 000000000..d8db2bdfe --- /dev/null +++ b/automation/roles/authorized_keys/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ssh_public_keys: [] diff --git a/automation/roles/authorized_keys/tasks/main.yml b/automation/roles/authorized_keys/tasks/main.yml new file mode 100644 index 000000000..80d5b4a63 --- /dev/null +++ b/automation/roles/authorized_keys/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- block: + - name: Get system username + become: false + ansible.builtin.command: whoami + register: system_user + changed_when: false + + - name: "Add public keys to ~{{ system_user.stdout }}/.ssh/authorized_keys" + ansible.posix.authorized_key: + user: "{{ system_user.stdout }}" + key: "{{ item }}" + state: present + loop: '{{ ssh_public_keys_list | map(''replace'', ''"'', '''') | map(''replace'', "''", "") | list }}' + vars: + ssh_public_keys_list: >- + {{ + (ssh_public_keys + | replace('\n', ',') + | split(',') + | map('trim') + | list) + if ssh_public_keys is string else ssh_public_keys + }} + when: + - ssh_public_keys is defined + - ssh_public_keys | length > 0 + tags: ssh_public_keys diff --git a/automation/roles/cloud_resources/README.md b/automation/roles/cloud_resources/README.md new file mode 100644 index 000000000..d808204d6 --- /dev/null +++ b/automation/roles/cloud_resources/README.md @@ -0,0 +1 @@ +# Ansible Role: cloud_resources diff --git a/automation/roles/cloud_resources/defaults/main.yml b/automation/roles/cloud_resources/defaults/main.yml new file mode 100644 index 000000000..9cf5ad95f --- /dev/null +++ b/automation/roles/cloud_resources/defaults/main.yml @@ -0,0 +1,74 @@ +# yamllint disable rule:line-length +--- +cloud_provider: "{{ provision | default('') }}" # Specifies the Cloud provider for server creation. Available options: 'aws', 'gcp', 'azure', 'digitalocean', 'hetzner'. +state: present # Set to 'present' to create a server, 'absent' to delete. + +server_count: "{{ servers_count | default(3) }}" # Number of servers in the cluster. +server_name: "{{ patroni_cluster_name }}-pgnode" # (optional) If not provided, a name will be auto-generated. Servers will be automatically named with suffixes 01, 02, 03, etc. +server_type: "" # (required) Server type. +server_image: "" # (required) OS image for the server. For Azure, use variables 'azure_vm_image_offer', 'azure_vm_image_publisher', 'azure_vm_image_sku', 'azure_vm_image_version' instead of variable 'server_image' +server_location: "" # (required) Server location or region. +server_network: "" # (optional) If provided, the server will be added to this network (needs to be created beforehand). +server_spot: false # Spot instance. Applicable for AWS, GCP, Azure. + +volume_type: "" # Volume type. Defaults: 'gp3' for AWS, 'pd-ssd' for GCP, 'StandardSSD_LRS' for Azure. +volume_size: 100 # Storage size for the data directory (in gigabytes). +system_volume_type: "" # System volume type. Defaults: 'gp3' for AWS, 'pd-ssd' for GCP, 'StandardSSD_LRS' for Azure. +system_volume_size: 100 # System volume size (in gigabytes). Applicable for AWS, GCP, Azure. 
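+ # A minimal example of overriding these defaults for an AWS deployment (all values below are hypothetical placeholders, not recommendations): + # cloud_provider: "aws" + # server_type: "m5.large" + # server_image: "ami-0123456789abcdef0" + # server_location: "eu-west-1" + # volume_size: 200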
+ + ssh_key_name: "" # Name of the SSH key to be added to the server. + # Note: If not provided, all SSH keys available in the cloud will be added (applicable to DigitalOcean, Hetzner). + ssh_key_content: "" # (optional) If provided, the public key content will be added to the cloud (directly to the server for GCP). + + # Firewall / Security Group + cloud_firewall: true # Specify 'false' if you don't want to configure Firewall rules, or want to manage them yourself. + + ssh_public_access: true # Allow public SSH access (required for deployment from the public network). + ssh_public_allowed_ips: "" # (comma-separated list of IP addresses in CIDR format) If empty, then public access is allowed for any IP address. + netdata_public_access: true # Allow access to the Netdata monitoring from the public network (if 'netdata_install' is 'true'). + netdata_public_allowed_ips: "" # (comma-separated list of IP addresses in CIDR format) If empty, then public access is allowed for any IP address. + database_public_access: false # Allow access to the database from the public network. + database_public_allowed_ips: "" # (comma-separated list of IP addresses in CIDR format) If empty, then public access is allowed for any IP address.
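+ # For example (placeholder addresses from the documentation ranges), public access can be narrowed to specific networks: + # ssh_public_allowed_ips: "203.0.113.0/24,198.51.100.10/32" + # netdata_public_allowed_ips: "203.0.113.0/24"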
+ + # Load balancer + cloud_load_balancer: true # Create a Load Balancer in the Cloud. + + # Backups (if 'pgbackrest_install' or 'wal_g_install' is 'true') + aws_s3_bucket_create: true # if 'cloud_provider=aws' + aws_s3_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the S3 bucket. Bucket naming rules: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html + aws_s3_bucket_region: "{{ server_location }}" # The AWS region to use. + aws_s3_bucket_object_lock_enabled: false # Whether S3 Object Lock is enabled. + aws_s3_bucket_encryption: "AES256" # Describes the default server-side encryption to apply to new objects in the bucket. Choices: "AES256", "aws:kms" + aws_s3_bucket_block_public_acls: true # Sets BlockPublicAcls value. + aws_s3_bucket_ignore_public_acls: true # Sets IgnorePublicAcls value. + aws_s3_bucket_absent: false # Allows the S3 bucket to be deleted when cluster servers are deleted with 'state=absent'. + + gcp_bucket_create: true # if 'cloud_provider=gcp' + gcp_bucket_name: "{{ patroni_cluster_name }}-backup" # Name of the GCS bucket. + gcp_bucket_storage_class: "MULTI_REGIONAL" # The bucket's default storage class. Values include: MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, DURABLE_REDUCED_AVAILABILITY. + gcp_bucket_default_object_acl: "projectPrivate" # Apply a predefined set of default object access controls to this bucket. + gcp_bucket_absent: false # Allows the GCS bucket to be deleted when cluster servers are deleted with 'state=absent'. + + azure_blob_storage_create: true # if 'cloud_provider=azure' + azure_blob_storage_name: "{{ patroni_cluster_name }}-backup" # Name of a blob container within the storage account. + azure_blob_storage_blob_type: "block" # Type of blob object. Values include: block, page. + azure_blob_storage_account_name: "{{ patroni_cluster_name | lower | replace('-', '') | truncate(24, true, '') }}" # Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only. + azure_blob_storage_account_type: "Standard_RAGRS" # Type of storage account. Values include: Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_ZRS, Standard_RAGZRS, Standard_GZRS, Premium_LRS, Premium_ZRS. + azure_blob_storage_account_kind: "BlobStorage" # The kind of storage. Values include: Storage, StorageV2, BlobStorage, BlockBlobStorage, FileStorage. + azure_blob_storage_account_access_tier: "Hot" # The access tier for this storage account. Required when kind=BlobStorage. + azure_blob_storage_account_public_network_access: "Enabled" # Allow public network access to the Storage Account so that the Blob Storage container can be created. + azure_blob_storage_account_allow_blob_public_access: false # Disallow public anonymous access. + azure_blob_storage_absent: false # Allows the Azure Blob Storage to be deleted when cluster servers are deleted with 'state=absent'. + + digital_ocean_spaces_create: true # if 'cloud_provider=digitalocean' + digital_ocean_spaces_name: "{{ patroni_cluster_name }}-backup" # Name of the Spaces Object Storage (S3 bucket). + digital_ocean_spaces_region: "nyc3" # The region to create the Space in. + digital_ocean_spaces_absent: false # Allows the Spaces Object Storage to be deleted when cluster servers are deleted with 'state=absent'. + + hetzner_object_storage_create: true # if 'cloud_provider=hetzner' + hetzner_object_storage_name: "{{ patroni_cluster_name }}-backup" # Name of the Object Storage (S3 bucket). + hetzner_object_storage_region: "{{ server_location }}" # The region where the Object Storage (S3 bucket) will be created. + hetzner_object_storage_endpoint: "https://{{ hetzner_object_storage_region }}.your-objectstorage.com" + hetzner_object_storage_access_key: "" # (required) Object Storage ACCESS KEY + hetzner_object_storage_secret_key: "" # (required) Object Storage SECRET KEY + hetzner_object_storage_absent: false # Allows the Object Storage to be deleted when cluster servers are deleted with 'state=absent'. diff --git a/automation/roles/cloud_resources/tasks/aws.yml b/automation/roles/cloud_resources/tasks/aws.yml new file mode 100644 index 000000000..2ed5326a0 --- /dev/null +++ b/automation/roles/cloud_resources/tasks/aws.yml @@ -0,0 +1,543 @@ +--- +# Dependencies +- name: Install Python dependencies + block: + - name: Ensure that 'python3-pip' package is present on controlling host + ansible.builtin.command: which pip3 + register: pip3_check + failed_when: false + changed_when: false + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - pip3_check.rc != 0 + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_os_family == "Debian" + + - name: Install 'python3-pip' package on controlling host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 10 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_distribution != "MacOSX" + + - name: Ensure that 'boto3' dependency is present on controlling host + ansible.builtin.pip: + name: boto3 + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + delegate_to: 127.0.0.1 + run_once: true + +# SSH key +- block: + # Delete the temporary ssh key from the cloud (if it exists) + - name: "AWS: Remove temporary SSH key '{{ ssh_key_name }}' from cloud (if any)" + amazon.aws.ec2_key: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ ssh_key_name }}" + region: "{{ 
server_location }}" + state: absent + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + + # if ssh_key_name and ssh_key_content is specified, add this ssh key to the cloud + - name: "AWS: Add SSH key '{{ ssh_key_name }}' to cloud" + amazon.aws.ec2_key: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ ssh_key_name }}" + key_material: "{{ ssh_key_content }}" + region: "{{ server_location }}" + state: present + register: ssh_key_result + when: + - ssh_key_name | length > 0 + - ssh_key_content | length > 0 + when: state == 'present' + +# Create (if state is present) +- block: + # if server_network is specified, get vpc id for this subnet + - block: + - name: "AWS: Gather information about VPC for '{{ server_network }}'" + amazon.aws.ec2_vpc_subnet_info: + region: "{{ server_location }}" + subnet_ids: "{{ server_network }}" + register: custom_vpc_subnet_info + + - name: "Set variable: vpc_id" + ansible.builtin.set_fact: + vpc_id: "{{ custom_vpc_subnet_info.subnets[0].vpc_id }}" + when: server_network | length > 0 + + # if server_network is not specified, use default vpc subnet + - block: + - name: "AWS: Gather information about default VPC" + amazon.aws.ec2_vpc_net_info: + region: "{{ server_location }}" + filters: + "is-default": true + register: default_vpc_info + + - name: "AWS: Gather information about VPC subnet for default VPC" + amazon.aws.ec2_vpc_subnet_info: + region: "{{ server_location }}" + filters: + vpc-id: "{{ default_vpc_info.vpcs[0].id }}" + register: default_vpc_subnet_info + + - name: "Set variable: vpc_id" + ansible.builtin.set_fact: + vpc_id: "{{ default_vpc_info.vpcs[0].id }}" + + - name: "Set variable: server_network" + ansible.builtin.set_fact: + server_network: "{{ default_vpc_subnet_info.subnets[0].id }}" + when: server_network | length < 1 + + # Security Group (Firewall) + - name: "AWS: Create or modify Security Group" + amazon.aws.ec2_security_group: + name: "{{ patroni_cluster_name }}-security-group" + state: present + description: "Security Group for Postgres cluster" + vpc_id: "{{ vpc_id }}" + region: "{{ server_location }}" + rules: "{{ rules }}" + vars: + vpc_subnet_info: "{{ custom_vpc_subnet_info if custom_vpc_subnet_info.subnets | default('') | length > 0 else default_vpc_subnet_info }}" + rules: >- + {{ + ([ + { + 'rule_desc': 'SSH public access', + 'proto': 'tcp', + 'ports': [ansible_ssh_port | default(22)], + 'cidr_ip': ssh_public_allowed_ips | default('0.0.0.0/0', true) | split(',') + } + ] if ssh_public_access | bool else []) + + ([ + { + 'rule_desc': 'Netdata public access', + 'proto': 'tcp', + 'ports': [netdata_port | default('19999')], + 'cidr_ip': netdata_public_allowed_ips | default('0.0.0.0/0', true) | split(',') + } + ] if netdata_install | bool and netdata_public_access | bool else []) + + ([ + { + 'rule_desc': 'Database public access', + 'proto': 'tcp', + 'ports': + ([ + haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003') + ] if with_haproxy_load_balancing | bool else []) + + ([ + pgbouncer_listen_port | default('6432') + ] if not with_haproxy_load_balancing | bool and pgbouncer_install | bool else []) + + ([ + postgresql_port | default('5432') + ] if not with_haproxy_load_balancing | bool and not pgbouncer_install | bool else 
[]), + 'cidr_ip': database_public_allowed_ips | default('0.0.0.0/0', true) | split(',') + } + ] if database_public_access | bool else []) + + ([{ + 'rule_desc': 'Netdata internal access', + 'proto': 'tcp', + 'ports': [netdata_port | default('19999')], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block }] if netdata_install | bool else []) + + ([{ + 'rule_desc': 'PgBouncer internal access', + 'proto': 'tcp', + 'ports': [pgbouncer_listen_port | default('6432')], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block }] if pgbouncer_install | bool else []) + + ([{ + 'rule_desc': 'PostgreSQL internal access', + 'proto': 'tcp', + 'ports': [postgresql_port | default('5432')], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block }]) + + ([{ + 'rule_desc': 'Patroni REST API internal access', + 'proto': 'tcp', + 'ports': [patroni_restapi_port | default('8008')], 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block }]) + + ([{ + 'rule_desc': 'HAProxy internal access', + 'proto': 'tcp', + 'ports': [ + haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003'), + haproxy_listen_port.stats | default('7000') + ], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block + }] if with_haproxy_load_balancing | bool else []) + + ([{ + 'rule_desc': 'etcd internal access', + 'proto': 'tcp', + 'ports': [ + etcd_client_port | default('2379'), + etcd_peer_port | default('2380') + ], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block + }] if dcs_type == 'etcd' else []) + + ([{ + 'rule_desc': 'Consul internal access', + 'proto': 'tcp', + 'ports': [ + consul_ports_dns | default('8600'), + consul_ports_http | default('8500'), + consul_ports_rpc | default('8400'), + consul_ports_serf_lan | default('8301'), + consul_ports_serf_wan | default('8302'), + consul_ports_server | default('8300') + ], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block + }] if dcs_type == 'consul' else []) + + [{ + 'rule_desc': 'SSH internal access', + 'proto': 'tcp', + 'ports': [ansible_ssh_port | default(22)], + 'cidr_ip': vpc_subnet_info.subnets[0].cidr_block + }] + }} + register: ec2_security_group_result + when: cloud_firewall | bool + + # Server and volume + - name: "AWS: Create or modify EC2 instance" + amazon.aws.ec2_instance: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: present + instance_type: "{{ server_type }}" + image_id: "{{ server_image }}" + key_name: "{{ ssh_key_name }}" + region: "{{ server_location }}" + security_groups: "{{ ([] if not cloud_firewall | bool else [patroni_cluster_name + '-security-group']) }}" + vpc_subnet_id: "{{ server_network }}" + network_interfaces: + - assign_public_ip: true + delete_on_termination: true + volumes: + - device_name: /dev/sda1 + ebs: + volume_type: "{{ system_volume_type | default('gp3', true) }}" + volume_size: "{{ system_volume_size | default(80) | int }}" + delete_on_termination: true + - device_name: /dev/sdb + ebs: + volume_type: "{{ volume_type | default('gp3', true) }}" + volume_size: "{{ volume_size | int }}" + delete_on_termination: true + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: server_result + until: + - server_result.instances[0].public_ip_address is defined + - 
server_result.instances[0].public_ip_address | length > 0 + retries: 3 + delay: 10 + when: not server_spot | default(aws_ec2_spot_instance | default(false)) | bool + + # Spot instance (if 'server_spot' is 'true') + - block: + - name: "AWS: Gather information about EC2 Spot instances" + amazon.aws.ec2_instance_info: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + region: "{{ server_location }}" + filters: + instance-lifecycle: "spot" + instance-type: "{{ server_type }}" + image-id: "{{ server_image }}" + instance-state-name: ["pending", "running", "shutting-down", "stopping", "stopped"] + "tag:Name": "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: ec2_spot_instance_info + + # if the spot instances do not exist yet, create a request for them + - name: "AWS: Create a request for EC2 Spot instance" + amazon.aws.ec2_spot_instance: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + region: "{{ server_location }}" + state: present + launch_specification: + instance_type: "{{ server_type }}" + image_id: "{{ server_image }}" + key_name: "{{ ssh_key_name }}" + network_interfaces: + - subnet_id: "{{ server_network }}" + groups: "{{ ec2_security_group_result.group_id }}" + associate_public_ip_address: true + delete_on_termination: true + device_index: 0 + block_device_mappings: + - device_name: /dev/sda1 + ebs: + volume_type: "{{ volume_type | default('gp3', true) }}" + volume_size: 100 # TODO: use 'system_volume_size' variable (https://github.com/ansible-collections/amazon.aws/issues/1949) + delete_on_termination: true + - device_name: /dev/sdb + ebs: + volume_type: "{{ volume_type | default('gp3', true) }}" + volume_size: 100 # TODO: use 'volume_size' variable (https://github.com/ansible-collections/amazon.aws/issues/1949) + delete_on_termination: true + tags: + Name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + loop: "{{ ec2_spot_instance_info.results }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: ec2_spot_request_result + when: item.instances[0] | default('') | length < 1 + + - name: "AWS: Rename the EC2 Spot instance" + amazon.aws.ec2_instance: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + region: "{{ server_location }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + filters: + spot-instance-request-id: "{{ item.spot_request.spot_instance_request_id }}" + loop: "{{ ec2_spot_request_result.results }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: ec2_spot_instance_result + until: + - ec2_spot_instance_result.instances[0].public_ip_address is defined + - ec2_spot_instance_result.instances[0].public_ip_address | length > 0 + retries: 3 + delay: 10 + when: item.spot_request.spot_instance_request_id is defined
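+ # Note: in the spot-instance flow above, existing spot instances are discovered first and a request is submitted only for those that are missing; the new instances are then renamed to match the cluster naming scheme, and the two tasks below set 'server_result' from whichever lookup actually produced the instances.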
+ + # if the spot instances were created just now + - name: "Set variable: server_result" + ansible.builtin.set_fact: + server_result: "{{ ec2_spot_instance_result }}" + when: ec2_spot_instance_result.changed | default(false) + + # if the spot instances already existed + - name: "Set variable: server_result" + ansible.builtin.set_fact: + server_result: "{{ ec2_spot_instance_info }}" + when: not ec2_spot_instance_result.changed | default(false) + when: server_spot | default(aws_ec2_spot_instance | default(false)) | bool + + # Load Balancer (ELB) + - name: "AWS: Create or modify Elastic Load Balancer (ELB)" + amazon.aws.elb_classic_lb: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + region: "{{ server_location }}" + security_group_ids: + - "{{ ec2_security_group_result.group_id }}" + subnets: + - "{{ server_network }}" + instance_ids: "{{ server_result.results | map(attribute='instances') | map('first') | map(attribute='instance_id') }}" + purge_instance_ids: true + listeners: + - protocol: tcp + load_balancer_port: "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + instance_port: "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + health_check: + ping_protocol: "http" + ping_port: "{{ patroni_restapi_port }}" + ping_path: "/{{ item }}" + interval: 5 + timeout: 2 + unhealthy_threshold: 2 + healthy_threshold: 3 + idle_timeout: 600 + scheme: "{{ 'internet-facing' if database_public_access | bool else 'internal' }}" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + register: aws_elb_classic_lb + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + # S3 bucket (Backups) + - name: "AWS: Create S3 bucket '{{ aws_s3_bucket_name }}'" + amazon.aws.s3_bucket: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ aws_s3_bucket_name }}" + region: "{{ aws_s3_bucket_region }}" + object_lock_enabled: "{{ aws_s3_bucket_object_lock_enabled }}" + encryption: "{{ aws_s3_bucket_encryption }}" + public_access: + block_public_acls: "{{ aws_s3_bucket_block_public_acls }}" + ignore_public_acls: "{{ aws_s3_bucket_ignore_public_acls }}" + state: present + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - aws_s3_bucket_create | bool + when: state == 'present' + +- name: Wait for host to be available via SSH + ansible.builtin.wait_for: + host: "{{ item.instances[0].public_ip_address }}" + port: 22 + delay: 5 + timeout: 300 + loop: "{{ server_result.results }}" + loop_control: + label: "{{ item.instances[0].public_ip_address | default('N/A') }}" + when: + - server_result.results is defined + - item.instances is defined + +# Info +- name: Server info + ansible.builtin.debug: + msg: + id: "{{ item.instances[0].instance_id }}" + name: "{{ item.instances[0].tags.Name }}" + image: "{{ item.instances[0].image_id }}" + type: "{{ item.instances[0].instance_type }}" + volume_size: "{{ volume_size }} GB" + public_ip: "{{ item.instances[0].public_ip_address }}" + private_ip: "{{ item.instances[0].private_ip_address }}" + loop: "{{ server_result.results }}" + loop_control: + label: "{{ item.instances[0].public_ip_address | default('N/A') }}" + when: + - server_result.results is defined + - item.instances is defined + +# Inventory +- block: + - name: "Inventory | Initialize ip_addresses variable" +
ansible.builtin.set_fact: + ip_addresses: [] + + - name: "Inventory | Extract IP addresses" + ansible.builtin.set_fact: + ip_addresses: >- + {{ ip_addresses + + [{'public_ip': item.instances[0].public_ip_address, + 'private_ip': item.instances[0].private_ip_address}] + }} + loop: "{{ server_result.results | selectattr('instances', 'defined') }}" + loop_control: + label: "public_ip: {{ item.instances[0].public_ip_address }}, private_ip: {{ item.instances[0].private_ip_address }}" + + - name: "Inventory | Generate in-memory inventory" + ansible.builtin.import_tasks: inventory.yml + when: + - server_result.results is defined + - server_result.results | selectattr('instances', 'defined') + +# Delete the temporary ssh key from the cloud after creating the EC2 instance +- name: "AWS: Remove temporary SSH key '{{ ssh_key_name }}' from cloud" + amazon.aws.ec2_key: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ ssh_key_name }}" + region: "{{ server_location }}" + state: absent + register: ssh_key_result + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + +# Delete (if state is absent) +- block: + - name: "AWS: Delete EC2 instance" + amazon.aws.ec2_instance: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + region: "{{ server_location }}" + state: absent + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + + - name: "AWS: Delete Elastic Load Balancer (ELB)" + amazon.aws.elb_classic_lb: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + region: "{{ server_location }}" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "AWS: Delete Security Group" + amazon.aws.ec2_security_group: + name: "{{ patroni_cluster_name }}-security-group" + region: "{{ server_location }}" + state: absent + register: ec2_security_group_delete + until: ec2_security_group_delete is success + delay: 10 + retries: 3 + + - name: "AWS: Delete S3 bucket '{{ aws_s3_bucket_name }}'" + amazon.aws.s3_bucket: + access_key: "{{ lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID') }}" + secret_key: "{{ lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY') }}" + name: "{{ aws_s3_bucket_name }}" + region: "{{ aws_s3_bucket_region }}" + state: absent + force: true + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - aws_s3_bucket_absent | default(false) | bool + when: state == 'absent'
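# Note: the 'inventory.yml' tasks imported above are not included in this diff. A minimal sketch of how such an in-memory inventory is typically generated from the collected 'ip_addresses' list (the group name below is hypothetical): # - name: "Inventory | Add servers to in-memory inventory" # ansible.builtin.add_host: # name: "{{ item.public_ip }}" # groups: postgres_cluster # hypothetical group name # loop: "{{ ip_addresses }}"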
diff --git a/automation/roles/cloud_resources/tasks/azure.yml b/automation/roles/cloud_resources/tasks/azure.yml new file mode 100644 index 000000000..24db50957 --- /dev/null +++ b/automation/roles/cloud_resources/tasks/azure.yml @@ -0,0 +1,594 @@ +--- +# Dependencies +- name: Install Python dependencies + block: + - name: Ensure that 'python3-pip' package is present on controlling host + ansible.builtin.command: which pip3 + register: pip3_check + failed_when: false + changed_when: false + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - pip3_check.rc != 0 + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_os_family == "Debian" + + - name: Install 'python3-pip' package on controlling host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 10 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_distribution != "MacOSX" + + - name: Ensure that Azure collection is installed on controlling host + ansible.builtin.command: ansible-galaxy collection list azure.azcollection + changed_when: false + failed_when: false + register: azcollection_result + + - name: Azure collection not installed + ansible.builtin.fail: + msg: + - "Please install Azure collection" + - "ansible-galaxy collection install azure.azcollection" + when: + - azcollection_result.stderr is search("unable to find") + + - name: Get ansible_collections path + ansible.builtin.shell: > + set -o pipefail; + ansible-galaxy collection list | grep ansible_collections | head -n 1 | awk '{print $2}' + args: + executable: /bin/bash + register: collections_path + changed_when: false + when: ansible_collections_path is not defined + + - name: Ensure that Azure collection requirements are present on controlling host + ansible.builtin.pip: + requirements: "{{ ansible_collections_path | default(collections_path.stdout) }}/azure/azcollection/requirements.txt" + executable: pip3 + extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" + umask: "0022" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + + # Azure CLI + # Note: required for task "Add virtual machine IP addresses to Load Balancer backend pool" + - block: + - name: Check if Azure CLI is installed + ansible.builtin.command: az --version + register: az_version_result + changed_when: false + failed_when: false + + # try to install CLI (if not installed) + - name: Install Azure CLI + community.general.homebrew: + name: azure-cli + state: present + ignore_errors: true + when: + - az_version_result.rc != 0 + - ansible_distribution == "MacOSX" + + - name: Install Azure CLI + ansible.builtin.shell: > + set -o pipefail; + curl -sL https://aka.ms/InstallAzureCli | bash + args: + executable: /bin/bash + ignore_errors: true + when: + - az_version_result.rc != 0 + - ansible_distribution != "MacOSX" + + # Login + - name: Login to Azure using Service Principal + ansible.builtin.shell: | + az login --service-principal \ + --username "{{ lookup('env', 'AZURE_CLIENT_ID') }}" \ + --password "{{ lookup('env', 'AZURE_SECRET') }}" \ + --tenant "{{ lookup('env', 'AZURE_TENANT') }}" + args: + executable: /bin/bash + when: cloud_load_balancer | bool + delegate_to: 127.0.0.1 + become: false + run_once: true + +# Create (if state is present) +- block: + # if ssh_key_content is not defined, get the user's public key from the system (if it exists) + - name: "Set variable: ssh_key_content" + ansible.builtin.set_fact: + ssh_key_content: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + no_log: true # do not display the public key + when: ssh_key_content is not defined or ssh_key_content | length < 1 + + - name: "Azure: Create 
resource group" + azure.azcollection.azure_rm_resourcegroup: + name: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + location: "{{ server_location }}" + + # if server_network is not specified, create a network and subnet + - block: + - name: "Azure: Create virtual network" + azure.azcollection.azure_rm_virtualnetwork: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ azure_virtual_network | default('postgres-cluster-network') }}" + address_prefixes_cidr: ["{{ azure_virtual_network_prefix | default('10.0.0.0/16') }}"] + + - name: "Azure: Create subnet" + azure.azcollection.azure_rm_subnet: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ azure_subnet | default('postgres-cluster-subnet') }}" + address_prefix_cidr: "{{ azure_subnet_prefix | default('10.0.1.0/24') }}" + virtual_network: "{{ azure_virtual_network | default('postgres-cluster-network') }}" + when: server_network | length < 1 + + - name: "Azure: Gather information about network" + azure.azcollection.azure_rm_virtualnetwork_info: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_network | default(azure_virtual_network | default('postgres-cluster-network'), true) }}" + register: network_info + + - name: "Azure: Create public IP address" + azure.azcollection.azure_rm_publicipaddress: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-public-ip" + allocation_method: "Static" + sku: "Standard" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-public-ip" + register: public_ip_address + + # Security Group (Firewall) + - name: "Azure: Create or modify Security Group" + azure.azcollection.azure_rm_securitygroup: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-security-group" + rules: "{{ rules }}" + vars: + rules: >- + {{ + ([{ + 'name': 'public-ssh-rule', + 'description': 'SSH public access', + 'protocol': 'Tcp', + 'destination_port_range': [ansible_ssh_port | default(22)], + 'source_address_prefix': ssh_public_allowed_ips | default('0.0.0.0/0', true) | split(','), + 'access': 'Allow', + 'priority': 1200, + 'direction': 'Inbound' + }] if ssh_public_access | bool else []) + + ([{ + 'name': 'public-netdata-rule', + 'description': 'Netdata public access', + 'protocol': 'Tcp', + 'destination_port_range': [netdata_port | default('19999')], + 'source_address_prefix': netdata_public_allowed_ips | default('0.0.0.0/0', true) | split(','), + 'access': 'Allow', + 'priority': 1400, + 'direction': 'Inbound' + }] if netdata_install | bool and netdata_public_access | bool else []) + + ([{ + 'name': 'public-database-rule', + 'description': 'Database public access', + 'protocol': 'Tcp', + 'destination_port_range': ([ + haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003') + ] if with_haproxy_load_balancing | bool else []) + + ([pgbouncer_listen_port | default('6432')] if not 
with_haproxy_load_balancing | bool and pgbouncer_install | bool else []) + + ([postgresql_port | default('5432')] if not with_haproxy_load_balancing | bool and not pgbouncer_install | bool else []), + 'source_address_prefix': '0.0.0.0/0', + 'access': 'Allow', + 'priority': 1300, + 'direction': 'Inbound' + }] if database_public_access | bool else []) + + [{ + 'name': 'private-postgres-cluster-rule', + 'description': 'Postgres cluster ports', + 'protocol': 'Tcp', + 'destination_port_range': [ansible_ssh_port | default(22)] + + ([ + haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003'), + haproxy_listen_port.stats | default('7000') + ] if with_haproxy_load_balancing | bool else []) + + ([pgbouncer_listen_port | default('6432')] if pgbouncer_install | bool else []) + + [ + postgresql_port | default('5432'), + patroni_restapi_port | default('8008'), + ] + + ([ + etcd_client_port | default('2379'), + etcd_peer_port | default('2380'), + ] if dcs_type == 'etcd' else []) + + ([ + consul_ports_dns | default('8600'), + consul_ports_http | default('8500'), + consul_ports_rpc | default('8400'), + consul_ports_serf_lan | default('8301'), + consul_ports_serf_wan | default('8302'), + consul_ports_server | default('8300') + ] if dcs_type == 'consul' else []) + + ([netdata_port | default('19999')] if netdata_install | bool else []), + 'source_address_prefix': network_info.virtualnetworks[0].address_prefixes, + 'access': 'Allow', + 'priority': 1000, + 'direction': 'Inbound' + }] + }} + when: cloud_firewall | bool + + # Network interface + - name: "Azure: Create network interface" + azure.azcollection.azure_rm_networkinterface: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-network-interface" + virtual_network: "{{ server_network | default(azure_virtual_network | default('postgres-cluster-network'), true) }}" + subnet_name: "{{ azure_subnet | default('postgres-cluster-subnet') }}" + security_group: "{{ patroni_cluster_name }}-security-group" + ip_configurations: + - name: ipconfig1 + primary: true + public_ip_address_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-public-ip" + dns_servers: + - 8.8.8.8 + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-network-interface" + + # Server and volume + - name: "Azure: Create virtual machine" + azure.azcollection.azure_rm_virtualmachine: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: present + started: true + location: "{{ server_location }}" + vm_size: "{{ server_type }}" + priority: "{{ 'Spot' if server_spot | default(false) | bool else 'None' }}" + eviction_policy: "{{ 'Deallocate' if server_spot | default(false) | bool else omit }}" + admin_username: "{{ azure_admin_username | default('azureadmin') }}" + ssh_public_keys: + - path: /home/azureadmin/.ssh/authorized_keys + key_data: "{{ ssh_key_content }}" + ssh_password_enabled: false + image: + offer: "{{ azure_vm_image_offer | default('0001-com-ubuntu-server-jammy') }}" + publisher: "{{ azure_vm_image_publisher | default('Canonical') }}" + sku: "{{ azure_vm_image_sku | default('22_04-lts-gen2') }}" + 
version: "{{ azure_vm_image_version | default('latest') }}" + os_type: Linux + os_disk_size_gb: "{{ system_volume_size | default('80') }}" # system disk size + managed_disk_type: "{{ system_volume_type | default('StandardSSD_LRS', true) }}" + data_disks: + - lun: 0 + disk_size_gb: "{{ volume_size | int }}" + managed_disk_type: "{{ volume_type | default('StandardSSD_LRS', true) }}" + network_interface_names: + - "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-network-interface" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: server_result + + # Load Balancer + - name: "Azure: Create public IP address for Load Balancer" + azure.azcollection.azure_rm_publicipaddress: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-public-ip" + allocation_method: "Static" + sku: "Standard" + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-public-ip" + register: azure_load_balancer_public_ip + when: database_public_access | bool and cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Azure: Create or modify Load Balancer" + azure.azcollection.azure_rm_loadbalancer: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + frontend_ip_configurations: + - name: "{{ patroni_cluster_name }}-{{ item }}-frontend" + public_ip_address: "{{ database_public_access | bool | ternary(patroni_cluster_name ~ '-' ~ item ~ '-public-ip', omit) }}" + subnet: "{{ database_public_access | bool | ternary(omit, network_info.virtualnetworks[0].subnets[0].id) }}" + backend_address_pools: + - name: "{{ patroni_cluster_name }}-{{ item }}-backend" + probes: + - name: "{{ patroni_cluster_name }}-{{ item }}-health-probe" + protocol: "Http" + port: "{{ patroni_restapi_port }}" + request_path: "/{{ item }}" + interval: 5 + fail_count: 2 + load_balancing_rules: + - name: "{{ patroni_cluster_name }}-{{ item }}-rule" + frontend_ip_configuration: "{{ patroni_cluster_name }}-{{ item }}-frontend" + frontend_port: "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + backend_address_pool: "{{ patroni_cluster_name }}-{{ item }}-backend" + backend_port: "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + probe: "{{ patroni_cluster_name }}-{{ item }}-health-probe" + protocol: "Tcp" + idle_timeout: 10 # in minutes + enable_floating_ip: false + disable_outbound_snat: true + sku: "Standard" + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + register: azure_load_balancer + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: Extract virtual machine private IPs + ansible.builtin.set_fact: + private_ips: >- + {{ + private_ips | default([]) + + 
[item.ansible_facts.azure_vm.network_profile.network_interfaces[0].properties.ip_configurations[0].private_ip_address] + }} + loop: "{{ server_result.results | selectattr('ansible_facts.azure_vm', 'defined') }}" + loop_control: + label: "{{ item.ansible_facts.azure_vm.network_profile.network_interfaces[0].properties.ip_configurations[0].private_ip_address }}" + + # Note: We use Azure CLI here because there is no ansible module available to manage the list of IP addresses within a backend pool. + - name: "Azure: Add virtual machine IP addresses to Load Balancer backend pool" + ansible.builtin.shell: | + {% for ip in private_ips %} + az network lb address-pool address add \ + --resource-group {{ azure_resource_group | default('postgres-cluster-resource-group-' ~ server_location) }} \ + --lb-name {{ patroni_cluster_name }}-{{ item }} \ + --pool-name {{ patroni_cluster_name }}-{{ item }}-backend \ + --vnet {{ azure_virtual_network | default('postgres-cluster-network') }} \ + --name address-{{ ip }} \ + --ip-address {{ ip }} + {% endfor %} + args: + executable: /bin/bash + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-backend" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + # Azure Blob Storage (Backups) + - block: + - name: "Azure: Create Storage Account '{{ azure_blob_storage_account_name }}'" + azure.azcollection.azure_rm_storageaccount: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ azure_blob_storage_account_name }}" + account_type: "{{ azure_blob_storage_account_type }}" + kind: "{{ azure_blob_storage_account_kind }}" + access_tier: "{{ azure_blob_storage_account_access_tier }}" + public_network_access: "{{ azure_blob_storage_account_public_network_access }}" + allow_blob_public_access: "{{ azure_blob_storage_account_allow_blob_public_access }}" + state: present + + - name: "Azure: Get Storage Account info" + azure.azcollection.azure_rm_storageaccount_info: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ azure_blob_storage_account_name }}" + show_connection_string: true + no_log: true # do not output storage account contents to the ansible log + register: azure_storage_account_info + + - name: "Set variable: azure_storage_account_key" + ansible.builtin.set_fact: + azure_storage_account_key: "{{ azure_storage_account_info.storageaccounts[0].primary_endpoints.key }}" + no_log: true # do not output storage account contents to the ansible log + + - name: "Azure: Create Blob Storage container '{{ azure_blob_storage_name }}'" + azure.azcollection.azure_rm_storageblob: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + account_name: "{{ azure_blob_storage_account_name }}" + container: "{{ azure_blob_storage_name }}" + blob_type: "{{ azure_blob_storage_blob_type }}" + state: present + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - azure_blob_storage_create | bool + when: state == 'present' + +- name: "Wait for host to be available via SSH" + ansible.builtin.wait_for: + host: "{{ item.state.ip_address }}" + port: 22 + delay: 5 + timeout: 300 + loop: "{{ public_ip_address.results }}" + loop_control: + index_var: idx + label: "{{ server_name 
| lower }}{{ '%02d' % (idx + 1) }}" + when: + - public_ip_address.results is defined + - item.state.ip_address is defined + +# Info +- name: Server info + ansible.builtin.debug: + msg: + id: "{{ item.ansible_facts.azure_vm.id | default('N/A') }}" + name: "{{ item.ansible_facts.azure_vm.name | default('N/A') }}" + image: "{{ item.ansible_facts.azure_vm.storage_profile.image_reference | default('N/A') }}" + type: "{{ item.ansible_facts.azure_vm.hardware_profile.vm_size | default('N/A') }}" + volume_size: "{{ item.ansible_facts.azure_vm.storage_profile.data_disks[0].disk_size_gb | default('N/A') }} GB" + volume_type: "{{ item.ansible_facts.azure_vm.storage_profile.data_disks[0].managed_disk.storage_account_type | default('N/A') }}" + public_ip: >- + {{ + public_ip_address.results | selectattr('idx', 'equalto', item.idx) | map(attribute='state.ip_address') | first | default('N/A') + }} + private_ip: >- + {{ + item.ansible_facts.azure_vm.network_profile.network_interfaces[0].properties.ip_configurations[0].private_ip_address | default('N/A') + }} + loop: "{{ server_result.results }}" + loop_control: + label: "{{ item.ansible_facts.azure_vm.name | default('N/A') }}" + when: + - server_result.results is defined + - item.ansible_facts is defined + +# Inventory +- block: + - name: "Inventory | Initialize ip_addresses variable" + ansible.builtin.set_fact: + ip_addresses: [] + + - name: "Inventory | Extract IP addresses" + ansible.builtin.set_fact: + ip_addresses: >- + {{ ip_addresses + + [{ + 'public_ip': public_ip_address.results | selectattr('idx', 'equalto', item.idx) | map(attribute='state.ip_address') | first, + 'private_ip': item.ansible_facts.azure_vm.network_profile.network_interfaces[0].properties.ip_configurations[0].private_ip_address + }] + }} + loop: "{{ server_result.results | selectattr('ansible_facts.azure_vm', 'defined') }}" + loop_control: + label: >- + public_ip: {{ public_ip_address.results | selectattr('idx', 'equalto', item.idx) | map(attribute='state.ip_address') | first }}, + private_ip: {{ item.ansible_facts.azure_vm.network_profile.network_interfaces[0].properties.ip_configurations[0].private_ip_address }} + + - name: "Inventory | Generate in-memory inventory" + ansible.builtin.import_tasks: inventory.yml + when: + - server_result.results is defined + - server_result.results | selectattr('ansible_facts.azure_vm', 'defined') + +# Delete (if state is absent) +- block: + - name: "Azure: Delete virtual machine" + azure.azcollection.azure_rm_virtualmachine: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: absent + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + + - name: "Azure: Delete network interface" + azure.azcollection.azure_rm_networkinterface: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-network-interface" + state: absent + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-network-interface" + + - name: "Azure: Delete public IP address" + azure.azcollection.azure_rm_publicipaddress: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ server_name | 
lower }}{{ '%02d' % (idx + 1) }}-public-ip" + state: absent + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-public-ip" + + - name: "Azure: Delete Load Balancer" + azure.azcollection.azure_rm_loadbalancer: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Azure: Delete Load Balancer public IP address" + azure.azcollection.azure_rm_publicipaddress: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-public-ip" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-public-ip" + when: database_public_access | bool and cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Azure: Delete Security Group" + azure.azcollection.azure_rm_securitygroup: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ patroni_cluster_name }}-security-group" + state: absent + + - block: + - name: "Azure: Delete Blob Storage '{{ azure_blob_storage_name }}'" + azure.azcollection.azure_rm_storageblob: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + storage_account_name: "{{ azure_blob_storage_account_name }}" + container: "{{ azure_blob_storage_name }}" + state: absent + + - name: "Azure: Delete Storage Account '{{ azure_blob_storage_account_name }}'" + azure.azcollection.azure_rm_storageaccount: + resource_group: "{{ azure_resource_group | default('postgres-cluster-resource-group' ~ '-' ~ server_location) }}" + name: "{{ azure_blob_storage_account_name }}" + force_delete_nonempty: true + state: absent + ignore_errors: true + when: (pgbackrest_install | bool or wal_g_install | bool) and azure_blob_storage_absent | bool + when: state == 'absent' diff --git a/automation/roles/cloud_resources/tasks/digitalocean.yml b/automation/roles/cloud_resources/tasks/digitalocean.yml new file mode 100644 index 000000000..a778cfd54 --- /dev/null +++ b/automation/roles/cloud_resources/tasks/digitalocean.yml @@ -0,0 +1,762 @@ +--- +# Dependencies +- name: Install Python dependencies + block: + - name: Ensure that 'python3-pip' package is present on controlling host + ansible.builtin.command: which pip3 + register: pip3_check + failed_when: false + changed_when: false + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - pip3_check.rc != 0 + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_os_family == "Debian" + + - name: Install 'python3-pip' package on 
controlling host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 10 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_distribution != "MacOSX" + + - name: Ensure that 'dopy' dependency is present on controlling host + ansible.builtin.pip: + name: dopy + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + + - name: Ensure that 'boto3' dependency is present on controlling host + ansible.builtin.pip: + name: boto3 + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - digital_ocean_spaces_create | bool + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + delegate_to: 127.0.0.1 + run_once: true + +# SSH key +- block: + # Delete the temporary ssh key from the cloud (if it exists) + - name: "DigitalOcean: Remove temporary SSH key '{{ ssh_key_name }}' from cloud (if any)" + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + ssh_pub_key: "{{ ssh_key_content }}" + state: absent + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + + # if ssh_key_name and ssh_key_content are specified, add this SSH key to the cloud + - name: "DigitalOcean: Add SSH key '{{ ssh_key_name }}' to cloud" + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + ssh_pub_key: "{{ ssh_key_content }}" + state: present + when: + - ssh_key_name | length > 0 + - ssh_key_content | length > 0 + + - name: "DigitalOcean: Gather information about SSH keys" + community.digitalocean.digital_ocean_sshkey_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + register: ssh_keys + + # if ssh_key_name is specified, get the fingerprint of one ssh key + # or if tmp_ssh_key_name is used and ssh_public_keys is defined + - name: "DigitalOcean: Get fingerprint for SSH key '{{ ssh_key_name }}'" + ansible.builtin.set_fact: + ssh_key_fingerprint: "{{ [item.fingerprint] }}" + loop: "{{ ssh_keys.data | lower }}" + loop_control: # do not display the public key + label: "{{ item.name }}" + when: + - ((ssh_key_name | length > 0 and ssh_key_name != (tmp_ssh_key_name | default(''))) or + (ssh_key_name == (tmp_ssh_key_name | default('')) and ssh_public_keys | default('') | length > 0)) + - item.name == ssh_key_name | lower + + # Stop if the SSH key is not found + - name: "DigitalOcean: Fail if SSH key '{{ ssh_key_name }}' is not found" + ansible.builtin.fail: + msg: "SSH key {{ ssh_key_name }} not found. Ensure that the key has been added to DigitalOcean."
+ when: + - (ssh_key_name | length > 0 and ssh_key_name != (tmp_ssh_key_name | default(''))) + - ssh_key_fingerprint is not defined + + # if ssh_key_name is not specified, and ssh_public_keys is not defined + # get the fingerprint of all ssh keys + - name: "DigitalOcean: Get fingerprint for all SSH keys" + ansible.builtin.set_fact: + ssh_key_fingerprint: "{{ ssh_key_fingerprint | default([]) + [item.fingerprint] }}" + loop: "{{ ssh_keys.data | lower }}" + loop_control: # do not display the public key + label: "{{ item.name }}" + when: + - (ssh_key_name | length < 1 or ssh_key_name == (tmp_ssh_key_name | default(''))) + - (ssh_public_keys is not defined or ssh_public_keys | length < 1) + when: state == 'present' + +# Create (if state is present) +- block: + - name: "DigitalOcean: Gather information about VPC" + community.digitalocean.digital_ocean_vpc_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + register: vpc_info + + # if server_network is not specified and the default VPC is present + - name: Extract ip_range from default VPC + ansible.builtin.set_fact: + default_ip_range: >- + {{ + vpc_info.data + | selectattr('region', 'equalto', server_location) + | selectattr('default', 'equalto', true) + | map(attribute='ip_range') + | first + }} + when: + - server_network | length < 1 + - vpc_info.data | selectattr('region', 'equalto', server_location) | selectattr('default', 'equalto', true) | list | length > 0 + + # if server_network is not specified and there is no default VPC, create a network + - name: "DigitalOcean: Create a VPC '{{ digital_ocean_vpc_name | default('network-' + server_location) }}'" + community.digitalocean.digital_ocean_vpc: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ digital_ocean_vpc_name | default('network-' + server_location) }}" + region: "{{ server_location }}" + state: present + register: digital_ocean_vpc + when: + - server_network | length < 1 + - vpc_info.data | selectattr('region', 'equalto', server_location) | selectattr('default', 'equalto', true) | list | length == 0 + + - name: "Set variable: server_network" + ansible.builtin.set_fact: + server_network: "{{ digital_ocean_vpc_name | default('network-' + server_location) }}" + when: digital_ocean_vpc is changed + + - name: "DigitalOcean: Gather information about VPC" + community.digitalocean.digital_ocean_vpc_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + register: vpc_info + when: digital_ocean_vpc is changed + + # if server_network is specified + - name: "Fail if no VPC found in the specified region" + ansible.builtin.fail: + msg: "No VPC found with name '{{ server_network }}' in region '{{ server_location }}'" + when: + - server_network | length > 0 + - vpc_info.data | selectattr('region', 'equalto', server_location) | selectattr('name', 'equalto', server_network) | list | length == 0 + + - name: Extract ip_range from VPC "{{ server_network }}" + ansible.builtin.set_fact: + vpc_ip_range: >- + {{ + vpc_info.data + | selectattr('region', 'equalto', server_location) + | selectattr('name', 'equalto', server_network) + | map(attribute='ip_range') + | first + }} + when: server_network | length > 0 + + - name: Extract id from VPC "{{ server_network }}" + ansible.builtin.set_fact: + vpc_id: >- + {{ + vpc_info.data + | selectattr('region', 'equalto', server_location) + | selectattr('name', 'equalto', server_network) + | map(attribute='id') + | first + }} + when: server_network | length > 0 + + - name: "DigitalOcean: 
Create a tag '{{ patroni_cluster_name }}'" + community.digitalocean.digital_ocean_tag: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}" + state: present + + # Firewall + - name: "DigitalOcean: Create or modify public firewall" + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-public-firewall" + state: "present" + inbound_rules: "{{ inbound_rules }}" + outbound_rules: + - protocol: "tcp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "udp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "icmp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + vars: + inbound_rules: >- + {{ + ([ + { + 'protocol': 'tcp', + 'ports': ansible_ssh_port | default('22'), + 'sources': { + 'addresses': ssh_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + } + ] if ssh_public_access | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': netdata_port | default('19999'), + 'sources': { + 'addresses': netdata_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + } + ] if netdata_install | bool and netdata_public_access | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.master | default('5000'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas | default('5001'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas_sync | default('5002'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas_async | default('5003'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + } + ] if database_public_access | bool and with_haproxy_load_balancing | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': pgbouncer_listen_port | default('6432'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + } + ] if database_public_access | bool and (not with_haproxy_load_balancing | bool and pgbouncer_install | bool) else []) + + ([ + { + 'protocol': 'tcp', + 'ports': postgresql_port | default('5432'), + 'sources': { + 'addresses': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + } + ] if database_public_access | bool and (not with_haproxy_load_balancing | bool and not pgbouncer_install | bool) else []) + }} + when: + - cloud_firewall | bool + - (ssh_public_access | bool or netdata_public_access | bool or database_public_access | bool) + + - name: "DigitalOcean: Create or modify Postgres cluster firewall" + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-private-firewall" + state: "present" + inbound_rules: "{{ inbound_rules }}" + outbound_rules: + - protocol: "tcp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + - protocol: "udp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", 
"::/0"] + - protocol: "icmp" + ports: "1-65535" + destinations: + addresses: ["0.0.0.0/0", "::/0"] + tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + vars: + sources_addresses: "{{ (server_network | length > 0) | ternary(vpc_ip_range, default_ip_range) }}" + inbound_rules: >- + {{ + ([ + { + 'protocol': 'tcp', + 'ports': ansible_ssh_port | default('22'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ]) + + ([ + { + 'protocol': 'tcp', + 'ports': netdata_port | default('19999'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ] if netdata_install | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.master | default('5000'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas | default('5001'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas_sync | default('5002'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.replicas_async | default('5003'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': haproxy_listen_port.stats | default('7000'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ] if with_haproxy_load_balancing | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': pgbouncer_listen_port | default('6432'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ] if pgbouncer_install | bool else []) + + ([ + { + 'protocol': 'tcp', + 'ports': postgresql_port | default('5432'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': patroni_restapi_port | default('8008'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ]) + + ([ + { + 'protocol': 'tcp', + 'ports': etcd_client_port | default('2379'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': etcd_peer_port | default('2380'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ] if dcs_type == 'etcd' else []) + + ([ + { + 'protocol': 'tcp', + 'ports': consul_ports.dns | default('8600'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': consul_ports.http | default('8500'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': consul_ports.rpc | default('8400'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': consul_ports.serf_lan | default('8301'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': consul_ports.serf_wan | default('8302'), + 'sources': { + 'addresses': [sources_addresses] + } + }, + { + 'protocol': 'tcp', + 'ports': consul_ports.server | default('8300'), + 'sources': { + 'addresses': [sources_addresses] + } + } + ] if dcs_type == 'consul' else []) + }} + when: cloud_firewall | bool + + # Server and volume + - name: "DigitalOcean: Create or modify Droplet" + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: present + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + unique_name: true + size: "{{ server_type }}" + region: "{{ server_location }}" + image: "{{ server_image }}" + ssh_keys: "{{ ssh_key_fingerprint }}" + vpc_uuid: "{{ vpc_id | default(omit) }}" + wait_timeout: 500 + tags: + - "{{ 
patroni_cluster_name }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: droplet_result + + - name: "DigitalOcean: Create or modify Block Storage" + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: present + command: create + volume_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + region: "{{ server_location }}" + block_size: "{{ volume_size | int }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + register: block_storage_result + + - name: "DigitalOcean: Attach Block Storage to Droplet" + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: present + command: attach + volume_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + region: "{{ server_location }}" + droplet_id: "{{ item.data.droplet.id }}" + loop: "{{ droplet_result.results }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + when: droplet_result.results is defined + + # Load Balancer + - name: "Set variable: digital_ocean_load_balancer_port" + ansible.builtin.set_fact: + digital_ocean_load_balancer_port: "{{ pgbouncer_listen_port }}" + when: + - cloud_load_balancer | bool + - pgbouncer_install | bool + - digital_ocean_load_balancer_port | default('') | length < 1 + + - name: "Set variable: digital_ocean_load_balancer_target_port" + ansible.builtin.set_fact: + digital_ocean_load_balancer_target_port: "{{ pgbouncer_listen_port }}" + when: + - cloud_load_balancer | bool + - pgbouncer_install | bool + - digital_ocean_load_balancer_target_port | default('') | length < 1 + + # if 'pgbouncer_install' is 'false' + - name: "Set variable: digital_ocean_load_balancer_port" + ansible.builtin.set_fact: + digital_ocean_load_balancer_port: "{{ postgresql_port }}" + when: + - cloud_load_balancer | bool + - not pgbouncer_install | bool + - digital_ocean_load_balancer_port | default('') | length < 1 + + - name: "Set variable: digital_ocean_load_balancer_target_port" + ansible.builtin.set_fact: + digital_ocean_load_balancer_target_port: "{{ postgresql_port }}" + when: + - cloud_load_balancer | bool + - not pgbouncer_install | bool + - digital_ocean_load_balancer_target_port | default('') | length < 1 + + - name: "DigitalOcean: Create or modify Load Balancer" + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: present + name: "{{ patroni_cluster_name }}-{{ item }}" + region: "{{ server_location }}" + forwarding_rules: + - entry_protocol: tcp + entry_port: "{{ digital_ocean_load_balancer_port }}" + target_protocol: tcp + target_port: "{{ digital_ocean_load_balancer_target_port }}" + health_check: + protocol: http + port: "{{ patroni_restapi_port }}" + path: "/{{ item }}" + check_interval_seconds: 5 + response_timeout_seconds: 3 + unhealthy_threshold: 2 + healthy_threshold: 3 + size: "{{ (digital_ocean_load_balancer_size | default('lb-medium')) if server_location in ['ams2', 'nyc2', 'sfo1'] else omit }}" + size_unit: "{{ (digital_ocean_load_balancer_size_unit | default(3)) if server_location not in ['ams2', 'nyc2', 'sfo1'] else omit }}" + vpc_uuid: "{{ vpc_id | default(omit) }}" + tag: "{{ patroni_cluster_name }}" 
# a tag associated with droplets for load balancing. + loop: + - primary + - replica + - sync + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "DigitalOcean: Gather information about load balancers" + community.digitalocean.digital_ocean_load_balancer_info: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + register: digitalocean_load_balancer + when: cloud_load_balancer | bool + + # Spaces Object Storage (Backups) + - name: "DigitalOcean: Create Spaces Bucket '{{ digital_ocean_spaces_name }}'" + community.digitalocean.digital_ocean_spaces: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ digital_ocean_spaces_name }}" + region: "{{ digital_ocean_spaces_region }}" + aws_access_key_id: "{{ AWS_ACCESS_KEY_ID }}" + aws_secret_access_key: "{{ AWS_SECRET_ACCESS_KEY }}" + state: present + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - digital_ocean_spaces_create | bool + when: state == 'present' + +- name: Wait for host to be available via SSH + ansible.builtin.wait_for: + host: "{{ (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address }}" + port: 22 + delay: 5 + timeout: 300 + loop: "{{ droplet_result.results }}" + loop_control: + label: "{{ (item.data.droplet.networks.v4 | default('') | selectattr('type', 'equalto', 'public')).0.ip_address | default('N/A') }}" + when: + - droplet_result.results is defined + - item.data is defined + +# Info +- name: Server info + ansible.builtin.debug: + msg: + id: "{{ item.data.droplet.id }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + image: "{{ item.data.droplet.image.description }}" + type: "{{ server_type }}" + volume_size: "{{ volume_size }} GB" + public_ip: "{{ (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address }}" + private_ip: "{{ (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'private')).0.ip_address }}" + loop: "{{ droplet_result.results }}" + loop_control: + index_var: idx + label: "{{ (item.data.droplet.networks.v4 | default('') | selectattr('type', 'equalto', 'public')).0.ip_address | default('N/A') }}" + when: + - droplet_result.results is defined + - item.data is defined + +# Inventory +- block: + - name: "Inventory | Initialize ip_addresses variable" + ansible.builtin.set_fact: + ip_addresses: [] + + - name: "Inventory | Extract IP addresses" + ansible.builtin.set_fact: + ip_addresses: >- + {{ ip_addresses + + [{'public_ip': (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address, + 'private_ip': (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'private')).0.ip_address}] + }} + loop: "{{ droplet_result.results | selectattr('data', 'defined') }}" + loop_control: + label: >- + public_ip: {{ (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'public')).0.ip_address }}, + private_ip: {{ (item.data.droplet.networks.v4 | selectattr('type', 'equalto', 'private')).0.ip_address }} + + - name: "Inventory | Generate in-memory inventory" + ansible.builtin.import_tasks: inventory.yml + when: + - droplet_result.results is defined + - droplet_result.results | selectattr('data', 'defined') + +# Delete the temporary SSH key from the cloud after creating the droplet +- name: "DigitalOcean: Remove temporary SSH 
key '{{ ssh_key_name }}' from cloud" + community.digitalocean.digital_ocean_sshkey: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + ssh_pub_key: "{{ ssh_key_content }}" + state: absent + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + +# Delete (if state is absent) +- block: + - name: "DigitalOcean: Delete Droplet" + community.digitalocean.digital_ocean_droplet: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: absent + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + unique_name: true + region: "{{ server_location }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: droplet_absent + until: not droplet_absent.failed + retries: 3 + delay: 5 + + - name: "DigitalOcean: Delete Block Storage" + community.digitalocean.digital_ocean_block_storage: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: absent + command: create + volume_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + region: "{{ server_location }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + register: block_storage_absent + until: not block_storage_absent.failed + retries: 3 + delay: 5 + + - name: "DigitalOcean: Delete Load Balancer" + community.digitalocean.digital_ocean_load_balancer: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: absent + name: "{{ patroni_cluster_name }}-{{ item }}" + region: "{{ server_location }}" + loop: + - primary + - replica + - sync + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "DigitalOcean: Delete public firewall" + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: "absent" + name: "{{ patroni_cluster_name }}-public-firewall" + + - name: "DigitalOcean: Delete Postgres cluster firewall" + community.digitalocean.digital_ocean_firewall: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + state: "absent" + name: "{{ patroni_cluster_name }}-private-firewall" + + - name: "DigitalOcean: Delete Spaces Bucket '{{ digital_ocean_spaces_name }}'" + community.digitalocean.digital_ocean_spaces: + oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}" + name: "{{ digital_ocean_spaces_name }}" + region: "{{ digital_ocean_spaces_region }}" + aws_access_key_id: "{{ AWS_ACCESS_KEY_ID }}" + aws_secret_access_key: "{{ AWS_SECRET_ACCESS_KEY }}" + state: absent + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - digital_ocean_spaces_absent | bool + ignore_errors: true + when: state == 'absent' diff --git a/automation/roles/cloud_resources/tasks/gcp.yml b/automation/roles/cloud_resources/tasks/gcp.yml new file mode 100644 index 000000000..ce5b8ba3d --- /dev/null +++ b/automation/roles/cloud_resources/tasks/gcp.yml @@ -0,0 +1,712 @@ +--- +# Dependencies +- name: Install Python dependencies + block: + - name: Ensure that 'python3-pip' package is present on controlling host + ansible.builtin.command: which pip3 + register: pip3_check + 
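+ # Read-only probe: failed_when/changed_when are disabled below so this check never
+ # fails the play or reports a change; pip3_check.rc drives the install tasks that follow.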
failed_when: false + changed_when: false + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - pip3_check.rc != 0 + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_os_family == "Debian" + + - name: Install 'python3-pip' package on controlling host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 10 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_distribution != "MacOSX" + + - name: Ensure that 'google-auth' dependency is present on controlling host + ansible.builtin.pip: + name: google-auth + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + delegate_to: 127.0.0.1 + run_once: true + +# Check if GCP_SERVICE_ACCOUNT_CONTENTS is defined +- name: Lookup the GCP_SERVICE_ACCOUNT_CONTENTS environment variable + ansible.builtin.set_fact: + gcp_service_account_contents_raw: "{{ lookup('ansible.builtin.env', 'GCP_SERVICE_ACCOUNT_CONTENTS') | default('') }}" + no_log: true + +- name: "Fail if no GCP service account information is provided" + ansible.builtin.fail: + msg: "GCP_SERVICE_ACCOUNT_CONTENTS is not defined or empty. Please provide GCP service account credentials." + when: gcp_service_account_contents_raw | length < 1 + +# Decode the GCP service account contents if base64 encoded +- name: "Set variable: gcp_service_account_contents (b64decode)" + ansible.builtin.set_fact: + gcp_service_account_contents: "{{ gcp_service_account_contents_raw | b64decode }}" + no_log: true + when: gcp_service_account_contents_raw is match('^[a-zA-Z0-9+/]+={0,2}$') + +# Otherwise use the raw value as-is (not base64 encoded) +- name: "Set variable: gcp_service_account_contents" + ansible.builtin.set_fact: + gcp_service_account_contents: "{{ gcp_service_account_contents_raw }}" + no_log: true + when: gcp_service_account_contents is not defined + +# Project info +- name: "GCP: Gather information about project" + google.cloud.gcp_resourcemanager_project_info: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + register: project_info + when: gcp_project is not defined or gcp_project | length < 1 + +# Create (if state is present) +- block: + # if ssh_key_content is not defined, get the user public key from the system (if it exists) + - name: "Set variable: ssh_key_content" + ansible.builtin.set_fact: + ssh_key_content: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + no_log: true # do not display the public key + when: ssh_key_content is not defined or + ssh_key_content | length < 1 + + # if server_network is not specified, use the default network + - name: "Set variable: gcp_network_name" + ansible.builtin.set_fact: + gcp_network_name: "{{ server_network if server_network is defined and server_network | length > 0 else 'default' }}" + + - name: "GCP: Gather information about network" + google.cloud.gcp_compute_subnetwork_info: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + region: "{{ server_location[:-2] if server_location[-2:] | regex_search('-[a-z]$') else server_location }}" + filters: + - 
name = "{{ gcp_network_name }}" + register: subnetwork_info + + - name: "GCP: Extract ip_range for network '{{ gcp_network_name }}'" + ansible.builtin.set_fact: + gcp_network_ip_range: "{{ subnetwork_info.resources[0].ipCidrRange }}" + + # Firewall + - name: "GCP: Create or modify SSH public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-ssh-public" + description: "Firewall rule for public SSH access to Postgres cluster servers" + allowed: + - ip_protocol: tcp + ports: + - "{{ ansible_ssh_port | default(22) }}" + source_ranges: "{{ ssh_public_allowed_ips | default('0.0.0.0/0', true) | split(',') }}" + target_tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + network: + selfLink: "global/networks/{{ gcp_network_name }}" + state: present + when: + - ssh_public_access | bool + - cloud_firewall | bool + + - name: "GCP: Create or modify Netdata public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-netdata-public" + description: "Firewall rule for public Netdata monitoring access" + allowed: + - ip_protocol: tcp + ports: + - "{{ netdata_port | default('19999') }}" + source_ranges: "{{ netdata_public_allowed_ips | default('0.0.0.0/0', true) | split(',') }}" + target_tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + network: + selfLink: "global/networks/{{ gcp_network_name }}" + state: present + when: + - netdata_install | bool + - netdata_public_access | bool + - cloud_firewall | bool + + - name: "GCP: Create or modify Database public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-database-public" + description: "Firewall rule for public database access" + allowed: + - ip_protocol: tcp + ports: "{{ allowed_ports }}" + source_ranges: "{{ database_public_allowed_ips | default('0.0.0.0/0', true) | split(',') }}" + target_tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + network: + selfLink: "global/networks/{{ gcp_network_name }}" + state: present + vars: + allowed_ports: >- + {{ + ([postgresql_port | default('5432')] if not with_haproxy_load_balancing | bool and not pgbouncer_install | bool else []) + + ([pgbouncer_listen_port | default('6432')] if not with_haproxy_load_balancing | bool and pgbouncer_install | bool else []) + + ([haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003')] if with_haproxy_load_balancing | bool else []) + }} + when: + - database_public_access | bool + - cloud_firewall | bool + + - name: "GCP: Create or modify Postgres cluster firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name 
}}-firewall-rule" + description: "Firewall rule for Postgres cluster" + allowed: + - ip_protocol: tcp + ports: "{{ allowed_ports }}" + source_ranges: + - "{{ gcp_network_ip_range }}" + target_tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + network: + selfLink: "global/networks/{{ gcp_network_name }}" + state: present + vars: + allowed_ports: >- + {{ + [ansible_ssh_port | default('22')] + + ([netdata_port | default('19999')] if netdata_install | bool else []) + + ([pgbouncer_listen_port | default('6432')] if pgbouncer_install | bool else []) + + [postgresql_port | default('5432')] + + [patroni_restapi_port | default('8008')] + + ([haproxy_listen_port.master | default('5000'), + haproxy_listen_port.replicas | default('5001'), + haproxy_listen_port.replicas_sync | default('5002'), + haproxy_listen_port.replicas_async | default('5003'), + haproxy_listen_port.stats | default('7000')] if with_haproxy_load_balancing | bool else []) + + ([etcd_client_port | default('2379'), etcd_peer_port | default('2380')] if dcs_type == 'etcd' else []) + + ([consul_ports_dns | default('8600'), + consul_ports_http | default('8500'), + consul_ports_rpc | default('8400'), + consul_ports_serf_lan | default('8301'), + consul_ports_serf_wan | default('8302'), + consul_ports_server | default('8300')] if dcs_type == 'consul' else []) + }} + when: cloud_firewall | bool + + # if 'cloud_load_balancer' is 'true' + # https://cloud.google.com/load-balancing/docs/tcp#firewall-rules + - name: "GCP: Create health checks and LB firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-lb-firewall-rule" + description: "Firewall rule for Health Checks and Load Balancer access to the database" + priority: 900 + allowed: + - ip_protocol: tcp + ports: + - "{{ patroni_restapi_port | default('8008') }}" + - "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + source_ranges: + - "35.191.0.0/16" + - "130.211.0.0/22" + target_tags: + - "{{ patroni_cluster_name }}" # Only VMs with this tag will be affected + network: + selfLink: "global/networks/{{ gcp_network_name }}" + state: present + when: cloud_load_balancer | bool + + # Server and volume + - name: "GCP: Create or modify VM instance" + google.cloud.gcp_compute_instance: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + zone: "{{ server_location + '-b' if not server_location is match('.*-[a-z]$') else server_location }}" # add "-b" if the zone is not defined + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + machine_type: "{{ server_type }}" + disks: + - device_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-system" + auto_delete: true + boot: true + initialize_params: + disk_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-system" + source_image: "{{ server_image }}" + disk_size_gb: "{{ system_volume_size | default('80') }}" # system disk size + disk_type: "{{ system_volume_type | default('pd-ssd', true) }}" + - device_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + auto_delete: true + initialize_params: + disk_name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + disk_size_gb: "{{ volume_size | 
int }}" + disk_type: "{{ volume_type | default('pd-ssd', true) }}" + network_interfaces: + - network: + selfLink: "global/networks/{{ gcp_network_name }}" + access_configs: + - name: External NAT + type: ONE_TO_ONE_NAT + metadata: + ssh-keys: "root:{{ ssh_key_content }}" + scheduling: + on_host_maintenance: "{{ 'TERMINATE' if (server_spot | bool or server_type is search('metal')) else 'MIGRATE' }}" + preemptible: "{{ server_spot | default(gcp_compute_instance_preemptible | default(false)) | bool }}" + tags: + items: + - "{{ patroni_cluster_name }}" + labels: + cluster: "{{ patroni_cluster_name }}" + status: "{{ gcp_instance_status | default('RUNNING') }}" + state: present + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: server_result + until: server_result is success + delay: 10 + retries: 3 + + # Load Balancer + # This block creates Global External classic proxy Network Load Balancer. + # Global objects are required because the gcp_compute_target_tcp_proxy module can only be global, as it requires the use of a global forwarding rule. + # Using global objects instead of regional ones allows us to utilize a TCP proxy for correct traffic load balancing. + # Note: Regional internal load balancers are passthrough and do not work correctly with the health checks we use through the Patroni REST API. + - block: + - name: "GCP: [Load Balancer] Create instance group" + google.cloud.gcp_compute_instance_group: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}" + description: "{{ patroni_cluster_name }} instance group" + region: "{{ region }}" + zone: "{{ zone }}" + named_ports: + - name: postgres + port: "{{ postgresql_port | default('5432') }}" + - name: pgbouncer + port: "{{ pgbouncer_listen_port | default('6432') }}" + network: + selfLink: "global/networks/{{ gcp_network_name }}" + instances: "{{ instances_selflink }}" + state: present + vars: + region: "{{ server_location[:-2] if server_location[-2:] | regex_search('-[a-z]$') else server_location }}" + zone: "{{ server_location + '-b' if not server_location is match('.*-[a-z]$') else server_location }}" # add "-b" if the zone is not defined + # The module only works if selfLink is set manually, issue: https://github.com/ansible-collections/google.cloud/issues/614 + instances_selflink: >- # TODO: use "{{ server_result.results | map(attribute='selfLink') | map('community.general.dict_kv', 'selfLink') | list }}" + [ + {% for i in range(1, (server_count | int) + 1) %} + { + "selfLink": "zones/{{ zone }}/instances/{{ server_name }}{{ '%02d' % i }}" + }{% if not loop.last %},{% endif %} + {% endfor %} + ] + register: instance_group + # Ignore error if resource already exists on re-run + failed_when: instance_group is failed and 'memberAlreadyExists' not in (instance_group.msg | default('')) + + - name: "GCP: [Load Balancer] Create health check" + google.cloud.gcp_compute_health_check: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-hc" + description: "{{ patroni_cluster_name }} {{ item }} health check" + type: "HTTP" + http_health_check: + port: "{{ patroni_restapi_port }}" + request_path: "/{{ item }}" + 
check_interval_sec: "{{ gcp_compute_health_check_interval_sec | default(3) }}" + timeout_sec: "{{ gcp_compute_health_check_check_timeout_sec | default(2) }}" + unhealthy_threshold: "{{ gcp_compute_health_check_unhealthy_threshold | default(2) }}" + healthy_threshold: "{{ gcp_compute_health_check_healthy_threshold | default(3) }}" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-hc" + register: health_check + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Create backend service" + google.cloud.gcp_compute_backend_service: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + description: "{{ patroni_cluster_name }} {{ item }} backend" + protocol: "TCP" + port_name: "{{ 'pgbouncer' if pgbouncer_install | bool else 'postgres' }}" + load_balancing_scheme: "EXTERNAL" + backends: + - group: "zones/{{ zone }}/instanceGroups/{{ patroni_cluster_name }}" + balancing_mode: "CONNECTION" + max_connections_per_instance: "{{ gcp_lb_max_connections | default(10000) }}" + health_checks: + - "/global/healthChecks/{{ patroni_cluster_name }}-{{ item }}-hc" + timeout_sec: "{{ gcp_compute_backend_service_timeout_sec | default(5) }}" + log_config: + enable: "{{ gcp_compute_backend_service_log_enable | default(false) }}" + state: present + vars: + zone: "{{ server_location + '-b' if not server_location is match('.*-[a-z]$') else server_location }}" # add "-b" if the zone is not defined + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + register: backend_service + # Ignore error if resource already exists on re-run + failed_when: backend_service is failed and 'resource.fingerprint' not in (backend_service.msg | default('')) + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Create target TCP proxy" + google.cloud.gcp_compute_target_tcp_proxy: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-proxy" + description: "{{ patroni_cluster_name }} {{ item }} TCP Proxy" + service: + selfLink: "/global/backendServices/{{ patroni_cluster_name }}-{{ item }}" + proxy_header: "NONE" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-proxy" + register: target_tcp_proxy + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Reserve static IP address" + google.cloud.gcp_compute_global_address: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-ip" + description: "{{ patroni_cluster_name }} {{ item }} load balancer IP address" + address_type: "EXTERNAL" + ip_version: 
"IPV4" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-ip" + register: load_balancer_ip + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Create forwarding rule" + google.cloud.gcp_compute_global_forwarding_rule: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-fr" + description: "{{ patroni_cluster_name }} {{ item }} forwarding rule" + load_balancing_scheme: "EXTERNAL" + ip_address: "{{ (load_balancer_ip.results | selectattr('item', 'equalto', item) | map(attribute='address') | first) }}" + ip_protocol: "TCP" + port_range: "{{ pgbouncer_listen_port | default('6432') if pgbouncer_install | bool else postgresql_port | default('5432') }}" + target: "/global/targetTcpProxies/{{ patroni_cluster_name }}-{{ item }}-proxy" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-fr" + register: gcp_load_balancer + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + when: cloud_load_balancer | bool + + # GCS Bucket (Backups) + - name: "GCP: Create bucket '{{ gcp_bucket_name }}'" + google.cloud.gcp_storage_bucket: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ gcp_bucket_name }}" + storage_class: "{{ gcp_bucket_storage_class }}" + predefined_default_object_acl: "{{ gcp_bucket_default_object_acl }}" + state: present + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - gcp_bucket_create | bool + when: state == 'present' + +- name: Wait for host to be available via SSH + ansible.builtin.wait_for: + host: "{{ item.networkInterfaces[0].accessConfigs[0].natIP }}" + port: 22 + delay: 5 + timeout: "{{ 1800 if server_type is search('metal') else 300 }}" # timeout 30 minutes for bare metal instances and 5 minutes for regular VMs + loop: "{{ server_result.results }}" + loop_control: + label: "{{ item.networkInterfaces[0].accessConfigs[0].natIP | default('N/A') }}" + when: + - server_result.results is defined + - item.networkInterfaces is defined + +# Info +- name: Server info + ansible.builtin.debug: + msg: + id: "{{ item.id }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + image: "{{ item.disks[0].licenses[0] | basename }}" + type: "{{ item.machineType | basename }}" + volume_size: "{{ volume_size }} GB" + public_ip: "{{ item.networkInterfaces[0].accessConfigs[0].natIP }}" + private_ip: "{{ item.networkInterfaces[0].networkIP }}" + loop: "{{ server_result.results }}" + loop_control: + index_var: idx + label: "{{ item.networkInterfaces[0].accessConfigs[0].natIP | default('N/A') }}" + when: + - server_result.results is defined + - item.networkInterfaces is defined + +# Inventory +- block: + - name: "Inventory | Initialize ip_addresses variable" + ansible.builtin.set_fact: + ip_addresses: [] + + - name: "Inventory | Extract IP addresses" + ansible.builtin.set_fact: + ip_addresses: >- + {{ ip_addresses + + [{'public_ip': 
item.networkInterfaces[0].accessConfigs[0].natIP, + 'private_ip': item.networkInterfaces[0].networkIP}] + }} + loop: "{{ server_result.results | selectattr('networkInterfaces', 'defined') }}" + loop_control: + label: "public_ip: {{ item.networkInterfaces[0].accessConfigs[0].natIP }}, private_ip: {{ item.networkInterfaces[0].networkIP }}" + + - name: "Inventory | Generate in-memory inventory" + ansible.builtin.import_tasks: inventory.yml + when: + - server_result.results is defined + - server_result.results | selectattr('networkInterfaces', 'defined') + +# Delete (if state is absent) +- block: + - name: "GCP: Delete VM instance" + google.cloud.gcp_compute_instance: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + zone: "{{ server_location + '-b' if not server_location is match('.*-[a-z]$') else server_location }}" # add "-b" if the zone is not defined + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: absent + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + + - name: "GCP: [Load Balancer] Delete forwarding rule" + google.cloud.gcp_compute_global_forwarding_rule: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-fr" + target: "/global/targetTcpProxies/{{ patroni_cluster_name }}-{{ item }}-proxy" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-fr" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Delete static IP address" + google.cloud.gcp_compute_global_address: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-ip" + address_type: "EXTERNAL" + ip_version: "IPV4" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-ip" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Delete target TCP proxy" + google.cloud.gcp_compute_target_tcp_proxy: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-proxy" + service: + selfLink: "/global/backendServices/{{ patroni_cluster_name }}-{{ item }}" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-proxy" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Delete backend service" + google.cloud.gcp_compute_backend_service: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | 
default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Delete health check" + google.cloud.gcp_compute_health_check: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-{{ item }}-hc" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}-hc" + when: item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool) + + - name: "GCP: [Load Balancer] Delete instance group" + google.cloud.gcp_compute_instance_group: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}" + region: "{{ server_location[:-2] if server_location[-2:] | regex_search('-[a-z]$') else server_location }}" + zone: "{{ server_location + '-b' if not server_location is match('.*-[a-z]$') else server_location }}" + state: absent + + - name: "GCP: Delete SSH public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-ssh-public" + state: absent + + - name: "GCP: Delete Netdata public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-netdata-public" + state: absent + + - name: "GCP: Delete Database public firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-database-public" + state: absent + + - name: "GCP: Delete Postgres cluster firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-firewall-rule" + state: absent + + - name: "GCP: Delete health checks and LB firewall rule" + google.cloud.gcp_compute_firewall: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ patroni_cluster_name }}-lb-firewall-rule" + state: absent + + - name: "GCP: Delete bucket '{{ gcp_bucket_name }}'" + google.cloud.gcp_storage_bucket: + auth_kind: "serviceaccount" + service_account_contents: "{{ gcp_service_account_contents }}" + project: "{{ gcp_project | default(project_info.resources[0].projectNumber) }}" + name: "{{ gcp_bucket_name }}" + 
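+ # Note: GCS allows deleting only empty buckets; remove any remaining backups first,
+ # otherwise this delete task is expected to fail.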
state: absent + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - gcp_bucket_absent | bool + when: state == 'absent' diff --git a/automation/roles/cloud_resources/tasks/hetzner.yml b/automation/roles/cloud_resources/tasks/hetzner.yml new file mode 100644 index 000000000..6b8ad792a --- /dev/null +++ b/automation/roles/cloud_resources/tasks/hetzner.yml @@ -0,0 +1,814 @@ +--- +# Dependencies +- name: Install Python dependencies + block: + - name: Ensure that 'python3-pip' package is present on controlling host + ansible.builtin.command: which pip3 + register: pip3_check + failed_when: false + changed_when: false + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + when: + - pip3_check.rc != 0 + - ansible_os_family == "RedHat" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_os_family == "Debian" + + - name: Install 'python3-pip' package on controlling host + ansible.builtin.package: + name: python3-pip + state: present + register: package_status + until: package_status is success + delay: 10 + retries: 3 + when: + - pip3_check.rc != 0 + - ansible_distribution != "MacOSX" + + - name: Ensure that 'hcloud' dependency is present on controlling host + ansible.builtin.pip: + name: hcloud + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + run_once: true + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + + - name: Ensure that 'boto3' dependency is present on controlling host + ansible.builtin.pip: + name: boto3 + executable: pip3 + extra_args: --user + become: false + vars: + ansible_become: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - hetzner_object_storage_create | bool + delegate_to: 127.0.0.1 + run_once: true + +# SSH key +- block: + # Delete the temporary SSH key from the cloud (if it exists) + - name: "Hetzner Cloud: Remove temporary SSH key '{{ ssh_key_name }}' from cloud (if any)" + hetzner.hcloud.ssh_key: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + state: absent + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + + # if ssh_key_name and ssh_key_content are specified, add this SSH key to the cloud + - name: "Hetzner Cloud: Add SSH key '{{ ssh_key_name }}' to cloud" + hetzner.hcloud.ssh_key: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + public_key: "{{ ssh_key_content }}" + state: present + when: + - ssh_key_name | length > 0 + - ssh_key_content | length > 0 + + # if ssh_key_name is specified + - name: "Hetzner Cloud: Gather information about SSH key '{{ ssh_key_name }}'" + hetzner.hcloud.ssh_key_info: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + register: ssh_keys + when: ssh_key_name | length > 0 + + # Stop if the SSH key is not found + - name: "Hetzner Cloud: Fail if SSH key is not found" + ansible.builtin.fail: + msg: "SSH key {{ ssh_key_name }} not found. Ensure that the key has been added to Hetzner Cloud."
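+ # The failure above fires only when a specific key name was requested
+ # (ssh_key_name set) and the Hetzner API returned no matching key.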
+ when: + - ssh_key_name | length > 0 + - ssh_keys.hcloud_ssh_key_info is defined + - ssh_keys.hcloud_ssh_key_info | length < 1 + + - name: "Set variable: ssh_key_names" + ansible.builtin.set_fact: + ssh_key_names: "{{ ssh_key_names | default([]) + [item.name] }}" + loop: "{{ ssh_keys.hcloud_ssh_key_info }}" + no_log: true # do not display the public key + when: + - ssh_key_name | length > 0 + - ssh_keys.hcloud_ssh_key_info is defined + - ssh_keys.hcloud_ssh_key_info | length > 0 + + # if ssh_key_name is not specified, and ssh_public_keys is not defined + # get the names of all ssh keys + - name: "Hetzner Cloud: Gather information about SSH keys" + hetzner.hcloud.ssh_key_info: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + register: ssh_keys + when: + - (ssh_key_name | length < 1 or ssh_key_name == (tmp_ssh_key_name | default(''))) + - (ssh_public_keys is not defined or ssh_public_keys | length < 1) + + - name: "Hetzner Cloud: Get names of all SSH keys" + ansible.builtin.set_fact: + ssh_key_names: "{{ ssh_key_names | default([]) + [item.name] }}" + loop: "{{ ssh_keys.hcloud_ssh_key_info }}" + loop_control: # do not display the public key + label: "{{ item.name }}" + when: + - (ssh_key_name | length < 1 or ssh_key_name == (tmp_ssh_key_name | default(''))) + - (ssh_public_keys is not defined or ssh_public_keys | length < 1) + when: state == 'present' + +# Create (if state is present) +- block: + - name: "Hetzner Cloud: Gather information about network zones" + ansible.builtin.uri: + url: "https://api.hetzner.cloud/v1/locations" + method: GET + headers: + Authorization: "Bearer {{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + return_content: true + register: hetzner_locations_response + failed_when: hetzner_locations_response.status != 200 + + - name: "Hetzner Cloud: Extract network zone for server_location" + ansible.builtin.set_fact: + target_network_zone: "{{ item.network_zone }}" + loop: "{{ hetzner_locations_response.json.locations }}" + loop_control: + label: "network_zone: {{ item.network_zone }}" + when: item.name == server_location + + - name: "Hetzner Cloud: Gather information about networks" + hetzner.hcloud.network_info: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + register: network_info + until: network_info is success + delay: 5 + retries: 3 + + # if server_network is specified + - name: "Hetzner Cloud: Check if network '{{ server_network }}' exists for given location" + ansible.builtin.fail: + msg: "No network with name '{{ server_network }}' in location '{{ target_network_zone }}'" + when: + - server_network | length > 0 + - not (network_info.hcloud_network_info + | selectattr("name", "equalto", server_network) + | selectattr("subnetworks", "defined") + | map(attribute='subnetworks') + | flatten + | selectattr("network_zone", "equalto", target_network_zone) + | list | length > 0) + + - name: "Hetzner Cloud: Extract ip_range for network '{{ server_network }}'" + ansible.builtin.set_fact: + server_network_ip_range: "{{ item.ip_range }}" + loop: "{{ network_info.hcloud_network_info }}" + when: + - server_network | length > 0 + - item.name == server_network + loop_control: + label: "{{ item.ip_range }}" + + # if server_network is not specified, create a network and subnet + - block: + - name: "Hetzner Cloud: Create a network '{{ hcloud_network_name | default('postgres-cluster-network-' + target_network_zone) }}'" + hetzner.hcloud.network: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" 
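+ # Defaults below: a 10.0.0.0/16 private network with a 10.0.1.0/24 server subnet
+ # in the network zone detected for server_location; override them via
+ # hcloud_network_ip_range and hcloud_subnetwork_ip_range.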
+ name: "{{ hcloud_network_name | default('postgres-cluster-network-' + target_network_zone) }}" + ip_range: "{{ hcloud_network_ip_range | default('10.0.0.0/16') }}" + state: present + + - name: "Hetzner Cloud: Create a subnetwork in network '{{ hcloud_network_name | default('postgres-cluster-network-' + target_network_zone) }}'" + hetzner.hcloud.subnetwork: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + network: "{{ hcloud_network_name | default('postgres-cluster-network-' + target_network_zone) }}" + ip_range: "{{ hcloud_subnetwork_ip_range | default('10.0.1.0/24') }}" + network_zone: "{{ target_network_zone }}" + type: server + state: present + + - name: "Set variable: server_network" + ansible.builtin.set_fact: + server_network: "{{ hcloud_network_name | default('postgres-cluster-network-' + target_network_zone) }}" + server_network_ip_range: "{{ hcloud_network_ip_range | default('10.0.0.0/16') }}" + when: server_network | length < 1 + + # Firewall + - name: "Hetzner Cloud: Create or modify public firewall" + hetzner.hcloud.firewall: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + state: "present" + name: "{{ patroni_cluster_name }}-public-firewall" + rules: "{{ rules }}" + vars: + rules: >- + {{ + ([ + { + 'description': 'SSH', + 'direction': 'in', + 'protocol': 'tcp', + 'port': ansible_ssh_port | default('22'), + 'source_ips': ssh_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + ] if ssh_public_access | bool else []) + + ([ + { + 'description': 'Netdata', + 'direction': 'in', + 'protocol': 'tcp', + 'port': netdata_port | default('19999'), + 'source_ips': netdata_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + ] if netdata_install | bool and netdata_public_access | bool else []) + + ([ + { + 'description': 'HAProxy - master', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.master, + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + }, + { + 'description': 'HAProxy - replicas', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas, + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + }, + { + 'description': 'HAProxy - replicas_sync', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas_sync, + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + }, + { + 'description': 'HAProxy - replicas_async', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas_async, + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + ] if database_public_access | bool and with_haproxy_load_balancing | bool else []) + + ([ + { + 'description': 'PgBouncer', + 'direction': 'in', + 'protocol': 'tcp', + 'port': pgbouncer_listen_port | default('6432'), + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + ] if database_public_access | bool and (not with_haproxy_load_balancing | bool and pgbouncer_install | bool) else []) + + ([ + { + 'description': 'PostgreSQL', + 'direction': 'in', + 'protocol': 'tcp', + 'port': postgresql_port | default('5432'), + 'source_ips': database_public_allowed_ips | default('0.0.0.0/0,::/0', true) | split(',') + } + ] if database_public_access | bool and (not with_haproxy_load_balancing | bool and not pgbouncer_install | bool) else []) + }} + when: + - cloud_firewall | bool + - 
(ssh_public_access | bool or netdata_public_access | bool or database_public_access | bool) + + - name: "Hetzner Cloud: Create or modify Postgres cluster firewall" + hetzner.hcloud.firewall: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + state: "present" + name: "{{ patroni_cluster_name }}-firewall" + rules: "{{ rules }}" + vars: + rules: >- + {{ + ([ + { + 'description': 'SSH', + 'direction': 'in', + 'protocol': 'tcp', + 'port': ansible_ssh_port | default('22'), + 'source_ips': [server_network_ip_range] + } + ]) + + ([ + { + 'description': 'Netdata', + 'direction': 'in', + 'protocol': 'tcp', + 'port': netdata_port | default('19999'), + 'source_ips': [server_network_ip_range] + } + ] if netdata_install | bool else []) + + ([ + { + 'description': 'HAProxy - master', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.master, + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'HAProxy - replicas', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas, + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'HAProxy - replicas_sync', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas_sync, + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'HAProxy - replicas_async', + 'direction': 'in', + 'protocol': 'tcp', + 'port': haproxy_listen_port.replicas_async, + 'source_ips': [server_network_ip_range] + } + ] if with_haproxy_load_balancing | bool else []) + + ([ + { + 'description': 'PgBouncer', + 'direction': 'in', + 'protocol': 'tcp', + 'port': pgbouncer_listen_port | default('6432'), + 'source_ips': [server_network_ip_range] + } + ] if pgbouncer_install | bool else []) + + ([ + { + 'description': 'PostgreSQL', + 'direction': 'in', + 'protocol': 'tcp', + 'port': postgresql_port | default('5432'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Patroni', + 'direction': 'in', + 'protocol': 'tcp', + 'port': patroni_restapi_port | default('8008'), + 'source_ips': [server_network_ip_range] + } + ]) + + ([ + { + 'description': 'ETCD', + 'direction': 'in', + 'protocol': 'tcp', + 'port': etcd_client_port | default('2379'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'ETCD', + 'direction': 'in', + 'protocol': 'tcp', + 'port': etcd_peer_port | default('2380'), + 'source_ips': [server_network_ip_range] + } + ] if dcs_type == 'etcd' else []) + + ([ + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.dns | default('8600'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.http | default('8500'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.rpc | default('8400'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.serf_lan | default('8301'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.serf_wan | default('8302'), + 'source_ips': [server_network_ip_range] + }, + { + 'description': 'Consul', + 'direction': 'in', + 'protocol': 'tcp', + 'port': consul_ports.server | default('8300'), + 'source_ips': [server_network_ip_range] + } + ] if dcs_type == 'consul' else []) + }} + when: + - cloud_firewall | bool + + # 
Object Storage (S3 bucket for backups) + - name: "Hetzner Cloud: Create Object Storage (S3 bucket) '{{ hetzner_object_storage_name }}'" + amazon.aws.s3_bucket: + endpoint_url: "{{ hetzner_object_storage_endpoint }}" + ceph: true + aws_access_key: "{{ hetzner_object_storage_access_key }}" + aws_secret_key: "{{ hetzner_object_storage_secret_key }}" + name: "{{ hetzner_object_storage_name }}" + region: "{{ hetzner_object_storage_region }}" + requester_pays: false + state: present + register: s3_bucket_result + failed_when: s3_bucket_result.failed and not "GetBucketRequestPayment" in s3_bucket_result.msg + # TODO: https://github.com/ansible-collections/amazon.aws/issues/2447 + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - hetzner_object_storage_create | bool + - hetzner_object_storage_access_key | length > 0 + - hetzner_object_storage_secret_key | length > 0 + + # Server and volume + - name: "Hetzner Cloud: Create or modify server" + hetzner.hcloud.server: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: present + server_type: "{{ server_type | lower }}" + image: "{{ server_image | lower }}" + ssh_keys: "{{ ssh_key_names }}" + location: "{{ server_location }}" + enable_ipv4: true + enable_ipv6: false + private_networks: + - "{{ server_network }}" + firewalls: "{{ firewalls_list }}" + labels: + cluster: "{{ patroni_cluster_name }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + register: server_result + vars: + firewalls_list: >- + {{ + ([patroni_cluster_name + '-public-firewall'] if cloud_firewall | bool and + (ssh_public_access | bool or netdata_public_access | bool or database_public_access | bool) else []) + + ([patroni_cluster_name + '-firewall'] if cloud_firewall | bool else []) + }} + + - name: "Hetzner Cloud: Add server to network '{{ server_network }}'" + hetzner.hcloud.server_network: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + network: "{{ server_network }}" + server: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: present + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + when: server_network | length > 0 + + - name: "Hetzner Cloud: Create or modify volume" + hetzner.hcloud.volume: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + state: present + size: "{{ volume_size | int }}" + server: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + + # Load Balancer + - name: "Hetzner Cloud: Create or modify Load Balancer" + hetzner.hcloud.load_balancer: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + load_balancer_type: "{{ hetzner_load_balancer_type | default('lb21') }}" + algorithm: round_robin + delete_protection: true + state: present + labels: + cluster: "{{ patroni_cluster_name }}" + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count 
| int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Configure Load Balancer service" + hetzner.hcloud.load_balancer_service: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + load_balancer: "{{ patroni_cluster_name }}-{{ item }}" + listen_port: "{{ hetzner_load_balancer_port | default(database_port) }}" + destination_port: "{{ pgbouncer_listen_port | default(6432) if pgbouncer_install | bool else postgresql_port | default(5432) }}" + protocol: tcp + health_check: + protocol: http + port: "{{ patroni_restapi_port }}" + interval: 5 + timeout: 2 + retries: 3 + http: + path: "/{{ item }}" + status_codes: + - "200" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + vars: + database_port: "{{ pgbouncer_listen_port | default(6432) if pgbouncer_install | bool else postgresql_port | default(5432) }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Add Load Balancer to network '{{ server_network }}'" + hetzner.hcloud.load_balancer_network: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + load_balancer: "{{ patroni_cluster_name }}-{{ item }}" + network: "{{ server_network }}" + state: present + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Disable public interface for Load Balancer" + hetzner.hcloud.load_balancer: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + disable_public_interface: true + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + register: hetzner_load_balancer_disable_public + until: hetzner_load_balancer_disable_public is success + delay: 5 + retries: 3 + when: (cloud_load_balancer | bool and not database_public_access | bool) and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Enable public interface for Load Balancer" + hetzner.hcloud.load_balancer: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + disable_public_interface: false + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + register: hetzner_load_balancer_enable_public + until: hetzner_load_balancer_enable_public is success + delay: 5 + retries: 3 + when: (cloud_load_balancer | bool and database_public_access | bool) and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Add servers to Load Balancer (use label_selector 'cluster={{ patroni_cluster_name }}')" + hetzner.hcloud.load_balancer_target: + api_token: "{{ lookup('ansible.builtin.env', 
'HCLOUD_API_TOKEN') }}" + load_balancer: "{{ patroni_cluster_name }}-{{ item }}" + type: label_selector + label_selector: "cluster={{ patroni_cluster_name }}" + use_private_ip: true + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Gather information about Load Balancers" + hetzner.hcloud.load_balancer_info: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + register: hetzner_load_balancer + until: hetzner_load_balancer is success + delay: 5 + retries: 3 + when: cloud_load_balancer | bool + when: state == 'present' + +- name: Wait for host to be available via SSH + ansible.builtin.wait_for: + host: "{{ item.hcloud_server.ipv4_address }}" + port: 22 + delay: 5 + timeout: 300 + loop: "{{ server_result.results }}" + loop_control: + label: "{{ item.hcloud_server.ipv4_address | default('N/A') }}" + when: + - server_result.results is defined + - item.hcloud_server is defined + +# Info +- name: Server info + ansible.builtin.debug: + msg: + id: "{{ item.hcloud_server.id }}" + name: "{{ item.hcloud_server.name }}" + image: "{{ item.hcloud_server.image }}" + type: "{{ item.hcloud_server.server_type }}" + volume_size: "{{ volume_size }} GB" + public_ip: "{{ item.hcloud_server.ipv4_address }}" + private_ip: "{{ item.hcloud_server.private_networks_info[0].ip }}" + loop: "{{ server_result.results }}" + loop_control: + index_var: idx + label: "{{ item.hcloud_server.ipv4_address | default('N/A') }}" + when: + - server_result.results is defined + - item.hcloud_server is defined + +# Inventory +- block: + - name: "Inventory | Initialize ip_addresses variable" + ansible.builtin.set_fact: + ip_addresses: [] + + - name: "Inventory | Extract IP addresses" + ansible.builtin.set_fact: + ip_addresses: >- + {{ ip_addresses + + [{'public_ip': item.hcloud_server.ipv4_address, + 'private_ip': item.hcloud_server.private_networks_info[0].ip}] + }} + loop: "{{ server_result.results | selectattr('hcloud_server', 'defined') }}" + loop_control: + label: "public_ip: {{ item.hcloud_server.ipv4_address }}, private_ip: {{ item.hcloud_server.private_networks_info[0].ip }}" + + - name: "Inventory | Generate in-memory inventory" + ansible.builtin.import_tasks: inventory.yml + when: + - server_result.results is defined + - server_result.results | selectattr('hcloud_server', 'defined') + +# Delete the temporary ssh key from the cloud after creating the server +- name: "Hetzner Cloud: Remove temporary SSH key {{ ssh_key_name }} from cloud" + hetzner.hcloud.ssh_key: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ ssh_key_name }}" + state: absent + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + +# Delete (if state is absent) +- block: + - name: "Hetzner Cloud: Delete server" + hetzner.hcloud.server: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + state: absent + location: "{{ server_location }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}" + + - name: "Hetzner Cloud: Delete volume" + hetzner.hcloud.volume: + api_token: "{{ lookup('ansible.builtin.env', 
'HCLOUD_API_TOKEN') }}" + name: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + state: absent + location: "{{ server_location }}" + loop: "{{ range(0, server_count | int) | list }}" + loop_control: + index_var: idx + label: "{{ server_name | lower }}{{ '%02d' % (idx + 1) }}-storage" + + - name: "Hetzner Cloud: Disable protection for Load Balancer (if exists)" + hetzner.hcloud.load_balancer: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + delete_protection: false + failed_when: false + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Delete Load Balancer" + hetzner.hcloud.load_balancer: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + name: "{{ patroni_cluster_name }}-{{ item }}" + location: "{{ server_location }}" + state: absent + loop: + - "primary" + - "replica" + - "sync" + loop_control: + label: "{{ patroni_cluster_name }}-{{ item }}" + when: cloud_load_balancer | bool and + (item == 'primary' or + (item == 'replica' and server_count | int > 1) or + (item in ['sync', 'async'] and server_count | int > 1 and synchronous_mode | bool)) + + - name: "Hetzner Cloud: Delete public firewall" + hetzner.hcloud.firewall: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + state: "absent" + name: "{{ patroni_cluster_name }}-public-firewall" + + - name: "Hetzner Cloud: Delete Postgres cluster firewall" + hetzner.hcloud.firewall: + api_token: "{{ lookup('ansible.builtin.env', 'HCLOUD_API_TOKEN') }}" + state: "absent" + name: "{{ patroni_cluster_name }}-firewall" + + - name: "Hetzner Cloud: Delete Object Storage (S3 bucket) '{{ hetzner_object_storage_name }}'" + amazon.aws.s3_bucket: + endpoint_url: "{{ hetzner_object_storage_endpoint }}" + ceph: true + access_key: "{{ hetzner_object_storage_access_key }}" + secret_key: "{{ hetzner_object_storage_secret_key }}" + name: "{{ hetzner_object_storage_name }}" + region: "{{ hetzner_object_storage_region }}" + requester_pays: false + state: absent + force: true + when: + - (pgbackrest_install | bool or wal_g_install | bool) + - hetzner_object_storage_absent | bool + - hetzner_object_storage_access_key | length > 0 + - hetzner_object_storage_secret_key | length > 0 + when: state == 'absent' diff --git a/automation/roles/cloud_resources/tasks/inventory.yml b/automation/roles/cloud_resources/tasks/inventory.yml new file mode 100644 index 000000000..c6301c104 --- /dev/null +++ b/automation/roles/cloud_resources/tasks/inventory.yml @@ -0,0 +1,71 @@ +--- +- name: "Inventory | Set variable: ssh_private_key_file" + ansible.builtin.set_fact: + ssh_private_key_file: "~{{ lookup('env', 'USER') }}/.ssh/{{ tmp_ssh_key_name }}" + when: + - ssh_key_name is defined + - tmp_ssh_key_name is defined + - ssh_key_name == tmp_ssh_key_name + +- name: "Inventory | Add host to 'postgres_cluster', 'master' groups" + ansible.builtin.add_host: + name: "{{ item.private_ip }}" + groups: + - postgres_cluster + - master + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_private_key_file: "{{ ssh_private_key_file | default(None) }}" + loop: "{{ [ip_addresses[0]] }}" # add the first item in the list + loop_control: + label: "{{ 
item.public_ip }}" + changed_when: false + +- name: "Inventory | Add host to 'postgres_cluster', 'replica' groups" + ansible.builtin.add_host: + name: "{{ item.private_ip }}" + groups: + - postgres_cluster + - replica + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_private_key_file: "{{ ssh_private_key_file | default(None) }}" + loop: "{{ ip_addresses[1:] }}" # start with the 2nd item of the list + loop_control: + label: "{{ item.public_ip }}" + when: ip_addresses | length > 1 # only if there is more than one item + changed_when: false + +- name: "Inventory | Add host to 'balancers' group" + ansible.builtin.add_host: + name: "{{ item.private_ip }}" + group: balancers + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_private_key_file: "{{ ssh_private_key_file | default(None) }}" + loop: "{{ ip_addresses }}" + loop_control: + label: "{{ item.public_ip }}" + changed_when: false + when: with_haproxy_load_balancing | bool + +- name: "Inventory | Add host to 'etcd_cluster' group" + ansible.builtin.add_host: + name: "{{ item.private_ip }}" + group: etcd_cluster + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_private_key_file: "{{ ssh_private_key_file | default(None) }}" + loop: "{{ ip_addresses[:7] }}" # no more than 7 servers for the etcd cluster + loop_control: + label: "{{ item.public_ip }}" + changed_when: false + when: not dcs_exists | bool and dcs_type == "etcd" + +- name: "Inventory | Add host to 'consul_instances' group" + ansible.builtin.add_host: + name: "{{ item.private_ip }}" + group: consul_instances + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_private_key_file: "{{ ssh_private_key_file | default(None) }}" + loop: "{{ ip_addresses }}" + loop_control: + label: "{{ item.public_ip }}" + changed_when: false + when: dcs_type == "consul" diff --git a/automation/roles/cloud_resources/tasks/main.yml b/automation/roles/cloud_resources/tasks/main.yml new file mode 100644 index 000000000..aa46d5b2e --- /dev/null +++ b/automation/roles/cloud_resources/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- name: Ensure that required variables are specified + ansible.builtin.fail: + msg: + - "One or more required variables have empty values." + - "Please specify value for variables: 'server_type', 'server_image', 'server_location'." 
+ when: state == 'present' and + (server_type | length < 1 or + (server_image | length < 1 and cloud_provider != 'azure') or + server_location | length < 1) + +# if ssh_key_name is not specified +# with each new execution of the playbook, a new temporary ssh key is created +- block: + - name: Generate a unique temporary SSH key name + ansible.builtin.set_fact: + tmp_ssh_key_name: "ssh_key_tmp_{{ lookup('password', '/dev/null chars=ascii_lowercase length=7') }}" + + - name: Generate a new temporary SSH key to access the server for deployment + ansible.builtin.user: + name: "{{ lookup('env', 'USER') }}" + generate_ssh_key: true + ssh_key_bits: 2048 + ssh_key_file: ".ssh/{{ tmp_ssh_key_name }}" + ssh_key_comment: "{{ tmp_ssh_key_name }}" + force: true + register: tmp_ssh_key_result + when: + - state == 'present' + - ssh_key_name | length < 1 + - ssh_key_content | length < 1 + - not (postgresql_cluster_maintenance|default(false)|bool) # exclude for config_pgcluster.yml + +# set_fact: ssh_key_name and ssh_key_content +- name: "Set variable: ssh_key_name and ssh_key_content" + ansible.builtin.set_fact: + ssh_key_name: "{{ tmp_ssh_key_name }}" + ssh_key_content: "{{ tmp_ssh_key_result.ssh_public_key }}" + when: + - tmp_ssh_key_result.ssh_public_key is defined + - tmp_ssh_key_result.ssh_public_key | length > 0 + +- name: Import tasks for AWS + ansible.builtin.import_tasks: aws.yml + when: cloud_provider | lower == 'aws' + +- name: Import tasks for GCP + ansible.builtin.import_tasks: gcp.yml + when: cloud_provider | lower == 'gcp' + +- name: Import tasks for Azure + ansible.builtin.import_tasks: azure.yml + when: cloud_provider | lower == 'azure' + +- name: Import tasks for DigitalOcean + ansible.builtin.import_tasks: digitalocean.yml + when: cloud_provider | lower in ['digitalocean', 'do'] + +- name: Import tasks for Hetzner Cloud + ansible.builtin.import_tasks: hetzner.yml + when: cloud_provider | lower == 'hetzner' diff --git a/automation/roles/common/README.md b/automation/roles/common/README.md new file mode 100644 index 000000000..99bee19fa --- /dev/null +++ b/automation/roles/common/README.md @@ -0,0 +1,3 @@ +# Ansible Role: common + +It is intended for storing default variable files for a collection. diff --git a/automation/roles/common/defaults/Debian.yml b/automation/roles/common/defaults/Debian.yml new file mode 100644 index 000000000..29a27f033 --- /dev/null +++ b/automation/roles/common/defaults/Debian.yml @@ -0,0 +1,212 @@ +--- +# yamllint disable rule:line-length + +# PostgreSQL variables +postgresql_cluster_name: "main" +# You can specify custom data dir path. Example: "/pgdata/{{ postgresql_version }}/main" +postgresql_data_dir: "\ + {% if cloud_provider | default('') | length > 0 %}\ + {{ pg_data_mount_path | default('/pgdata') }}/{{ postgresql_version }}/{{ postgresql_cluster_name }}\ + {% else %}\ + /var/lib/postgresql/{{ postgresql_version }}/{{ postgresql_cluster_name }}\ + {% endif %}" +# Note: When deploying to cloud providers, we create a disk and mount the data directory, +# along the path defined in the "pg_data_mount_path" variable (or use "/pgdata" by default). + +# You can specify custom WAL dir path. 
Example: "/pgwal/{{ postgresql_version }}/pg_wal" +postgresql_wal_dir: "" # if defined, symlink will be created [optional] +postgresql_conf_dir: "/etc/postgresql/{{ postgresql_version }}/{{ postgresql_cluster_name }}" +postgresql_bin_dir: "/usr/lib/postgresql/{{ postgresql_version }}/bin" +postgresql_log_dir: "/var/log/postgresql" +postgresql_unix_socket_dir: "/var/run/postgresql" +postgresql_home_dir: "/var/lib/postgresql" + +# stats_temp_directory (mount the statistics directory in tmpfs) +# if postgresql_version < 15 +postgresql_stats_temp_directory_path: "/var/lib/pgsql_stats_tmp" # or 'none' +postgresql_stats_temp_directory_size: "1024m" + +# Repository +apt_repository: + - repo: "deb https://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main" # postgresql apt repository + key: "https://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc" # postgresql apt repository key +# - repo: "deb https://deb.debian.org/debian/ {{ ansible_distribution_release }} main" +# - repo: "deb https://deb.debian.org/debian/ {{ ansible_distribution_release }}-updates main" +# - repo: "deb https://security.debian.org/debian-security/ {{ ansible_distribution_release }}/updates main" + +# Packages +system_packages: + - python3 + - python3-dev + - python3-psycopg2 + - python3-setuptools + - python3-pip + - curl + - less + - sudo + - vim + - gcc + - jq + - iptables + - acl + - dnsutils + - moreutils + - unzip + - tar + - zstd + +install_perf: false # or 'true' to install "perf" (Linux profiling with performance counters) and "FlameGraph". + +postgresql_packages: + - postgresql-{{ postgresql_version }} + - postgresql-client-{{ postgresql_version }} + - postgresql-contrib-{{ postgresql_version }} + - postgresql-server-dev-{{ postgresql_version }} + - postgresql-{{ postgresql_version }}-dbgsym +# - postgresql-{{ postgresql_version }}-repack +# - postgresql-{{ postgresql_version }}-cron +# - postgresql-{{ postgresql_version }}-pg-stat-kcache +# - postgresql-{{ postgresql_version }}-pg-wait-sampling +# - postgresql-{{ postgresql_version }}-postgis-3 +# - postgresql-{{ postgresql_version }}-pgrouting +# - postgresql-{{ postgresql_version }}-pgvector +# - postgresql-{{ postgresql_version }}-pgaudit +# - postgresql-{{ postgresql_version }}-partman + +# Extra packages +etcd_package_repo: "https://github.com/etcd-io/etcd/releases/download/v%7B%7B%20etcd_version%20%7D%7D/etcd-v%7B%7B%20etcd_version%20%7D%7D-linux-%7B%7B%20etcd_architecture_map[ansible_architecture]%20%7D%7D.tar.gz" +vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D_Linux_%7B%7B%20vip_manager_architecture_map[ansible_architecture]%20%7D%7D.deb" + +installation_method: "repo" # "repo" (default) or "file" + +# The Patroni package will be installed from the deb package by default. +# You also have the option of choosing an installation method using the pip package. 
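# For example, to switch to the pip-based installation of a pinned Patroni release,
# the overrides would look like this (a sketch; the version shown is illustrative):
#
# patroni_installation_method: "pip"
# patroni_install_version: "3.3.2"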
+patroni_installation_method: "deb" # "deb" (default) or "pip"
+
+# if patroni_installation_method: "pip"
+patroni_install_version: "latest" # or a specific version (e.g., '3.3.2')
+
+# if patroni_installation_method: "deb"
+patroni_packages:
+  - patroni
+  - python3-{{ dcs_type }}
+
+# if patroni_installation_method: "deb" (optional)
+# You can preload the patroni deb package to your APT repository, or explicitly specify the path to the package in this variable:
+patroni_deb_package_repo: []
+# - "https://apt.postgresql.org/pub/repos/apt/pool/main/p/patroni/patroni_3.3.0-1.pgdg22.04%2B1_all.deb" # (package for Ubuntu 22.04)
+
+# if patroni_installation_method: "pip" (optional)
+# Packages from your repository will be used for installation. By default, it is installed from the public pip repository.
+pip_package_repo: "https://bootstrap.pypa.io/get-pip.py" # the latest pip3 version for python3 (or use "pip-.tar.gz").
+patroni_pip_requirements_repo: []
+# - "http://my-repo.url/setuptools-41.2.0.zip"
+# - "http://my-repo.url/setuptools_scm-3.3.3.tar.gz"
+# - "http://my-repo.url/urllib3-1.24.3.tar.gz"
+# - "http://my-repo.url/PyYAML-5.1.2.tar.gz"
+# - "http://my-repo.url/chardet-3.0.4.tar.gz"
+# - "http://my-repo.url/idna-2.8.tar.gz"
+# - "http://my-repo.url/certifi-2019.9.11.tar.gz"
+# - "http://my-repo.url/requests-2.22.0.tar.gz"
+# - "http://my-repo.url/six-1.12.0.tar.gz"
+# - "http://my-repo.url/kazoo-2.6.1.tar.gz"
+# - "http://my-repo.url/dnspython-1.16.0.zip"
+# - "http://my-repo.url/python-etcd-0.4.5.tar.gz"
+# - "http://my-repo.url/Click-7.0.tar.gz"
+# - "http://my-repo.url/prettytable-0.7.2.tar.gz"
+# - "http://my-repo.url/pytz-2019.2.tar.gz"
+# - "http://my-repo.url/tzlocal-2.0.0.tar.gz"
+# - "http://my-repo.url/wheel-0.33.6.tar.gz"
+# - "http://my-repo.url/python-dateutil-2.8.0.tar.gz"
+# - "http://my-repo.url/psutil-5.6.3.tar.gz"
+# - "http://my-repo.url/cdiff-1.0.tar.gz"
+patroni_pip_package_repo: []
+# - "http://my-repo.url/patroni-1.6.0.tar.gz"
+
+# if with_haproxy_load_balancing: true
+haproxy_installation_method: "deb" # "deb" (default) or "src"
+confd_package_repo: "https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-%7B%7B%20confd_architecture_map[ansible_architecture]%20%7D%7D"
+
+# if haproxy_installation_method: "src" (optional)
+haproxy_major: "1.8"
+haproxy_version: "1.8.31" # version to build from source
+lua_src_repo: "https://www.lua.org/ftp/lua-5.3.5.tar.gz" # required to build haproxy
+haproxy_src_repo: "https://www.haproxy.org/download/%7B%7B%20haproxy_major%20%7D%7D/src/haproxy-%7B%7B%20haproxy_version%20%7D%7D.tar.gz"
+haproxy_compile_requirements:
+  - unzip
+  - gzip
+  - make
+  - gcc
+  - build-essential
+  - libc6-dev
+  - libpcre3-dev
+  - liblua5.3-dev
+  - libreadline-dev
+  - zlib1g-dev
+  - libsystemd-dev
+  - ca-certificates
+  - libssl-dev
+
+# ================================================================================================= #
+# Offline installation (if installation_method: "file")
+#
+# You can also download the necessary packages into /autobase/automation/files/ directory.
+# Packages from this directory will be used for installation.
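# For example, an artifact such as the etcd archive can be pre-staged on the control
# node with a task like the following (a minimal sketch, not part of the role; the
# localhost delegation and destination path are assumptions, adjust to your layout):
#
# - name: Download the etcd archive into the offline files directory
#   ansible.builtin.get_url:
#     url: "{{ etcd_package_repo }}"
#     dest: "/autobase/automation/files/{{ etcd_package_file }}"
#   delegate_to: localhost
#   run_once: true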
+ +# if installation_method: "file" and patroni_installation_method: "deb" +patroni_deb_package_file: "patroni_3.3.0-1.pgdg22.04%2B1_all.deb" +# (package for Ubuntu 22.04) https://apt.postgresql.org/pub/repos/apt/pool/main/p/patroni/ + +# if installation_method: "file" and patroni_installation_method: "pip" +pip_package_file: "pip-24.2.tar.gz" # https://pypi.org/project/pip/#files +patroni_pip_requirements_file: [] +# - "setuptools-41.2.0.zip" # https://pypi.org/project/setuptools/#files +# - "setuptools_scm-3.3.3.tar.gz" # https://pypi.org/project/setuptools-scm/#files +# - "urllib3-1.24.3.tar.gz" # https://pypi.org/project/urllib3/1.24.3/#files +# - "PyYAML-5.1.2.tar.gz" # https://pypi.org/project/PyYAML/#files +# - "chardet-3.0.4.tar.gz" # https://pypi.org/project/chardet/#files # (required for "requests") +# - "idna-2.8.tar.gz" # https://pypi.org/project/idna/#files # (required for "requests") +# - "certifi-2019.9.11.tar.gz" # https://pypi.org/project/certifi/#files # (required for "requests") +# - "requests-2.22.0.tar.gz" # https://pypi.org/project/requests/#files +# - "six-1.12.0.tar.gz" # https://pypi.org/project/six/#files +# - "kazoo-2.6.1.tar.gz" # https://pypi.org/project/kazoo/#files +# - "dnspython-1.16.0.zip" # https://pypi.org/project/dnspython/#files # (required for "python-etcd") +# - "python-etcd-0.4.5.tar.gz" # https://pypi.org/project/python-etcd/#files +# - "Click-7.0.tar.gz" # https://pypi.org/project/click/#files +# - "prettytable-0.7.2.tar.gz" # https://pypi.org/project/PrettyTable/#files +# - "pytz-2019.2.tar.gz" # https://pypi.org/project/pytz/#files # (required for "tzlocal") +# - "tzlocal-2.0.0.tar.gz" # https://pypi.org/project/tzlocal/#files +# - "wheel-0.33.6.tar.gz" # https://pypi.org/project/wheel/#files +# - "python-dateutil-2.8.0.tar.gz" # https://pypi.org/project/python-dateutil/#files +# - "psutil-5.6.3.tar.gz" # https://pypi.org/project/psutil/#files +# - "cdiff-1.0.tar.gz" # https://pypi.org/project/cdiff/#files +patroni_pip_package_file: [] +# - "patroni-3.3.2.tar.gz" # https://pypi.org/project/patroni/#files + +# additional packages +etcd_package_file: "etcd-v3.5.17-linux-{{ etcd_architecture_map[ansible_architecture] }}.tar.gz" # https://github.com/etcd-io/etcd/releases +vip_manager_package_file: "vip-manager_3.0.0_Linux_{{ vip_manager_architecture_map[ansible_architecture] }}.deb" # https://github.com/cybertec-postgresql/vip-manager/releases +wal_g_package_file: "wal-g-pg-ubuntu-20.04-{{ wal_g_architecture_map[ansible_architecture] }}.tar.gz" # https://github.com/wal-g/wal-g/releases + +# if with_haproxy_load_balancing: true +haproxy_package_file: [] +# - "haproxy_1.8.25-1~bpo9+1_amd64.deb" + +confd_package_file: "confd-0.16.0-linux-{{ confd_architecture_map[ansible_architecture] }}" # https://github.com/kelseyhightower/confd/releases + +# if haproxy_installation_method: 'src' (optional) +lua_src_file: "lua-5.3.5.tar.gz" # https://www.lua.org/ftp/lua-5.3.5.tar.gz (required for build haproxy) +haproxy_src_file: "haproxy-1.8.31.tar.gz" # http://www.haproxy.org/download/1.8/src/ + +# ------------------------------------------------------------------------------------------------- # +# (optional) Specify additional deb packages if required (for any installation_method) +# this packages will be installed before all other packages. 
+packages_from_file: [] +# - "my-package-name_1_amd64.deb" +# - "my-package-name_2_amd64.deb" +# - "" + +# ---------------------------------------------------------------------------------------------------------------------------------- +# Attention! If you want to use the installation method "file". +# You need to independently determine all the necessary the dependencies of deb packages for your version of the Linux distribution. +# ---------------------------------------------------------------------------------------------------------------------------------- diff --git a/automation/roles/common/defaults/RedHat.yml b/automation/roles/common/defaults/RedHat.yml new file mode 100644 index 000000000..cbe40cde8 --- /dev/null +++ b/automation/roles/common/defaults/RedHat.yml @@ -0,0 +1,240 @@ +--- +# yamllint disable rule:line-length + +# PostgreSQL variables +# +# You can specify custom data dir path. Example: "/pgdata/{{ postgresql_version }}/data" +postgresql_data_dir: "\ + {% if cloud_provider | default('') | length > 0 %}\ + {{ pg_data_mount_path | default('/pgdata') }}/{{ postgresql_version }}/data\ + {% else %}\ + /var/lib/pgsql/{{ postgresql_version }}/data\ + {% endif %}" +# Note: When deploying to cloud providers, we create a disk and mount the data directory, +# along the path defined in the "pg_data_mount_path" variable (or use "/pgdata" by default). + +# You can specify custom WAL dir path. Example: "/pgwal/{{ postgresql_version }}/pg_wal" +postgresql_wal_dir: "" # if defined, symlink will be created [optional] +postgresql_conf_dir: "{{ postgresql_data_dir }}" +postgresql_bin_dir: "/usr/pgsql-{{ postgresql_version }}/bin" +postgresql_log_dir: "/var/log/postgresql" +postgresql_unix_socket_dir: "/var/run/postgresql" +postgresql_home_dir: "/var/lib/pgsql" + +# stats_temp_directory (mount the statistics directory in tmpfs) +# if postgresql_version < 15 +postgresql_stats_temp_directory_path: "/var/lib/pgsql_stats_tmp" # or 'none' +postgresql_stats_temp_directory_size: "1024m" + +# Repository +yum_repository: [] +# - name: "repo name" +# description: "repo description" +# baseurl: "https://my-repo.url/" +# gpgkey: "https://my-repo-key.url/" +# gpgcheck: "yes" + +install_postgresql_repo: true # or 'false' (installed from the package "pgdg-redhat-repo-latest.noarch.rpm") +install_epel_repo: true # or 'false' (installed from the package "epel-release-latest.noarch.rpm") +install_scl_repo: true # or 'false' (Redhat 7 family only) + +# Packages (for yum repo) +python_version: "3" # override the version (e.q, 3.11) only if patroni_installation_method: "pip" is used + +os_specific_packages: + RedHat-8: + - python2 + - python3-libselinux + - python3-libsemanage + - python3-policycoreutils + - dnf-utils + RedHat-9: + - python3-libselinux + - python3-libsemanage + - python3-policycoreutils + - dnf-utils +system_packages: + - "{{ os_specific_packages[ansible_os_family ~ '-' ~ ansible_distribution_major_version] }}" + - python{{ python_version }} + - python{{ python_version }}-devel + - python{{ python_version }}-psycopg2 + - python{{ python_version }}-setuptools + - python{{ python_version }}-pip + - python{{ python_version }}-urllib3 + - less + - sudo + - vim + - gcc + - jq + - iptables + - acl + - bind-utils + - moreutils + - unzip + - tar + - zstd + +install_perf: false # or 'true' to install "perf" (Linux profiling with performance counters) and "FlameGraph". 
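# Note: the os_specific_packages entry in system_packages above is resolved per host
# from Ansible facts. For example, on an AlmaLinux 9 host, ansible_os_family is
# "RedHat" and ansible_distribution_major_version is "9", so the "RedHat-9" list is
# selected. A quick way to inspect the resolved list (a sketch, not part of the role):
#
# - name: Show the resolved OS-specific packages
#   ansible.builtin.debug:
#     msg: "{{ os_specific_packages[ansible_os_family ~ '-' ~ ansible_distribution_major_version] }}"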
+ +# The glibc-langpack package includes the basic information required to support the language in your applications. +# for RHEL version 8 (only) +glibc_langpack: + - "glibc-langpack-en" +# - "glibc-langpack-ru" +# - "glibc-langpack-de" + +postgresql_packages: + - postgresql{{ postgresql_version }} + - postgresql{{ postgresql_version }}-server + - postgresql{{ postgresql_version }}-contrib + - postgresql{{ postgresql_version }}-devel +# - postgresql{{ postgresql_version }}-debuginfo +# - pg_repack_{{ postgresql_version }} +# - pg_cron_{{ postgresql_version }} +# - pg_stat_kcache_{{ postgresql_version }} +# - pg_wait_sampling_{{ postgresql_version }} +# - postgis33_{{ postgresql_version }} +# - pgrouting_{{ postgresql_version }} +# - pgvector_{{ postgresql_version }} +# - pgaudit17_{{ postgresql_version }} +# - pg_partman_{{ postgresql_version }} + +# Extra packages +etcd_package_repo: "https://github.com/etcd-io/etcd/releases/download/v%7B%7B%20etcd_version%20%7D%7D/etcd-v%7B%7B%20etcd_version%20%7D%7D-linux-%7B%7B%20etcd_architecture_map[ansible_architecture]%20%7D%7D.tar.gz" +vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D_Linux_%7B%7B%20vip_manager_architecture_map[ansible_architecture]%20%7D%7D.rpm" + +installation_method: "repo" # "repo" (default) or "file" + +# The Patroni package will be installed from the rpm package by default. +# You also have the option of choosing an installation method using the pip package. +patroni_installation_method: "rpm" # "rpm" (default) or "pip" + +# if patroni_installation_method: "pip" +patroni_install_version: "latest" # or a specific version (e.q., '3.3.2') + +# if patroni_installation_method: "rpm" +patroni_packages: + - patroni + - patroni-{{ dcs_type }} + +# if patroni_installation_method: "rpm" (optional) +# You can preload the patroni rpm package to your YUM repository, or explicitly specify the path to the package in this variable: +patroni_rpm_package_repo: [] +# - "https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-8-x86_64/patroni-3.3.2-1PGDG.rhel8.noarch.rpm" # (package for RHEL 8) + +# if patroni_installation_method: "pip" (optional) +# Packages from your repository will be used to install. By default, it is installed from the public pip repository. +pip_package_repo: "https://bootstrap.pypa.io/get-pip.py" # latest version pip3 for python3 (or use "pip-.tar.gz"). 
+patroni_pip_requirements_repo: [] +# - "http://my-repo.url/setuptools-41.2.0.zip" +# - "http://my-repo.url/setuptools_scm-3.3.3.tar.gz" +# - "http://my-repo.url/urllib3-1.24.3.tar.gz" +# - "http://my-repo.url/PyYAML-5.1.2.tar.gz" +# - "http://my-repo.url/chardet-3.0.4.tar.gz" +# - "http://my-repo.url/idna-2.8.tar.gz" +# - "http://my-repo.url/certifi-2019.9.11.tar.gz" +# - "http://my-repo.url/requests-2.22.0.tar.gz" +# - "http://my-repo.url/six-1.12.0.tar.gz" +# - "http://my-repo.url/kazoo-2.6.1.tar.gz" +# - "http://my-repo.url/dnspython-1.16.0.zip" +# - "http://my-repo.url/python-etcd-0.4.5.tar.gz" +# - "http://my-repo.url/Click-7.0.tar.gz" +# - "http://my-repo.url/prettytable-0.7.2.tar.gz" +# - "http://my-repo.url/pytz-2019.2.tar.gz" +# - "http://my-repo.url/tzlocal-2.0.0.tar.gz" +# - "http://my-repo.url/wheel-0.33.6.tar.gz" +# - "http://my-repo.url/python-dateutil-2.8.0.tar.gz" +# - "http://my-repo.url/psutil-5.6.3.tar.gz" +# - "http://my-repo.url/cdiff-1.0.tar.gz" +patroni_pip_package_repo: [] +# - "http://my-repo.url/patroni-1.6.0.tar.gz" + +# if with_haproxy_load_balancing: true +haproxy_installation_method: "rpm" # "rpm" (default) or "src" +confd_package_repo: "https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-%7B%7B%20confd_architecture_map[ansible_architecture]%20%7D%7D" + +# if haproxy_installation_method: "src" (optional) +haproxy_major: "1.8" +haproxy_version: "1.8.31" # version to build from source +lua_src_repo: "https://www.lua.org/ftp/lua-5.3.5.tar.gz" # required for build haproxy +haproxy_src_repo: "https://www.haproxy.org/download/%7B%7B%20haproxy_major%20%7D%7D/src/haproxy-%7B%7B%20haproxy_version%20%7D%7D.tar.gz" +haproxy_compile_requirements: + - unzip + - gzip + - make + - gcc + - gcc-c++ + - pcre-devel + - zlib-devel + - readline-devel + - openssl + - openssl-devel + - openssl-libs + - systemd-devel + +# ================================================================================================= # +# Offline installation (if installation_method: "file") +# +# You can also download the necessary packages into /autobase/automation/files/ directory. +# Packages from this directory will be used for installation. 
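# When installation_method is "file", it can help to fail early if an expected
# artifact has not been pre-staged (a sketch, not part of the role):
#
# - name: Check that the Patroni rpm is pre-staged
#   ansible.builtin.stat:
#     path: "files/{{ patroni_rpm_package_file }}"
#   delegate_to: localhost
#   register: patroni_rpm_stat
#
# - name: Fail if the Patroni rpm is missing
#   ansible.builtin.assert:
#     that: patroni_rpm_stat.stat.exists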
+ +# if installation_method: "file" and patroni_installation_method: "rpm" +patroni_rpm_package_file: "patroni-3.3.2-1PGDG.rhel8.noarch.rpm" +# (package for RHEL 8) https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-8-x86_64/ + +# if installation_method: "file" and patroni_installation_method: "pip" +pip_package_file: "pip-24.2.tar.gz" # https://pypi.org/project/pip/#files +patroni_pip_requirements_file: [] +# - "setuptools-41.2.0.zip" # https://pypi.org/project/setuptools/#files +# - "setuptools_scm-3.3.3.tar.gz" # https://pypi.org/project/setuptools-scm/#files +# - "urllib3-1.24.3.tar.gz" # https://pypi.org/project/urllib3/1.24.3/#files +# - "PyYAML-5.1.2.tar.gz" # https://pypi.org/project/PyYAML/#files +# - "chardet-3.0.4.tar.gz" # https://pypi.org/project/chardet/#files # (required for "requests") +# - "idna-2.8.tar.gz" # https://pypi.org/project/idna/#files # (required for "requests") +# - "certifi-2019.9.11.tar.gz" # https://pypi.org/project/certifi/#files # (required for "requests") +# - "requests-2.22.0.tar.gz" # https://pypi.org/project/requests/#files +# - "six-1.12.0.tar.gz" # https://pypi.org/project/six/#files +# - "kazoo-2.6.1.tar.gz" # https://pypi.org/project/kazoo/#files +# - "dnspython-1.16.0.zip" # https://pypi.org/project/dnspython/#files # (required for "python-etcd") +# - "python-etcd-0.4.5.tar.gz" # https://pypi.org/project/python-etcd/#files +# - "Click-7.0.tar.gz" # https://pypi.org/project/click/#files +# - "prettytable-0.7.2.tar.gz" # https://pypi.org/project/PrettyTable/#files +# - "pytz-2019.2.tar.gz" # https://pypi.org/project/pytz/#files # (required for "tzlocal") +# - "tzlocal-2.0.0.tar.gz" # https://pypi.org/project/tzlocal/#files +# - "wheel-0.33.6.tar.gz" # https://pypi.org/project/wheel/#files +# - "python-dateutil-2.8.0.tar.gz" # https://pypi.org/project/python-dateutil/#files +# - "psutil-5.6.3.tar.gz" # https://pypi.org/project/psutil/#files +# - "cdiff-1.0.tar.gz" # https://pypi.org/project/cdiff/#files +patroni_pip_package_file: [] +# - "patroni-3.3.2.tar.gz" # https://pypi.org/project/patroni/#files + +# additional packages +etcd_package_file: "etcd-v3.5.17-linux-{{ etcd_architecture_map[ansible_architecture] }}.tar.gz" # https://github.com/etcd-io/etcd/releases +vip_manager_package_file: "vip-manager_3.0.0_Linux_{{ vip_manager_architecture_map[ansible_architecture] }}.rpm" # https://github.com/cybertec-postgresql/vip-manager/releases +wal_g_package_file: "wal-g-pg-ubuntu-20.04-{{ wal_g_architecture_map[ansible_architecture] }}.tar.gz" # https://github.com/wal-g/wal-g/releases + +# if with_haproxy_load_balancing: true +haproxy_package_file: [] +# - "rh-haproxy18-runtime-3.1-2.el7.x86_64.rpm" +# - "rh-haproxy18-haproxy-1.8.31-1.el7.x86_64.rpm" + +confd_package_file: "confd-0.16.0-linux-amd64" # https://github.com/kelseyhightower/confd/releases + +# if haproxy_installation_method: 'src' (optional) +lua_src_file: "lua-5.3.5.tar.gz" # https://www.lua.org/ftp/lua-5.3.5.tar.gz (required for build haproxy) +haproxy_src_file: "haproxy-1.8.31.tar.gz" # http://www.haproxy.org/download/1.8/src/ + +# ------------------------------------------------------------------------------------------------- # +# (optional) Specify additional rpm packages if required (for any installation_method) +# this packages will be installed before all other packages. 
+packages_from_file: []
+# - "python3-psycopg2-2.7.7-2.el7.x86_64.rpm" # https://mirror.linux-ia64.org/epel/7/x86_64/Packages/p/ # (required for patroni rpm)
+# - "libyaml-0.1.4-11.el7_0.x86_64.rpm" # (required for patroni rpm)
+# - "jq-1.5-1.el7.x86_64.rpm" # https://mirror.linux-ia64.org/epel/7/x86_64/Packages/j/
+# - "other-package-name_1_amd64.rpm"
+# - ""
+
+# ----------------------------------------------------------------------------------------------------------------------------------
+# Attention! If you use the installation method "file",
+# you need to independently determine all the necessary dependencies of the rpm packages for your version of the Linux distribution.
+# ----------------------------------------------------------------------------------------------------------------------------------
diff --git a/automation/roles/common/defaults/main.yml b/automation/roles/common/defaults/main.yml
new file mode 100644
index 000000000..78d76d946
--- /dev/null
+++ b/automation/roles/common/defaults/main.yml
@@ -0,0 +1,718 @@
+---
+# ---------------------------------------------------------------------
+# Proxy variables (optional) for downloading packages through a proxy server
+proxy_env: {} # yamllint disable rule:braces
+# http_proxy: http://10.128.64.9:3128
+# https_proxy: http://10.128.64.9:3128
+# ---------------------------------------------------------------------
+
+# Cluster variables
+cluster_vip: "" # IP address for client access to the databases in the cluster (optional).
+vip_interface: "{{ ansible_default_ipv4.interface }}" # interface name (e.g., "ens32").
+# Note: VIP-based solutions such as keepalived or vip-manager may not function correctly in cloud environments.
+
+patroni_cluster_name: "postgres-cluster" # the cluster name (must be unique for each cluster)
+
+patroni_superuser_username: "postgres"
+patroni_superuser_password: "" # Please specify a password. If not defined, it will be generated automatically during deployment.
+patroni_replication_username: "replicator"
+patroni_replication_password: "" # Please specify a password. If not defined, it will be generated automatically during deployment.
+
+synchronous_mode: false # or 'true' to enable synchronous database replication
+synchronous_mode_strict: false # if 'true', block all client writes to the master when a synchronous replica is not available
+synchronous_node_count: 1 # number of synchronous standby databases
+
+# Load Balancing
+with_haproxy_load_balancing: false # or 'true' if you want to install and configure load balancing
+haproxy_listen_port:
+  master: 5000
+  replicas: 5001
+  replicas_sync: 5002
+  replicas_async: 5003
+  # The following ('_direct') ports are used for direct connections to the PostgreSQL database,
+  # bypassing the PgBouncer connection pool (if 'pgbouncer_install' is 'true').
+  # Uncomment the relevant lines if you need to set up direct connections.
+  # master_direct: 6000
+  # replicas_direct: 6001
+  # replicas_sync_direct: 6002
+  # replicas_async_direct: 6003
+  stats: 7000
+haproxy_maxconn:
+  global: 100000
+  master: 10000
+  replica: 10000
+haproxy_timeout:
+  client: "60m"
+  server: "60m"
+# Optionally declare a log format for haproxy.
+# Uncomment the following lines (and remove the extra space in front of the variable definition) for a JSON structured log format.
+# haproxy_log_format: "{ +# \"pid\":%pid,\ +# \"haproxy_frontend_type\":\"tcp\",\ +# \"haproxy_process_concurrent_connections\":%ac,\ +# \"haproxy_frontend_concurrent_connections\":%fc,\ +# \"haproxy_backend_concurrent_connections\":%bc,\ +# \"haproxy_server_concurrent_connections\":%sc,\ +# \"haproxy_backend_queue\":%bq,\ +# \"haproxy_server_queue\":%sq,\ +# \"haproxy_queue_wait_time\":%Tw,\ +# \"haproxy_server_wait_time\":%Tc,\ +# \"response_time\":%Td,\ +# \"session_duration\":%Tt,\ +# \"request_termination_state\":\"%tsc\",\ +# \"haproxy_server_connection_retries\":%rc,\ +# \"remote_addr\":\"%ci\",\ +# \"remote_port\":%cp,\ +# \"frontend_addr\":\"%fi\",\ +# \"frontend_port\":%fp,\ +# \"frontend_ssl_version\":\"%sslv\",\ +# \"frontend_ssl_ciphers\":\"%sslc\",\ +# \"haproxy_frontend_name\":\"%f\",\ +# \"haproxy_backend_name\":\"%b\",\ +# \"haproxy_server_name\":\"%s\",\ +# \"response_size\":%B,\ +# \"request_size\":%U\ +# }" + +# keepalived (if 'cluster_vip' is specified and 'with_haproxy_load_balancing' is 'true') +keepalived_virtual_router_id: "{{ cluster_vip.split('.')[3] | int }}" # The last octet of 'cluster_vip' IP address is used by default. +# virtual_router_id - must be unique in the network (available values are 0..255). + +# vip-manager (if 'cluster_vip' is specified and 'with_haproxy_load_balancing' is 'false') +vip_manager_version: "3.0.0" # version to install +vip_manager_conf: "/etc/patroni/vip-manager.yml" +vip_manager_interval: "1000" # time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. +vip_manager_iface: "{{ vip_interface }}" # interface to which the virtual ip will be added +vip_manager_ip: "{{ cluster_vip }}" # the virtual ip address to manage +vip_manager_mask: "24" # netmask for the virtual ip +vip_manager_dcs_type: "{{ dcs_type }}" # etcd, consul or patroni + +# TLS certificate +tls_cert_generate: "{{ tls_enable | default(true) }}" # or 'false' if you do not want to generate a self-signed certificate. +tls_cert_valid_days: 3650 +tls_dir: "/etc/tls" +tls_cert: "server.crt" +tls_privatekey: "server.key" +tls_ca_cert: "ca.crt" +tls_ca_key: "ca.key" + +# DCS (Distributed Consensus Store) +dcs_type: "etcd" # or 'consul' +dcs_exists: false # or 'true' if you do not want Autobase to deploy and manage etcd cluster and prefer to manage it yourself. + +# if dcs_type: "etcd" and dcs_exists: false +etcd_version: "3.5.20" # version for deploy etcd cluster +etcd_data_dir: "/var/lib/etcd" +etcd_cluster_name: "etcd-{{ patroni_cluster_name }}" # ETCD_INITIAL_CLUSTER_TOKEN +etcd_on_dedicated_nodes: "{{ groups['etcd_cluster'] | difference(groups['postgres_cluster']) | length > 0 }}" # 'true' or 'false' +# TLS +# Enables TLS encryption with a self-signed certificate if 'tls_cert_generate' is true. +etcd_tls_enable: "{{ tls_cert_generate | default(true) }}" +etcd_tls_dir: "/etc/etcd/tls" +etcd_tls_ca_crt: "ca.crt" +etcd_tls_ca_key: "ca.key" +etcd_tls_server_crt: "server.crt" +etcd_tls_server_key: "server.key" +etcd_client_cert_auth: "{{ 'true' if not etcd_on_dedicated_nodes | bool else 'false' }}" +# We disable client certificate authentication when etcd runs on dedicated nodes. +# This allows Patroni to connect without requiring a client certificate, ensuring secure encrypted communication +# using only the CA certificate while avoiding the need to regenerate etcd certificates when adding new Patroni clusters. 
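# With TLS enabled, etcd health can be checked from a cluster node against the
# client port, e.g. with the uri module (a sketch; the endpoint address is
# illustrative, and client_cert/client_key are only needed when
# etcd_client_cert_auth is true):
#
# - name: Check etcd health over TLS
#   ansible.builtin.uri:
#     url: "https://10.128.64.140:2379/health"
#     ca_path: "{{ etcd_tls_dir }}/{{ etcd_tls_ca_crt }}"
#     client_cert: "{{ etcd_tls_dir }}/{{ etcd_tls_server_crt }}"
#     client_key: "{{ etcd_tls_dir }}/{{ etcd_tls_server_key }}"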
+ +# if dcs_type: "etcd" and dcs_exists: true +patroni_etcd_hosts: [] # list of servers of an existing etcd cluster +# - { host: "10.128.64.140", port: "2379" } +# - { host: "10.128.64.142", port: "2379" } +# - { host: "10.128.64.143", port: "2379" } +patroni_etcd_namespace: "service" # (optional) etcd namespace (prefix) +patroni_etcd_username: "" # (optional) username for etcd authentication +patroni_etcd_password: "" # (optional) password for etcd authentication +patroni_etcd_protocol: "{{ 'https' if etcd_tls_enable | bool else 'http' }}" +patroni_etcd_cacert: "/etc/patroni/tls/etcd/ca.crt" +patroni_etcd_cert: "/etc/patroni/tls/etcd/server.crt" +patroni_etcd_key: "/etc/patroni/tls/etcd/server.key" + +# if dcs_type: "consul" +consul_version: "latest" # or a specific version (e.q., '1.18.2') if 'consul_install_from_repo' is 'false' +consul_install_from_repo: true # specify 'false' only if patroni_installation_method: "pip" is used +consul_config_path: "/etc/consul" +consul_configd_path: "{{ consul_config_path }}/conf.d" +consul_data_path: "/var/lib/consul" +consul_domain: "consul" # Consul domain name +consul_datacenter: "dc1" # Datacenter label (can be specified for each host in the inventory) +consul_disable_update_check: true # Disables automatic checking for security bulletins and new version releases +consul_enable_script_checks: true # This controls whether health checks that execute scripts are enabled on this agent +consul_enable_local_script_checks: true # Enable them when they are defined in the local configuration files +consul_ui: false # Enable the consul UI? +consul_syslog_enable: true # Enable logging to syslog +consul_iface: "{{ ansible_default_ipv4.interface }}" # specify the interface name with a Private IP (ex. "enp7s0") +consul_client_address: "127.0.0.1" # Client address. Affects DNS, HTTP, HTTPS, and gRPC client interfaces. +consul_on_dedicated_nodes: "{{ groups['consul_instances'] | difference(groups['postgres_cluster']) | length > 0 }}" # 'true' or 'false' +# TLS +# Enables TLS encryption with a self-signed certificate if 'tls_cert_generate' is true. +# If 'tls_cert_generate' is false, you must provide your own CA certificate, server certificate, and server key in the 'files/' directory. +consul_tls_enable: "{{ tls_cert_generate | default(true) }}" +consul_tls_dir: "/etc/consul/tls" +consul_tls_ca_crt: "ca.crt" +consul_tls_ca_key: "ca.key" +consul_tls_server_crt: "server.crt" +consul_tls_server_key: "server.key" +# DNS +consul_recursors: [] # List of upstream DNS servers +consul_dnsmasq_enable: true # Enable DNS forwarding with Dnsmasq +consul_dnsmasq_cache: 0 # dnsmasq cache-size (0 - disable caching) +consul_dnsmasq_servers: "{{ nameservers }}" # Upstream DNS servers used by dnsmasq + +# if dcs_type: "consul" and dcs_exists: true +consul_join: [] # List of LAN servers of an existing consul cluster, to join. 
+# - "10.128.64.140" +# - "10.128.64.142" +# - "10.128.64.143" + +# https://developer.hashicorp.com/consul/docs/discovery/services +consul_services: + - name: "{{ patroni_cluster_name }}" + id: "{{ patroni_cluster_name }}-master" + tags: ["master", "primary"] + port: "{{ pgbouncer_listen_port }}" # or "{{ postgresql_port }}" if pgbouncer_install: false + checks: + - { http: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/primary", interval: "2s" } + - { args: ["systemctl", "status", "pgbouncer"], interval: "5s" } # comment out this check if pgbouncer_install: false + - name: "{{ patroni_cluster_name }}" + id: "{{ patroni_cluster_name }}-replica" + tags: ["replica"] + port: "{{ pgbouncer_listen_port }}" + checks: + - { http: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/replica?lag={{ patroni_maximum_lag_on_replica }}", interval: "2s" } + - { args: ["systemctl", "status", "pgbouncer"], interval: "5s" } +# - name: "{{ patroni_cluster_name }}" +# id: "{{ patroni_cluster_name }}-sync-replica" +# tags: ['sync-replica'] +# port: "{{ pgbouncer_listen_port }}" +# checks: +# - { http: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/sync", interval: "2s" } +# - { args: ["systemctl", "status", "pgbouncer"], interval: "5s" } +# - name: "{{ patroni_cluster_name }}" +# id: "{{ patroni_cluster_name }}-async-replica" +# tags: ['async-replica'] +# port: "{{ pgbouncer_listen_port }}" +# checks: +# - { http: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/async?lag={{ patroni_maximum_lag_on_replica }}", interval: "2s" } +# - { args: ["systemctl", "status", "pgbouncer"], interval: "5s" } + +# PostgreSQL variables +postgresql_version: 17 +# postgresql_data_dir: see Debian.yml or RedHat.yml +postgresql_listen_addr: "0.0.0.0" # Listen on all interfaces. Or use "{{ inventory_hostname }},127.0.0.1" to listen on a specific IP address. 
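# A simple post-deployment connectivity check against these listen settings can be
# done with the community.postgresql collection (a sketch, not part of the role;
# psycopg2 must be available on the target host):
#
# - name: Verify that PostgreSQL accepts connections
#   community.postgresql.postgresql_ping:
#     login_host: "127.0.0.1"
#     login_port: "{{ postgresql_port }}"
#     login_user: "{{ patroni_superuser_username }}"
#     login_password: "{{ patroni_superuser_password }}"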
+postgresql_port: 5432
+postgresql_encoding: "UTF8" # for bootstrap only (initdb)
+postgresql_locale: "en_US.UTF-8" # for bootstrap only (initdb)
+postgresql_data_checksums: true # for bootstrap only (initdb)
+postgresql_password_encryption_algorithm: "scram-sha-256" # or "md5" if your clients do not work with passwords encrypted with SCRAM-SHA-256
+
+# (optional) list of users to be created (if not already exists)
+postgresql_users:
+  - { name: "{{ pgbouncer_auth_username }}", password: "{{ pgbouncer_auth_password }}", flags: "LOGIN", role: "" }
+# - { name: "monitoring_auth_username", password: "monitoring_user_password", flags: "LOGIN", role: "pg_monitor" } # monitoring Service Account
+# - { name: "mydb-user", password: "mydb-user-pass", flags: "SUPERUSER" }
+# - { name: "", password: "", flags: "NOSUPERUSER" }
+# - { name: "", password: "", flags: "NOSUPERUSER" }
+# - { name: "", password: "", flags: "NOLOGIN" }
+
+# (optional) list of databases to be created (if not already exists)
+postgresql_databases: []
+# - { db: "mydatabase", encoding: "UTF8", lc_collate: "ru_RU.UTF-8", lc_ctype: "ru_RU.UTF-8", owner: "mydb-user" }
+# - { db: "mydatabase2", encoding: "UTF8", lc_collate: "ru_RU.UTF-8", lc_ctype: "ru_RU.UTF-8", owner: "mydb-user", conn_limit: "50" }
+# - { db: "", encoding: "UTF8", lc_collate: "en_US.UTF-8", lc_ctype: "en_US.UTF-8", owner: "" }
+# - { db: "", encoding: "UTF8", lc_collate: "en_US.UTF-8", lc_ctype: "en_US.UTF-8", owner: "" }
+
+# (optional) list of schemas to be created (if not already exists)
+postgresql_schemas: []
+# - { schema: "myschema", db: "mydatabase", owner: "mydb-user" }
+
+# (optional) list of privileges to be granted (if not already exists) or revoked
+# https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_privs_module.html#examples
+# The db (which is the database to connect to) and role parameters are required
+postgresql_privs: []
+# - { role: "test", privs: "SELECT,INSERT,UPDATE", type: "table", db: "test2", objs: "test" } # grant SELECT, INSERT, UPDATE on a table to role test
+# - { role: "test-user", privs: "ALL", type: "database", db: "test-db", objs: "test-db" } # grant ALL on a database to role test-user
+# - { role: "mydb-user", privs: "SELECT", type: "table", db: "mydb", objs: "my_table", schema: "my_schema" } # grant SELECT on a table and schema
+# - { role: "user", privs: "EXECUTE", type: "function", db: "db1", objs: "pg_ls_waldir()", schema: "pg_catalog" } # grant EXECUTE on a function
+# - { role: "user", privs: "SELECT", type: "table", db: "mydb", objs: "table2", schema: "schema2", state: "absent" } # revoke SELECT on table2 in schema2
+# - { role: "test, test2", privs: "CREATE", type: "database", db: "test2", objs: "test2" } # grant CREATE on a database test2 to role test and test2
+
+# (optional) list of database extensions to be created (if not already exists)
+postgresql_extensions: []
+# - { ext: "pg_stat_statements", db: "postgres" }
+# - { ext: "pg_stat_statements", db: "mydatabase" }
+# - { ext: "pg_stat_statements", db: "mydatabase", schema: "myschema" }
+# - { ext: "pg_stat_statements", db: "" }
+# - { ext: "pg_stat_statements", db: "" }
+# - { ext: "pg_repack", db: "" } # postgresql--repack package is required
+# - { ext: "pg_stat_kcache", db: "" } # postgresql--pg-stat-kcache package is required
+# - { ext: "", db: "" }
+# - { ext: "", db: "" }
+
+# postgresql parameters to bootstrap dcs (these are example values, adjust them for your workload)
+postgresql_parameters:
+  - { option: "max_connections", value: "1000" }
+  - { option: "superuser_reserved_connections", value: "5" }
+  - { option: "password_encryption", value: "{{ postgresql_password_encryption_algorithm }}" }
+  - { option: "ssl", value: "{{ 'on' if tls_cert_generate | bool else 'off' }}" }
+  - { option: "ssl_prefer_server_ciphers", value: "{{ 'on' if tls_cert_generate | bool else 'off' }}" }
+  - { option: "ssl_cert_file", value: "{{ tls_dir }}/{{ tls_cert }}" }
+  - { option: "ssl_key_file", value: "{{ tls_dir }}/{{ tls_privatekey }}" }
+  - { option: "ssl_ca_file", value: "{{ tls_dir }}/{{ tls_ca_cert }}" }
+  - { option: "ssl_min_protocol_version", value: "TLSv1.2" }
+  - { option: "max_locks_per_transaction", value: "512" }
+  - { option: "max_prepared_transactions", value: "0" }
+  - { option: "huge_pages", value: "try" } # "vm.nr_hugepages" is auto-configured for shared_buffers >= 8GB (if huge_pages_auto_conf is true)
+  - { option: "shared_buffers", value: "{{ (ansible_memtotal_mb * 0.25) | int }}MB" } # by default, 25% of RAM
+  - { option: "effective_cache_size", value: "{{ (ansible_memtotal_mb * 0.75) | int }}MB" } # by default, 75% of RAM
+  - { option: "work_mem", value: "128MB" } # please change this value
+  - { option: "maintenance_work_mem", value: "256MB" } # please change this value
+  - { option: "checkpoint_timeout", value: "15min" }
+  - { option: "checkpoint_completion_target", value: "0.9" }
+  - { option: "min_wal_size", value: "2GB" }
+  - { option: "max_wal_size", value: "8GB" } # or 16GB/32GB
+  - { option: "wal_buffers", value: "32MB" }
+  - { option: "default_statistics_target", value: "1000" }
+  - { option: "seq_page_cost", value: "1" }
+  - { option: "random_page_cost", value: "1.1" } # or "4" for HDDs with slower random access
+  - { option: "effective_io_concurrency", value: "200" } # or "2" for traditional HDDs with lower I/O parallelism
+  - { option: "synchronous_commit", value: "on" } # or 'off' if you can afford to lose single transactions in case of a crash
+  - { option: "autovacuum", value: "on" } # never turn off the autovacuum!
+  - { option: "autovacuum_max_workers", value: "5" }
+  - { option: "autovacuum_vacuum_scale_factor", value: "0.01" } # or 0.005/0.001
+  - { option: "autovacuum_analyze_scale_factor", value: "0.01" }
+  - { option: "autovacuum_vacuum_cost_limit", value: "500" } # or 1000/5000
+  - { option: "autovacuum_vacuum_cost_delay", value: "2" }
+  - { option: "autovacuum_naptime", value: "1s" }
+  - { option: "max_files_per_process", value: "4096" }
+  - { option: "archive_mode", value: "on" }
+  - { option: "archive_timeout", value: "1800s" }
+  - { option: "archive_command", value: "cd ."
+ # - { option: "archive_command", value: "{{ wal_g_archive_command }}" } # archive WAL-s using WAL-G + # - { option: "archive_command", value: "{{ pgbackrest_archive_command }}" } # archive WAL-s using pgbackrest + - { option: "wal_level", value: "logical" } + - { option: "wal_keep_size", value: "2GB" } + - { option: "max_wal_senders", value: "10" } + - { option: "max_replication_slots", value: "10" } + - { option: "hot_standby", value: "on" } + - { option: "wal_log_hints", value: "on" } + - { option: "wal_compression", value: "on" } + - { option: "shared_preload_libraries", value: "pg_stat_statements,auto_explain" } + - { option: "pg_stat_statements.max", value: "10000" } + - { option: "pg_stat_statements.track", value: "all" } + - { option: "pg_stat_statements.track_utility", value: "false" } + - { option: "pg_stat_statements.save", value: "true" } + - { option: "auto_explain.log_min_duration", value: "10s" } # log the execution plans of queries running longer than 10 seconds. Decrease this value if necessary + - { option: "auto_explain.log_analyze", value: "true" } + - { option: "auto_explain.log_buffers", value: "true" } + - { option: "auto_explain.log_timing", value: "false" } + - { option: "auto_explain.log_triggers", value: "true" } + - { option: "auto_explain.log_verbose", value: "true" } + - { option: "auto_explain.log_nested_statements", value: "true" } + - { option: "auto_explain.sample_rate", value: "0.01" } # log execution plans for only 1% of the eligible queries + - { option: "track_io_timing", value: "on" } + - { option: "log_lock_waits", value: "on" } + - { option: "log_temp_files", value: "0" } + - { option: "track_activities", value: "on" } + - { option: "track_activity_query_size", value: "4096" } + - { option: "track_counts", value: "on" } + - { option: "track_functions", value: "all" } + - { option: "log_checkpoints", value: "on" } + - { option: "logging_collector", value: "on" } + - { option: "log_truncate_on_rotation", value: "on" } + - { option: "log_rotation_age", value: "1d" } + - { option: "log_rotation_size", value: "0" } + - { option: "log_line_prefix", value: "%t [%p-%l] %r %q%u@%d " } + - { option: "log_filename", value: "postgresql-%a.log" } + - { option: "log_directory", value: "{{ postgresql_log_dir }}" } + - { option: "hot_standby_feedback", value: "on" } # allows feedback from a hot standby to the primary that will avoid query conflicts + - { option: "max_standby_streaming_delay", value: "30s" } + - { option: "wal_receiver_status_interval", value: "10s" } + - { option: "idle_in_transaction_session_timeout", value: "10min" } # reduce this timeout if possible + - { option: "jit", value: "off" } + - { option: "max_worker_processes", value: "{{ [ansible_processor_vcpus | int, 16] | max }}" } + - { option: "max_parallel_workers", value: "{{ [(ansible_processor_vcpus | int // 2), 8] | max }}" } + - { option: "max_parallel_workers_per_gather", value: "2" } + - { option: "max_parallel_maintenance_workers", value: "2" } + - { option: "tcp_keepalives_count", value: "10" } + - { option: "tcp_keepalives_idle", value: "300" } + - { option: "tcp_keepalives_interval", value: "30" } +# - { option: "", value: "" } +# - { option: "", value: "" } + +# Set this variable to 'true' if you want the cluster to be automatically restarted +# after changing the 'postgresql_parameters' variable that requires a restart in the 'config_pgcluster.yml' playbook. +# By default, the cluster will not be automatically restarted.
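The `pending_restart` flag defined just below only controls whether `config_pgcluster.yml` restarts the cluster automatically. To see which nodes actually report a pending restart after a parameter change, you can query the Patroni REST API. A minimal ad-hoc sketch (an assumed playbook, not part of this repo; it presumes the REST API is reachable over plain HTTP without authentication):

```yaml
# Hypothetical ad-hoc check: list nodes whose Patroni status reports a pending restart.
- name: Check for pending PostgreSQL restarts
  hosts: postgres_cluster
  gather_facts: false
  tasks:
    - name: Query the Patroni REST API node status
      ansible.builtin.uri:
        url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port | default(8008) }}/patroni"
        return_content: true
      register: patroni_status

    - name: Report nodes awaiting a restart
      ansible.builtin.debug:
        msg: "{{ inventory_hostname }}: restart pending"
      when: patroni_status.json.pending_restart | default(false) | bool
```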
+pending_restart: false + +# specify additional hosts that will be added to the pg_hba.conf +postgresql_pg_hba: + - { type: "local", database: "all", user: "{{ patroni_superuser_username }}", address: "", method: "trust" } + - { type: "local", database: "all", user: "{{ pgbouncer_auth_username }}", address: "", method: "trust" } # required for pgbouncer auth_user + - { type: "local", database: "replication", user: "{{ patroni_replication_username }}", address: "", method: "trust" } + - { type: "local", database: "all", user: "all", address: "", method: "{{ postgresql_password_encryption_algorithm }}" } + - { type: "host", database: "all", user: "all", address: "127.0.0.1/32", method: "{{ postgresql_password_encryption_algorithm }}" } + - { type: "host", database: "all", user: "all", address: "::1/128", method: "{{ postgresql_password_encryption_algorithm }}" } + - { type: "{{ host_type }}", database: "all", user: "all", address: "0.0.0.0/0", method: "{{ postgresql_password_encryption_algorithm }}" } +# - { type: "{{ host_type }}", database: "mydatabase", user: "mydb-user", address: "192.168.0.0/24", method: "{{ postgresql_password_encryption_algorithm }}" } +# - { type: "{{ host_type }}", database: "all", user: "all", address: "192.168.0.0/24", method: "ident", options: "map=main" } # use pg_ident + +host_type: "{{ 'hostssl' if tls_cert_generate | bool else 'host' }}" + +# list of lines that Patroni will use to generate pg_ident.conf +postgresql_pg_ident: [] +# - { mapname: "main", system_username: "postgres", pg_username: "backup" } +# - { mapname: "", system_username: "", pg_username: "" } + +# the password file (~/.pgpass) +postgresql_pgpass: + - "localhost:{{ postgresql_port }}:*:{{ patroni_superuser_username }}:{{ patroni_superuser_password }}" + - "{{ inventory_hostname }}:{{ postgresql_port }}:*:{{ patroni_superuser_username }}:{{ patroni_superuser_password }}" + - "*:{{ pgbouncer_listen_port }}:*:{{ patroni_superuser_username }}:{{ patroni_superuser_password }}" +# - hostname:port:database:username:password + +# PgBouncer parameters +pgbouncer_install: true # or 'false' if you do not want to install and configure the pgbouncer service +pgbouncer_processes: 1 # Number of pgbouncer processes to be used. Multiple processes use the so_reuseport option for better performance. +pgbouncer_conf_dir: "/etc/pgbouncer" +pgbouncer_log_dir: "/var/log/pgbouncer" +pgbouncer_listen_addr: "0.0.0.0" # Listen on all interfaces. Or use "{{ inventory_hostname }}" to listen on a specific IP address. 
+pgbouncer_listen_port: 6432 +pgbouncer_max_client_conn: 100000 +pgbouncer_max_db_connections: 10000 +pgbouncer_max_prepared_statements: 1024 +pgbouncer_query_wait_timeout: 120 +pgbouncer_default_pool_size: 100 +pgbouncer_default_pool_mode: "session" +pgbouncer_admin_users: "{{ patroni_superuser_username }}" # comma-separated list of users who are allowed to change settings +pgbouncer_stats_users: "{{ patroni_superuser_username }}" # comma-separated list of users who are only allowed to use the SHOW command +pgbouncer_ignore_startup_parameters: "extra_float_digits,geqo,search_path" +pgbouncer_auth_type: "{{ postgresql_password_encryption_algorithm }}" +pgbouncer_auth_user: true # or 'false' if you want to manage the list of users for authentication in the database via userlist.txt +pgbouncer_auth_username: pgbouncer # user who can query the database via the user_search function +pgbouncer_auth_password: "" # If not defined, a password will be generated automatically during deployment +pgbouncer_auth_dbname: "postgres" +pgbouncer_tls_dir: "{{ tls_dir }}" +pgbouncer_client_tls_sslmode: "{{ 'require' if tls_cert_generate | bool else 'disable' }}" +pgbouncer_client_tls_key_file: "{{ tls_privatekey }}" +pgbouncer_client_tls_cert_file: "{{ tls_cert }}" +pgbouncer_client_tls_ca_file: "{{ tls_ca_cert }}" +pgbouncer_client_tls_protocols: "secure" # allowed values: tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3, all, secure (tlsv1.2,tlsv1.3) +pgbouncer_client_tls_ciphers: "secure" # allowed values: default, secure, fast, normal, all (not recommended) +pgbouncer_server_tls_sslmode: "{{ 'require' if tls_cert_generate | bool else 'disable' }}" +pgbouncer_server_tls_protocols: "secure" +pgbouncer_server_tls_ciphers: "secure" +pgbouncer_server_tls_key_file: "{{ tls_privatekey }}" +pgbouncer_server_tls_cert_file: "{{ tls_cert }}" +pgbouncer_server_tls_ca_file: "{{ tls_ca_cert }}" + +pgbouncer_pools: + - { name: "postgres", dbname: "postgres", pool_parameters: "" } +# - { name: "mydatabase", dbname: "mydatabase", pool_parameters: "pool_size=20 pool_mode=transaction" } +# - { name: "", dbname: "", pool_parameters: "" } +# - { name: "", dbname: "", pool_parameters: "" } + +# Extended variables (optional) +patroni_restapi_listen_addr: "0.0.0.0" # Listen on all interfaces. Or use "{{ inventory_hostname }}" to listen on a specific IP address. +patroni_restapi_port: 8008 +patroni_restapi_username: "patroni" +patroni_restapi_password: "" # If not defined, a password will be generated automatically during deployment. +patroni_restapi_request_queue_size: 5 +patroni_ttl: 30 +patroni_loop_wait: 10 +patroni_retry_timeout: 10 +patroni_master_start_timeout: 300 +patroni_maximum_lag_on_failover: 1048576 # (1MB) the maximum bytes a follower may lag to be able to participate in leader election. +patroni_maximum_lag_on_replica: "100MB" # the maximum replication lag a replica may have to remain available for read-only queries. + +# https://patroni.readthedocs.io/en/latest/yaml_configuration.html#postgresql +patroni_callbacks: [] +# - {action: "on_role_change", script: ""} +# - {action: "on_stop", script: ""} +# - {action: "on_restart", script: ""} +# - {action: "on_reload", script: ""} +# - {action: "on_role_change", script: ""}
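A filled-in callback entry might look as follows; the script path and its contents are hypothetical. Patroni invokes the callback with the action, the new role, and the cluster name as arguments:

```yaml
# Hypothetical example: notify an external system on switchover/failover.
# Patroni calls the script as: <script> <action> <role> <cluster_name>
patroni_callbacks:
  - action: "on_role_change"
    script: "/usr/local/bin/on_role_change.sh"
```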
+ +# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#standby-cluster +# Requirements: +# 1. the cluster name for the Standby Cluster must be unique ('patroni_cluster_name' variable) +# 2. the IP addresses (or network) of the Standby Cluster servers must be added to the pg_hba.conf of the Main Cluster ('postgresql_pg_hba' variable). +patroni_standby_cluster: + host: "" # the address of the remote master + port: "5432" # the port of the remote master +# primary_slot_name: "" # which slot on the remote master to use for replication (optional) +# restore_command: "" # command to restore WAL records from the remote master to the standby leader (optional) +# recovery_min_apply_delay: "" # how long to wait before actually applying WAL records on a standby leader (optional) + +# Permanent replication slots. +# These slots will be preserved during switchover/failover. +# https://patroni.readthedocs.io/en/latest/dynamic_configuration.html +patroni_slots: [] +# - slot: "logical_replication_slot" # the name of the permanent replication slot. +# type: "logical" # the type of slot. Could be 'physical' or 'logical' (if the slot is logical, you have to define 'database' and 'plugin'). +# plugin: "pgoutput" # the plugin name for the logical slot. +# database: "postgres" # the database name where logical slots should be created. +# - slot: "test_logical_replication_slot" +# type: "logical" +# plugin: "pgoutput" +# database: "test" + +patroni_log_destination: stderr # or 'logfile' +# if patroni_log_destination: logfile +patroni_log_dir: /var/log/patroni +patroni_log_level: info +patroni_log_traceback_level: error +patroni_log_format: "%(asctime)s %(levelname)s: %(message)s" +patroni_log_dateformat: "" +patroni_log_max_queue_size: 1000 +patroni_log_file_num: 4 +patroni_log_file_size: 25000000 # bytes +patroni_log_loggers_patroni_postmaster: warning +patroni_log_loggers_urllib3: warning # or 'debug' + +patroni_watchdog_mode: automatic # or 'off', 'required' +patroni_watchdog_device: /dev/watchdog + +patroni_postgresql_use_pg_rewind: true # or 'false' +# try to use pg_rewind on the former leader when it rejoins the cluster as a replica. + +patroni_remove_data_directory_on_rewind_failure: false # or 'true' (if use_pg_rewind: 'true') +# avoid removing the data directory on an unsuccessful rewind +# if 'true', Patroni will remove the PostgreSQL data directory and recreate the replica. + +patroni_remove_data_directory_on_diverged_timelines: false # or 'true' +# if 'true', Patroni will remove the PostgreSQL data directory and recreate the replica +# if it notices that timelines are diverging and the former master cannot start streaming from the new master.
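The next block selects how the cluster is bootstrapped and how replicas are built. When recovering a cluster from an existing backup, the two are typically combined; a minimal sketch, assuming pgBackRest is installed and a stanza with backups already exists (see the pgbackrest_* variables further below):

```yaml
# Bootstrap the cluster from a pgBackRest backup instead of initdb,
# and build replicas from the backup repository first.
patroni_cluster_bootstrap_method: "pgbackrest"
patroni_create_replica_methods:
  - pgbackrest  # restore the replica from the repository
  - basebackup  # fall back to pg_basebackup if the restore fails
```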
+ +# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#bootstrap +patroni_cluster_bootstrap_method: "initdb" # or "wal-g", "pgbackrest", "pg_probackup" + +# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#building-replicas +patroni_create_replica_methods: + # - pgbackrest + # - wal_g + # - pg_probackup + - basebackup + +pgbackrest: + - { option: "command", value: "{{ pgbackrest_patroni_cluster_restore_command }}" } + - { option: "keep_data", value: "True" } + - { option: "no_params", value: "True" } +wal_g: + - { option: "command", value: "{{ wal_g_patroni_cluster_bootstrap_command }}" } + - { option: "no_params", value: "True" } +basebackup: + - { option: "max-rate", value: "1000M" } + - { option: "checkpoint", value: "fast" } +# - { option: "waldir", value: "{{ postgresql_wal_dir }}" } +pg_probackup: + - { option: "command", value: "{{ pg_probackup_restore_command }}" } + - { option: "no_params", value: "true" } + +# "restore_command" written to recovery.conf when configuring follower (create replica) +postgresql_restore_command: "" +# postgresql_restore_command: "{{ wal_g_path }} wal-fetch %f %p" # restore WAL-s using WAL-G +# postgresql_restore_command: "pgbackrest --stanza={{ pgbackrest_stanza }} archive-get %f %p" # restore WAL-s using pgbackrest + +# postgresql_restore_command: "pg_probackup-{{ pg_probackup_version }} archive-get -B +# {{ pg_probackup_dir }} --instance {{ pg_probackup_instance }} --wal-file-path=%p +# --wal-file-name=%f" # restore WAL-s using pg_probackup + +# pg_probackup +pg_probackup_install: false # or 'true' +pg_probackup_install_from_postgrespro_repo: true # or 'false' +pg_probackup_version: "{{ postgresql_version }}" +pg_probackup_instance: "pg_probackup_instance_name" +pg_probackup_dir: "/mnt/backup_dir" +pg_probackup_threads: "4" +pg_probackup_add_keys: "--recovery-target=latest --skip-external-dirs --no-validate" +# ⚠️ Ensure there is a space at the beginning of each part to prevent commands from concatenating. 
+pg_probackup_command_parts: + - "pg_probackup-{{ pg_probackup_version }}" + - " restore -B {{ pg_probackup_dir }}" + - " --instance {{ pg_probackup_instance }}" + - " -j {{ pg_probackup_threads }}" + - " {{ pg_probackup_add_keys }}" +pg_probackup_restore_command: "{{ pg_probackup_command_parts | join('') }}" +pg_probackup_patroni_cluster_bootstrap_command: "{{ pg_probackup_command_parts | join('') }}" + +# WAL-G +wal_g_install: false # or 'true' +wal_g_version: "3.0.5" +wal_g_installation_method: "binary" # or "src" to build from source code +wal_g_path: "/usr/local/bin/wal-g --config {{ postgresql_home_dir }}/.walg.json" +wal_g_json: # config https://github.com/wal-g/wal-g#configuration + - { option: "AWS_ACCESS_KEY_ID", value: "{{ AWS_ACCESS_KEY_ID | default('') }}" } # define values or pass via --extra-vars + - { option: "AWS_SECRET_ACCESS_KEY", value: "{{ AWS_SECRET_ACCESS_KEY | default('') }}" } # define values or pass via --extra-vars + - { option: "WALG_S3_PREFIX", value: "{{ WALG_S3_PREFIX | default('s3://' + patroni_cluster_name) }}" } # define values or pass via --extra-vars + - { option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" } # or "lz4", "lzma", "zstd" + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } # determines how many delta backups can be between full backups + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path }}" } # prevent pg_rewind failures by setting non-default prefetch directory + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir }}" } + - { option: "PGPORT", value: "{{ postgresql_port }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username }}" } +# - { option: "AWS_S3_FORCE_PATH_STYLE", value: "true" } # to use Minio.io S3-compatible storage +# - { option: "AWS_ENDPOINT", value: "http://minio:9000/" } # to use Minio.io S3-compatible storage +# - { option: "", value: "" } +wal_g_archive_command: "{{ wal_g_path }} wal-push %p" +wal_g_patroni_cluster_bootstrap_command: "{{ wal_g_path }} backup-fetch {{ postgresql_data_dir }} LATEST" +wal_g_patroni_cluster_bootstrap_recovery_conf: + - restore_command: "{{ wal_g_path }} wal-fetch %f %p" + - recovery_target_action: "promote" + - recovery_target_timeline: "latest" +# - recovery_target_time: "2020-06-01 11:00:00+03" # Point-in-Time Recovery (example) +wal_g_prefetch_dir_create: true # or 'false' +wal_g_prefetch_dir_path: "{{ postgresql_home_dir }}/wal-g-prefetch" + +# Define job_parts outside of wal_g_cron_jobs +# ⚠️ Ensure there is a space at the beginning of each part to prevent commands from concatenating. 
+wal_g_backup_command: + - "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200" + - " && {{ wal_g_path }} backup-push {{ postgresql_data_dir }} > {{ postgresql_log_dir }}/walg_backup.log 2>&1" +wal_g_delete_command: + - "curl -I -s http://{{ inventory_hostname }}:{{ patroni_restapi_port }} | grep 200" + - " && {{ wal_g_path }} delete retain FULL 4 --confirm > {{ postgresql_log_dir }}/walg_delete.log 2>&1" + +wal_g_cron_jobs: + - name: "WAL-G: Create daily backup" + user: "postgres" + file: /etc/cron.d/walg + minute: "00" + hour: "{{ WALG_BACKUP_HOUR | default('3') }}" + day: "*" + month: "*" + weekday: "*" + job: "{{ wal_g_backup_command | join('') }}" + - name: "WAL-G: Delete old backups" + user: "postgres" + file: /etc/cron.d/walg + minute: "30" + hour: "6" + day: "*" + month: "*" + weekday: "*" + job: "{{ wal_g_delete_command | join('') }}" + +# pgBackRest +pgbackrest_install: false # or 'true' to install and configure backups using pgBackRest +pgbackrest_install_from_pgdg_repo: true # or 'false' +pgbackrest_stanza: "{{ patroni_cluster_name }}" # specify your --stanza +pgbackrest_repo_type: "posix" # or "s3", "gcs", "azure" +pgbackrest_repo_host: "" # dedicated repository host (optional) +pgbackrest_repo_user: "postgres" # if "repo_host" is set (optional) +pgbackrest_conf_file: "/etc/pgbackrest/pgbackrest.conf" +# config https://pgbackrest.org/configuration.html +pgbackrest_conf: + global: # [global] section + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "{{ pgbackrest_repo_type | lower }}" } + # - { option: "repo1-host", value: "{{ pgbackrest_repo_host }}" } + # - { option: "repo1-host-user", value: "{{ pgbackrest_repo_user }}" } + - { option: "repo1-path", value: "/var/lib/pgbackrest" } + - { option: "repo1-retention-full", value: "4" } + - { option: "repo1-retention-archive", value: "4" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "archive-async", value: "y" } # Enables asynchronous WAL archiving (details: https://pgbackrest.org/user-guide.html#async-archiving) + - { option: "archive-get-queue-max", value: "1GiB" } + # - { option: "archive-push-queue-max", value: "100GiB" } + # - { option: "backup-standby", value: "y" } # When set to 'y', standby servers will be automatically added to the stanza section. 
+ # - { option: "", value: "" } + stanza: # [stanza_name] section + - { option: "process-max", value: "4" } + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-socket-path", value: "{{ postgresql_unix_socket_dir }}" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } +# - { option: "", value: "" } +# (optional) dedicated backup server config (if "repo_host" is set) +pgbackrest_server_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-level-console", value: "info" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "{{ pgbackrest_repo_type | lower }}" } + - { option: "repo1-path", value: "/var/lib/pgbackrest" } + - { option: "repo1-retention-full", value: "4" } + - { option: "repo1-retention-archive", value: "4" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "archive-check", value: "y" } + - { option: "archive-copy", value: "n" } + - { option: "backup-standby", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } +# - { option: "", value: "" } +# the stanza section will be generated automatically + +pgbackrest_archive_command: "pgbackrest --stanza={{ pgbackrest_stanza }} archive-push %p" + +pgbackrest_patroni_cluster_restore_command: "/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --delta restore" # restore from latest backup +# '/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --type=time "--target=2020-06-01 11:00:00+03" --delta restore' # Point-in-Time Recovery (example) + +# By default, the cron jobs is created on the database server. +# If 'repo_host' is defined, the cron jobs will be created on the pgbackrest server. +pgbackrest_cron_jobs: + - name: "pgBackRest: Full Backup" + file: "/etc/cron.d/pgbackrest-{{ patroni_cluster_name }}" + user: "postgres" + minute: "00" + hour: "{{ PGBACKREST_BACKUP_HOUR | default('3') }}" + day: "*" + month: "*" + weekday: "0" + job: "pgbackrest --stanza={{ pgbackrest_stanza }} --type=full backup" + # job: "if [ $(psql -tAXc 'select pg_is_in_recovery()') = 'f' ]; then pgbackrest --stanza={{ pgbackrest_stanza }} --type=full backup; fi" + - name: "pgBackRest: Diff Backup" + file: "/etc/cron.d/pgbackrest-{{ patroni_cluster_name }}" + user: "postgres" + minute: "00" + hour: "3" + day: "*" + month: "*" + weekday: "1-6" + job: "pgbackrest --stanza={{ pgbackrest_stanza }} --type=diff backup" + # job: "if [ $(psql -tAXc 'select pg_is_in_recovery()') = 'f' ]; then pgbackrest --stanza={{ pgbackrest_stanza }} --type=diff backup; fi" + +# PITR mode (if patroni_cluster_bootstrap_method: "pgbackrest" or "wal-g"): +# 1) The database cluster directory will be cleaned (for "wal-g") or overwritten (for "pgbackrest" --delta restore). +# 2) And also the patroni cluster "{{ patroni_cluster_name }}" will be removed from the DCS (if exist) before recovery. 
+cluster_restore_timeout: 86400 # backup and WAL restore timeout in seconds (24 hours) + +disable_archive_command: true # or 'false' to not disable archive_command after restore +keep_patroni_dynamic_json: true # or 'false' to remove patroni.dynamic.json after restore (if it exists) + +# Netdata - https://github.com/netdata/netdata +# Open up your web browser of choice and navigate to http://NODE:19999 +netdata_install: true # Install Netdata on Postgres cluster nodes (with kickstart.sh) +netdata_install_options: "--stable-channel --disable-telemetry --dont-wait" +netdata_conf: + web_default_port: "19999" # the listen port for the Netdata Web Server. + web_bind_to: "*" + db_mode: "dbengine" # dbengine, ram, none + dbengine_page_cache_size: "64MiB" # controls the size of the cache that keeps metric data in memory. + # Tier 0, per second data: + dbengine_tier_0_retention_size: "1024MiB" + dbengine_tier_0_retention_time: "14d" + # Tier 1, per minute data: + dbengine_tier_1_retention_size: "1024MiB" + dbengine_tier_1_retention_time: "3mo" + # Tier 2, per hour data: + dbengine_tier_2_retention_size: "1024MiB" + dbengine_tier_2_retention_time: "1y" + # With these defaults, Netdata requires approximately 4 GiB of storage space (including metadata). + # You can fine-tune retention for each tier by setting a time limit or size limit. Setting a limit to 0 disables it. +# More options can be specified in roles/netdata/templates/netdata.conf.j2 +# https://learn.netdata.cloud/docs/netdata-agent/configuration diff --git a/automation/roles/common/defaults/system.yml b/automation/roles/common/defaults/system.yml new file mode 100644 index 000000000..b51d0d69a --- /dev/null +++ b/automation/roles/common/defaults/system.yml @@ -0,0 +1,259 @@ +--- +# DNS servers (/etc/resolv.conf) +nameservers: [] +# - "8.8.8.8" # example (Google Public DNS) +# - "9.9.9.9" # (Quad9 Public DNS) + +# /etc/hosts (optional) +etc_hosts: [] +# - "10.128.64.143 pgbackrest.minio.local minio.local s3.eu-west-3.amazonaws.com" # example (MinIO) +# - "" + +ntp_enabled: false # or 'true' if you want to install and configure the ntp service +ntp_servers: [] +# - "10.128.64.44" +# - "10.128.64.45" + +timezone: "" +# timezone: "Etc/UTC" +# timezone: "America/New_York" +# timezone: "Europe/Moscow" +# timezone: "Europe/Berlin" + +# Generate locale +# (except RHEL >= 8, which uses glibc-langpack) +locale_gen: + - { language_country: "en_US", encoding: "UTF-8" } +# - { language_country: "ru_RU", encoding: "UTF-8" } +# - { language_country: "de_DE", encoding: "UTF-8" } +# - { language_country: "", encoding: "" } + +# Set system locale (LANG,LC_ALL) +locale: "en_US.utf-8" + +# Configure swap space (if it does not already exist) +swap_file_create: true # or 'false' +swap_file_path: /swapfile +swap_file_size_mb: "4096" # change this value for your system
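The next section defines kernel settings per inventory group. To extend them for a group, redefine the corresponding list in your inventory; note that Ansible replaces the whole dictionary rather than merging it by default, so repeat the defaults you want to keep. A sketch (the nr_hugepages value is an example):

```yaml
# group_vars/postgres_cluster.yml (hypothetical): extend kernel settings for database nodes.
sysctl_conf:
  postgres_cluster:
    - { name: "vm.overcommit_memory", value: "2" }
    - { name: "vm.swappiness", value: "1" }
    - { name: "vm.nr_hugepages", value: "4755" } # 4755 x 2 MiB pages = ~9.3 GB
```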
+ +# Kernel parameters +sysctl_set: true # or 'false' +# These parameters are examples! Specify kernel options appropriate for your system +sysctl_conf: + etcd_cluster: [] + consul_instances: [] + master: [] + replica: [] + pgbackrest: [] + postgres_cluster: + - { name: "vm.overcommit_memory", value: "2" } + - { name: "vm.swappiness", value: "1" } + - { name: "vm.min_free_kbytes", value: "102400" } + - { name: "vm.dirty_expire_centisecs", value: "1000" } + - { name: "vm.dirty_background_bytes", value: "67108864" } + - { name: "vm.dirty_bytes", value: "536870912" } + # - { name: "vm.nr_hugepages", value: "9510" } # example "9510"=18GB + - { name: "vm.zone_reclaim_mode", value: "0" } + - { name: "kernel.numa_balancing", value: "0" } + - { name: "kernel.sched_autogroup_enabled", value: "0" } + - { name: "net.ipv4.ip_nonlocal_bind", value: "1" } + - { name: "net.ipv4.ip_forward", value: "1" } + - { name: "net.ipv4.ip_local_port_range", value: "10000 65535" } + - { name: "net.core.netdev_max_backlog", value: "10000" } + - { name: "net.ipv4.tcp_max_syn_backlog", value: "8192" } + - { name: "net.core.somaxconn", value: "65535" } + - { name: "net.ipv4.tcp_tw_reuse", value: "1" } + # - { name: "net.netfilter.nf_conntrack_max", value: "1048576" } + # - { name: "kernel.sched_migration_cost_ns", value: "5000000" } + # - { name: "", value: "" } + balancers: + - { name: "net.ipv4.ip_nonlocal_bind", value: "1" } + - { name: "net.ipv4.ip_forward", value: "1" } + - { name: "net.ipv4.ip_local_port_range", value: "10000 65535" } + - { name: "net.core.netdev_max_backlog", value: "10000" } + - { name: "net.ipv4.tcp_max_syn_backlog", value: "8192" } + - { name: "net.core.somaxconn", value: "65535" } + - { name: "net.ipv4.tcp_tw_reuse", value: "1" } +# - { name: "net.netfilter.nf_conntrack_max", value: "1048576" } +# - { name: "", value: "" } + +# Huge Pages +# this setting will automatically configure "vm.nr_hugepages" for shared_buffers of 8GB or more +# if 'sysctl_set' is 'true', "vm.nr_hugepages" is undefined or insufficient in sysctl_conf, +# and "huge_pages" is not 'off' in postgresql_parameters. +huge_pages_auto_conf: true + +# Transparent Huge Pages +disable_thp: true # or 'false' + +# Max open file limit +set_limits: true # or 'false' +limits_user: "postgres" +soft_nofile: 65536 +hard_nofile: 200000 + +# I/O Scheduler (optional) +set_scheduler: false # or 'true' +scheduler: + - { sched: "deadline", nr_requests: "1024", device: "sda" } +# - { sched: "noop" , nr_requests: "1024", device: "sdb" } +# - { sched: "" , nr_requests: "1024", device: "" } + +# Non-multiqueue I/O schedulers: +# cfq - for desktop systems and slow SATA drives +# deadline - for SAS drives (recommended for databases) +# noop - for SSD drives +# Multiqueue I/O schedulers (blk-mq): +# mq-deadline - (recommended for databases) +# none - (ideal for fast random I/O devices such as NVMe) +# bfq - (avoid for databases) +# kyber + +# SSH Keys (optional) +enable_ssh_key_based_authentication: false # or 'true' to configure SSH key-based authentication +ssh_key_user: "postgres" +ssh_key_state: "present" +ssh_known_hosts: "{{ groups['postgres_cluster'] }}" + +# List of public SSH keys. These keys will be added to the database server's ~/.ssh/authorized_keys file. +ssh_public_keys: []
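Each entry is a standard authorized_keys line; for example (truncated, hypothetical keys):

```yaml
# Hypothetical example: one authorized_keys line per item.
ssh_public_keys:
  - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... postgres@pgnode01"
  - "ssh-rsa AAAAB3NzaC1yc2EAAA... postgres@pgnode02"
```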
+ +# sudo +sudo_users: + - name: "postgres" + nopasswd: "yes" # or "no" to require a password + commands: "ALL" +# - name: "joe" # other user (example) +# nopasswd: "no" +# commands: "/usr/bin/find, /usr/bin/less, /usr/bin/tail, /bin/kill" + +# Firewall +firewall_enabled_at_boot: false # or 'true' to configure the firewall (iptables) + +firewall_allowed_tcp_ports_for: + master: [] + replica: [] + pgbackrest: [] + postgres_cluster: + - "{{ ansible_ssh_port | default(22) }}" + - "{{ postgresql_port }}" + - "{{ pgbouncer_listen_port }}" + - "{{ patroni_restapi_port }}" + - "19999" # Netdata + # - "10050" # Zabbix agent + # - "" + etcd_cluster: + - "{{ ansible_ssh_port | default(22) }}" + - "2379" # ETCD port + - "2380" # ETCD port + # - "" + consul_instances: + - 8300 + - 8301 + - 8302 + - 8500 + - 8600 + balancers: + - "{{ ansible_ssh_port | default(22) }}" + - "{{ haproxy_listen_port.master }}" # HAProxy (read/write) master + - "{{ haproxy_listen_port.replicas }}" # HAProxy (read only) all replicas + - "{{ haproxy_listen_port.replicas_sync }}" # HAProxy (read only) synchronous replica only + - "{{ haproxy_listen_port.replicas_async }}" # HAProxy (read only) asynchronous replicas only + - "{{ haproxy_listen_port.stats }}" # HAProxy stats +# - "" + +firewall_additional_rules_for: + master: [] + replica: [] + postgres_cluster: [] + pgbackrest: [] + etcd_cluster: [] + consul_instances: [] + balancers: + - "iptables -p vrrp -A INPUT -j ACCEPT" # Keepalived (vrrp) + - "iptables -p vrrp -A OUTPUT -j ACCEPT" # Keepalived (vrrp) + +# disable firewalld (installed by default on RHEL/CentOS) or ufw (installed by default on Ubuntu) +firewall_disable_firewalld: true +firewall_disable_ufw: true + +# (optional) - Fetch files from the server in the "master" group. These files can later be copied to all servers. +fetch_files_from_master: [] +# - { src: "/etc/ssl/certs/ssl-cert-snakeoil.pem", dest: "files/ssl-cert-snakeoil.pem" } +# - { src: "/etc/ssl/private/ssl-cert-snakeoil.key", dest: "files/ssl-cert-snakeoil.key" } +# - { src: "/path/to/myfile", dest: "files/myfile" } + +# (optional) - Copy these files to all servers in the cluster ("master" and "replica" groups) +copy_files_to_all_server: [] +# - { src: "files/ssl-cert-snakeoil.pem", dest: "/etc/ssl/certs/ssl-cert-snakeoil.pem", owner: "postgres", group: "postgres", mode: "0644" } +# - { src: "files/ssl-cert-snakeoil.key", dest: "/etc/ssl/private/ssl-cert-snakeoil.key", owner: "postgres", group: "postgres", mode: "0600" } +# - { src: "files/myfile", dest: "/path/to/myfile", owner: "postgres", group: "postgres", mode: "0640" } + +# System cron jobs +cron_jobs: [] +# - name: "Example Job one" +# user: "postgres" +# file: /etc/cron.d/example_job_one +# minute: "00" +# hour: "1" +# day: "*" +# month: "*" +# weekday: "*" +# job: "echo 'example job one command'" +# - name: "Example Job two" +# user: "postgres" +# file: /etc/cron.d/example_job_two +# minute: "00" +# hour: "2" +# day: "*" +# month: "*" +# weekday: "*" +# job: "echo 'example job two command'" + +# (optional) Configure mount points in /etc/fstab and mount the file system (if 'mount.src' is defined) +mount: + - path: "/pgdata" + src: "" # device UUID or path.
+ fstype: ext4 # if 'zfs' is specified a ZFS pool will be created + opts: defaults,noatime # not applicable to 'zfs' + state: mounted +# - path: "/pgwal" +# src: "" +# fstype: ext4 +# opts: defaults,noatime +# state: mounted + +# (optional) Execute custom commands or scripts +# This can be a direct command, a bash script content, or a path to a script on the host +pre_deploy_command: "" # Command or script to be executed before the Postgres cluster deployment +pre_deploy_command_timeout: 3600 # Timeout in seconds +pre_deploy_command_hosts: "postgres_cluster" # host groups where the pre_deploy_command should be executed +pre_deploy_command_print: true # Print the command in the ansible log +pre_deploy_command_print_result: true # Print the result of the command execution to the ansible log +pre_deploy_command_log: "/var/tmp/pre_deploy_command.log" + +post_deploy_command: "" # Command or script to be executed after the Postgres cluster deployment +post_deploy_command_timeout: 3600 # Timeout in seconds +post_deploy_command_hosts: "postgres_cluster" # host groups where the post_deploy_command should be executed +post_deploy_command_print: true # Print the command in the ansible log +post_deploy_command_print_result: true # Print the result of the command execution to the ansible log +post_deploy_command_log: "/var/tmp/post_deploy_command.log" + +# Supported Linux versions +os_valid_distributions: + - RedHat + - CentOS + - Rocky + - OracleLinux + - Ubuntu + - Debian + - AlmaLinux + +os_minimum_versions: + RedHat: 8 + CentOS: 8 + Rocky: 8.4 + AlmaLinux: 8.3 + OracleLinux: 8 + Ubuntu: 22.04 + Debian: 11 diff --git a/automation/roles/common/defaults/upgrade.yml b/automation/roles/common/defaults/upgrade.yml new file mode 100644 index 000000000..7b64ad761 --- /dev/null +++ b/automation/roles/common/defaults/upgrade.yml @@ -0,0 +1,98 @@ +# yamllint disable rule:line-length +--- +# Variables for the pg_upgrade.yml playbook + +# Note: +# There is no need to plan additional disk space, because when updating PostgreSQL, hard links are used instead of copying files. +# However, it is required that the pg_old_datadir and pg_new_datadir are located within the same top-level directory (pg_upper_datadir). +# https://www.postgresql.org/docs/current/pgupgrade.html + +# PostgreSQL versions +pg_old_version: "" # specify the current (old) version of PostgreSQL +pg_new_version: "" # specify the target version of PostgreSQL for the upgrade + +# Paths for old and new PostgreSQL versions +# Adjust these variables if the paths are different from the default value. + +# Directory containing binaries for the old PostgreSQL version. +pg_old_bindir: "{{ postgresql_bin_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_old_version | string) }}" +# Data directory path for the old PostgreSQL version. +pg_old_datadir: "{{ postgresql_data_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_old_version | string) }}" +# Configuration directory path for the old PostgreSQL version. +pg_old_confdir: "{{ postgresql_conf_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_old_version | string) }}" + +# Directory containing binaries for the new PostgreSQL version. +pg_new_bindir: "{{ postgresql_bin_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_new_version | string) }}" +# Data directory path for the new PostgreSQL version. 
+pg_new_datadir: "{{ postgresql_data_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_new_version | string) }}" +# Configuration directory path for the new PostgreSQL version. +pg_new_confdir: "{{ postgresql_conf_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_new_version | string) }}" +# Custom WAL directory for the new PostgreSQL version (symlink will be created) [optional]. +pg_new_wal_dir: "{{ postgresql_wal_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_new_version | string) }}" + +# pg_upper_datadir: Specifies the top-level directory containing both old and new PostgreSQL data directories. +# The variable is derived from pg_new_datadir by removing any trailing slash and getting its grandparent directory. +# Adjust if the data directory location differs from the default. +# Example: /var/lib/postgresql, /var/lib/pgsql, /pgdata +pg_upper_datadir: "{{ pg_new_datadir | regex_replace('/$', '') | dirname | dirname }}" + +# List of package names for the new PostgreSQL version to be installed. +# automatically detects the list of packages based on the 'postgresql_packages' variable +pg_new_packages: "{{ postgresql_packages | replace(postgresql_version | string, pg_new_version | string) }}" + +# Alternatively, you can explicitly specify the list of new packages to install. +# This gives you more control and should be used if the automatic update does not meet your needs. +# Uncomment and modify the following lines according to your requirements. Example: +# pg_new_packages: +# - postgresql-{{ pg_new_version }} +# - postgresql-client-{{ pg_new_version }} +# - postgresql-server-dev-{{ pg_new_version }} +# - postgresql-contrib-{{ pg_new_version }} +# - postgresql-{{ pg_new_version }}-repack" + +pg_old_packages_remove: true # remove old postgresql packages after upgrade + +# Timeout (in seconds) to be used when starting/stopping PostgreSQL during the upgrade. +pg_start_stop_timeout: 1800 # 30 minutes + +# Patroni configuration file path. +patroni_config_file: /etc/patroni/patroni.yml + +schema_compatibility_check: true # If 'true', a compatibility check of the database schema with the new PostgreSQL version will be performed before the upgrade. +schema_compatibility_check_port: "{{ (postgresql_port | int) + 1 }}" # Port used to run a temporary PostgreSQL instance for schema compatibility checking. +schema_compatibility_check_timeout: 3600 # Maximum duration (in seconds) for the compatibility check (using pg_dumpall --schema-only). + +update_extensions: true # if 'true', try to update extensions automatically + +vacuumdb_parallel_jobs: "{{ [ansible_processor_vcpus | int // 2, 1] | max }}" # use 50% CPU cores +vacuumdb_analyze_timeout: 3600 # seconds. The maximum duration of analyze command (soft limit, exceeding won't halt playbook) +# terminate active queries that are longer than the specified time (in seconds) during the collection of statistics. 
+ +# Do not perform an upgrade if: +max_replication_lag_bytes: 10485760 # 10 MiB - Maximum allowed replication lag in bytes +max_transaction_sec: 15 # Maximum allowed duration for a transaction in seconds + +# (optional) Copy any files located in the "files" directory to all servers +# example for Postgres Full-Text Search (FTS) files +copy_files_to_all_server: [] +# - { src: "files/numbers.syn", dest: "/usr/share/postgresql/{{ pg_new_version }}/tsearch_data/numbers.syn", owner: "root", group: "root", mode: "0644" } +# - { src: "files/part_of_speech_russian.stop", dest: "/usr/share/postgresql/{{ pg_new_version }}/tsearch_data/part_of_speech_russian.stop", owner: "root", group: "root", mode: "0644" } +# - { src: "files/ru_ru.affix", dest: "/usr/share/postgresql/{{ pg_new_version }}/tsearch_data/ru_ru.affix", owner: "root", group: "root", mode: "0644" } +# - { src: "files/ru_ru.dict", dest: "/usr/share/postgresql/{{ pg_new_version }}/tsearch_data/ru_ru.dict", owner: "root", group: "root", mode: "0644" } + +# if 'pgbouncer_install' is 'true' +pgbouncer_pool_pause: true # or 'false' if you don't want to pause pgbouncer pools during upgrade. +# the maximum waiting time (in seconds) for a pool to be paused, for each iteration of the loop that tries to pause all pools. +pgbouncer_pool_pause_timeout: 2 +# the time (in seconds) after which the script stops waiting for active queries to complete and terminates the slow ones. +pgbouncer_pool_pause_terminate_after: 30 +# the time (in seconds) after which the script exits with an error if it was not possible to pause all pgbouncer pools. +pgbouncer_pool_pause_stop_after: 60 +# wait for the completion of active queries that run longer than the specified time (in milliseconds) before trying to pause the pool. +pg_slow_active_query_treshold: 1000 +# terminate active queries that run longer than the specified time (in milliseconds) after reaching "pgbouncer_pool_pause_terminate_after" before trying to pause the pool. +pg_slow_active_query_treshold_to_terminate: 100 # (0 = terminate all active backends) + +# if 'pgbackrest_install' is 'true' +pgbackrest_stanza_upgrade: true # perform the "stanza-upgrade" command after the upgrade. diff --git a/automation/roles/confd/README.md b/automation/roles/confd/README.md new file mode 100644 index 000000000..f26dcaf0a --- /dev/null +++ b/automation/roles/confd/README.md @@ -0,0 +1 @@ +# Ansible Role: confd diff --git a/automation/roles/confd/defaults/main.yml b/automation/roles/confd/defaults/main.yml new file mode 100644 index 000000000..8ae0e3bf1 --- /dev/null +++ b/automation/roles/confd/defaults/main.yml @@ -0,0 +1,7 @@ +--- +confd_architecture_map: + amd64: amd64 + x86_64: amd64 + aarch64: arm64 + arm64: arm64 + 64-bit: amd64 diff --git a/roles/confd/handlers/main.yml b/automation/roles/confd/handlers/main.yml similarity index 83% rename from roles/confd/handlers/main.yml rename to automation/roles/confd/handlers/main.yml index 98a120c8f..5db66f957 100644 --- a/roles/confd/handlers/main.yml +++ b/automation/roles/confd/handlers/main.yml @@ -1,11 +1,8 @@ --- - - name: Restart confd service - systemd: + ansible.builtin.systemd: daemon_reload: true name: confd enabled: true state: restarted listen: "restart confd" - -...
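Returning to the pool-pause settings above: during pg_upgrade the playbook pauses PgBouncer pools so client connections survive the restart. A simplified sketch of that cycle (the real logic adds the timeouts and slow-query termination configured above; PAUSE/RESUME are PgBouncer admin-console commands, so the connecting user must be listed in pgbouncer_admin_users):

```yaml
# Sketch: pause all PgBouncer pools before pg_upgrade, resume afterwards.
- name: Pause all PgBouncer pools
  become: true
  become_user: postgres
  ansible.builtin.command: >-
    psql -h 127.0.0.1 -p {{ pgbouncer_listen_port }}
    -U {{ patroni_superuser_username }} -d pgbouncer -tAc "PAUSE"

- name: Resume pools after the upgrade
  become: true
  become_user: postgres
  ansible.builtin.command: >-
    psql -h 127.0.0.1 -p {{ pgbouncer_listen_port }}
    -U {{ patroni_superuser_username }} -d pgbouncer -tAc "RESUME"
```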
diff --git a/automation/roles/confd/tasks/main.yml b/automation/roles/confd/tasks/main.yml new file mode 100644 index 000000000..f8820fa85 --- /dev/null +++ b/automation/roles/confd/tasks/main.yml @@ -0,0 +1,164 @@ +--- +# install confd package from repo +- name: Download and copy "confd" binary file to /usr/local/bin/ + ansible.builtin.get_url: + url: "{{ item }}" + dest: /usr/local/bin/confd + mode: u+x,g+x,o+x + timeout: 60 + validate_certs: false + loop: + - "{{ confd_package_repo }}" + environment: "{{ proxy_env | default({}) }}" + when: installation_method == "repo" and confd_package_repo | length > 0 + tags: get_confd, confd + +# install confd package from file +- name: Copy "confd" binary file to /usr/local/bin/ + ansible.builtin.copy: + src: "{{ confd_package_file }}" + dest: /usr/local/bin/confd + mode: u+x,g+x,o+x + when: installation_method == "file" and confd_package_file | length > 0 + tags: get_confd, confd + +- name: Create conf directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + loop: + - /etc/confd/conf.d + - /etc/confd/templates + tags: confd_dir, confd_conf, confd + +# TLS (etcd) +- name: Copy etcd cert files for confd + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "etcd_cluster" + fetch_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + copy_tls_dir: "{{ confd_etcd_tls_dir | default('/etc/confd/tls/etcd') }}" + copy_tls_owner: "root" + when: + - tls_cert_generate | bool + - dcs_type == "etcd" + - not dcs_exists | bool + tags: patroni, patroni_conf + +- block: + - name: Generate conf file "/etc/confd/confd.toml" + ansible.builtin.template: + src: templates/confd.toml.j2 + dest: /etc/confd/confd.toml + notify: "restart confd" + tags: confd_toml + + - name: Generate conf file "/etc/confd/conf.d/haproxy.toml" + ansible.builtin.template: + src: templates/haproxy.toml.j2 + dest: /etc/confd/conf.d/haproxy.toml + notify: "restart confd" + tags: haproxy_toml + + - name: Generate template "/etc/confd/templates/haproxy.tmpl" + ansible.builtin.template: + src: templates/haproxy.tmpl.j2 + dest: /etc/confd/templates/haproxy.tmpl + notify: "restart confd" + tags: haproxy_tmpl + when: add_balancer is not defined or not add_balancer|bool + tags: confd_conf, confd + +- block: # for add_balancer.yml + - name: "Fetch confd.toml, haproxy.toml, haproxy.tmpl conf files from {{ groups.balancers[0] }}" + run_once: true + ansible.builtin.fetch: + src: "{{ item }}" + dest: files/ + validate_checksum: true + flat: true + loop: + - /etc/confd/confd.toml + - /etc/confd/conf.d/haproxy.toml + - /etc/confd/templates/haproxy.tmpl + delegate_to: "{{ groups.balancers[0] }}" + + - name: Copy confd.toml, haproxy.toml, haproxy.tmpl conf files to replica + ansible.builtin.copy: + src: "files/{{ item.conf }}" + dest: "{{ item.dest }}" + loop: + - { conf: "confd.toml", dest: "/etc/confd/confd.toml" } + - { conf: "haproxy.toml", dest: "/etc/confd/conf.d/haproxy.toml" } + - { conf: "haproxy.tmpl", dest: "/etc/confd/templates/haproxy.tmpl" } + loop_control: + label: "{{ item.dest }}" + notify: "restart confd" + + - name: Remove confd.toml, haproxy.toml, haproxy.tmpl files from localhost + become: false + run_once: true + ansible.builtin.file: + path: "files/{{ item }}" + state: absent + loop: + - confd.toml + - haproxy.toml + - haproxy.tmpl + delegate_to: localhost + + - name: Prepare haproxy.tmpl template file (replace "bind" for stats) + ansible.builtin.lineinfile: + path: /etc/confd/templates/haproxy.tmpl + regexp: "{{ 
line_item.regexp }}" + line: "{{ line_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: line_item + label: "{{ line_item.line }}" + notify: "restart confd" + when: cluster_vip is defined and cluster_vip | length > 0 + + - name: Prepare haproxy.tmpl template file (replace "bind" for stats) + ansible.builtin.lineinfile: + path: /etc/confd/templates/haproxy.tmpl + regexp: "{{ line_item_2.regexp }}" + line: "{{ line_item_2.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: line_item_2 + label: "{{ line_item_2.line }}" + notify: "restart confd" + when: cluster_vip is not defined or cluster_vip | length < 1 + when: add_balancer is defined and add_balancer|bool + tags: confd_conf, confd + +- name: Copy systemd service file + ansible.builtin.template: + src: templates/confd.service.j2 + dest: /etc/systemd/system/confd.service + notify: "restart confd" + tags: confd_service, confd diff --git a/roles/confd/templates/confd.service.j2 b/automation/roles/confd/templates/confd.service.j2 similarity index 100% rename from roles/confd/templates/confd.service.j2 rename to automation/roles/confd/templates/confd.service.j2 diff --git a/automation/roles/confd/templates/confd.toml.j2 b/automation/roles/confd/templates/confd.toml.j2 new file mode 100644 index 000000000..59b045180 --- /dev/null +++ b/automation/roles/confd/templates/confd.toml.j2 @@ -0,0 +1,26 @@ +backend = "etcdv3" +nodes = [ +{% if not dcs_exists|bool %} + {% for host in groups['etcd_cluster'] %} + "{{ patroni_etcd_protocol | default('http', true) }}://{{ hostvars[host]['inventory_hostname'] }}:2379", + {% endfor %} +{% endif %} +{% if dcs_exists|bool %} + {% for etcd_hosts in patroni_etcd_hosts %} + "{{ patroni_etcd_protocol | default('http', true) }}://{{etcd_hosts.host}}:{{etcd_hosts.port}}", + {% endfor %} +{% endif %} +] +{% if etcd_tls_enable | default(false) | bool %} +scheme = "https" +client_cakeys = "{{ confd_etcd_tls_dir | default('/etc/confd/tls/etcd') }}/{{ confd_etcd_client_cakey | default('ca.crt') 
}}" +client_cert = "{{ confd_etcd_tls_dir | default('/etc/confd/tls/etcd') }}/{{ confd_etcd_client_cert | default('server.crt') }}" +client_key = "{{ confd_etcd_tls_dir | default('/etc/confd/tls/etcd') }}/{{ confd_etcd_client_key | default('server.key') }}" +{% endif %} +{% if patroni_etcd_username | default('') | length > 0 %} +basic_auth = true +username = "{{ patroni_etcd_username | default('') }}" +password = "{{ patroni_etcd_password | default('') }}" +{% endif %} +watch = true +interval = 10 diff --git a/automation/roles/confd/templates/haproxy.tmpl.j2 b/automation/roles/confd/templates/haproxy.tmpl.j2 new file mode 100644 index 000000000..a0acb597f --- /dev/null +++ b/automation/roles/confd/templates/haproxy.tmpl.j2 @@ -0,0 +1,197 @@ +global + maxconn {{ haproxy_maxconn.global }} + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + +defaults + mode tcp + log global + option tcplog +{% if haproxy_log_format is defined %} + log-format '{{ haproxy_log_format }}' +{% endif %} + retries 2 + timeout queue 5s + timeout connect 5s + timeout client {{ haproxy_timeout.client }} + timeout server {{ haproxy_timeout.server }} + timeout check 15s + +listen stats + mode http + bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }} + stats enable + stats uri / + +listen master +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option httpchk OPTIONS /primary + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.master_direct is defined %} +listen master_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option httpchk OPTIONS /primary + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +listen replicas +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas }} 
+{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.replicas_direct is defined %} +listen replicas_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +listen replicas_sync +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /sync{{ '?' 
+ balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /sync + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.replicas_sync_direct is defined %} +listen replicas_sync_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /sync{{ '?' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /sync + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +listen replicas_async +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + +{% if pgbouncer_install|bool and 
haproxy_listen_port.replicas_async_direct is defined %} +# Asynchronous replicas, bypassing PgBouncer +listen replicas_async_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} +{{end}}{% endraw %} +{% endif %} + diff --git a/roles/confd/templates/haproxy.toml.j2 b/automation/roles/confd/templates/haproxy.toml.j2 similarity index 79% rename from roles/confd/templates/haproxy.toml.j2 rename to automation/roles/confd/templates/haproxy.toml.j2 index 5063349ff..f63106ca2 100644 --- a/roles/confd/templates/haproxy.toml.j2 +++ b/automation/roles/confd/templates/haproxy.toml.j2 @@ -1,5 +1,5 @@ [template] -prefix = "/service/{{ patroni_cluster_name }}" +prefix = "/{{ patroni_etcd_namespace | default('service') }}/{{ patroni_cluster_name }}" src = "haproxy.tmpl" dest = "/etc/haproxy/haproxy.cfg" {% if haproxy_installation_method == "src" %} diff --git a/automation/roles/consul/CHANGELOG.md b/automation/roles/consul/CHANGELOG.md new file mode 100644 index 000000000..d32bd7d33 --- /dev/null +++ b/automation/roles/consul/CHANGELOG.md @@ -0,0 +1,1090 @@ +## v2.6.1 + +- Update CONTRIBUTORS +- Prevent gathering facts for the same servers in a loop. (thanks Pavel Zinchuk) +- Update consul_systemd.service.j2 (thanks Han Sooloo) +- Allow "Connect" for any and all nodes (#401) (thanks adawalli) +- Increment to 1.7.3 (#395) (thanks Stuart Low) +- fix: Stop sending logs to syslog through systemd (#393) (thanks Samuel Mutel) +- Don't create TLS folder if consul_tls_copy_keys is false (#369) (thanks Samuel Mutel) +- Set up the role to be used in check mode (thanks Louis Paret) +- Use consul_node_name as empty by default to use hostname (#373) (thanks Louis Paret) +- Corrected version after Fedora dropped libselinux-python
(#385) (thanks jebas) +- Add leave_on_terminate (thanks Le Minh Duc) +- Allow consul connect on bootstrap nodes (thanks Robert Edström) +- Fix unarchive consul package to run_once (thanks schaltiemi) + +## v2.6.0 + +- Consul v1.7.0 +- Add GitHub workflows (thanks @gofrolist) +- Modernize PID path (thanks @smutel) +- Add Consul automatic startup to systemd in Nix tasks (thanks @smutel) +- Add verify_incoming_rpc option (thanks @smutel) +- Update CONTRIBUTORS +- Update documentation + +## v2.5.4 + +- Consul v1.6.3 +- consul_manage_group now defaults to true +- Set consul_node_name to ansible_hostname, resolves #337 +- Enable consul Connect (thanks @imcitius) +- Cloud auto discovery (thanks @imcitius) +- Use generated password as token UUID source (thanks @jmariondev) +- Fix ACL Replication Token sed pattern (thanks @jmariondev) +- Add when_false to ACL master lookup (thanks @jmariondev) +- Ensure enable_tag_override is json (thanks @slomo) +- Add support for -alt-domain (thanks @soloradish) +- Add enable_token_persistence option (thanks @smutel) +- Support new ARM builds for Consul 1.6.2+ (thanks @KyleOndy) +- Add CAP_NET_BIND_SERVICE to systemd unit (thanks @smutel) +- Fix configuration template (thanks @imcitius) +- Update documentation (thanks @karras) + +## v2.5.3 + +- Consul v1.6.2 +- Update documentation + +## v2.5.2 + +- Fix path / drop with_fileglob in install_remote (thanks @bbaassssiiee) +- Handle consul_encrypt_enable variable for Nix (thanks @bbaassssiiee) +- Parse acl_master_token from config (thanks @bbaassssiiee) +- Fix start service on Windows (thanks @imcitius) +- Preserve custom config (thanks @slomo) +- Update Windows for win_service usage (thanks @FozzY1234) +- Restart when TLS material changes (Thanks @bbaassssiiee) +- No tokens in logging (Thanks @bbaassssiiee) +- Flush handlers at the end of main (Thanks @bbaassssiiee) +- Read tokens from previously bootstrapped server (Thanks @bbaassssiiee) +- Rename `consul_server_key` variable +- Sort keys in service configuration (thanks @slomo) + +## v2.5.1 + +- Consul v1.6.1 +- Add run_once to delegated tasks (thanks @liuxu623) +- Fix service restart on upgrades (thanks @jpiron) +- Fix log directory ownership (thanks @liuxu623) +- Handle missing unzip on control host (thanks @bbaassssiiee) +- Add version check for log_rotate_max_files (thanks @jasonneurohr) +- Update documentation + +## v2.5.0 + +- Consul v1.6.0 +- Add documentation for new TLS options (thanks @jasonneurohr) +- Add support for translate_wan_address (@calmacara) +- Add `-log-file` (thanks @liuxu623) + +## v2.4.5 + +- Consul v1.5.3 +- Update molecule configuration (thanks @gofrolist) +- Support TLS files in subdirectories - resolves #297 +- Update some bare variable comparisons - resolves #293 +- Update server address for usage with --limit (thanks @danielkucera) +- Update snapshot configuration for TLS (thanks @jasonneurohr) +- Add TLS minimum version and cipher suite preferences (thanks @jasonneurohr) +- Update documentation +- Update CONTRIBUTORS + +## v2.4.4 + +- Consul v1.5.2 (thanks @patsevanton) +- Add Molecule support (thanks @gofrolist) +- Correct several task issues (thanks @gofrolist) + +## v2.4.3 + +- Consul v1.5.1 +- Update documentation + +## v2.4.2 + +- Correct ACL typo correction (thanks @bewiwi) +- Fix unarchive failure case (thanks @cyril-dussert) +- Update CONTRIBUTORS + +## v2.4.1 + +- Add LimitNOFILE option to systemd unit (thanks @liuxu623) +- Fix typo in replication token check (thanks @evilhamsterman) + +## v2.4.0 + +- Consul 
v1.5.0 +- Specify a token for a service (thanks @xeivieni) +- Empty consul_acl_master_token check (thanks @evilhamsterman) +- Separate Unix and Linux tasks from Windows tasks (thanks @evilhamsterman) + +## v2.3.6 + +- Continue with task cleanup +- Fix deleting of unregistered services (thanks @Shaiou) +- Fix issue in Amazon variables (thanks @ToROxI) +- Add bool filter to templates (thanks @eeroniemi) +- Fix CONSUL_ACL_POLICY (thanks @eeroniemi) +- Correct cleanup task fileglob bogusness +- Switch to SIGTERM in sysvinit stop + +## v2.3.5 + +- Consul v1.5.0 +- fixed multiarch deployment race condition (thanks @lanefu) +- Switched from systemctl command to systemd module [lint] +- Update for E504 use 'delegate_to: localhost' [lint] +- asserts +- install +- encrypt_gossip +- Update for E104 in with_fileglob for install_remote [lint] +- Update for E601 in syslog [lint] +- Update for E602 in tasks [lint] +- acl +- main +- Update example site playbook roles format +- Support install on Debian Testing (thanks @gfeun) +- Fix consul_bind_address (thanks @danielkucera) +- Custom bootstrap expect value (thanks @Roviluca) +- Fix Windows support for registering services (thanks @gyorgynadaban) +- Update documentation + +## v2.3.4 + +- Consul v1.4.3 +- Update documentation + +## v2.3.3 + +- Add services management (thanks @Sispheor) +- Add enable_local_script_checks configuration (thanks @canardleteer) +- Add ability to enable legacy GUI (thanks @imcitius) +- Optional domain datacenter delegation with `consul_delegate_datacenter_dns` + +## v2.3.2 + +- Consul v1.4.2 +- Remove token generation/retrieval on clients (thanks @jpiron) +- Add listen to all the handler tasks (@pwae) +- retry_join setup independent from the hosts servers (thanks @Fuochi-YNAP) + +## v2.3.1 + +- Add Consul 1.4.0 ACL configuration syntax support (thanks @jpiron) +- Fix unzip installation check task check mode (thanks @jpiron) +- Fix systemd configuration task handler notification (thanks @jpiron) + +## v2.3.0 + +- The role no longer attempts to install the unzip binary locally onto + the Ansible control host; it is now a hard dependency and role execution + will fail if unzip is not in the PATH on the control host. 
+- Snapshot agent installation and configuration (thanks @drewmullen) +- Delegate Consul datacenter DNS domain to Consul (thanks @teralype) +- Allow DNSmasq binding to particular interfaces (thanks @teralype) +- Update local tasks (thanks @sgrimm-sg) +- Update documentation + +## v2.2.0 + +- Consul v1.4.0 +- Update documentation + +## v2.1.1 + +- Consul v1.3.1 +- Configuration and documentation for gRPC (thanks @RavisMsk) +- Consistent boolean use +- Fix Consul restart handler reference (thanks @blaet) +- Write gossip key on all hosts (thanks @danielkucera) +- Protect local consul cluster key file (thanks @blaet) +- Support Amazon Linux (thanks @soloradish) +- Quiet ACL replication token retrieval (thanks @jpiron) +- disable_keyring_file configuration option (thanks @vincepii) +- Update tests +- Update documentation + +## v2.1.0 + +- Consul v1.3.0 +- Fix undefined is_virtualenv condition (thanks @jpiron) +- Ensure idempotent folder permissions (thanks @jpiron) +- Add configurable systemd restart time (thanks @abarbare) +- Update documentation (thanks @jeffwelling, @megamorf) + +## v2.0.9 + +- Consul v1.2.3 +- Update documentation + +## v2.0.8 + +- Normalize conditionals in all tasks +- Update documentation + +## v2.0.7 + +- Add initial support for Alpine Linux (thanks @replicajune) +- Add support for verify_incoming_https (thanks @jeffwelling) +- Fix ACL token behavior on existing configuration (thanks @abarbare) +- Windows enhancements and fixes (thanks @imcitius) +- Update CONTRIBUTORS +- Update Meta +- Update documentation + +## v2.0.6 + +- Update meta for ArchLinux to allow Galaxy import + +## v2.0.4 + +- Consul 1.2.2 +- Update remaining deprecated tests (thanks @viruzzo) +- Added handler to reload configuration on Linux (thanks @viruzzo) +- Add support for Oracle Linux (thanks @TheLastChosenOne) +- Fix generate `consul_acl_master_token` when not provided (thanks @abarbare) +- Update CONTRIBUTORS + +## v2.0.3 + +- Fix jinja2 retry_join loops (thanks @Logan2211) +- Dependency Management Improvements (thanks @Logan2211) +- Update some deprecated tests in main tasks +- Update CONTRIBUTORS +- Update documentation + +## v2.0.2 + +- Consul v1.2.0 +- Update documentation + +## v2.0.1 + +- Add beta UI flag (thanks @coughlanio) +- Clean up dir tasks (thanks @soloradish) + +## v2.0.0 + +- Consul v1.1.0 +- Update configuration directory permissions (thanks @Rtzq0) +- Update service script dependency (thanks @mattburgess) +- Assert if consul_group_name missing from groups (thanks @suzuki-shunsuke) +- Add Archlinux support +- Change syslog user to root (no syslog user on Debian/dir task fails) +- Updated CHANGELOG ordering 🎉 +- Updated CONTRIBUTORS + +## v1.60.0 + +- Consul v1.0.7 +- Option for TLS files already on the remote host (thanks @calebtonn) +- Raise minimum Ansible version to 2.4.0.0 +- Update documentation +- Update Vagrant documentation + +## v1.50.1 + +- Revert to old style retry_join which doesn't fail in all cases + +## v1.50.0 + +- Consul v1.0.6 +- Add support for setting syslog facility and syslog file (thanks @ykhemani) +- Update configuration +- Update tests +- Update documentation (thanks also to @ChrisMcKee) + +## v1.40.0 + +- Consul v1.0.3 +- It's 2018! 
+- Update configuration +- Update documentation + +## v1.30.2 + +- Correct retry_join block (thanks @hwmrocker) + +## v1.30.1 + +- Add performance tuning configuration (thanks @t0k4rt) +- Set raft multiplier to 1 +- Conditionally install Python dependency based on virtualenv or --user + Addresses https://github.com/brianshumate/ansible-consul/issues/129#issuecomment-356095611 +- Update includes to import_tasks and include_tasks +- Remove invalid consul_version key from configuration +- Update Vagrantfile +- Set client address to 0.0.0.0 so Vagrant based deploy checks now pass +- Update documentation + +## v1.30.0 + +- Consul v1.0.2 +- Update documentation + +## v1.29.0 + +- Consul v1.0.1 +- Fix idempotency (thanks @issmirnov) +- Make gossip encryption optional (thanks @hwmrocker) +- Install netaddr with `--user` +- Update documentation +- Update CONTRIBUTORS + +## v1.28.1 + +- Remove deprecated advertise_addrs to resolve #123 so that role works again + +## v1.28.0 + +- Consul 1.0! +- Fix python3 compatibility for meta data (thanks @groggemans) + +## v1.27.0 + +- Consul v0.9.3 +- Update server joining (thanks @groggemans) +- Fix types that should be lists (thanks @vincent-legoll) + +## v1.26.1 + +- Fix deprecation notice on include +- Change example server hostnames + +## v1.26.0 + +- Add node_meta config (thanks @groggemans) +- Add additional retry-join parameters (thanks @groggemans) +- Add DNSMasq for Red Hat (thanks @giannidallatorre) +- Fix typo (thanks @vincent-legoll) +- Allow post setup bootstrapping of ACLs (thanks @groggemans) +- Add disable_update_check to config options (thanks @groggemans) +- Fix list example data type (thanks @vincent-legoll) +- Remove tasks for installation of python-consul (thanks @vincent-legoll) + +## v1.25.4 + +- Add raft_protocol parameter, fix version compares (thanks @groggemans) +- Add missing address and port config (thanks @groggemans) +- Add missing ACL config options (thanks @groggemans) +- Prefer retry_join and retry_join_wan instead of start_join / start_join_wan +- DNSMasq updates (thanks @groggemans) + +## v1.25.3 + +- Consul v0.9.2 +- Add enable_script_checks parameter (thanks @groggemans) +- Update documentation + +## v1.25.2 + +- Rename `cluster_nodes` label to `consul_instances` + +## v1.25.1 + +- Support rolling upgrades on systemd based Linux (thanks oliverprater) +- Fix breaking change in paths and runtime warnings (thanks oliverprater) +- Set CONSUL_TLS_DIR default to `/etc/consul/ssl` for #95 + +## v1.25.0 + +- Consul version 0.9.0 +- Add `consul_tls_verify_server_hostname` to TLS configuration template +- Begin to add relevant Consul docs links to variable descriptions in README +- Fix formatting in README_VAGRANT (thanks @jstoja) +- Update CONTRIBUTORS + +## v1.24.3 + +- Consul v0.8.5 +- Fix "Check Consul HTTP API" via unix socket (thanks @vincent-legoll) +- Avoid warning about already existing directory (thanks @vincent-legoll) +- Fix typos in messages (thanks @vincent-legoll) +- Fix documentation about `consul_node_role` (thanks @vincent-legoll) +- Update documentation + +## v1.24.2 + +- Use consul_run_path variable (thanks @vincent-legoll) +- Replace remaining hardcoded paths (thanks @vincent-legoll) +- Factorize LOCK_FILE (thanks @vincent-legoll) +- CHANGELOG++ +- Update CONTRIBUTORS +- Update README + +## v1.24.1 + +- Add `ansible.cfg` for examples and install netaddr (thanks @arehmandev) +- Improve HTTP API check (thanks @dmke) +- Update CONTRIBUTORS + +## v1.24.0 + +- Consul 0.8.4 +- Remove `user_acl_policy.hcl.j2` and 
`user_custom.json.j2` +- Update configuration template with new ACL variables +- Remove consul_iface from vagrant_hosts +- Simplify ACL configuration +- Remove checks for `consul_acl_replication_token_display` +- Update Vagrantfile +- Update README + +## v1.23.1 + +- Add files directory + +## v1.23.0 + +- Combines all (client/server/bootstrap) config templates (thanks @groggemans) +- Template for dnsmasq settings (thanks @groggemans) + +## v1.22.0 + +- Revert changes from v1.21.2 and v1.21.1 + +## v1.21.2 + +- Actually add new template files :facepalm: + +## v1.21.1 + +- Update ACL tasks +- Rename configd_50custom.json.j2 template to user_custom.json.j2 +- Rename configd_50acl_policy.hcl template to user_acl_policy.hcl.j2 +- Do not enable a default set of ACL policies + +## v1.20.2 + +- Correct meta for Windows platform +- Update supported versions +- Update documentation + +## v1.20.1 + +- Update main tasks to move Windows specific tasks into blocks + +## v1.20.0 + +- Initial Windows support (thanks @judy) +- Update documentation +- Update CONTRIBUTORS + +## v1.19.1 + +- Consul version 0.8.3 +- Recurse perms through config, data, and log directories (thanks @misho-kr) +- Update documentation + +## v1.19.0 + +- Consul version 0.8.2 +- Enable consul_manage_group var and conditional in user_group tasks +- Initial multi datacenter awareness bits (thanks @groggemans) + +## v1.18.5 + +- Set `| bool` where needed to stop warnings about template delimiters +- Add consul group when managing the consul user + +## v1.18.4 + +- Correct links in README (thanks @MurphyMarkW) +- Lower minimum Debian version from 8.5 to 8 (addresses #63) + +## v1.18.3 + +- Generate correct JSON with TLS and ACL enabled (thanks @tbartelmess) +- Switch local tasks to `delegate_to` which should cover most concerns + +## v1.18.2 + +- Remove check from install_remote + +## v1.18.1 + +- Update stat task + +## v1.18.0 + +- Add new vars +- `consul_run_path` for the PID file +- Add bootstrap-expect toggle option (thanks @groggemans) +- Use directory variables in dirs tasks +- Do not attempt to install Consul binary if already found on consul_bin_path +- Fixes #60 +- Rename intermediate `boostrap_marker` var +- Formatting on CONTRIBUTING +- Update CONTRIBUTORS +- Updated tested versions +- Update documentation + +## v1.17.4 + +- Clean up task names and make more detailed; use consistent verb intros +- Switch to local_action on all local install tasks +- Already using grep, so let's just awk for the SHA and then register it + +## v1.17.3 + +- Revert local_action tasks +- Ansible generally spazzes out with "no action detected in task" + for any variation of local_task I tried + +## v1.17.2 + +- Switch to local_action for local tasks +- Wrap IPv6 addresses (thanks @tbartelmess) + +## v1.17.1 + +- Fix template filename (addresses #58) + +## v1.17.0 + +- Updated configuration directory structure (thanks @groggemans) +- Updated `consul_config_path` to point to `/etc/consul` +- Added `consul_configd_path` defaulting to `/etc/consul.d` +- Added `consul_debug` variable - defaults to _no_ (thanks @groggemans) +- Moved all config related tasks to `tasks/config.yml` (thanks @groggemans) +- Added ACL and TLS parameters to the main `config.json` (thanks @groggemans) +- Now using `/etc/consul/config.json` for all consul roles (thanks @groggemans) +- Fix small bug preventing RPC gossip key from being read (thanks @groggemans) +- Exposed `consul_node_role` as a fact (thanks @groggemans) +- Update documentation + +## v1.16.3 + +- Consul 0.8.1 +- Update 
documentation + +## v1.16.2 + +- Standing corrected - put node_role back into defaults as it will still be + overridden by host vars (sorry @groggemans) +- Update documentation + +## v1.16.1 + +- Revert node_role addition to default vars so clusters will still properly + come up since we basically lost access to the bootstrap role + +## v1.16.0 + +- Cleanup templates and default vars (thanks @groggemans) +- Add default consul_node_role (client) (thanks @groggemans) +- Update 'gather server facts' task/option (thanks @groggemans) +- Make user management optional + move to own file (thanks @groggemans) +- Properly name-space all vars (thanks @groggemans) +- Move directory settings to own file (thanks @groggemans) +- Replace unsupported Jinja do with if/else (thanks @groggemans) +- Fix missing endif in server configuration template (thanks @groggemans) +- Re-expose consul_bind_address as fact (thanks @groggemans) +- Template output improvements and style changes (thanks @groggemans) +- Add spaces at front and back of JSON arrays (thanks @groggemans) +- Update Vagrantfile +- Update documentation + +## v1.15.0 + +- Add option to download binaries directly to remotes (thanks @jonhatalla) +- Add environment variable overrides for the following default variables: +- `consul_bind_address` +- `consul_datacenter` +- `consul_domain` +- `consul_group_name` +- `consul_log_level` +- `consul_syslog_enable` +- `consul_acl_default_policy` +- `consul_acl_down_policy` +- Rename `consul_src_files` variable +- Rename `consul_copy_keys` variable +- Rename `consul_ca_crt` variable +- Rename `consul_server_crt` variable +- Rename `consul_tls_server_key` variable +- Rename `consul_verify_outgoing` variable +- Rename `consul_verify_server_hostname` variable +- Move `consul_iface` default to value of `hostvars.consul_iface` +- Override elsewhere or with `CONSUL_IFACE` environment variable +- Closes #40 +- Update documentation + +## v1.14.0 + +- Fix bootstrapping (thanks @groggemans) + +## v1.13.1 + +- Finish documentation updates + +## v1.13.0 + +- Cleanup of variables +- Fix statement preventing key transfer to new servers (thanks @groggemans) +- Change custom configuration naming convention +- Update documentation + +## v1.12.1 + +- Fix defaults, shake fist at YAML + +## v1.12.0 + +- Consul version 0.8.0 +- Update documentation + +## v1.11.3 + +- Update for config generation on only one host (thanks @misho-kr) +- Update meta + +## v1.11.2 + +- Fix documentation formatting issues +- Add support for Ubuntu 15.04 + +## v1.11.1 + +- Updated known good versions +- Format file names +- Look for existing config on all hosts (thanks @misho-kr) +- Update CONTRIBUTORS + +## v1.11.0 + +- File permission updates (thanks @arledesma) +- Explicit consul_user/consul_group ownership of configurations + (thanks @arledesma) +- Use consul_bin_path throughout (thanks @arledesma) + +## v1.10.5 + +- Additional fixes to debian init +- Add consul_config_custom for role users to specify new or overwrite + existing configuration (thanks @arledesma) + +## v1.10.4 + +- Corrections to config_debianint.j2 for #34 +- Update main task to prefer open Consul HTTP API port over PID file +- Update package cache before installing OS packages + (watch for and refuse reversion of this as it's occurred once now) + +## v1.10.3 + +- Allow specification of ports object (thanks @arledesma) +- Strict TLS material file permissions (thanks @arledesma) +- Update permissions modes to add leading zero +- Random task cleanup +- Update documentation + +## v1.10.2 + 
+- Update main task to create a mo better consul user (addresses #31) + +## v1.10.1 + +- Fixup client hosts in template (thanks @violuke) +- Optimize systemd unit file + +## v1.10.0 + +- Initial FreeBSD support +- Vagrantfile updated for FreeBSD +- Added checks for interface addresses for differences (obj vs. literal list) + in ipv4 addresses as returned by Linux vs. BSD/SmartOS +- New `consul_os` var gets operating system name as lowercase string +- Add AMD64 pass-through/kludge to consul_architecture_map configuration +- Update Vagrantfile +- Decrease RAM to 1024MB +- Add FreeBSD specific checks in inline script +- Add FreeBSD hard requirements (explicit MAC address, disable share, shell) +- Update documentation + +## v1.9.7 + +- Initial ARM support (thanks @lanefu) +- Update CONTRIBUTORS + +## v1.9.6 + +- Update license +- Update preinstall script +- Fix consul_bind_address (thanks @arledesma) +- Better config.json ingress with slurp (thanks @arledesma) + +## v1.9.5 + +- Initial SmartOS support (thanks @sperreault) +- Updated CONTRIBUTORS + +## v1.9.4 + +- Issue with ACL tasks + +## v1.9.3 + +- Fix local_action tasks + +## v1.9.2 + +- Keep gossip encryption in main tasks until we sort cross play var +- Compact YAML style for all tasks +- Fix task items, shorten timeouts +- Update documentation + +## v1.9.1 + +- Split gossip encryption out into separate task file + +## v1.9.0 + +- Local TLS keys (thanks @dggreenbaum) +- Remove Atlas support +- Update documentation + +## v1.8.2 + +- Update Consul bin path in keygen task + +## v1.8.1 + +- Consul 0.7.5 +- Update documentation +- Contributors correction + +## v1.8.0 + +- Consul 0.7.5 +- BREAKING CHANGE: Deprecate read/write of ACL tokens from file system + functionality and prefer setting tokens from existing cluster nodes with + `CONSUL_ACL_MASTER_TOKEN` and `CONSUL_ACL_REPLICATION_TOKEN` environment + variables instead +- Update documentation + +## v1.7.4 + +- Consul 0.7.3 +- Update documentation + +## v1.7.3 + +- Version updates +- Task edits +- add CONTRIBUTING.md + +## v1.7.2 + +- Fix non-working cleanup task +- Update README + +## v1.7.0 + +- Consul version 0.7.2 + +## v1.6.3 + +- Ensure that all local_action tasks have become: no (thanks @itewk) + +## v1.6.2 + +- Stop reconfiguring bootstrap node as it's not really necessary and + spurious races cause failure to re-establish cluster quorum when doing so +- CONSUL_VERSION environment variable +- Deprecated default variables cleanup + +## v1.6.1 + +- Drop Trusty support from meta for now (for #19) + +## v1.6.0 + +- Update task logic around initscripts (for #19) +- Fix issues in initscripts +- Rename Debian init script template +- Update documentation +- Fixing bug with deleting file. Better regex. Formatting. (Thanks @violuke) +- Remember ACL master/replication tokens between runs. + Actually set replication token. (Thanks @violuke) +- Typo fix (Thanks @violuke) +- Allowing re-running to add new nodes. More HA too. 
(Thanks @violuke) + +## v1.5.7 + +- Remove unnecessary code (thanks @kostyrevaa) +- Determine binary's SHA 256 from releases.hashicorp.com (for #16) +- Update documentation + +## v1.5.6 + +- Correct Atlas variable names + +## v1.5.5 + +- Initial attempts at idempotency in main tasks (for #14, #15) + +## v1.5.4 + +- Recursors as env var + +## v1.5.3 + +- Update start_join for client configuration template + +## v1.5.3 + +- Consul version 0.7.1 +- Consistent template names +- Update documentation + +## v1.5.1 + +- Fail when ethernet interface specified by consul_iface not found on + the system (addresses #13) + +## v1.5.0 + +- Add initial TLS support +- Update documentation + +## v1.4.1 + +- Move Dnsmasq restart to inside of tasks +- Add client dependencies for further configuration (thanks @crumohr) +- Fix error using predefined encryption key (thanks @crumohr) +- Removal of redundant includes (thanks @crumohr) + +## v1.4.0 + +- Compatibility with Ubuntu 16.04 (thanks @crumohr) +- iptables support (thanks @crumohr) +- Booleans instead of strings for variables (thanks @crumohr) +- Runnable if DNS is broken (thanks @crumohr) +- Remove unused variables +- Update block conditional for ACLs +- Update documentation + +## v1.3.4 + +- Update documentation + +## v1.3.3 + +- Update/validate CentOS 7 box +- Update documentation +- Updated failure cases for CentOS + +## v1.3.2 + +- Correct CONSUL_DNSMASQ_ENABLE var name + +## v1.3.1 + +- Correct variable names +- Add token display variables +- Update documentation +- Remove deprecated variables + +## v1.3.0 + +- Initial ACL support +- Initial Atlas support +- Streamline main tasks +- Update documentation +- Update variables + +## v1.2.16 + +- Clean up variables (thanks @jessedefer) +- Update documentation (thanks @jessedefer) +- Update CONTRIBUTORS + +## v1.2.15 + +- Fail on older versions +- Move distro vars to defaults +- Remove vars + +## v1.2.14 + +- Documentation updates + +## v1.2.13 + +- Doc meta + +## v1.2.12 + +- Update documentation + +## v1.2.11 + +- Update supported versions +- Fix up unarchive task quoting + +## v1.2.10 + +- Added consul_rpc_bind_address +- Updated documentation + +## v1.2.9 + +- Download once, copy many for Consul binary +- Rename package variables + +## v1.2.8 + +- Stop creating UI directory +- Set correct RAM in Vagrantfile + +## v1.2.7 + +- Secondary nodes now join only the bootstrap node +- Added consul_bootstrap_interface variable +- Add PIDFile to systemd unit +- Updated documentation + +## v1.2.6 + +- Update documentation +- Add `consul_node_name` variable +- Add `consul_dns_bind_address` variable +- Add `consul_http_bind_address` variable +- Add `consul_https_bind_address` variable +- Add initial ACL variables + +## v1.2.5 + +- Add LICENSE.txt for Apache 2.0 license + +## v1.2.4 + +- Updated README +- Undo 125bd4bb369bb85f58a09b5dc81839e2779bd29f as dots in node_name breaks + DNS API (without recursor option) and also breaks dnsmasq option + +## v1.2.3 + +- Still with the tests + +## v1.2.3 + +- Updated README + +## v1.2.1 + +- Tests work locally but not in Travis; trying an env var instead of cfg + +## v1.2.0 + +- Consul version 0.7.0 +- UI is built in now, so no longer downloaded / installed separately +- Usability improvements (thanks @Rodjers) + +## v1.1.0 + +- Bare role now installs and bootstraps cluster; included site.yml will also + reconfigure bootstrap node as server and optionally enable dnsmasq + forwarding for all cluster agents +- Remove bad client_addr bind in favor of default (localhost) + Some 
weirdness was occurring whereby the client APIs were listening on + TCP6/UDP6 sockets but not TCP4/UDP4 when client_addr set to 0.0.0.0 +- Adjust timeouts for cluster UI check +- Default configurable domain to "consul" so that examples from docs work, etc. +- Combine all OS vars into main (addresses undefined var warnings) +- Removed separate OS var files +- Updated known working software versions +- Any errors are fatal for the site.yml example playbook +- Explicit pid-file to use in wait_for +- Remove cruft from init script +- Update documentation + +## v1.0.15 + +- Meta update + +## v1.0.14 + +- Initial test +- Initial Travis CI setup + +## v1.0.13 + +- Add initial dnsmasq front end bits +- Reconfigure bootstrap node for normal operation (remove bootstrap-expect) + after initial cluster formation and restart bootstrap node + +## v1.0.12 + +- FIX: No such file or directory /etc/init.d/functions (thanks @oliverprater) +- FIX: Using bare variables is deprecated (thanks @oliverprater) +- Added CONTRIBUTORS.md +- Updated documentation + +## v1.0.11 + +- Renamed bootstrap template + +## v1.0.10 + +- Remove extra datacenter definition + +## v1.0.9 + +- Change datacenter value + +## v1.0.8 + +- Update documentation + +## v1.0.7 + +- Update supported versions +- Update documentation + +## v1.0.6 + +- Updated to Consul 0.6.4 +- Make bind_address configurable for #1 +- Cleaned up deprecated bare variables +- Updated supporting software versions +- Updated documentation + +## v1.0.5 + +- Updated defaults and Consul version (thanks @bscott) +- Made cluster bootable and switched to become_user + other Ansible best + practices (thanks @Rodjers) +- Updated minimum Ansible version required in meta + +## v1.0.4 + +- Renamed consul_nodes label for better compatibility with my other roles + +## v1.0.3 + +- Prefix /usr/local/bin in PATH for cases where the consul binary is not found +- Changed UI path +- Add generic SysV init script +- Add Debian init script +- Use systemd for distribution major versions >= 7 +- Remove Upstart script +- Updated configuration files + +## v1.0.2 + +- Removed the need for cluster_nodes variable +- Fix client template task +- Fix invalid JSON in the config.json outputs +- Updated documentation + +## v1.0.1 + +- Updated README + +## v1.0.0 + +- Installs Consul and Consul UI to each node +- Installs example configuration for bootstrap, server, and client +- Installs example upstart script diff --git a/automation/roles/consul/CONTRIBUTING.md b/automation/roles/consul/CONTRIBUTING.md new file mode 100644 index 000000000..849db0bb9 --- /dev/null +++ b/automation/roles/consul/CONTRIBUTING.md @@ -0,0 +1,91 @@ +# Contributing + +When contributing to this repository, please first discuss the change you wish +to make via issue, email, or any other method with the owners of this repository before making a change. + +Do note that this project has a code of conduct; please be sure to follow it +in all of your project interactions. + +## Pull Request Process + +1. Ensure any install or build artifacts are removed before the end of the layer when doing a build +1. Update the README.md or README_VAGRANT.md with details of changes to the interface; this includes new environment variables, exposed ports, useful file locations and container parameters +1. Increase the version numbers in any example files and the README.md to the new version that this Pull Request would represent. + The versioning scheme we use is (mostly) [SemVer](http://semver.org/) +1. 
You may merge the Pull Request in once you have the sign-off of two other project contributors, or if you do not have permission to do that, you can request the second reviewer to merge it for you + +## Code of Conduct + +### Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project +and our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +### Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Showing empathy towards other community members +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community + +Examples of unacceptable behavior by participants include: + +- Use of sexualized language or imagery and unwelcome sexual attention + or advances +- Insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +### Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +### Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +### Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project leadership: bas.meijer me com. + +All complaints will be reviewed and investigated and will result in a response +that is deemed necessary and appropriate to the circumstances. The project +team is obligated to maintain confidentiality with regard to the reporter of +an incident. Further details of specific enforcement policies may be posted +separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +### Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/automation/roles/consul/CONTRIBUTORS.md b/automation/roles/consul/CONTRIBUTORS.md new file mode 100644 index 000000000..119a1c952 --- /dev/null +++ b/automation/roles/consul/CONTRIBUTORS.md @@ -0,0 +1,90 @@ +# Contributors + +Thank you to all these fine folks for helping with ansible-consul! + +- [@abarbare](https://github.com/abarbare) +- [@adawalli](https://github.com/adawalli) +- [@arehmandev](https://github.com/arehmandev) +- [@arledesma](https://github.com/arledesma) +- [@arouene](https://github.com/arouene) +- [@bbaassssiiee](https://github.com/bbaassssiiee) +- [@blaet](https://github.com/blaet) +- [@bscott](https://github.com/bscott) +- [@calebtonn](https://github.com/calebtonn) +- [@calmacara](https://github.com/calmacara) +- [@canardleteer](https://github.com/canardleteer) +- [@ChrisMcKee](https://github.com/ChrisMcKee) +- [@chrisparnin](https://github.com/chrisparnin) +- [@coughlanio](https://github.com/coughlanio) +- [@crumohr](https://github.com/crumohr) +- [@danielkucera](https://github.com/danielkucera) +- [@dggreenbaum](https://github.com/dggreenbaum) +- [@dmke](https://github.com/dmke) +- [@ducminhle](https://github.com/ducminhle) +- [@cyril-dussert](https://github.com/cyril-dussert) +- [@eeroniemi](https://github.com/eeroniemi) +- [@evilhamsterman](https://github.com/evilhamsterman) +- [@FozzY1234](https://github.com/FozzY1234) +- [@Fuochi-YNAP](https://github.com/Fuochi-YNAP) +- [@giannidallatorre](https://github.com/giannidallatorre) +- [@GnomeZworc](https://github.com/GnomeZworc) +- [@gofrolist](https://github.com/gofrolist) +- [@groggemans](https://github.com/groggemans) +- [@gyorgynadaban](https://github.com/gyorgynadaban) +- [@HanSooloo](https://github.com/HanSooloo) +- [@hwmrocker](https://github.com/hwmrocker) +- [@imcitius](https://github.com/imcitius) +- [@issmirnov](https://github.com/issmirnov) +- [@itewk](https://github.com/itewk) +- [@jasonneurohr](https://github.com/jasonneurohr) +- [@jebas](https://github.com/jebas) +- [@jeffwelling](https://github.com/jeffwelling) +- [@jessedefer](https://github.com/jessedefer) +- [@jmariondev](https://github.com/jmariondev) +- [@jonhatalla](https://github.com/jonhatalla) +- [@jpiron](https://github.com/jpiron) +- [@jstoja](https://github.com/jstoja) +- [@judy](http://judy.github.io) +- [@kostyrevaa](https://github.com/kostyrevaa) +- [@KyleOndy](https://github.com/KyleOndy) +- [@lanefu](https://github.com/lanefu) +- [@Legogris](https://github.com/Legogris) +- [@Logan2211](https://github.com/Logan2211) +- [@MattBurgess](https://github.com/MattBurgess) +- [@megamorf](https://github.com/megamorf) +- [@misho-kr](https://github.com/misho-kr) +- [@MurphyMarkW](https://github.com/MurphyMarkW) +- [@oliverprater](https://github.com/oliverprater) +- [@paretl](https://github.com/paretl) +- [@patsevanton](https://github.com/patsevanton) +- [@pavel-z1](https://github.com/pavel-z1) +- [@perlboy](https://github.com/perlboy) +- [@pwae](https://github.com/pwae) +- [@RavisMsk](https://github.com/RavisMsk) +- [@replicajune](https://github.com/replicajune) +- [@Rodjers](https://github.com/Rodjers) +- [@Roviluca](https://github.com/Roviluca) +- [@Rtzq0](https://github.com/Rtzq0) +- [@schaltiemi](https://github.com/schaltiemi) +- 
[@Shaiou](https://github.com/Shaiou) +- [@Sispheor](https://github.com/Sispheor) +- [@slomo](https://github.com/slomo) +- [@smutel](https://github.com/smutel) +- [@soloradish](https://github.com/soloradish) +- [@sperreault](https://github.com/sperreault) +- [@suzuki-shunsuke](https://github.com/suzuki-shunsuke) +- [@t0k4rt](https://github.com/t0k4rt) +- [@tbartelmess](https://github.com/tbartelmess) +- [@teralype](https://github.com/teralype) +- [@TheLastChosenOne](https://github.com/TheLastChosenOne) +- [@timvaillancourt](https://github.com/timvaillancourt) +- [@vincent-legoll](https://github.com/vincent-legoll) +- [@vincepii](https://github.com/vincepii) +- [@violuke](https://github.com/violuke) +- [@viruzzo](https://github.com/viruzzo) +- [@xeivieni](https://github.com/xeivieni) +- [@ykhemani](https://github.com/ykhemani) + +If you have contributed but do not appear here, please fear not and accept +apologies for the omission. Contact `bas.meijer me com` and +please let me know! diff --git a/automation/roles/consul/LICENSE.txt b/automation/roles/consul/LICENSE.txt new file mode 100644 index 000000000..9e1b15a46 --- /dev/null +++ b/automation/roles/consul/LICENSE.txt @@ -0,0 +1,10 @@ +Copyright (c) 2018, Brian Shumate +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/automation/roles/consul/README.md b/automation/roles/consul/README.md new file mode 100644 index 000000000..78a6374fa --- /dev/null +++ b/automation/roles/consul/README.md @@ -0,0 +1,1295 @@ +# Consul + +![Molecule](https://github.com/ansible-community/ansible-consul/workflows/Molecule/badge.svg?branch=master&event=pull_request) +[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/ansible-community/ansible-consul.svg)](http://isitmaintained.com/project/ansible-community/ansible-consul "Average time to resolve an issue") +[![Percentage of issues still open](http://isitmaintained.com/badge/open/ansible-community/ansible-consul.svg)](http://isitmaintained.com/project/ansible-community/ansible-consul "Percentage of issues still open") + +This Ansible role installs [Consul](https://consul.io/), including establishing a filesystem structure and server or client agent configuration with support for some common operational features. 
+ +It can also bootstrap a development or evaluation cluster of 3 server agents running in a Vagrant and VirtualBox based environment. See [README_VAGRANT.md](https://github.com/ansible-community/ansible-consul/blob/master/examples/README_VAGRANT.md) and the associated [Vagrantfile](https://github.com/ansible-community/ansible-consul/blob/master/examples/Vagrantfile) for more details. + +## Role Philosophy + +> “Another flaw in the human character is that everybody wants to build and nobody wants to do maintenance.”
+> ― Kurt Vonnegut, Hocus Pocus + +Please note that the original design goal of this role was the initial installation and bootstrapping of a Consul server cluster environment, so it does not currently concern itself (all that much) with performing ongoing maintenance of a cluster. + +Many users have expressed that the Vagrant-based environment makes getting a working local Consul server cluster up and running an easy process, so this role will target that experience as a primary motivator for its existence. + +If you get some mileage from it in other ways, then all the better! + +## Role migration and installation + +This role was originally developed by Brian Shumate and was known on Ansible Galaxy as **brianshumate.consul**. Brian asked the community to be relieved of the maintenance burden, and therefore Bas Meijer transferred the role to **ansible-community** so that a team of volunteers can maintain it. At the moment there is no membership of ansible-community on https://galaxy.ansible.com, so to install this role into your project you should create a file `requirements.yml` in the `roles/` subdirectory of your project with this content: + +```yaml +--- +- src: https://github.com/ansible-community/ansible-consul.git + name: ansible-consul + scm: git + version: master +``` + +This repo has tagged releases that you can use to pin the version. + +Tower will install the role automatically. If you use the CLI to control Ansible, install it like this: + +``` +ansible-galaxy install -p roles -r roles/requirements.yml +``` + +## Requirements + +This role requires a FreeBSD, Debian, or Red Hat Enterprise Linux distribution or Windows Server 2012 R2. + +The role might work with other OS distributions and versions, but is known to function well with the following software versions: + +- Consul: 1.8.7 +- Ansible: 2.8.2 +- Alpine Linux: 3.8 +- CentOS: 7, 8 +- Debian: 9 +- FreeBSD: 11 +- Mac OS X: 10.15 (Catalina) +- RHEL: 7, 8 +- Rocky Linux: 8 +- OracleLinux: 7, 8 +- Ubuntu: 16.04 +- Windows: Server 2012 R2 + +Note that for the "local" installation mode (the default), this role will locally download only one instance of the Consul archive, unzip it, and install the resulting binary on all desired Consul hosts. + +This requires that `unzip` is available on the Ansible control host; the role will fail if it doesn't detect `unzip` in the PATH. + +Collection requirements for this role are listed in the [`requirements.yml`](requirements.yml) file. It is your responsibility to make sure that you install these collections to ensure that the role runs properly. Usually, this can be done with: + +``` +ansible-galaxy collection install -r requirements.yml +``` + +## Caveats + +This role does not fully support the limit option (`ansible -l`) to limit the hosts, as this will break populating required host variables. If you do use the limit option with this role, you may encounter template errors like: + +``` +Undefined is not JSON serializable. +``` + +## Role Variables + +The role uses variables defined in these three places: + +- Hosts inventory file (see `examples/vagrant_hosts` for an example) +- `vars/*.yml` (primarily OS/distribution-specific variables) +- `defaults/main.yml` (everything else) + +> :warning: **NOTE**: The role relies on the inventory host group for the Consul servers being defined as the variable `consul_group_name`; it will not function properly otherwise. Alternatively, the Consul servers can be placed in the default host group `[consul_instances]` in the inventory, as shown in the examples below. 
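+ +As an illustrative sketch only (the hostnames and role assignments here are hypothetical, not role defaults), such an inventory could look like: + +``` +[consul_instances] +consul1.example.com consul_node_role=bootstrap +consul2.example.com consul_node_role=server +consul3.example.com consul_node_role=server +consul4.example.com consul_node_role=client +```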
+ +Many role variables can also take their values from environment variables; those are noted in the description where appropriate. + +### `consul_version` + +- Version to install +- Set to `latest` for the latest available version of Consul +- Default value: 1.8.7 + +### `consul_architecture_map` + +- Dictionary for translating _ansible_architecture_ values to the Go architecture naming convention +- Default value: dict + +### `consul_architecture` + +- System architecture as determined by `{{ consul_architecture_map[ansible_architecture] }}` +- Default value (determined at runtime): amd64, arm, or arm64 + +### `consul_os` + +- Operating system name in lowercase representation +- Default value: `{{ ansible_os_family | lower }}` + +### `consul_install_dependencies` + +- Install Python and package dependencies required for the role to function. +- Default value: true + +### `consul_zip_url` + +- Consul archive file download URL +- Default value: `https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_{{ consul_os }}_{{ consul_architecture }}.zip` + +### `consul_checksum_file_url` + +- Package SHA256 summaries file URL +- Default value: `https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_SHA256SUMS` + +### `consul_bin_path` + +- Binary installation path +- Default Linux value: `/usr/local/bin` +- Default Windows value: `C:\ProgramData\consul\bin` + +### `consul_config_path` + +- Base configuration file path +- Default Linux value: `/etc/consul` +- Default Windows value: `C:\ProgramData\consul\config` + +### `consul_configd_path` + +- Additional configuration directory +- Default Linux value: `{{ consul_config_path }}/consul.d` +- Default Windows value: `C:\ProgramData\consul\config.d` + +### `consul_data_path` + +- Data directory path as defined in [data_dir or -data-dir](https://www.consul.io/docs/agent/options.html#_data_dir) +- Default Linux value: `/opt/consul` +- Default Windows value: `C:\ProgramData\consul\data` + +### `consul_configure_syslogd` + +- Enable configuration of rsyslogd or syslog-ng on Linux. If disabled, Consul will still log to syslog if `consul_syslog_enable` is true, but the syslog daemon won't be configured to write Consul logs to their own logfile. + - Override with `CONSUL_CONFIGURE_SYSLOGD` environment variable +- Default Linux value: _false_ + +### `consul_log_path` + +- If `consul_syslog_enable` is false + - Log path for use in [log_file or -log-file](https://www.consul.io/docs/agent/options.html#_log_file) +- If `consul_syslog_enable` is true + - Log path for use in rsyslogd configuration on Linux. Ignored if `consul_configure_syslogd` is false. +- Default Linux value: `/var/log/consul` + - Override with `CONSUL_LOG_PATH` environment variable +- Default Windows value: `C:\ProgramData\consul\log` + +### `consul_log_file` + +- If `consul_syslog_enable` is false + - Log file for use in [log_file or -log-file](https://www.consul.io/docs/agent/options.html#_log_file) +- If `consul_syslog_enable` is true + - Log file for use in rsyslogd configuration on Linux. Ignored if `consul_configure_syslogd` is false. +- Override with `CONSUL_LOG_FILE` environment variable +- Default Linux value: `consul.log`
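+ +As a minimal sketch, sending Consul logs to syslog with a dedicated logfile could combine the variables above like this (the path and filename shown are the documented Linux defaults; only the two booleans differ from their defaults): + +```yaml +consul_syslog_enable: true +consul_configure_syslogd: true +consul_log_path: /var/log/consul +consul_log_file: consul.log +```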
+ +### `consul_log_rotate_bytes` + +- Log rotate bytes as defined in [log_rotate_bytes or -log-rotate-bytes](https://www.consul.io/docs/agent/options.html#_log_rotate_bytes) + - Override with `CONSUL_LOG_ROTATE_BYTES` environment variable +- Ignored if `consul_syslog_enable` is true +- Default value: 0 + +### `consul_log_rotate_duration` + +- Log rotate duration as defined in [log_rotate_duration or -log-rotate-duration](https://www.consul.io/docs/agent/options.html#_log_rotate_duration) + - Override with `CONSUL_LOG_ROTATE_DURATION` environment variable +- Ignored if `consul_syslog_enable` is true +- Default value: 24h + +### `consul_log_rotate_max_files` + +- Log rotate max files as defined in [log_rotate_max_files or -log-rotate-max-files](https://www.consul.io/docs/agent/options.html#_log_rotate_max_files) + - Override with `CONSUL_LOG_ROTATE_MAX_FILES` environment variable +- Ignored if `consul_syslog_enable` is true +- Default value: 0 + +### `consul_syslog_facility` + +- Syslog facility as defined in [syslog_facility](https://www.consul.io/docs/agent/options.html#syslog_facility) + - Override with `CONSUL_SYSLOG_FACILITY` environment variable +- Default Linux value: local0 + +### `syslog_user` + +- Owner of `rsyslogd` process on Linux. `consul_log_path`'s ownership is set to this user on Linux. Ignored if `consul_configure_syslogd` is false. + - Override with `SYSLOG_USER` environment variable +- Default Linux value: syslog + +### `syslog_group` + +- Group of user running `rsyslogd` process on Linux. `consul_log_path`'s group ownership is set to this group on Linux. Ignored if `consul_configure_syslogd` is false. + - Override with `SYSLOG_GROUP` environment variable +- Default value: adm + +### `consul_run_path` + +- Run path for process identifier (PID) file +- Default Linux value: `/run/consul` +- Default Windows value: `C:\ProgramData\consul` + +### `consul_user` + +- OS user +- Default Linux value: consul +- Default Windows value: LocalSystem + +### `consul_manage_user` + +- Whether to create the user defined by `consul_user` or not +- Default value: true + +### `consul_group` + +- OS group +- Default value: bin + +### `consul_manage_group` + +- Whether to create the group defined by `consul_group` or not +- Default value: true + +### `consul_group_name` + +- Inventory group name + - Override with `CONSUL_GROUP_NAME` environment variable +- Default value: consul_instances + +### `consul_retry_interval` + +- Interval for reconnection attempts to LAN servers +- Default value: 30s + +### `consul_retry_interval_wan` + +- Interval for reconnection attempts to WAN servers +- Default value: 30s + +### `consul_retry_join_skip_hosts` + +- If true, the config value for retry_join won't be populated with the role's default server hosts. The value can be initialized using `consul_join` +- Default value: false + +### `consul_retry_max` + +- Max reconnection attempts to LAN servers before failing (0 = infinite) +- Default value: 0 + +### `consul_retry_max_wan` + +- Max reconnection attempts to WAN servers before failing (0 = infinite) +- Default value: 0 + +### `consul_join` + +- List of LAN servers, not managed by this role, to join (IPv4, IPv6, or DNS addresses) +- Default value: [] + +### `consul_join_wan` + +- List of WAN servers, not managed by this role, to join (IPv4, IPv6, or DNS addresses) +- Default value: []
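+ +For example, joining LAN servers that are not managed by this role via `consul_join` might look like the following sketch (both addresses are hypothetical): + +```yaml +consul_join: + - "10.1.2.3" + - "consul.example.internal" +```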
+ +### `consul_servers` + +It's typically not necessary to manually alter this list. + +- List of server nodes +- Default value: List of all nodes in `consul_group_name` with + `consul_node_role` set to server or bootstrap + +### `consul_bootstrap_expect` + +- Boolean that adds the bootstrap_expect value to the Consul servers' config file +- Default value: false + +### `consul_bootstrap_expect_value` + +- Integer defining the minimum number of Consul servers that must join the cluster in order to elect a leader. +- Default value: Calculated at runtime based on the number of nodes + +### `consul_gather_server_facts` + +This feature makes it possible to gather the `consul_advertise_address(_wan)` from servers that are currently not targeted by the playbook. + +To make this possible the `delegate_facts` option is used; note that this option has been problematic. + +- Gather facts from servers that are not currently targeted +- Default value: false + +### `consul_datacenter` + +- Datacenter label + - Override with `CONSUL_DATACENTER` environment variable +- Default value: dc1 + +### `consul_domain` + +- Consul domain name as defined in [domain or -domain](https://www.consul.io/docs/agent/options.html#_domain) + - Override with `CONSUL_DOMAIN` environment variable +- Default value: consul + +### `consul_alt_domain` + +- Consul domain name as defined in [alt_domain or -alt-domain](https://www.consul.io/docs/agent/options.html#_alt_domain) + - Override with `CONSUL_ALT_DOMAIN` environment variable +- Default value: Empty string + +### `consul_node_meta` + +- Consul node metadata (key-value) +- Supported in Consul version 0.7.3 or later +- Default value: _{}_ +- Example: + +```yaml +consul_node_meta: + node_type: "my-custom-type" + node_meta1: "metadata1" + node_meta2: "metadata2" +``` + +### `consul_log_level` + +- Log level as defined in [log_level or -log-level](https://www.consul.io/docs/agent/options.html#_log_level) + - Override with `CONSUL_LOG_LEVEL` environment variable +- Default value: INFO + +### `consul_syslog_enable` + +- Log to syslog as defined in [enable_syslog or -syslog](https://www.consul.io/docs/agent/options.html#_syslog) + - Override with `CONSUL_SYSLOG_ENABLE` environment variable +- Default Linux value: false +- Default Windows value: false + +### `consul_iface` + +- Consul network interface + - Override with `CONSUL_IFACE` environment variable +- Default value: `{{ ansible_default_ipv4.interface }}` + +### `consul_bind_address` + +- Bind address + - Override with `CONSUL_BIND_ADDRESS` environment variable +- Default value: default ipv4 address, or address of interface configured by + `consul_iface` + +### `consul_advertise_address` + +- LAN advertise address +- Default value: `consul_bind_address` + +### `consul_advertise_address_wan` + +- WAN advertise address +- Default value: `consul_bind_address` + +### `consul_translate_wan_address` + +- Prefer a node's configured WAN address when serving DNS +- Default value: false + +### `consul_advertise_addresses` + +- Advanced advertise addresses settings +- Individual addresses can be overwritten using the `consul_advertise_addresses_*` variables +- Default value: + ```yaml + consul_advertise_addresses: + serf_lan: "{{ consul_advertise_addresses_serf_lan | default(consul_advertise_address+':'+consul_ports.serf_lan) }}" + serf_wan: "{{ consul_advertise_addresses_serf_wan | default(consul_advertise_address_wan+':'+consul_ports.serf_wan) }}" + rpc: "{{ consul_advertise_addresses_rpc | default(consul_bind_address+':'+consul_ports.server) }}" + ``` + +### `consul_client_address` + +- Client address +- 
Default value: 127.0.0.1 + +### `consul_addresses` + +- Advanced address settings +- Individual addresses kan be overwritten using the `consul_addresses_*` variables +- Default value: + ```yaml + consul_addresses: + dns: "{{ consul_addresses_dns | default(consul_client_address, true) }}" + http: "{{ consul_addresses_http | default(consul_client_address, true) }}" + https: "{{ consul_addresses_https | default(consul_client_address, true) }}" + rpc: "{{ consul_addresses_rpc | default(consul_client_address, true) }}" + grpc: "{{ consul_addresses_grpc | default(consul_client_address, true) }}" + ``` + +### `consul_ports` + +- The official documentation on the [Ports Used](https://www.consul.io/docs/agent/options.html#ports) +- The ports mapping is a nested dict object that allows setting the bind ports for the following keys: + - dns - The DNS server, -1 to disable. Default 8600. + - http - The HTTP API, -1 to disable. Default 8500. + - https - The HTTPS API, -1 to disable. Default -1 (disabled). + - rpc - The CLI RPC endpoint. Default 8400. This is deprecated in Consul 0.8 and later. + - grpc - The gRPC endpoint, -1 to disable. Default -1 (disabled). + - serf_lan - The Serf LAN port. Default 8301. + - serf_wan - The Serf WAN port. Default 8302. + - server - Server RPC address. Default 8300. + +For example, to enable the consul HTTPS API it is possible to set the variable as follows: + +- Default values: + +```yaml +consul_ports: + dns: "{{ consul_ports_dns | default('8600', true) }}" + http: "{{ consul_ports_http | default('8500', true) }}" + https: "{{ consul_ports_https | default('-1', true) }}" + rpc: "{{ consul_ports_rpc | default('8400', true) }}" + serf_lan: "{{ consul_ports_serf_lan | default('8301', true) }}" + serf_wan: "{{ consul_ports_serf_wan | default('8302', true) }}" + server: "{{ consul_ports_server | default('8300', true) }}" + grpc: "{{ consul_ports_grpc | default('-1', true) }}" +``` + +Notice that the dict object has to use precisely the names stated in the documentation! And all ports must be specified. Overwriting one or multiple ports can be done using the `consul_ports_*` variables. + +### `consul_node_name` + +- Define a custom node name (should not include dots) + See [node_name](https://www.consul.io/docs/agent/options.html#node_name) + - The default value on Consul is the hostname of the server. 
+- Default value: ''
+
+### `consul_recursors`
+
+- List of upstream DNS servers.
+  See [recursors](https://www.consul.io/docs/agent/options.html#recursors)
+  - Override with `CONSUL_RECURSORS` environment variable
+- Default value: Empty list
+
+### `consul_iptables_enable`
+
+- Whether to enable iptables rules for DNS forwarding to Consul
+  - Override with `CONSUL_IPTABLES_ENABLE` environment variable
+- Default value: false
+
+### `consul_acl_policy`
+
+- Add basic ACL config file
+  - Override with `CONSUL_ACL_POLICY` environment variable
+- Default value: false
+
+### `consul_acl_enable`
+
+- Enable ACLs
+  - Override with `CONSUL_ACL_ENABLE` environment variable
+- Default value: false
+
+### `consul_acl_ttl`
+
+- TTL for ACLs
+  - Override with `CONSUL_ACL_TTL` environment variable
+- Default value: 30s
+
+### `consul_acl_token_persistence`
+
+- Define if tokens set using the API will be persisted to disk or not
+  - Override with `CONSUL_ACL_TOKEN_PERSISTENCE` environment variable
+- Default value: true
+
+### `consul_acl_datacenter`
+
+- ACL authoritative datacenter name
+  - Override with `CONSUL_ACL_DATACENTER` environment variable
+- Default value: `{{ consul_datacenter }}` (`dc1`)
+
+### `consul_acl_down_policy`
+
+- Default ACL down policy
+  - Override with `CONSUL_ACL_DOWN_POLICY` environment variable
+- Default value: extend-cache
+
+### `consul_acl_token`
+
+- Default ACL token, only set if provided
+  - Override with `CONSUL_ACL_TOKEN` environment variable
+- Default value: ''
+
+### `consul_acl_agent_token`
+
+- Used for clients and servers to perform internal operations on the service catalog. See: [acl_agent_token](https://www.consul.io/docs/agent/options.html#acl_agent_token)
+  - Override with `CONSUL_ACL_AGENT_TOKEN` environment variable
+- Default value: ''
+
+### `consul_acl_agent_master_token`
+
+- A [special access token](https://www.consul.io/docs/agent/options.html#acl_agent_master_token) that has agent ACL policy write privileges on each agent where it is configured
+  - Override with `CONSUL_ACL_AGENT_MASTER_TOKEN` environment variable
+- Default value: ''
+
+### `consul_acl_default_policy`
+
+- Default ACL policy
+  - Override with `CONSUL_ACL_DEFAULT_POLICY` environment variable
+- Default value: allow
+
+### `consul_acl_master_token`
+
+- ACL master token
+  - Override with `CONSUL_ACL_MASTER_TOKEN` environment variable
+- Default value: '' (a random UUID is generated at runtime if unset)
+
+### `consul_acl_master_token_display`
+
+- Display the generated ACL Master Token
+  - Override with `CONSUL_ACL_MASTER_TOKEN_DISPLAY` environment variable
+- Default value: false
+
+### `consul_acl_replication_enable`
+
+- Enable ACL replication without a token (makes it possible to set the token
+  through the API)
+  - Override with `CONSUL_ACL_REPLICATION_ENABLE` environment variable
+- Default value: ''
+
+### `consul_acl_replication_token`
+
+- ACL replication token
+  - Override with `CONSUL_ACL_REPLICATION_TOKEN` environment variable
+- Default value: ''
+
+### `consul_tls_enable`
+
+- Enable TLS
+  - Override with `CONSUL_TLS_ENABLE` environment variable
+- Default value: false
+
+### `consul_tls_copy_keys`
+
+- Enables or disables the management of the TLS files
+  - Disable it if you enable TLS (`consul_tls_enable`) but want to manage the
+    TLS files on your own
+- Default value: true
+
+### `consul_tls_dir`
+
+- Target directory for TLS files
+  - Override with `CONSUL_TLS_DIR` environment variable
+- Default value: `/etc/consul/ssl`
+
+### `consul_tls_ca_crt`
+
+- CA certificate filename
+  - Override with `CONSUL_TLS_CA_CRT` environment variable
+- Default value: `ca.crt`
+
+### `consul_tls_server_crt`
+
+- Server certificate
+  - Override with `CONSUL_TLS_SERVER_CRT` environment variable
+- Default value: `server.crt`
+
+### `consul_tls_server_key`
+
+- Server key
+  - Override with `CONSUL_TLS_SERVER_KEY` environment variable
+- Default value: `server.key`
+
+### `consul_tls_files_remote_src`
+
+- Copy from remote source if TLS files are already on the host
+- Default value: false
+
+### `consul_encrypt_enable`
+
+- Enable Gossip Encryption
+- Default value: true
+
+### `consul_encrypt_verify_incoming`
+
+- Verify incoming Gossip connections
+- Default value: true
+
+### `consul_encrypt_verify_outgoing`
+
+- Verify outgoing Gossip connections
+- Default value: true
+
+### `consul_disable_keyring_file`
+
+- If set, the keyring will not be persisted to a file. Any installed keys will be lost on shutdown, and only the given -encrypt key will be available on startup.
+- Default value: false
+
+### `consul_raw_key`
+
+- Set the encryption key; should be the same across a cluster. If not present, the key will be generated and retrieved from the bootstrapped server.
+- Default value: ''
+
+### `consul_tls_verify_incoming`
+
+- Verify incoming connections
+  - Override with `CONSUL_TLS_VERIFY_INCOMING` environment variable
+- Default value: false
+
+### `consul_tls_verify_outgoing`
+
+- Verify outgoing connections
+  - Override with `CONSUL_TLS_VERIFY_OUTGOING` environment variable
+- Default value: true
+
+### `consul_tls_verify_incoming_rpc`
+
+- Verify incoming connections on RPC endpoints (client certificates)
+  - Override with `CONSUL_TLS_VERIFY_INCOMING_RPC` environment variable
+- Default value: false
+
+### `consul_tls_verify_incoming_https`
+
+- Verify incoming connections on HTTPS endpoints (client certificates)
+  - Override with `CONSUL_TLS_VERIFY_INCOMING_HTTPS` environment variable
+- Default value: false
+
+### `consul_tls_verify_server_hostname`
+
+- Verify server hostname
+  - Override with `CONSUL_TLS_VERIFY_SERVER_HOSTNAME` environment variable
+- Default value: false
+
+### `consul_tls_min_version`
+
+- [Minimum acceptable TLS version](https://www.consul.io/docs/agent/options.html#tls_min_version)
+  - Override with `CONSUL_TLS_MIN_VERSION` environment variable
+- Default value: TLSv1_2
+
+### `consul_tls_cipher_suites`
+
+- [Comma-separated list of supported ciphersuites](https://www.consul.io/docs/agent/options.html#tls_cipher_suites)
+- Default value: ""
+
+### `consul_tls_prefer_server_cipher_suites`
+
+- [Prefer the server's cipher suite over the client cipher suite](https://www.consul.io/docs/agent/options.html#tls_prefer_server_cipher_suites)
+  - Override with `CONSUL_TLS_PREFER_SERVER_CIPHER_SUITES` environment variable
+- Default value: false
+
+### `auto_encrypt`
+
+- [Auto encrypt](https://www.consul.io/docs/agent/options#auto_encrypt)
+- Default value:
+
+```yaml
+auto_encrypt:
+  enabled: false
+```
+
+- Example:
+
+```yaml
+auto_encrypt:
+  enabled: true
+  dns_san: ["consul.com"]
+  ip_san: ["127.0.0.1"]
+```
+
+### `consul_force_install`
+
+- If true, always install consul. Otherwise, consul is only installed if it is
+  not present on the host, or if the installed version differs from `consul_version`.
+- The role does not handle the orchestration of a rolling update of servers followed by client nodes
+- Default value: false
+
+### `consul_install_remotely`
+
+- Whether to download the files for installation directly on the remote hosts
+- This is the only option on Windows as WinRM is somewhat limited in this scope
+- Default value: false
+
+### `consul_install_from_repo`
+
+- Boolean, whether to install consul from a repository as opposed to installing the binary directly.
+- Supported distros: Amazon Linux, CentOS, Debian, Fedora, Ubuntu, Red Hat, Rocky.
+- Default value: false
+
+### `consul_ui`
+
+- Enable the consul ui?
+- Default value: true
+
+### `consul_ui_legacy`
+
+- Enable legacy consul ui mode
+- Default value: false
+
+### `consul_disable_update_check`
+
+- Disable the consul update check?
+- Default value: false
+
+### `consul_enable_script_checks`
+
+- Enable script based checks?
+- Default value: false
+- This is discouraged in favor of `consul_enable_local_script_checks`.
+
+### `consul_enable_local_script_checks`
+
+- Enable locally defined script checks?
+- Default value: false
+
+### `consul_raft_protocol`
+
+- Raft protocol to use.
+- Default value:
+  - Consul versions <= 0.7.0: 1
+  - Consul versions > 0.7.0: 3
+
+### `consul_node_role`
+
+- The Consul role of the node, one of: _bootstrap_, _server_, or _client_
+- Default value: client
+
+One server should be designated as the bootstrap server, and the other
+servers will connect to this server. You can also specify _client_ as the
+role, and Consul will be configured as a client agent instead of a server.
+
+There are two methods to set up a cluster: the first is to explicitly choose the bootstrap server, the other is to let the servers elect a leader among
+themselves.
+
+Here is an example of how the hosts inventory could be defined for a simple
+cluster of 3 servers, the first one being the designated bootstrap / leader:
+
+```ini
+[consul_instances]
+consul1.consul consul_node_role=bootstrap
+consul2.consul consul_node_role=server
+consul3.consul consul_node_role=server
+consul4.local consul_node_role=client
+```
+
+Or you can use the simpler method of letting the servers run their own election process:
+
+```ini
+[consul_instances]
+consul1.consul consul_node_role=server consul_bootstrap_expect=true
+consul2.consul consul_node_role=server consul_bootstrap_expect=true
+consul3.consul consul_node_role=server consul_bootstrap_expect=true
+consul4.local consul_node_role=client
+```
+
+> Note that this second form is the preferred one, because it is simpler.
+
+### `consul_autopilot_enable`
+
+Autopilot is a set of new features added in Consul 0.8 to allow for automatic operator-friendly management of Consul servers. It includes cleanup of dead servers, monitoring the state of the Raft cluster, and stable server introduction.
+
+https://www.consul.io/docs/guides/autopilot.html
+
+- Enable Autopilot config (will be written to the bootstrap node)
+  - Override with `CONSUL_AUTOPILOT_ENABLE` environment variable
+- Default value: false
+
+#### `consul_autopilot_cleanup_dead_servers`
+
+Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections. This cleanup will also happen whenever a new server is successfully added to the cluster.
+
+- Enable dead server cleanup (will be written to the bootstrap node)
+  - Override with `CONSUL_AUTOPILOT_CLEANUP_DEAD_SERVERS` environment variable
+- Default value: false
+
+#### `consul_autopilot_last_contact_threshold`
+
+Used in the serf health check to determine node health.
+
+- Sets the threshold for time since last contact
+  - Override with `CONSUL_AUTOPILOT_LAST_CONTACT_THRESHOLD` environment variable
+- Default value: 200ms
+
+#### `consul_autopilot_max_trailing_logs`
+
+- Used in the serf health check to set the maximum number of log entries nodes can trail the leader by
+  - Override with `CONSUL_AUTOPILOT_MAX_TRAILING_LOGS` environment variable
+- Default value: 250
+
+#### `consul_autopilot_server_stabilization_time`
+
+- Time to allow a new node to stabilize
+  - Override with `CONSUL_AUTOPILOT_SERVER_STABILIZATION_TIME` environment variable
+- Default value: 10s
+
+#### `consul_autopilot_redundancy_zone_tag`
+
+_Consul Enterprise Only (requires that CONSUL_ENTERPRISE is set to true)_
+
+- Override with `CONSUL_AUTOPILOT_REDUNDANCY_ZONE_TAG` environment variable
+- Default value: az
+
+#### `consul_autopilot_disable_upgrade_migration`
+
+_Consul Enterprise Only (requires that CONSUL_ENTERPRISE is set to true)_
+
+- Override with `CONSUL_AUTOPILOT_DISABLE_UPGRADE_MIGRATION` environment variable
+- Default value: false
+
+#### `consul_autopilot_upgrade_version_tag`
+
+_Consul Enterprise Only (requires that CONSUL_ENTERPRISE is set to true)_
+
+- Override with `CONSUL_AUTOPILOT_UPGRADE_VERSION_TAG` environment variable
+- Default value: ''
+
+### `consul_debug`
+
+- Enables the generation of additional config files in the Consul config
+  directory for debugging purposes
+- Default value: false
+
+### `consul_config_template_path`
+
+- If the default config template does not suit your needs, you can replace it with your own.
+- Default value: `templates/config.json.j2`.
+
+#### Custom Configuration Section
+
+As Consul loads the configuration from files and directories in lexical order, typically merging on top of previously parsed configuration files, you may set custom configurations via `consul_config_custom`. It will be expanded into a file named `config_z_custom.json` within your `consul_config_path`, which is loaded after all other configuration by default.
+
+An example usage for enabling `telemetry`:
+
+```yaml
+vars:
+  consul_config_custom:
+    telemetry:
+      dogstatsd_addr: "localhost:8125"
+      dogstatsd_tags:
+        - "security"
+        - "compliance"
+      disable_hostname: true
+```
+
+## Consul Snapshot Agent
+
+_The Consul snapshot agent takes backup snapshots on a set interval and stores them. Requires Consul Enterprise._
+
+### `consul_snapshot`
+
+- Bool, true will set up and start the snapshot agent (enterprise only)
+- Default value: false
+
+### `consul_snapshot_storage`
+
+- Location where snapshots will be stored. NOTE: the path must end in `snaps`
+- Default value: `{{ consul_config_path }}/snaps`
+
+### `consul_snapshot_interval`
+
+- Interval between snapshots
+- Default value: 1h
+
+### `consul_snapshot_retain`
+
+- Number of snapshots to retain
+- Default value: 30
+
+## OS and Distribution Variables
+
+The `consul` binary works on most Linux platforms and is not distribution
+specific. However, some distributions require installation of specific OS
+packages with different package names.
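+As a minimal sketch, the per-distribution variables documented below can be overridden from `group_vars` (the mirror URL here is a made-up placeholder, not a value shipped with the role):
+
+```yaml
+# Hypothetical group_vars override: fetch the Consul zip for Debian hosts
+# from an internal mirror instead of the default consul_zip_url.
+consul_debian_url: "https://mirror.example.com/consul/consul_%7B%7B%20consul_version%20%7D%7D_linux_amd64.zip"
+```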
+
+### `consul_centos_pkg`
+
+- Consul package filename
+- Default value: `{{ consul_version }}_linux_amd64.zip`
+
+### `consul_centos_url`
+
+- Consul package download URL
+- Default value: `{{ consul_zip_url }}`
+
+### `consul_centos_sha256`
+
+- Consul download SHA256 summary
+- Default value: SHA256 summary
+
+### `consul_centos_os_packages`
+
+- List of OS packages to install
+- Default value: list
+
+### `consul_debian_pkg`
+
+- Consul package filename
+- Default value: `{{ consul_version }}_linux_amd64.zip`
+
+### `consul_debian_url`
+
+- Consul package download URL
+- Default value: `{{ consul_zip_url }}`
+
+### `consul_debian_sha256`
+
+- Consul download SHA256 summary
+- Default value: SHA256 summary
+
+### `consul_debian_os_packages`
+
+- List of OS packages to install
+- Default value: list
+
+### `consul_redhat_pkg`
+
+- Consul package filename
+- Default value: `{{ consul_version }}_linux_amd64.zip`
+
+### `consul_redhat_url`
+
+- Consul package download URL
+- Default value: `{{ consul_zip_url }}`
+
+### `consul_redhat_sha256`
+
+- Consul download SHA256 summary
+- Default value: SHA256 summary
+
+### `consul_redhat_os_packages`
+
+- List of OS packages to install
+- Default value: list
+
+### `consul_systemd_restart_sec`
+
+- Integer value for the systemd unit `RestartSec` option
+- Default value: 42
+
+### `consul_systemd_limit_nofile`
+
+- Integer value for the systemd unit `LimitNOFILE` option
+- Default value: 65536
+
+### `consul_systemd_restart`
+
+- String value for the systemd unit `Restart` option
+- Default value: `on-failure`
+
+### `consul_ubuntu_pkg`
+
+- Consul package filename
+- Default value: `{{ consul_version }}_linux_amd64.zip`
+
+### `consul_ubuntu_url`
+
+- Consul package download URL
+- Default value: `{{ consul_zip_url }}`
+
+### `consul_ubuntu_sha256`
+
+- Consul download SHA256 summary
+- Default value: SHA256 summary
+
+### `consul_ubuntu_os_packages`
+
+- List of OS packages to install
+- Default value: list
+
+### `consul_windows_pkg`
+
+- Consul package filename
+- Default value: `{{ consul_version }}_windows_amd64.zip`
+
+### `consul_windows_url`
+
+- Consul package download URL
+- Default value: `{{ consul_zip_url }}`
+
+### `consul_windows_sha256`
+
+- Consul download SHA256 summary
+- Default value: SHA256 summary
+
+### `consul_windows_os_packages`
+
+- List of OS packages to install
+- Default value: list
+
+### `consul_performance`
+
+- Dict of Consul performance tuning items
+- Default value: dict (see the keys below)
+
+#### `raft_multiplier`
+
+- [Raft multiplier](https://www.consul.io/docs/agent/options.html#raft_multiplier) scales key Raft timing parameters
+- Default value: 1
+
+#### `leave_drain_time`
+
+- [Node leave drain time](https://www.consul.io/docs/agent/options.html#leave_drain_time) is the dwell time for a server to honor requests while gracefully leaving
+- Default value: 5s
+
+#### `rpc_hold_timeout`
+
+- [RPC hold timeout](https://www.consul.io/docs/agent/options.html#rpc_hold_timeout) is the duration that a client or server will retry internal RPC requests during leader elections
+- Default value: 7s
+
+#### `leave_on_terminate`
+
+- [leave_on_terminate](https://www.consul.io/docs/agent/options.html#leave_on_terminate): if enabled, when the agent receives a TERM signal it will send a Leave message to the rest of the cluster and gracefully leave. The default behavior varies based on whether the agent is running as a client or a server: on agents in client mode this defaults to true, and on agents in server mode this defaults to false.
+
+### `consul_limits`
+
+- Consul node limits (key-value)
+- Supported in Consul version 0.9.3 or later
+- Default value: {}
+- Example:
+
+```yaml
+consul_limits:
+  http_max_conns_per_client: 250
+  rpc_max_conns_per_client: 150
+```
+
+## Dependencies
+
+Ansible requires GNU tar, and this role performs some local use of the unarchive module for efficiency, so ensure that your system has `gtar` and `unzip` installed and in the PATH. If you don't, this role will install `unzip` on the remote machines to unarchive the ZIP files.
+
+If you're on a system with a different (e.g. BSD) `tar`, such as macOS, and you see odd errors during unarchive tasks, you could be missing `gtar`.
+
+Installing Ansible on Windows requires the PowerShell Community Extensions. These are already installed on Windows Server 2012 R2 and onward. If you're attempting this role on Windows Server 2008 or earlier, you'll want to install the extensions [here](https://pscx.codeplex.com/).
+
+## Example Playbook
+
+Basic installation is possible using the included `site.yml` playbook:
+
+```
+ansible-playbook -i hosts site.yml
+```
+
+You can also pass variables in using the `--extra-vars` option to the
+`ansible-playbook` command:
+
+```
+ansible-playbook -i hosts site.yml --extra-vars "consul_datacenter=maui"
+```
+
+Be aware that for clustering, the included `site.yml` does the following:
+
+1. Executes the consul role (installs Consul and bootstraps the cluster)
+2. Reconfigures the bootstrap node to run without the bootstrap-expect setting
+3. Restarts the bootstrap node
+
+### ACL Support
+
+Basic support for ACLs is included in the role. You can set the environment variable `CONSUL_ACL_ENABLE` to true, and also set the `CONSUL_ACL_DATACENTER` environment variable to its correct value for your environment, prior to executing your playbook; for example:
+
+```
+CONSUL_ACL_ENABLE=true CONSUL_ACL_DATACENTER=maui \
+CONSUL_ACL_MASTER_TOKEN_DISPLAY=true ansible-playbook -i uat_hosts aloha.yml
+```
+
+If you want the automatically generated ACL Master Token value emitted to standard out during the play, set the environment variable `CONSUL_ACL_MASTER_TOKEN_DISPLAY` to true as in the above example.
+
+If you want to use existing tokens, set the environment variables `CONSUL_ACL_MASTER_TOKEN` and `CONSUL_ACL_REPLICATION_TOKEN` as well, for example:
+
+```
+CONSUL_ACL_ENABLE=true CONSUL_ACL_DATACENTER=stjohn \
+CONSUL_ACL_MASTER_TOKEN=0815C55B-3AD2-4C1B-BE9B-715CAAE3A4B2 \
+CONSUL_ACL_REPLICATION_TOKEN=C609E56E-DD0B-4B99-A0AD-B079252354A0 \
+CONSUL_ACL_MASTER_TOKEN_DISPLAY=true ansible-playbook -i uat_hosts sail.yml
+```
+
+There are a number of Ansible ACL variables you can override to further refine your initial ACL setup. They are not all currently picked up from environment variables, but do have some sensible defaults.
+
+Check `defaults/main.yml` to see how some of the defaults (e.g. tokens) are automatically generated.
+
+### Dnsmasq DNS Forwarding Support
+
+The role now includes support for [DNS forwarding](https://www.consul.io/docs/guides/forwarding.html) with [Dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html).
+
+Enable like this:
+
+```
+ansible-playbook -i hosts site.yml --extra-vars "consul_dnsmasq_enable=true"
+```
+
+Then you can query any of the agents directly via DNS on port 53,
+for example:
+
+```
+dig @consul1.consul consul3.node.consul
+
+; <<>> DiG 9.8.3-P1 <<>> @consul1.consul consul3.node.consul
+; (1 server found)
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 29196
+;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;consul3.node.consul. IN A
+
+;; ANSWER SECTION:
+consul3.node.consul. 0 IN A 10.1.42.230
+
+;; Query time: 42 msec
+;; SERVER: 10.1.42.210#53(10.1.42.210)
+;; WHEN: Sun Aug 7 18:06:32 2016
+;;
+```
+
+### `consul_delegate_datacenter_dns`
+
+- Whether to delegate the Consul datacenter DNS domain to Consul
+- Default value: false
+
+### `consul_dnsmasq_enable`
+
+- Whether to install and configure DNS API forwarding on port 53 using Dnsmasq
+  - Override with `CONSUL_DNSMASQ_ENABLE` environment variable
+- Default value: false
+
+### `consul_dnsmasq_bind_interfaces`
+
+- Setting this option to _true_ prevents dnsmasq from binding to 0.0.0.0 by default, and instead instructs it to bind to the specific network interfaces that correspond to the `consul_dnsmasq_listen_addresses` option
+- Default value: false
+
+### `consul_dnsmasq_consul_address`
+
+- Address used by dnsmasq to query consul
+- Default value: `consul_addresses.dns`
+- Defaults to 127.0.0.1 if consul's DNS is bound to all interfaces (e.g. `0.0.0.0`)
+
+### `consul_dnsmasq_cache`
+
+- dnsmasq cache-size
+- If smaller than 0, the default dnsmasq setting will be used.
+- Default value: _-1_
+
+### `consul_dnsmasq_servers`
+
+- Upstream DNS servers used by dnsmasq
+- Default value: _8.8.8.8_ and _8.8.4.4_
+
+### `consul_dnsmasq_revservers`
+
+- Reverse lookup subnets
+- Default value: _[]_
+
+### `consul_dnsmasq_no_poll`
+
+- Do not poll /etc/resolv.conf
+- Default value: false
+
+### `consul_dnsmasq_no_resolv`
+
+- Ignore the /etc/resolv.conf file
+- Default value: false
+
+### `consul_dnsmasq_local_service`
+
+- Only allow requests from local subnets
+- Default value: false
+
+### `consul_dnsmasq_listen_addresses`
+
+- Custom list of addresses to listen on.
+- Default value: _[]_
+
+### `consul_connect_enabled`
+
+- Enable the Consul Connect feature
+- Default value: false
+
+### `consul_cleanup_ignore_files`
+
+- List of files to ignore during cleanup steps
+- Default value: _[{{ consul_configd_path }}/consul.env]_
+
+### iptables DNS Forwarding Support
+
+This role can also use iptables instead of Dnsmasq for forwarding DNS queries to Consul. You can enable it like this:
+
+```
+ansible-playbook -i hosts site.yml --extra-vars "consul_iptables_enable=true"
+```
+
+> Note that iptables forwarding and Dnsmasq forwarding cannot be used
+> simultaneously, and the execution of the role will stop with an error if such
+> a configuration is specified.
+
+### TLS Support
+
+You can enable TLS encryption by dropping a CA certificate, server certificate, and server key into the role's `files` directory.
+
+By default these are named:
+
+- `ca.crt` (can be overridden with the `consul_tls_ca_crt` variable)
+- `server.crt` (can be overridden with the `consul_tls_server_crt` variable)
+- `server.key` (can be overridden with the `consul_tls_server_key` variable)
+
+Then either set the environment variable `CONSUL_TLS_ENABLE=true` or use the Ansible variable `consul_tls_enable=true` at role runtime.
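+For example, following the same pattern as the other role toggles shown above:
+
+```
+ansible-playbook -i hosts site.yml --extra-vars "consul_tls_enable=true"
+```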
+
+### Service management Support
+
+You can create a configuration file for [consul services](https://www.consul.io/docs/agent/services.html).
+Add a list of services to the `consul_services` variable.
+
+| name            | Required | Type | Default | Comment                             |
+| --------------- | -------- | ---- | ------- | ----------------------------------- |
+| consul_services | False    | List | `[]`    | List of service objects (see below) |
+
+Service objects:
+
+| name                | Required | Type   | Default | Comment                                                                                                     |
+| ------------------- | -------- | ------ | ------- | ----------------------------------------------------------------------------------------------------------- |
+| name                | True     | string |         | Name of the service                                                                                          |
+| id                  | False    | string |         | ID of the service                                                                                            |
+| tags                | False    | list   |         | List of string tags                                                                                          |
+| address             | False    | string |         | Service-specific IP address                                                                                  |
+| meta                | False    | dict   |         | Dict of up to 64 key/value pairs with string semantics                                                       |
+| port                | False    | int    |         | Port of the service                                                                                          |
+| enable_tag_override | False    | bool   |         | Enable/disable the anti-entropy feature for the service                                                      |
+| kind                | False    | string |         | Identify the service as a Connect proxy instance                                                             |
+| proxy               | False    | dict   |         | [Proxy configuration](https://www.consul.io/docs/connect/proxies.html#complete-configuration-example)        |
+| checks              | False    | list   |         | List of [checks configuration](https://www.consul.io/docs/agent/checks.html)                                 |
+| connect             | False    | dict   |         | [Connect object configuration](https://www.consul.io/docs/connect/index.html)                                |
+| weights             | False    | dict   |         | [Weight of a service in DNS SRV responses](https://www.consul.io/docs/agent/services.html#dns-srv-weights)   |
+| token               | False    | string |         | ACL token to use to register this service                                                                    |
+
+Configuration example:
+
+```yaml
+consul_services:
+  - name: "openshift"
+    tags: ["production"]
+  - name: "redis"
+    id: "redis"
+    tags: ["primary"]
+    address: ""
+    meta:
+      meta: "for my service"
+    proxy:
+      destination_service_name: "redis"
+      destination_service_id: "redis1"
+      local_service_address: "127.0.0.1"
+      local_service_port: 9090
+      config: {}
+      upstreams: []
+    checks:
+      - args: ["/home/consul/check.sh"]
+        interval: "10s"
+```
+
+Then you can check that the service has been added to the catalog:
+
+```
+> consul catalog services
+consul
+openshift
+redis
+```
+
+> **Note:** to delete a service that has been added from this role, remove it from the `consul_services` list and apply the role again.
+
+### Vagrant and VirtualBox
+
+See [examples/README_VAGRANT.md](https://github.com/ansible-community/ansible-consul/blob/master/examples/README_VAGRANT.md) for details on quick Vagrant deployments under VirtualBox for development, evaluation, testing, etc.
+
+## License
+
+BSD
+
+## Author Information
+
+[Brian Shumate](http://brianshumate.com)
+
+## Contributors
+
+Special thanks to the folks listed in [CONTRIBUTORS.md](https://github.com/ansible-community/ansible-consul/blob/master/CONTRIBUTORS.md) for their contributions to this project.
+
+Contributions are welcome, provided that you can agree to the terms outlined in [CONTRIBUTING.md](https://github.com/ansible-community/ansible-consul/blob/master/CONTRIBUTING.md).
diff --git a/automation/roles/consul/defaults/main.yml b/automation/roles/consul/defaults/main.yml new file mode 100644 index 000000000..0448e010a --- /dev/null +++ b/automation/roles/consul/defaults/main.yml @@ -0,0 +1,270 @@ +--- +# yamllint disable rule:braces +# File: main.yml - Default variables for Consul + +## Core +consul_debug: false +is_virtualenv: "{{ lookup('env', 'VIRTUAL_ENV') | default('', true) }}" +consul_install_dependencies: true + +### Package +consul_version: "{{ lookup('env', 'CONSUL_VERSION') | default('1.8.7', true) }}" +consul_architecture_map: + # this first entry seems redundant + # (but it's required for reasons) + amd64: amd64 + x86_64: amd64 + # todo: arm32 / armelv5 + armv6l: armhfv6 + armv7l: armhfv6 + aarch64: arm64 + # Used by Apple Silicon Macs + arm64: arm64 + 32-bit: "386" + 64-bit: amd64 +consul_architecture: "{{ consul_architecture_map[ansible_architecture] }}" +consul_os: "\ + {% if ansible_os_family == 'Windows' %}\ + {{ 'windows' }}\ + {% else %}\ + {{ ansible_system | lower }}\ + {% endif %}" +consul_pkg: "consul{% if consul_enterprise %}-enterprise{% else %}{% endif %}_{{ consul_version }}_{{ consul_os }}_{{ consul_architecture }}.zip" +consul_zip_url: "https://releases.hashicorp.com/consul/%7B%7B%20consul_version%20%7D%7D/consul_%7B%7B%20consul_version%20%7D%7D_%7B%7B%20consul_os%20%7D%7D_%7B%7B%20consul_architecture%20%7D%7D.zip" +consul_checksum_file_url: "https://releases.hashicorp.com/consul/%7B%7B%20consul_version%20%7D%7D/consul_%7B%7B%20consul_version%20%7D%7D_SHA256SUMS" + +### Install Method +consul_force_install: false +consul_install_remotely: false +consul_install_from_repo: false + +### Paths +consul_bin_path: "/usr/local/bin" +consul_config_path: "/etc/consul" +consul_config_template_path: "templates/config.json.j2" +consul_configd_path: "/etc/consul.d" +consul_bootstrap_state: "{{ consul_config_path }}/.consul_bootstrapped" +consul_data_path: "/opt/consul" +consul_log_path: "{{ lookup('env', 'CONSUL_LOG_PATH') | default('/var/log/consul', true) }}" +consul_log_file: "{{ lookup('env', 'CONSUL_LOG_FILE') | default('consul.log', true) }}" +consul_run_path: "/run/consul" +consul_binary: "{{ consul_bin_path }}/consul" + +### System user and group +consul_manage_user: true +consul_user: "consul" +consul_manage_group: true +consul_group: "consul" +consul_systemd_restart: "on-failure" +consul_systemd_restart_sec: 42 +consul_systemd_limit_nofile: 65536 +consul_systemd_unit_path: "/lib/systemd/system" + +### Log user, group, facility +syslog_user: "{{ lookup('env', 'SYSLOG_USER') | default('root', true) }}" +syslog_group: "{{ lookup('env', 'SYSLOG_GROUP') | default('adm', true) }}" +consul_log_level: "{{ lookup('env', 'CONSUL_LOG_LEVEL') | default('INFO', true) }}" +consul_log_rotate_bytes: "{{ lookup('env', 'CONSUL_LOG_ROTATE_BYTES') | default(0, true) }}" +consul_log_rotate_duration: "{{ lookup('env', 'CONSUL_LOG_ROTATE_DURATION') | default('24h', true) }}" +consul_log_rotate_max_files: "{{ lookup('env', 'CONSUL_LOG_ROTATE_MAX_FILES') | default(0, true) }}" +consul_syslog_enable: "{{ lookup('env', 'CONSUL_SYSLOG_ENABLE') | default(false, true) }}" +consul_syslog_facility: "{{ lookup('env', 'CONSUL_SYSLOG_FACILITY') | default('local0', true) }}" +consul_configure_syslogd: "{{ lookup('env', 'CONSUL_CONFIGURE_SYSLOGD') | default(false, true) }}" + +### Consul settings +consul_datacenter: "{{ lookup('env', 'CONSUL_DATACENTER') | default('dc1', true) }}" +consul_domain: "{{ lookup('env', 'CONSUL_DOMAIN') | default('consul', 
true) }}" +consul_alt_domain: "{{ lookup('env', 'CONSUL_ALT_DOMAIN') | default('', true) }}" +consul_node_meta: {} +consul_iface: "\ + {% if ansible_os_family == 'Windows' %}\ + {{ lookup('env', 'CONSUL_IFACE') | default(ansible_interfaces[0].interface_name, true) }}\ + {% else %}\ + {{ lookup('env', 'CONSUL_IFACE') | default(ansible_default_ipv4.interface, true) }}\ + {% endif %}" +consul_node_role: "{{ lookup('env', 'CONSUL_NODE_ROLE') | default('client', true) }}" +consul_recursors: "{{ lookup('env', 'CONSUL_RECURSORS') | default('[]', true) }}" +consul_bootstrap_expect: "{{ lookup('env', 'CONSUL_BOOTSTRAP_EXPECT') | default(false, true) }}" +consul_bootstrap_expect_value: "{{ _consul_lan_servercount | int }}" +consul_ui: "{{ lookup('env', 'CONSUL_UI') | default(true, true) }}" +consul_ui_legacy: "{{ lookup('env', 'CONSUL_UI_LEGACY') | default(false, false) }}" +consul_disable_update_check: false +consul_enable_script_checks: false +consul_enable_local_script_checks: false +consul_raft_protocol: "\ + {% if consul_version is version_compare('0.7.0', '<=') %}\ + 1\ + {% else %}\ + 3\ + {% endif %}" +consul_retry_join_skip_hosts: false +consul_retry_interval: "30s" +consul_retry_interval_wan: "30s" +consul_retry_max: 0 +consul_retry_max_wan: 0 +consul_env_vars: + - "CONSUL_UI_BETA=false" + +### Autopilot +consul_autopilot_enable: "{{ lookup('env', 'CONSUL_AUTOPILOT_ENABLE') | default(false, true) }}" +consul_autopilot_cleanup_dead_servers: "{{ lookup('env', 'CONSUL_AUTOPILOT_CLEANUP_DEAD_SERVERS') | default(false, true) }}" +consul_autopilot_last_contact_threshold: "{{ lookup('env', 'CONSUL_AUTOPILOT_LAST_CONTACT_THRESHOLD') | default('200ms', true) }}" +consul_autopilot_max_trailing_logs: "{{ lookup('env', 'CONSUL_AUTOPILOT_MAX_TRAILING_LOGS') | default(250, true) }}" +consul_autopilot_server_stabilization_time: "{{ lookup('env', 'CONSUL_AUTOPILOT_SERVER_STABILIZATION_TIME') | default('10s', true) }}" +consul_autopilot_redundancy_zone_tag: "{{ lookup('env', 'CONSUL_AUTOPILOT_REDUNDANCY_ZONE_TAG') | default('az', true) }}" +consul_autopilot_disable_upgrade_migration: "{{ lookup('env', 'CONSUL_AUTOPILOT_DISABLE_UPGRADE_MIGRATION') | default(false, true) }}" +consul_autopilot_upgrade_version_tag: "{{ lookup('env', 'CONSUL_AUTOPILOT_UPGRADE_VERSION_TAG') | default('', true) }}" + +### Cloud auto discovery settings +consul_cloud_autodiscovery: false +consul_cloud_autodiscovery_provider: "" +consul_cloud_autodiscovery_params: "" +consul_cloud_autodiscovery_string: "provider={{ consul_cloud_autodiscovery_provider }} {{ consul_cloud_autodiscovery_params }}" + +### Addresses +consul_bind_address: "\ + {% if ansible_system == 'FreeBSD' or ansible_system == 'Darwin' %}\ + {{ lookup('env', 'CONSUL_BIND_ADDRESS') | default(hostvars[inventory_hostname]['ansible_' + consul_iface]['ipv4'][0]['address'], true) }}\ + {% elif ansible_os_family == 'Windows' %}\ + {{ lookup('env', 'CONSUL_BIND_ADDRESS') | default(hostvars[inventory_hostname]['ansible_ip_addresses'][0], true) }}\ + {% else %}\ + {{ lookup('env', 'CONSUL_BIND_ADDRESS') | default(hostvars[inventory_hostname]['ansible_' + consul_iface | replace('-', '_')]['ipv4']['address'], true) }}\ + {% endif %}" +consul_advertise_address: "{{ consul_bind_address }}" +consul_advertise_address_wan: "{{ consul_bind_address }}" +consul_translate_wan_address: false +consul_advertise_addresses: + serf_lan: "{{ consul_advertise_addresses_serf_lan | default(consul_advertise_address + ':' + consul_ports.serf_lan) }}" + serf_wan: "{{ consul_advertise_addresses_serf_wan 
| default(consul_advertise_address_wan + ':' + consul_ports.serf_wan) }}" + rpc: "{{ consul_advertise_addresses_rpc | default(consul_bind_address + ':' + consul_ports.server) }}" +consul_client_address: "127.0.0.1" +consul_addresses: + dns: "{{ consul_addresses_dns | default(consul_client_address, true) }}" + http: "{{ consul_addresses_http | default(consul_client_address, true) }}" + https: "{{ consul_addresses_https | default(consul_client_address, true) }}" + rpc: "{{ consul_addresses_rpc | default(consul_client_address, true) }}" + grpc: "{{ consul_addresses_grpc | default(consul_client_address, true) }}" + grpc_tls: "{{ consul_addresses_grpc_tls | default(consul_client_address, true) }}" + +### Ports +consul_ports: + dns: "{{ consul_ports_dns | default('8600', true) }}" + http: "{{ consul_ports_http | default('8500' if not consul_tls_enable | default(false) | bool else '-1') }}" + https: "{{ consul_ports_https | default('8500' if consul_tls_enable | default(false) | bool else '-1') }}" + rpc: "{{ consul_ports_rpc | default('8400', true) }}" + serf_lan: "{{ consul_ports_serf_lan | default('8301', true) }}" + serf_wan: "{{ consul_ports_serf_wan | default('8302', true) }}" + server: "{{ consul_ports_server | default('8300', true) }}" + grpc: "{{ consul_ports_grpc | default('-1', true) }}" + grpc_tls: "{{ consul_ports_grpc_tls | default('-1', true) }}" + +### Servers +consul_group_name: "{{ lookup('env', 'CONSUL_GROUP_NAME') | default('consul_instances', true) }}" +consul_join: [] +consul_join_wan: [] +consul_servers: "\ + {% set _consul_servers = [] %}\ + {% for host in groups[consul_group_name] %}\ + {% set _consul_node_role = hostvars[host]['consul_node_role'] | default('client', true) %}\ + {% if (_consul_node_role == 'server' or _consul_node_role == 'bootstrap') %}\ + {% if _consul_servers.append(host) %}{% endif %}\ + {% endif %}\ + {% endfor %}\ + {{ _consul_servers }}" +consul_gather_server_facts: false + +## ACL +consul_acl_policy: "{{ lookup('env', 'CONSUL_ACL_POLICY') | default(false, true) }}" + +### Shared ACL config ### +consul_acl_enable: "{{ lookup('env', 'CONSUL_ACL_ENABLE') | default(false, true) }}" +consul_acl_ttl: "{{ lookup('env', 'CONSUL_ACL_TTL') | default('30s', true) }}" +consul_acl_token_persistence: "{{ lookup('env', 'CONSUL_ACL_TOKEN_PERSISTENCE') | default(true, true) }}" +consul_acl_datacenter: "{{ lookup('env', 'CONSUL_ACL_DATACENTER') | default(consul_datacenter, true) }}" +consul_acl_down_policy: "{{ lookup('env', 'CONSUL_ACL_DOWN_POLICY') | default('extend-cache', true) }}" +consul_acl_token: "{{ lookup('env', 'CONSUL_ACL_TOKEN') | default('', true) }}" +consul_acl_agent_token: "{{ lookup('env', 'CONSUL_ACL_AGENT_TOKEN') | default('', true) }}" +consul_acl_agent_master_token: "{{ lookup('env', 'CONSUL_ACL_AGENT_MASTER_TOKEN') | default('', true) }}" + +### Server ACL settings ### +consul_acl_default_policy: "{{ lookup('env', 'CONSUL_ACL_DEFAULT_POLICY') | default('allow', true) }}" +consul_acl_master_token: "{{ lookup('env', 'CONSUL_ACL_MASTER_TOKEN') | default('', true) }}" +consul_acl_master_token_display: "{{ lookup('env', 'CONSUL_ACL_MASTER_TOKEN_DISPLAY') | default(false, true) }}" +consul_acl_replication_enable: "{{ lookup('env', 'CONSUL_ACL_REPLICATION_ENABLE') | default('', true) }}" +consul_acl_replication_token: "{{ lookup('env', 'CONSUL_ACL_REPLICATION_TOKEN') | default('', true) }}" + +## gossip encryption +consul_encrypt_enable: "{{ lookup('env', 'CONSUL_ENCRYPT_ENABLE') | default(true, true) }}" +consul_encrypt_verify_incoming: true 
+consul_encrypt_verify_outgoing: true +consul_disable_keyring_file: "{{ lookup('env', 'CONSUL_DISABLE_KEYRING_FILE') | default(false, true) }}" + +## TLS +consul_tls_enable: "{{ lookup('env', 'CONSUL_TLS_ENABLE') | default(false, true) }}" +consul_tls_dir: "{{ lookup('env', 'CONSUL_TLS_DIR') | default('/etc/consul/ssl', true) }}" +consul_tls_ca_crt: "{{ lookup('env', 'CONSUL_TLS_CA_CRT') | default('ca.crt', true) }}" +consul_tls_server_crt: "{{ lookup('env', 'CONSUL_SERVER_CRT') | default('server.crt', true) }}" +consul_tls_server_key: "{{ lookup('env', 'CONSUL_SERVER_KEY') | default('server.key', true) }}" +consul_tls_copy_keys: true +consul_tls_verify_incoming: "{{ lookup('env', 'CONSUL_TLS_VERIFY_INCOMING') | default(false, true) }}" +consul_tls_verify_outgoing: "{{ lookup('env', 'CONSUL_TLS_VERIFY_OUTGOING') | default(true, true) }}" +consul_tls_verify_incoming_rpc: "{{ lookup('env', 'CONSUL_TLS_VERIFY_INCOMING_RPC') | default(false, true) }}" +consul_tls_verify_incoming_https: "{{ lookup('env', 'CONSUL_TLS_VERIFY_INCOMING_HTTPS') | default(false, true) }}" +consul_tls_verify_server_hostname: "{{ lookup('env', 'CONSUL_TLS_VERIFY_SERVER_HOSTNAME') | default(false, true) }}" +consul_tls_files_remote_src: false +consul_tls_min_version: "{{ lookup('env', 'CONSUL_TLS_MIN_VERSION') | default('TLSv1_2', true) }}" +consul_tls_cipher_suites: "" +consul_tls_prefer_server_cipher_suites: "{{ lookup('env', 'CONSUL_TLS_PREFER_SERVER_CIPHER_SUITES') | default('false', true) }}" +auto_encrypt: + enabled: false + +## DNS +consul_delegate_datacenter_dns: "{{ lookup('env', 'CONSUL_DELEGATE_DATACENTER_DNS') | default(false, true) }}" +consul_dnsmasq_enable: "{{ lookup('env', 'CONSUL_DNSMASQ_ENABLE') | default(false, true) }}" +consul_dnsmasq_bind_interfaces: false +consul_dnsmasq_consul_address: "\ + {# Use localhost if DNS is listening on all interfaces #}\ + {% if consul_addresses.dns == '0.0.0.0' %}\ + 127.0.0.1\ + {% else %}\ + {{ consul_addresses.dns }}\ + {% endif %}" +consul_dnsmasq_cache: -1 +consul_dnsmasq_servers: + - 8.8.8.8 + - 8.8.4.4 +consul_dnsmasq_revservers: [] +consul_dnsmasq_no_poll: false +consul_dnsmasq_no_resolv: false +consul_dnsmasq_local_service: false +consul_dnsmasq_listen_addresses: [] +consul_iptables_enable: "{{ lookup('env', 'CONSUL_IPTABLES_ENABLE') | default(false, true) }}" + +# Consul Enterprise +consul_enterprise: "{{ lookup('env', 'CONSUL_ENTERPRISE') | default(false, true) }}" + +# Performance +consul_performance: + raft_multiplier: 1 + leave_drain_time: 5s + rpc_hold_timeout: 7s + +# Snapshot +consul_snapshot: false +consul_snapshot_storage: "{{ consul_config_path }}/snaps" +consul_snapshot_interval: 1h +consul_snapshot_retain: 30 +consul_snapshot_stale: false + +# services +consul_services: [] + +# enable Consul Connect +consul_connect_enabled: false + +# system limits +consul_limits: {} + +# files clean up +consul_cleanup_ignore_files: + - "{{ consul_configd_path }}/consul.env" diff --git a/automation/roles/consul/files/README.md b/automation/roles/consul/files/README.md new file mode 100644 index 000000000..2943511c3 --- /dev/null +++ b/automation/roles/consul/files/README.md @@ -0,0 +1,4 @@ +# files + +This directory is used for holding temporary files and should be present +in the role even when empty. 
diff --git a/automation/roles/consul/handlers/main.yml b/automation/roles/consul/handlers/main.yml
new file mode 100644
index 000000000..f69cfcb0b
--- /dev/null
+++ b/automation/roles/consul/handlers/main.yml
@@ -0,0 +1,32 @@
+---
+# File: main.yml - Handlers for Consul
+
+- name: restart consul
+  ansible.builtin.import_tasks: restart_consul.yml
+
+- name: start consul
+  ansible.builtin.import_tasks: start_consul.yml
+
+- name: reload consul configuration
+  ansible.builtin.import_tasks: reload_consul_conf.yml
+
+- name: restart dnsmasq
+  ansible.builtin.service:
+    name: dnsmasq
+    enabled: true
+    state: restarted
+  become: true
+
+- name: restart rsyslog
+  ansible.builtin.import_tasks: restart_rsyslog.yml
+
+- name: restart syslog-ng
+  ansible.builtin.import_tasks: restart_syslogng.yml
+
+- name: start snapshot
+  ansible.builtin.import_tasks: start_snapshot.yml
+
+- name: systemctl daemon-reload
+  ansible.builtin.systemd:
+    daemon_reload: true
+  become: true
diff --git a/automation/roles/consul/handlers/reload_consul_conf.yml b/automation/roles/consul/handlers/reload_consul_conf.yml
new file mode 100644
index 000000000..f9d0dff57
--- /dev/null
+++ b/automation/roles/consul/handlers/reload_consul_conf.yml
@@ -0,0 +1,8 @@
+---
+# Use SIGHUP to reload most configurations as per https://www.consul.io/docs/agent/options.html
+# Cannot use `consul reload` because it requires the HTTP API to be bound to a non-loopback interface
+
+- name: reload consul configuration on unix
+  ansible.builtin.command: "pkill --pidfile '{{ consul_run_path }}/consul.pid' --signal SIGHUP"
+  when: ansible_os_family != "Windows"
+  listen: "reload consul configuration"
diff --git a/automation/roles/consul/handlers/restart_consul.yml b/automation/roles/consul/handlers/restart_consul.yml
new file mode 100644
index 000000000..3e80a3fd2
--- /dev/null
+++ b/automation/roles/consul/handlers/restart_consul.yml
@@ -0,0 +1,36 @@
+---
+- name: Daemon reload systemd in case the binaries were upgraded
+  ansible.builtin.systemd:
+    daemon_reload: true
+  become: true
+  when: ansible_service_mgr == "systemd"
+  listen: "reload systemd daemon"
+
+- name: restart consul on unix
+  ansible.builtin.service:
+    name: consul
+    state: restarted
+  when:
+    - ansible_os_family != "Darwin"
+    - ansible_os_family != "Windows"
+  listen: "restart consul"
+
+- name: restart consul on Mac
+  ansible.builtin.include_tasks: "{{ role_path }}/handlers/restart_consul_mac.yml"
+  when: ansible_os_family == "Darwin"
+  listen: "restart consul"
+
+- name: restart consul on windows
+  ansible.windows.win_service:
+    name: consul
+    state: restarted
+  # Some tasks with `become: true` end up calling this task. Unfortunately, the `become`
+  # property is evaluated before the `when` condition and this results in an Ansible
+  # error.
+ become: false + when: ansible_os_family == "Windows" + register: windows_service_started + retries: 3 + delay: 1 + until: windows_service_started is succeeded + listen: "restart consul" diff --git a/automation/roles/consul/handlers/restart_consul_mac.yml b/automation/roles/consul/handlers/restart_consul_mac.yml new file mode 100644 index 000000000..d330eac46 --- /dev/null +++ b/automation/roles/consul/handlers/restart_consul_mac.yml @@ -0,0 +1,4 @@ +--- +- ansible.builtin.import_tasks: "stop_consul_mac.yml" + +- ansible.builtin.import_tasks: "start_consul_mac.yml" diff --git a/automation/roles/consul/handlers/restart_rsyslog.yml b/automation/roles/consul/handlers/restart_rsyslog.yml new file mode 100644 index 000000000..8efec4612 --- /dev/null +++ b/automation/roles/consul/handlers/restart_rsyslog.yml @@ -0,0 +1,9 @@ +--- +- name: restart rsyslog + ansible.builtin.service: + name: rsyslog + state: restarted + when: + - ansible_os_family != "Darwin" + - ansible_os_family != "Windows" + listen: "restart rsyslog" diff --git a/automation/roles/consul/handlers/restart_syslogng.yml b/automation/roles/consul/handlers/restart_syslogng.yml new file mode 100644 index 000000000..6444625ec --- /dev/null +++ b/automation/roles/consul/handlers/restart_syslogng.yml @@ -0,0 +1,6 @@ +--- +- name: restart syslog-ng + ansible.builtin.service: + name: syslog-ng + state: restarted + listen: "restart syslog-ng" diff --git a/automation/roles/consul/handlers/start_consul.yml b/automation/roles/consul/handlers/start_consul.yml new file mode 100644 index 000000000..761c58769 --- /dev/null +++ b/automation/roles/consul/handlers/start_consul.yml @@ -0,0 +1,21 @@ +--- +- name: start consul on unix + ansible.builtin.service: + name: consul + state: started + when: + - ansible_os_family != "Darwin" + - ansible_os_family != "Windows" + listen: "start consul" + +- name: start consul on Mac + ansible.builtin.include_tasks: "{{ role_path }}/handlers/start_consul_mac.yml" + when: ansible_os_family == "Darwin" + listen: "start consul" + +- name: start consul on windows + ansible.windows.win_service: + name: consul + state: started + when: ansible_os_family == "Windows" + listen: "start consul" diff --git a/automation/roles/consul/handlers/start_consul_mac.yml b/automation/roles/consul/handlers/start_consul_mac.yml new file mode 100644 index 000000000..66b896ce3 --- /dev/null +++ b/automation/roles/consul/handlers/start_consul_mac.yml @@ -0,0 +1,31 @@ +--- +- name: See if consul service exists + become: true + become_user: "{{ consul_user }}" + ansible.builtin.command: "launchctl list {{ consul_launchctl_ident }}" + changed_when: false + ignore_errors: true + register: consul_service_list + +- name: Get UID for consul user + ansible.builtin.command: "id -u {{ consul_user }}" + changed_when: false + register: uid + when: consul_service_list is failed + +- name: Load consul service + become: true + become_user: "{{ consul_user }}" + ansible.builtin.command: "launchctl bootstrap gui/{{ uid.stdout }} {{ consul_launchctl_plist }}" + when: consul_service_list is failed + +- name: Assert that consul service is running + # Normally we'd want to use `launchctl list` for this, but it has a nasty habit of + # randomly not returning the PID in its output, which makes it hard for us to see if the + # service is running. 
+  ansible.builtin.command: "pgrep consul"
+  changed_when: false
+  register: consul_mac_service_pgrep
+  retries: 20
+  delay: 3
+  until: consul_mac_service_pgrep is succeeded
diff --git a/automation/roles/consul/handlers/start_snapshot.yml b/automation/roles/consul/handlers/start_snapshot.yml
new file mode 100644
index 000000000..d8308ef05
--- /dev/null
+++ b/automation/roles/consul/handlers/start_snapshot.yml
@@ -0,0 +1,10 @@
+---
+- name: start consul snapshot on unix
+  ansible.builtin.service:
+    name: consul_snapshot
+    state: started
+    enabled: true
+  when:
+    - ansible_os_family != "Darwin"
+    - ansible_os_family != "Windows"
+  listen: "start snapshot"
diff --git a/automation/roles/consul/handlers/stop_consul_mac.yml b/automation/roles/consul/handlers/stop_consul_mac.yml
new file mode 100644
index 000000000..45a808627
--- /dev/null
+++ b/automation/roles/consul/handlers/stop_consul_mac.yml
@@ -0,0 +1,29 @@
+---
+- name: See if consul service exists
+  become: true
+  become_user: "{{ consul_user }}"
+  ansible.builtin.command: "launchctl list {{ consul_launchctl_ident }}"
+  changed_when: false
+  ignore_errors: true
+  register: consul_service_list
+
+- name: Get UID for consul user
+  ansible.builtin.command: "id -u {{ consul_user }}"
+  changed_when: false
+  register: uid
+  when: consul_service_list is succeeded
+
+- name: Unload consul service
+  become: true
+  become_user: "{{ consul_user }}"
+  ansible.builtin.command: "launchctl bootout gui/{{ uid.stdout }} {{ consul_launchctl_plist }}"
+  changed_when: false
+  register: unload_consul_service
+  # Code 113 is returned by launchctl for the error "Could not find service in domain for
+  # port". This can also mean that the service hasn't yet been configured, so we don't
+  # want to treat that as an error.
+  failed_when: unload_consul_service.rc != 0 and unload_consul_service.rc != 113
+  retries: 3
+  delay: 3
+  until: unload_consul_service is not failed
+  when: consul_service_list is succeeded
diff --git a/automation/roles/consul/requirements.txt b/automation/roles/consul/requirements.txt
new file mode 100644
index 000000000..800d79405
--- /dev/null
+++ b/automation/roles/consul/requirements.txt
@@ -0,0 +1,7 @@
+rich>=10.0.0,<11.0.0
+molecule===2.22
+docker
+netaddr
+testinfra
+flake8
+yamllint
diff --git a/automation/roles/consul/tasks/acl.yml b/automation/roles/consul/tasks/acl.yml
new file mode 100644
index 000000000..d7bd89509
--- /dev/null
+++ b/automation/roles/consul/tasks/acl.yml
@@ -0,0 +1,102 @@
+---
+# File: acl.yml - ACL tasks for Consul
+- block:
+    - name: Read ACL master token from previously bootstrapped server
+      ansible.builtin.command: "cat {{ consul_config_path }}/config.json"
+      register: config_read
+      no_log: true
+      changed_when: false
+      run_once: true
+
+    - name: Save acl_master_token from existing configuration
+      ansible.builtin.set_fact:
+        consul_acl_master_token: "{{ config_read.stdout | from_json | json_query(query_acl) }}"
+      vars:
+        query_acl: "acl.tokens.master"
+      no_log: true
+
+    - name: Save acl_initial_management from existing configuration if acl_master_token not found
+      ansible.builtin.set_fact:
+        consul_acl_master_token: "{{ config_read.stdout | from_json | json_query(query_acl) }}"
+      vars:
+        query_acl: "acl.tokens.initial_management"
+      no_log: true
+      when: consul_acl_master_token | length == 0
+
+  when:
+    - bootstrap_state.stat.exists | bool
+    - (consul_acl_master_token is not defined or consul_acl_master_token | length == 0)
+    - consul_node_role == 'server'
+
+- block:
+    - name: Generate ACL master token
+      ansible.builtin.command: "echo {{ lookup('password', '/dev/null length=32 chars=ascii_letters') | to_uuid }}"
+      register: consul_acl_master_token_keygen
+      run_once: true
+      no_log: true
+
+    - name: Save ACL master token
+      ansible.builtin.set_fact:
+        consul_acl_master_token: "{{ consul_acl_master_token_keygen.stdout }}"
+      no_log: true
+
+  when:
+    - (consul_acl_master_token is not defined or consul_acl_master_token | length == 0)
+    - not bootstrap_state.stat.exists | bool
+    - consul_node_role == 'server'
+
+- name: Display ACL Master Token
+  ansible.builtin.debug:
+    msg: "{{ consul_acl_master_token }}"
+  run_once: true
+  when:
+    - consul_acl_master_token_display | bool
+    - consul_node_role == 'server'
+
+- block:
+    - name: Read ACL master token from previously bootstrapped server
+      ansible.builtin.command: "cat {{ consul_config_path }}/config.json"
+      register: config_read
+      no_log: true
+      changed_when: false
+      run_once: true
+
+    - name: Save acl_replication_token from existing configuration
+      ansible.builtin.set_fact:
+        consul_acl_replication_token: "{{ config_read.stdout | from_json | json_query(query_acl) }}"
+      vars:
+        query_acl: "acl.tokens.replication"
+      no_log: true
+
+  when:
+    - bootstrap_state.stat.exists | bool
+    - (consul_acl_replication_token is not defined or consul_acl_replication_token | length == 0)
+    - consul_node_role == 'server'
+
+- block:
+    - name: Generate ACL replication token
+      ansible.builtin.command: "echo {{ lookup('password', '/dev/null length=32 chars=ascii_letters') | to_uuid }}"
+      register: consul_acl_replication_token_keygen
+      no_log: true
+      run_once: true
+
+    - name: Save ACL replication token
+      ansible.builtin.set_fact:
+        consul_acl_replication_token: "{{ consul_acl_replication_token_keygen.stdout }}"
+      no_log: true
+
+  when:
+    - (consul_acl_replication_token is not defined or consul_acl_replication_token | length == 0)
+    - not bootstrap_state.stat.exists | bool
+    - consul_node_role == 'server'
+
+- name: Create ACL policy configuration
+  ansible.builtin.template:
+    src: configd_50acl_policy.hcl.j2
+    dest: "{{ consul_configd_path }}/50acl_policy.hcl"
+    owner: "{{ consul_user }}"
+    group: "{{ consul_group }}"
+    mode: "0600"
+  notify:
+    - restart consul
+  when: consul_acl_policy | bool
diff --git a/automation/roles/consul/tasks/asserts.yml b/automation/roles/consul/tasks/asserts.yml
new file mode 100644
index 000000000..e2520fd2d
--- /dev/null
+++ b/automation/roles/consul/tasks/asserts.yml
@@ -0,0 +1,131 @@
+---
+# File: asserts.yml - Asserts for this playbook
+
+- name: Define supported *nix distributions
+  ansible.builtin.set_fact:
+    _consul_nix_distros:
+      - "RedHat"
+      - "CentOS"
+      - "Rocky"
+      - "AlmaLinux"
+      - "OracleLinux"
+      - "Fedora"
+      - "Debian"
+      - "FreeBSD"
+      - "SmartOS"
+      - "Ubuntu"
+      - "Archlinux"
+      - "Alpine"
+      - "Amazon"
+      - "Flatcar"
+      - "VMware Photon OS"
+      - "MacOSX"
+
+- name: Check distribution compatibility
+  ansible.builtin.fail:
+    msg: "{{ ansible_distribution }} is not currently supported by this role."
+  when:
+    - ansible_distribution not in _consul_nix_distros
+    - ansible_os_family != 'Windows'
+
+- name: Check Photon version
+  ansible.builtin.fail:
+    msg: "{{ ansible_distribution_version }} is not a supported version."
+  when:
+    - ansible_distribution in ['VMware Photon OS']
+    - ansible_distribution_version is version_compare(4, '<')
+
+- name: Check CentOS, Red Hat or Oracle Linux version
+  ansible.builtin.fail:
+    msg: "{{ ansible_distribution_version }} is not a supported version."
+ when: + - ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux', 'Rocky', 'AlmaLinux'] + - ansible_distribution_version is version_compare(6, '<') + +- name: Check Debian version + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} is not a supported version." + when: + - ansible_distribution == "Debian" + - (ansible_distribution_version != 'buster/sid') and (ansible_distribution_version is version_compare(8, '<')) + +- name: Check FreeBSD version + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} is not a supported version." + when: + - ansible_distribution == "FreeBSD" + - ansible_distribution_version is version_compare(10, '<') + +- name: Check Ubuntu version + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} is not a supported version." + when: + - ansible_distribution == "Ubuntu" + - ansible_distribution_version is version_compare(13.04, '<') + +- name: Check specified ethernet interface + ansible.builtin.fail: + msg: "The ethernet interface specified by consul_iface was not found." + when: + - ansible_os_family != 'Windows' + - consul_iface not in ansible_interfaces + +- name: Check iptables on Red Hat, CentOS or Oracle Linux + ansible.builtin.fail: + msg: "Use DNSmasq instead of iptables on {{ ansible_distribution }}." + when: + - consul_iptables_enable | bool + - ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux', 'Rocky', 'AlmaLinux'] + - ansible_distribution_version is version_compare(6, '>=') + +- name: Check for both Dnsmasq and iptables enabled + ansible.builtin.fail: + msg: "EONEORTHEOTHER: DNSmasq and iptables together is not supported." + when: + - consul_dnsmasq_enable | bool + - consul_iptables_enable | bool + +- name: Check for iptables enabled but no recursors + ansible.builtin.fail: + msg: "Recursors are required if iptables is enabled." + when: + - consul_iptables_enable | bool + - consul_recursors | length == 0 + +- name: Check consul_group_name is included in groups + ansible.builtin.fail: + msg: "consul_group_name must be included in groups." + when: consul_group_name not in groups + +- name: Fail if more than one bootstrap server is defined + ansible.builtin.fail: + msg: "You can not define more than one bootstrap server." + when: + - _consul_bootstrap_servers | length > 1 + +- name: Fail if a bootstrap server is defined and bootstrap_expect is true + ansible.builtin.fail: + msg: "Can't use a bootstrap server and bootstrap_expect at the same time." 
+ when: + - _consul_bootstrap_servers | length > 0 + - consul_bootstrap_expect | bool + +# Check for unzip binary + +- name: Check if unzip is installed on control host + ansible.builtin.shell: "command -v unzip -h >/dev/null 2>&1" + become: false + changed_when: false + check_mode: false + run_once: true + register: is_unzip_installed + ignore_errors: true + delegate_to: 127.0.0.1 + vars: + ansible_become: false + +- name: Install remotely if unzip is not installed on control host + ansible.builtin.set_fact: + consul_install_remotely: true + when: + - is_unzip_installed is failed diff --git a/automation/roles/consul/tasks/config.yml b/automation/roles/consul/tasks/config.yml new file mode 100644 index 000000000..28d2d6dae --- /dev/null +++ b/automation/roles/consul/tasks/config.yml @@ -0,0 +1,45 @@ +--- +# File: config.yml - Consul configuration tasks + +- name: Create configuration + ansible.builtin.copy: + dest: "{{ config_item.dest }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + content: "{{ lookup('template', consul_config_template_path, convert_data=True) | to_nice_json }}" + mode: "0600" + with_items: + - dest: "{{ consul_config_path }}/config.json" + config_version: "{{ consul_node_role }}" + when: true + - dest: "{{ consul_config_path }}/bootstrap.json" + config_version: bootstrap + when: "{{ consul_debug | bool }}" + - dest: "{{ consul_config_path }}/server.json" + config_version: server + when: "{{ consul_debug | bool }}" + - dest: "{{ consul_config_path }}/client.json" + config_version: client + when: "{{ consul_debug | bool }}" + loop_control: + loop_var: config_item + when: + - config_item.when + notify: + - restart consul + +- name: Create custom configuration + ansible.builtin.copy: + dest: "{{ consul_configd_path }}/50custom.json" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + content: "{{ lookup('template', 'templates/configd_50custom.json.j2', convert_data=True) | to_nice_json }}" + mode: "0600" + when: + - consul_config_custom is defined + notify: + - restart consul + +- name: Set fact list with custom configuration file + ansible.builtin.set_fact: + managed_files: "{{ managed_files | default([]) }} + [ '{{ consul_configd_path }}/50custom.json' ]" diff --git a/automation/roles/consul/tasks/config_windows.yml b/automation/roles/consul/tasks/config_windows.yml new file mode 100644 index 000000000..9cfe6abfe --- /dev/null +++ b/automation/roles/consul/tasks/config_windows.yml @@ -0,0 +1,45 @@ +--- +# File: config_windows.yml - Consul configuration tasks for Windows + +- name: Create configuration + ansible.windows.win_copy: + dest: "{{ config_item.dest }}" + content: "{{ lookup('template', consul_config_template_path, convert_data=True) | to_nice_json }}" + with_items: + - dest: "{{ consul_config_path }}/config.json" + config_version: "{{ consul_node_role }}" + when: true + - dest: "{{ consul_config_path }}/bootstrap.json" + config_version: "bootstrap" + when: "{{ consul_debug | bool }}" + - dest: "{{ consul_config_path }}/server.json" + config_version: "server" + when: "{{ consul_debug | bool }}" + - dest: "{{ consul_config_path }}/client.json" + config_version: "client" + when: "{{ consul_debug | bool }}" + loop_control: + loop_var: config_item + when: + - config_item.when + notify: + - restart consul + +- name: Create custom configuration + ansible.windows.win_copy: + dest: "{{ consul_configd_path }}/50custom.json" + content: "{{ lookup('template', 'templates/configd_50custom.json.j2', convert_data=True) | to_nice_json }}" + when: + - 
consul_config_custom is defined + notify: + - restart consul + +- name: Get Windows path for custom configuration file + ansible.windows.win_stat: + path: "{{ consul_configd_path }}/50custom.json" + register: custom_config_win_path + +- name: Set fact list with custom configuration file + ansible.builtin.set_fact: + managed_files: "{{ managed_files | default([]) }} + [ '{{ custom_config_win_path.results[0].stat.path }}' ]" + when: custom_config_win_path.stat.exists diff --git a/automation/roles/consul/tasks/dirs.yml b/automation/roles/consul/tasks/dirs.yml new file mode 100644 index 000000000..703af64d1 --- /dev/null +++ b/automation/roles/consul/tasks/dirs.yml @@ -0,0 +1,84 @@ +--- +# File: dirs.yml - Consul directories + +- name: Create directories + when: ansible_os_family != 'Windows' + block: + - name: Configuration and data directories + ansible.builtin.file: + dest: "{{ dir_item }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0700" + with_items: + - "{{ consul_config_path }}" + - "{{ consul_configd_path }}" + - "{{ consul_data_path }}" + loop_control: + loop_var: dir_item + - name: Run directory + ansible.builtin.file: + dest: "{{ consul_run_path }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0750" + when: not consul_install_from_repo | bool + +- name: Create log directory + ansible.builtin.file: + dest: "{{ consul_log_path }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0700" + when: + - ansible_os_family != 'Windows' + - not consul_syslog_enable | bool + - not consul_configure_syslogd | bool + +- name: Create log directory + ansible.builtin.file: + dest: "{{ log_item }}" + state: directory + owner: "{{ syslog_user }}" + group: "{{ syslog_group }}" + mode: "0700" + with_items: + - "{{ consul_log_path }}" + loop_control: + loop_var: log_item + when: + - ansible_os_family != 'Windows' + - consul_syslog_enable | bool + - consul_configure_syslogd | bool + +- name: Verify binary path + ansible.builtin.file: + path: "{{ consul_bin_path }}" + state: directory + owner: root + mode: "0755" + when: + # On macOS, we should not alter consul_bin_path, since it may be owned by the homebrew + # user. This may cause the role to fail on macOS systems where homebrew is not + # present, but in that case, the user should create the directory from their playbook + # before running this role. 
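+    # A playbook can satisfy that with a pre_task before this role runs; a
+    # minimal sketch (nothing below is part of this role):
+    #
+    #   - name: Pre-create Consul binary directory on macOS
+    #     ansible.builtin.file:
+    #       path: "{{ consul_bin_path }}"
+    #       state: directory
+    #       mode: "0755"
+    #     when: ansible_os_family == 'Darwin'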
+    - ansible_os_family != 'Darwin'
+    - ansible_os_family != 'Windows'
+    - not consul_install_from_repo | bool
+
+- name: Create directories on Windows
+  ansible.windows.win_file:
+    dest: "{{ dir_item }}"
+    state: directory
+  with_items:
+    - "{{ consul_config_path }}"
+    - "{{ consul_configd_path }}"
+    - "{{ consul_data_path }}"
+    - "{{ consul_log_path }}"
+    - "{{ consul_bin_path }}"
+  loop_control:
+    loop_var: dir_item
+  when: ansible_os_family == 'Windows'
diff --git a/automation/roles/consul/tasks/dnsmasq.yml b/automation/roles/consul/tasks/dnsmasq.yml
new file mode 100644
index 000000000..a13790a13
--- /dev/null
+++ b/automation/roles/consul/tasks/dnsmasq.yml
@@ -0,0 +1,97 @@
+---
+# File: dnsmasq.yml - Dnsmasq tasks for Consul
+
+- name: Install Dnsmasq package
+  ansible.builtin.package:
+    name: "{{ dnsmasq_package }}"
+    state: present
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  become: true
+  tags: dnsmasq, installation
+
+- name: Create Dnsmasq configuration directory
+  ansible.builtin.file:
+    path: /usr/local/etc/dnsmasq.d
+    state: directory
+    owner: root
+    group: wheel
+    mode: "0700"
+  become: true
+  when: ansible_os_family == "FreeBSD"
+  tags: dnsmasq
+
+- name: Include Dnsmasq configuration directory
+  ansible.builtin.lineinfile:
+    dest: /usr/local/etc/dnsmasq.conf
+    line: "conf-dir=/usr/local/etc/dnsmasq.d/,*.conf"
+  become: true
+  notify: restart dnsmasq
+  when: ansible_os_family == "FreeBSD"
+  tags: dnsmasq
+
+- name: Create Dnsmasq configuration
+  ansible.builtin.template:
+    src: dnsmasq-10-consul.j2
+    dest: "{{ dnsmasq_item.dest }}"
+    owner: root
+    group: "{{ dnsmasq_item.group }}"
+    mode: "0644"
+  become: true
+  notify: restart dnsmasq
+  when: "{{ dnsmasq_item.when }}" # noqa no-jinja-when
+  loop:
+    - { dest: "/etc/dnsmasq.d/10-consul", group: "root", when: ansible_os_family|lower != "freebsd" }
+    - { dest: "/usr/local/etc/dnsmasq.d/consul.conf", group: "wheel", when: ansible_os_family|lower == "freebsd" }
+  loop_control:
+    loop_var: dnsmasq_item
+  tags: dnsmasq
+
+- name: Disable systemd-resolved
+  when: ansible_service_mgr == "systemd"
+  block:
+    - name: Check if systemd-resolved service exists
+      ansible.builtin.stat:
+        path: /lib/systemd/system/systemd-resolved.service
+      register: systemd_resolved_service
+
+    - name: Disable systemd-resolved service
+      ansible.builtin.service:
+        name: systemd-resolved
+        enabled: false
+        state: stopped
+      become: true
+      when: systemd_resolved_service.stat.exists
+
+    - name: Check if resolv.conf is pointing to systemd-resolved
+      ansible.builtin.stat:
+        path: /etc/resolv.conf
+      register: resolv_dot_conf
+
+    - block:
+        - name: Remove resolv.conf association with systemd-resolved
+          ansible.builtin.file:
+            path: /etc/resolv.conf
+            state: absent
+
+        - name: Create /etc/resolv.conf
+          ansible.builtin.file:
+            path: /etc/resolv.conf
+            state: touch
+            owner: root
+            group: root
+            mode: u=rw,g=r,o=r
+
+        - name: Add a nameserver entry pointing to localhost for dnsmasq
+          ansible.builtin.lineinfile:
+            path: /etc/resolv.conf
+            regexp: "^nameserver 127.0.0.1"
+            line: "nameserver 127.0.0.1"
+            unsafe_writes: true # to prevent failures in CI
+      become: true
+      when:
+        - resolv_dot_conf.stat.islnk is defined
+        - resolv_dot_conf.stat.islnk
+        - resolv_dot_conf.stat.lnk_source == "/run/systemd/resolve/stub-resolv.conf"
diff --git a/automation/roles/consul/tasks/encrypt_gossip.yml b/automation/roles/consul/tasks/encrypt_gossip.yml
new file mode 100644
index 000000000..0ac013748
--- /dev/null
+++
b/automation/roles/consul/tasks/encrypt_gossip.yml
@@ -0,0 +1,64 @@
+---
+# File: encrypt_gossip.yml - Gossip encryption tasks for Consul
+
+- block:
+    - name: Read gossip encryption key from previously bootstrapped server
+      ansible.builtin.shell: |
+        set -o pipefail
+        cat {{ consul_config_path }}/bootstrap/config.json | grep "encrypt" | sed -E 's/"encrypt": "(.+)",?/\1/' | sed 's/^ *//;s/ *$//'
+      register: consul_key_read
+      run_once: true
+
+    - name: Save gossip encryption key from existing configuration
+      ansible.builtin.set_fact:
+        consul_raw_key: "{{ consul_key_read.stdout }}"
+      ignore_errors: true
+
+  when:
+    - consul_raw_key is not defined
+    - bootstrap_state.stat.exists | bool
+
+- name: Write gossip encryption key locally for use with new servers
+  ansible.builtin.copy:
+    content: "{{ consul_raw_key }}"
+    dest: /tmp/consul_raw.key
+    mode: "0600"
+  become: false
+  vars:
+    ansible_become: false
+  when:
+    - consul_raw_key is defined
+    - bootstrap_state.stat.exists | bool
+  delegate_to: 127.0.0.1
+
+- name: Read gossip encryption key for servers that require it
+  ansible.builtin.set_fact:
+    consul_raw_key: "{{ lookup('file', '/tmp/consul_raw.key') }}"
+  when:
+    - consul_raw_key is not defined
+    - bootstrap_state.stat.exists | bool
+
+- name: Delete gossip encryption key file
+  ansible.builtin.file:
+    path: /tmp/consul_raw.key
+    state: absent
+  become: false
+  vars:
+    ansible_become: false
+  when:
+    - consul_raw_key is defined
+    - bootstrap_state.stat.exists | bool
+  delegate_to: 127.0.0.1
+
+- block:
+    - name: Generate gossip encryption key
+      ansible.builtin.shell: "PATH={{ consul_bin_path }}:$PATH consul keygen"
+      register: consul_keygen
+      run_once: true
+
+    - name: Write gossip encryption key to fact
+      ansible.builtin.set_fact:
+        consul_raw_key: "{{ consul_keygen.stdout }}"
+  when:
+    - consul_raw_key is not defined
+    - not bootstrap_state.stat.exists | bool
diff --git a/automation/roles/consul/tasks/install.yml b/automation/roles/consul/tasks/install.yml
new file mode 100644
index 000000000..df08f9f60
--- /dev/null
+++ b/automation/roles/consul/tasks/install.yml
@@ -0,0 +1,143 @@
+---
+# File: install.yml - package installation tasks for Consul
+
+- name: Install OS packages
+  ansible.builtin.package:
+    name: "{{ consul_os_packages }}"
+    state: present
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  tags: installation
+  when: not ansible_facts['os_family'] == "VMware Photon OS" and (consul_os_packages | length > 0)
+
+- name: Install OS packages
+  ansible.builtin.command: "tdnf install -y {{ package_item }}"
+  with_items: "{{ consul_os_packages }}"
+  loop_control:
+    loop_var: package_item
+  tags: installation
+  when: ansible_facts['os_family'] == "VMware Photon OS"
+
+- name: Update Alpine Package Manager (APK)
+  community.general.apk:
+    update_cache: true
+  run_once: true
+  when: ansible_os_family == "Alpine"
+  delegate_to: 127.0.0.1
+
+- name: Read package checksum file
+  ansible.builtin.stat:
+    path: "{{ role_path }}/files/consul_{{ consul_version }}_SHA256SUMS"
+  become: false
+  vars:
+    ansible_become: false
+  run_once: true
+  register: consul_checksum
+  tags: installation
+  delegate_to: 127.0.0.1
+
+- name: Download package checksum file
+  ansible.builtin.get_url:
+    url: "{{ consul_checksum_file_url }}"
+    dest: "{{ role_path }}/files/consul_{{ consul_version }}_SHA256SUMS"
+  become: false
+  vars:
+    ansible_become: false
+  run_once: true
+  tags: installation
+  when: not consul_checksum.stat.exists | bool
+  delegate_to: 127.0.0.1
+
+- name:
Read package checksum + ansible.builtin.shell: grep "{{ consul_pkg }}" "{{ role_path }}/files/consul_{{ consul_version }}_SHA256SUMS" | awk '{print $1}' + become: false + vars: + ansible_become: false + register: consul_sha256 + tags: + - installation + - skip_ansible_lint + run_once: true + delegate_to: 127.0.0.1 + +- name: Check Consul package file + ansible.builtin.stat: + path: "{{ role_path }}/files/{{ consul_pkg }}" + become: false + vars: + ansible_become: false + register: consul_package + tags: installation + run_once: true + delegate_to: 127.0.0.1 + +- name: Download Consul package + ansible.builtin.get_url: + url: "{{ consul_zip_url }}" + dest: "{{ role_path }}/files/{{ consul_pkg }}" + checksum: "sha256:{{ consul_sha256.stdout }}" + timeout: "42" + become: false + vars: + ansible_become: false + tags: installation + when: not consul_package.stat.exists | bool + run_once: true + delegate_to: 127.0.0.1 + ignore_errors: "{{ ansible_check_mode }}" + +- name: Create Temporary Directory for Extraction + ansible.builtin.tempfile: + state: directory + prefix: ansible-consul. + become: false + vars: + ansible_become: false + register: install_temp + tags: installation + run_once: true + delegate_to: 127.0.0.1 + +- name: Unarchive and install Consul + block: + - name: Unarchive Consul package + ansible.builtin.unarchive: + src: "{{ role_path }}/files/{{ consul_pkg }}" + dest: "{{ install_temp.path }}/" + creates: "{{ install_temp.path }}/consul" + become: false + vars: + ansible_become: false + tags: + - installation + - skip_ansible_lint + run_once: true + delegate_to: 127.0.0.1 + ignore_errors: "{{ ansible_check_mode }}" + + - name: Install Consul + ansible.builtin.copy: + src: "{{ install_temp.path }}/consul" + dest: "{{ consul_bin_path }}/consul" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0755" + notify: + - restart consul + - reload systemd daemon + tags: installation + ignore_errors: "{{ ansible_check_mode }}" + always: + - name: Cleanup + ansible.builtin.file: + path: "{{ install_temp.path }}" + state: "absent" + become: false + vars: + ansible_become: false + tags: installation + run_once: true + delegate_to: 127.0.0.1 + ignore_errors: "{{ ansible_check_mode }}" diff --git a/automation/roles/consul/tasks/install_linux_repo.yml b/automation/roles/consul/tasks/install_linux_repo.yml new file mode 100644 index 000000000..90eb05a1c --- /dev/null +++ b/automation/roles/consul/tasks/install_linux_repo.yml @@ -0,0 +1,143 @@ +--- +# File: install_linux_repo.yml - package installation tasks for Consul + +- name: Install OS packages + ansible.builtin.package: + name: "{{ consul_repo_prerequisites }}" + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + become: true + when: consul_os_repo_prerequisites | default(true) | bool + tags: installation + +- name: Gather the package facts + ansible.builtin.package_facts: + manager: auto + +- name: Clean up previous consul data + block: + - name: Populate service facts + ansible.builtin.service_facts: + + - name: Stop service consul, if running + ansible.builtin.service: + name: consul + state: stopped + when: ansible_facts.services | join is match('.*consul.*') + + - name: Remove consul service unit files from previous installation + ansible.builtin.file: + path: "{{ service_unit_item }}" + state: absent + loop: + - /usr/lib/systemd/system/consul.service + - /etc/init.d/consul + loop_control: + loop_var: service_unit_item + + - name: Remove the user 'consul' + 
ansible.builtin.user:
+        name: consul
+        state: absent
+        remove: true
+
+  when: "'consul' not in ansible_facts.packages"
+  become: true
+
+- name: Install repository
+  block:
+    - name: Add hashicorp repository
+      ansible.builtin.command: "yum-config-manager --add-repo {{ consul_repo_url }}"
+      args:
+        creates: /etc/yum.repos.d/hashicorp.repo
+      when: >
+        ansible_distribution|lower == 'redhat' or
+        ansible_distribution|lower == 'centos' or
+        ansible_distribution|lower == 'fedora' or
+        ansible_distribution|lower == 'amazon' or
+        ansible_distribution|lower == 'rocky' or
+        ansible_distribution|lower == 'almalinux'

+    - name: Make sure the python3-debian package is present
+      ansible.builtin.apt:
+        pkg:
+          - python3-debian
+        state: present
+      register: apt_status
+      until: apt_status is success
+      delay: 5
+      retries: 3
+      when: ansible_os_family == "Debian"
+
+    - name: Add hashicorp repository
+      ansible.builtin.deb822_repository:
+        name: "{{ consul_repo_url.split('//')[1] | replace('.', '-') }}"
+        types: "deb"
+        uris: "{{ consul_repo_url }}"
+        signed_by: "{{ consul_repo_url }}/gpg"
+        suites: "{{ ansible_distribution_release }}"
+        components: "main"
+        enabled: true
+        state: present
+      when: "ansible_os_family|lower == 'debian'"
+
+    - name: Update apt cache
+      ansible.builtin.apt:
+        update_cache: true
+      register: apt_status
+      until: apt_status is success
+      delay: 5
+      retries: 3
+      when: ansible_os_family == "Debian"
+
+  when: "ansible_os_family|lower in [ 'debian', 'redhat' ]"
+  become: true
+
+- name: Install consul package
+  ansible.builtin.package:
+    name: consul
+    state: latest
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  become: true
+
+- name: Create a directory /etc/systemd/system/consul.service.d
+  ansible.builtin.file:
+    path: /etc/systemd/system/consul.service.d
+    state: directory
+    mode: "0755"
+    owner: root
+    group: root
+  register: systemd_override
+  become: true
+  when: ansible_service_mgr == "systemd"
+
+- name: Override systemd service params
+  ansible.builtin.template:
+    src: consul_systemd_service.override.j2
+    dest: /etc/systemd/system/consul.service.d/override.conf
+    owner: root
+    group: root
+    mode: "0644"
+  register: systemd_override
+  become: true
+  notify:
+    - systemctl daemon-reload
+    - restart consul
+  when:
+    - ansible_service_mgr == "systemd"
+    - consul_install_from_repo | bool
+
+- name: Flush handlers
+  ansible.builtin.meta: flush_handlers
+
+- name: As this role works with JSON config files only - delete file /etc/consul.d/consul.hcl
+  ansible.builtin.file:
+    path: /etc/consul.d/consul.hcl
+    state: absent
+  become: true
diff --git a/automation/roles/consul/tasks/install_remote.yml b/automation/roles/consul/tasks/install_remote.yml
new file mode 100644
index 000000000..a0920a713
--- /dev/null
+++ b/automation/roles/consul/tasks/install_remote.yml
@@ -0,0 +1,73 @@
+---
+# File: install_remote.yml - package installation tasks for Consul
+
+- name: Install OS packages
+  ansible.builtin.package:
+    name: "{{ consul_os_packages }}"
+    state: present
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  tags: installation
+
+- name: Validate remote Consul directory
+  ansible.builtin.tempfile:
+    state: directory
+    prefix: ansible-consul.
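+  # Aside: get_url can also verify straight against a checksum file URL using
+  # its "<algorithm>:<url>" checksum form, which would fold the stat/grep
+  # steps below into the download itself. A sketch, not what this file does:
+  #
+  #   - ansible.builtin.get_url:
+  #       url: "{{ consul_zip_url }}"
+  #       dest: "{{ consul_temp_dir.path }}/{{ consul_pkg }}"
+  #       checksum: "sha256:{{ consul_checksum_file_url }}"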
+  register: consul_temp_dir
+
+- name: Download and unarchive Consul
+  block:
+    - name: Read Consul package checksum file
+      ansible.builtin.stat:
+        path: "{{ consul_temp_dir.path }}/consul_{{ consul_version }}_SHA256SUMS"
+      register: consul_checksum
+      changed_when: false
+      tags: installation
+
+    - name: Download Consul package checksum file
+      ansible.builtin.get_url:
+        url: "{{ consul_checksum_file_url }}"
+        dest: "{{ consul_temp_dir.path }}/consul_{{ consul_version }}_SHA256SUMS"
+        validate_certs: false
+      tags: installation
+      when: not consul_checksum.stat.exists | bool
+
+    - name: Read Consul package checksum
+      ansible.builtin.shell: "grep {{ consul_pkg }} {{ consul_temp_dir.path }}/consul_{{ consul_version }}_SHA256SUMS"
+      register: consul_sha256
+      changed_when: false
+      tags:
+        - installation
+        - skip_ansible_lint
+
+    - name: Download Consul
+      ansible.builtin.get_url:
+        url: "{{ consul_zip_url }}"
+        dest: "{{ consul_temp_dir.path }}/{{ consul_pkg }}"
+        checksum: "sha256:{{ consul_sha256.stdout.split(' ') | first }}"
+        timeout: 42
+      register: consul_download
+      tags: installation
+
+    - name: Unarchive Consul and install binary
+      ansible.builtin.unarchive:
+        remote_src: true
+        src: "{{ consul_temp_dir.path }}/{{ consul_pkg }}"
+        dest: "{{ consul_bin_path }}"
+        owner: "{{ consul_user }}"
+        group: "{{ consul_group }}"
+        mode: "0755"
+      register: consul_install
+      notify:
+        - restart consul
+        - reload systemd daemon
+      when: consul_download is changed
+      tags: installation
+  always:
+    - name: Cleanup
+      ansible.builtin.file:
+        path: "{{ consul_temp_dir.path }}"
+        state: absent
+      tags: installation
diff --git a/automation/roles/consul/tasks/install_windows.yml b/automation/roles/consul/tasks/install_windows.yml
new file mode 100644
index 000000000..6da240c14
--- /dev/null
+++ b/automation/roles/consul/tasks/install_windows.yml
@@ -0,0 +1,68 @@
+---
+# File: install_windows.yml - package installation tasks for Consul
+
+- name: Verify TLS1.2 is used
+  ansible.windows.win_regedit:
+    path: HKLM:\SOFTWARE\Microsoft\.NETFramework\v4.0.30319
+    name: SchUseStrongCrypto
+    data: 1
+    type: dword
+
+- name: Create temporary directory to download Consul
+  ansible.windows.win_tempfile:
+    state: directory
+    prefix: ansible-consul.
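+  # The SchUseStrongCrypto value set above makes .NET, and therefore the
+  # PowerShell session that win_get_url runs in, negotiate TLS 1.2. A quick
+  # read-back to confirm the effective protocols (a sketch, not part of this
+  # role):
+  #
+  #   - ansible.windows.win_shell: "[Net.ServicePointManager]::SecurityProtocol"
+  #     register: tls_protocols
+  #     changed_when: false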
+ register: consul_temp_dir + +- name: Download and unarchive Consul + block: + - name: Read Consul package checksum file + ansible.windows.win_stat: + path: "{{ consul_temp_dir.path }}\\consul_{{ consul_version }}_SHA256SUMS" + register: consul_checksum + tags: installation + + - name: Download Consul package checksum file + ansible.windows.win_get_url: + url: "{{ consul_checksum_file_url }}" + dest: "{{ consul_temp_dir.path }}\\consul_{{ consul_version }}_SHA256SUMS" + tags: installation + when: not consul_checksum.stat.exists | bool + + - name: Read Consul package checksum + ansible.windows.win_shell: "findstr {{ consul_pkg }} {{ consul_temp_dir.path }}\\consul_{{ consul_version }}_SHA256SUMS" + args: + chdir: "{{ consul_temp_dir.path }}" + register: consul_pkg_checksum + tags: installation + + - name: Download Consul + ansible.windows.win_get_url: + url: "{{ consul_zip_url }}" + dest: "{{ consul_temp_dir.path }}\\{{ consul_pkg }}" + tags: installation + + - name: Calculate checksum + ansible.windows.win_stat: + path: "{{ consul_temp_dir.path }}\\{{ consul_pkg }}" + checksum_algorithm: sha256 + register: consul_pkg_hash + tags: installation + + - name: Compare checksum to hashfile + ansible.builtin.fail: + msg: "Checksum {{ consul_pkg_checksum.stdout.split(' ') | first }} did not match calculated SHA256 {{ consul_pkg_hash.stat.checksum }}!" + when: + - consul_pkg_hash.stat.checksum != (consul_pkg_checksum.stdout.split(' ') | first) + + - name: Unarchive Consul and install binary + community.windows.win_unzip: + src: "{{ consul_temp_dir.path }}\\{{ consul_pkg }}" + dest: "{{ consul_bin_path }}" + tags: installation + always: + - name: Cleanup + ansible.windows.win_file: + path: "{{ consul_temp_dir.path }}" + state: absent + tags: installation diff --git a/automation/roles/consul/tasks/iptables.yml b/automation/roles/consul/tasks/iptables.yml new file mode 100644 index 000000000..0da9fc85c --- /dev/null +++ b/automation/roles/consul/tasks/iptables.yml @@ -0,0 +1,52 @@ +--- +# File: iptables.yml - iptables tasks for Consul + +- name: Install iptables + ansible.builtin.apt: + name: iptables + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + +- name: Redirect local DNS (1/4) + ansible.builtin.iptables: + table: nat + chain: PREROUTING + protocol: udp + match: udp + destination_port: 53 + jump: REDIRECT + to_ports: 8600 + +- name: Redirect local DNS (2/4) + ansible.builtin.iptables: + table: nat + chain: PREROUTING + protocol: tcp + match: tcp + destination_port: 53 + jump: REDIRECT + to_ports: 8600 + +- name: Redirect local DNS (3/4) + ansible.builtin.iptables: + table: nat + chain: OUTPUT + protocol: udp + match: udp + destination_port: 53 + jump: REDIRECT + to_ports: 8600 + destination: localhost + +- name: Redirect local DNS (4/4) + ansible.builtin.iptables: + table: nat + chain: OUTPUT + protocol: tcp + match: tcp + destination_port: 53 + jump: REDIRECT + to_ports: 8600 + destination: localhost diff --git a/automation/roles/consul/tasks/main.yml b/automation/roles/consul/tasks/main.yml new file mode 100644 index 000000000..d20b42863 --- /dev/null +++ b/automation/roles/consul/tasks/main.yml @@ -0,0 +1,87 @@ +--- +# File: main.yml - Main tasks for Consul +- name: Looking up latest version of Consul + ansible.builtin.set_fact: + consul_version: "{{ (lookup('url', 'https://api.github.com/repos/hashicorp/consul/releases/latest', split_lines=False) | + from_json).get('tag_name') | replace('v', '') }}" + when: 'consul_version == "latest"' + +- name: 
Install python dependencies
+  when:
+    - consul_install_dependencies | bool
+  block:
+    - name: Make sure the python3-pip package is present on controlling host
+      ansible.builtin.package:
+        name: python3-pip
+        state: present
+      register: package_status
+      until: package_status is success
+      delay: 5
+      retries: 3
+      delegate_to: 127.0.0.1
+      run_once: true
+      ignore_errors: true
+      when: not is_virtualenv or is_virtualenv == None
+
+    - name: Install netaddr dependency on controlling host (with --user)
+      ansible.builtin.pip:
+        name: netaddr
+        extra_args: --user
+      environment:
+        PIP_BREAK_SYSTEM_PACKAGES: "1"
+      delegate_to: 127.0.0.1
+      become: false
+      vars:
+        ansible_become: false
+      run_once: true
+      when: not is_virtualenv or is_virtualenv == None
+
+    - name: Install netaddr dependency on controlling host (virtualenv)
+      ansible.builtin.pip:
+        name: netaddr
+      environment:
+        PIP_BREAK_SYSTEM_PACKAGES: "1"
+      delegate_to: 127.0.0.1
+      become: false
+      vars:
+        ansible_become: false
+      run_once: true
+      when: is_virtualenv is defined
+
+- name: Include checks/asserts
+  ansible.builtin.import_tasks: asserts.yml
+
+- name: Include OS-specific variables
+  ansible.builtin.include_vars: "{{ vars_file_item }}"
+  with_first_found:
+    - files:
+        - "{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml"
+        - "{{ ansible_os_family }}.yml"
+  loop_control:
+    loop_var: vars_file_item
+  tags: always
+
+# -----------------------------------------------------------------------
+# Tasks for all *NIX operating systems
+# -----------------------------------------------------------------------
+- name: Include NIX tasks
+  ansible.builtin.include_tasks: nix.yml
+  when: ansible_os_family != 'Windows'
+
+# -----------------------------------------------------------------------
+# Tasks for Windows
+# -----------------------------------------------------------------------
+- name: Include Windows tasks
+  ansible.builtin.include_tasks: windows.yml
+  when: ansible_os_family == 'Windows'
+
+- name: Include services management
+  ansible.builtin.import_tasks: services.yml
+  when:
+    - consul_services is defined and consul_services|length>0
+    - inventory_hostname in groups['postgres_cluster']
+  tags:
+    - consul_services
+
+- name: Flush handlers
+  ansible.builtin.meta: flush_handlers
diff --git a/automation/roles/consul/tasks/nix.yml b/automation/roles/consul/tasks/nix.yml
new file mode 100644
index 000000000..db6898823
--- /dev/null
+++ b/automation/roles/consul/tasks/nix.yml
@@ -0,0 +1,317 @@
+---
+# Gathers facts (bind address) from servers not currently targeted.
+# 'delegate_facts' is currently rather buggy in Ansible so this might not
+# always work. Hence 'consul_gather_server_facts' defaults to 'no'.
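+# Opting in to this cross-host fact gathering is a one-line inventory switch;
+# a sketch of a group_vars entry (the file path is hypothetical):
+#
+#   # group_vars/consul_instances.yml
+#   consul_gather_server_facts: true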
+- name: Gather facts from other servers
+  ansible.builtin.setup:
+  delegate_to: "{{ host_item }}"
+  delegate_facts: true
+  with_items: "{{ consul_servers | difference(play_hosts) }}"
+  loop_control:
+    loop_var: host_item
+  ignore_errors: true
+  run_once: true
+  when: consul_gather_server_facts | bool
+
+- name: Expose advertise_address(_wan), datacenter and node_role as facts
+  ansible.builtin.set_fact:
+    consul_advertise_address_wan: "{{ consul_advertise_address_wan }}"
+    consul_advertise_address: "{{ consul_advertise_address }}"
+    consul_bind_address: "{{ consul_bind_address }}"
+    consul_datacenter: "{{ consul_datacenter }}"
+    consul_node_role: "{{ consul_node_role }}"
+
+- name: Read bootstrapped state
+  ansible.builtin.stat:
+    path: "{{ consul_bootstrap_state }}"
+  register: bootstrap_state
+  ignore_errors: true
+  tags: always
+
+- name: Include user and group settings
+  ansible.builtin.import_tasks: user_group.yml
+
+- name: Install OS packages and consul - from the repository
+  ansible.builtin.include_tasks: install_linux_repo.yml
+  when:
+    - consul_install_from_repo | bool
+
+- name: Include directory settings
+  ansible.builtin.import_tasks: dirs.yml
+
+- name: Check for existing Consul binary
+  ansible.builtin.stat:
+    path: "{{ consul_binary }}"
+  register: consul_binary_installed
+  when: not consul_force_install
+
+- name: Get current Consul version
+  ansible.builtin.command: "{{ consul_binary }} --version"
+  changed_when: false
+  when:
+    - not consul_force_install
+    - consul_binary_installed.stat.exists
+  register: consul_installed_version
+
+- name: Calculate whether to install consul binary
+  ansible.builtin.set_fact:
+    consul_install_binary: "{{ consul_force_install or \
+      not consul_binary_installed.stat.exists or \
+      consul_installed_version.stdout_lines[0] != _consul_expected_version_string }}"
+
+- name: Install OS packages and consul - locally
+  ansible.builtin.include_tasks: install.yml
+  when:
+    - consul_install_binary | bool
+    - not consul_install_remotely | bool
+    - not consul_install_from_repo | bool
+
+- name: Install OS packages and consul - remotely
+  ansible.builtin.include_tasks: install_remote.yml
+  when:
+    - consul_install_binary | bool
+    - consul_install_remotely | bool
+    - not consul_install_from_repo | bool
+
+# XXX: Individual gossip tasks are deprecated and need to be removed
+# - include_tasks: ../tasks/encrypt_gossip.yml
+- block:
+    - block:
+        - name: Check for gossip encryption key on previously bootstrapped server
+          ansible.builtin.slurp:
+            src: "{{ consul_config_path }}/config.json"
+          register: consul_config_b64
+          ignore_errors: true
+
+        - name: Deserialize existing configuration
+          ansible.builtin.set_fact:
+            consul_config: "{{ consul_config_b64.content | b64decode | from_json }}"
+          when: consul_config_b64.content is defined
+
+        - name: Save gossip encryption key from existing configuration
+          ansible.builtin.set_fact:
+            consul_raw_key: "{{ consul_config.encrypt }}"
+          when: consul_config is defined
+
+      no_log: true
+      when:
+        - consul_raw_key is not defined
+        - bootstrap_state.stat.exists | bool
+        - inventory_hostname in consul_servers
+
+    # Key provided by extra vars or the above block
+    - name: Write gossip encryption key locally for use with new servers
+      ansible.builtin.copy:
+        content: "{{ consul_raw_key }}"
+        dest: "/tmp/consul_raw.key"
+        mode: "0600"
+      become: false
+      vars:
+        ansible_become: false
+      no_log: true
+      delegate_to: localhost
+      changed_when: false
+      when: consul_raw_key is defined
+
+    # Generate new key if none was found
+    - block:
+        -
name: Generate gossip encryption key + ansible.builtin.shell: "PATH={{ consul_bin_path }}:$PATH consul keygen" + register: consul_keygen + + - name: Write key locally to share with other nodes + ansible.builtin.copy: + content: "{{ consul_keygen.stdout }}" + dest: "/tmp/consul_raw.key" + become: false + vars: + ansible_become: false + delegate_to: localhost + + no_log: true + run_once: true + when: + # if files '/tmp/consul_raw.key' exist + - lookup('first_found', dict(files=['/tmp/consul_raw.key'], skip=true)) | ternary(false, true) + - not bootstrap_state.stat.exists | bool + + - name: Read gossip encryption key for servers that require it + ansible.builtin.set_fact: + consul_raw_key: "{{ lookup('file', '/tmp/consul_raw.key') }}" + no_log: true + when: + - consul_raw_key is not defined + + - name: Delete gossip encryption key file + ansible.builtin.file: + path: "/tmp/consul_raw.key" + state: absent + become: false + vars: + ansible_become: false + run_once: true + delegate_to: localhost + changed_when: false + no_log: true + when: + - consul_encrypt_enable | bool + +- name: Create ACL configuration + ansible.builtin.include_tasks: acl.yml + when: consul_acl_enable | bool + +- name: Create Consul configuration + ansible.builtin.import_tasks: config.yml + +- name: Create TLS configuration + ansible.builtin.include_tasks: tls.yml + when: consul_tls_enable | bool + +- name: Create syslog configuration + ansible.builtin.import_tasks: syslog.yml + +- name: Create BSD init script + ansible.builtin.template: + src: consul_bsdinit.j2 + dest: /etc/rc.d/consul + owner: root + group: wheel + mode: "0755" + when: ansible_os_family == "FreeBSD" + +- name: Create SYSV init script + ansible.builtin.template: + src: consul_sysvinit.j2 + dest: /etc/init.d/consul + owner: root + group: root + mode: "0755" + when: + - not ansible_service_mgr == "systemd" + - not ansible_os_family == "Debian" + - not ansible_os_family == "FreeBSD" + - not ansible_os_family == "Solaris" + - not ansible_os_family == "Darwin" + +- name: Create Debian init script + ansible.builtin.template: + src: consul_debianinit.j2 + dest: /etc/init.d/consul + owner: root + group: root + mode: "0755" + when: + - not ansible_service_mgr == "systemd" + - ansible_os_family == "Debian" + - not ansible_os_family == "FreeBSD" + - not ansible_os_family == "Solaris" + - not ansible_os_family == "Darwin" + +- name: Create systemd script + ansible.builtin.template: + src: consul_systemd.service.j2 + dest: "{{ consul_systemd_unit_path }}/consul.service" + owner: root + group: root + mode: "0644" + register: systemd_unit + notify: restart consul + when: + - ansible_service_mgr == "systemd" + - not ansible_os_family == "FreeBSD" + - not ansible_os_family == "Solaris" + - not ansible_os_family == "Darwin" + - not consul_install_from_repo | bool + +- name: Reload systemd + ansible.builtin.systemd: + daemon_reload: true + when: systemd_unit is changed + +- name: Enable consul at startup (systemd) + ansible.builtin.systemd: + name: consul + enabled: true + when: + - ansible_service_mgr == "systemd" + - not ansible_os_family == "FreeBSD" + - not ansible_os_family == "Solaris" + - not ansible_os_family == "Darwin" + +- name: Create launchctl plist file + ansible.builtin.template: + src: "consul_launchctl.plist.j2" + dest: "{{ consul_launchctl_plist }}" + mode: "0644" + validate: "plutil -lint %s" + when: ansible_os_family == "Darwin" + notify: restart consul + +- name: Create smf manifest + ansible.builtin.template: + src: consul_smf_manifest.j2 + dest: "{{ 
consul_smf_manifest }}" + owner: root + group: root + mode: "0644" + when: ansible_os_family == "Solaris" + register: smfmanifest + +- name: Import smf manifest + ansible.builtin.shell: "svccfg import {{ consul_smf_manifest }}" + when: + - smfmanifest is changed + - ansible_os_family == "Solaris" + tags: skip_ansible_lint + +- name: Import smf script + ansible.builtin.shell: "svcadm refresh consul" + when: + - smfmanifest is changed + - ansible_os_family == "Solaris" + tags: skip_ansible_lint + +- name: Enable Consul Snapshots on servers + ansible.builtin.include_tasks: snapshot.yml + when: + - ansible_service_mgr == "systemd" + - not ansible_os_family == "FreeBSD" + - not ansible_os_family == "Solaris" + - not ansible_os_family == "Darwin" + - consul_snapshot | bool + +- block: + - name: Start Consul + ansible.builtin.service: + name: consul + state: started + enabled: true + + - name: Check Consul HTTP API (via TCP socket) + ansible.builtin.wait_for: + delay: 15 + port: "{{ consul_ports.http | int }}" + host: "{{ consul_addresses.http }}" + when: (consul_ports.http|int > -1) and (consul_addresses.http|ansible.utils.ipaddr) + + - name: Check Consul HTTP API (via unix socket) + ansible.builtin.wait_for: + delay: 15 + path: "{{ consul_addresses.http | replace('unix://', '', 1) }}" + when: consul_addresses.http is match("unix://*") + + - name: Create bootstrapped state file + ansible.builtin.file: + dest: "{{ consul_bootstrap_state }}" + state: touch + mode: "0600" + + - ansible.builtin.include_tasks: ../tasks/iptables.yml + when: consul_iptables_enable | bool + + when: + - not bootstrap_state.stat.exists + - not ansible_os_family == "Darwin" + +- ansible.builtin.include_tasks: ../tasks/dnsmasq.yml + when: consul_dnsmasq_enable | bool diff --git a/automation/roles/consul/tasks/services.yml b/automation/roles/consul/tasks/services.yml new file mode 100644 index 000000000..ed9180302 --- /dev/null +++ b/automation/roles/consul/tasks/services.yml @@ -0,0 +1,101 @@ +--- +## File: services.yml - services configuration + +- name: Configure consul services + ansible.builtin.template: + dest: "{{ consul_configd_path }}/service_{{ service_item.id }}.json" + src: service.json.j2 + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0644" + with_items: "{{ consul_services }}" + loop_control: + loop_var: service_item + notify: + - restart consul + +- name: Get the list of service config files + ansible.builtin.find: + paths: "{{ consul_configd_path }}" + file_type: file + register: services_enabled_unix + when: ansible_os_family != 'Windows' + +- name: Get the list of service config files [Windows] + ansible.windows.win_find: + paths: "{{ consul_configd_path }}" + file_type: file + register: services_enabled_windows + when: ansible_os_family == 'Windows' + +- name: Set var for enabled services + ansible.builtin.set_fact: + services_enabled_files: "{{ services_enabled_unix['files'] }}" + when: ansible_os_family != 'Windows' + +- name: Set var for enabled services [Windows] + ansible.builtin.set_fact: + services_enabled_files: "{{ services_enabled_windows['files'] }}" + when: ansible_os_family == 'Windows' + +- name: Set fact with list of existing configuration files + ansible.builtin.set_fact: + list_current_service_config: "{{ list_current_service_config | default([]) + [config_file_item.path] }}" + with_items: "{{ services_enabled_files }}" + loop_control: + loop_var: config_file_item + +- name: Set fact with list of services we manage + ansible.builtin.set_fact: + managed_files: "{{ 
managed_files | default([]) }} + [ '{{ consul_configd_path }}/service_{{ service_item.id }}.json' ]"
+  with_items: "{{ consul_services }}"
+  loop_control:
+    loop_var: service_item
+  when: ansible_os_family != 'Windows'
+
+- name: Find all service config files that we manage [Windows]
+  ansible.windows.win_stat:
+    path: "{{ consul_configd_path }}/service_{{ service_config_item.id }}.json"
+  with_items: "{{ consul_services }}"
+  loop_control:
+    loop_var: service_config_item
+  register: managed_files_win_paths
+  when: ansible_os_family == 'Windows'
+
+- name: Set fact with list of services we manage [Windows]
+  ansible.builtin.set_fact:
+    managed_files: "{{ managed_files | default([]) }} + [ '{{ service_item.stat.path }}' ]"
+  with_items: "{{ managed_files_win_paths.results }}"
+  loop_control:
+    loop_var: service_item
+  when: ansible_os_family == 'Windows'
+
+- name: Delete non declared services
+  ansible.builtin.file:
+    path: "{{ non_declared_service_item }}"
+    state: absent
+  when:
+    - ansible_os_family != 'Windows'
+    - non_declared_service_item not in managed_files
+    - non_declared_service_item not in consul_cleanup_ignore_files
+  with_items: "{{ list_current_service_config }}"
+  loop_control:
+    loop_var: non_declared_service_item
+  ignore_errors: "{{ ansible_check_mode }}"
+  notify:
+    - restart consul
+
+- name: Delete non declared services [Windows]
+  ansible.windows.win_file:
+    path: "{{ non_declared_service_item }}"
+    state: absent
+  when:
+    - ansible_os_family == 'Windows'
+    - non_declared_service_item not in managed_files
+    - non_declared_service_item not in consul_cleanup_ignore_files
+  with_items: "{{ list_current_service_config }}"
+  loop_control:
+    loop_var: non_declared_service_item
+  ignore_errors: "{{ ansible_check_mode }}"
+  notify:
+    - restart consul
diff --git a/automation/roles/consul/tasks/snapshot.yml b/automation/roles/consul/tasks/snapshot.yml
new file mode 100644
index 000000000..4efb62672
--- /dev/null
+++ b/automation/roles/consul/tasks/snapshot.yml
@@ -0,0 +1,53 @@
+---
+# File: snapshot.yml - Create snapshot service
+# template: consul_snapshot.service
+# template: consul_snapshot.config /etc/consul/
+# set snaps to {{ snap storage location }}
+# create snaps folder
+# handler: start / enable service
+# add entry to tasks/main.yml
+# update readme
+# update defaults/main.yml
+# update my vars file
+
+- name: Create snapshot systemd script
+  ansible.builtin.template:
+    src: consul_systemd_snapshot.service.j2
+    dest: /lib/systemd/system/consul_snapshot.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: systemd_unit
+  notify: start snapshot
+  when:
+    - ansible_service_mgr == "systemd"
+    - not ansible_os_family == "FreeBSD"
+    - not ansible_os_family == "Solaris"
+    - consul_snapshot | bool
+
+- name: Create snapshot agent config
+  ansible.builtin.template:
+    src: consul_snapshot.json.j2
+    dest: "{{ consul_config_path }}/consul_snapshot.json"
+    owner: "{{ consul_user }}"
+    group: "{{ consul_group }}"
+    mode: "0644"
+  notify: start snapshot
+  when:
+    - ansible_service_mgr == "systemd"
+    - not ansible_os_family == "FreeBSD"
+    - not ansible_os_family == "Solaris"
+    - consul_snapshot | bool
+
+- name: Reload systemd
+  ansible.builtin.systemd:
+    daemon_reload: true
+  when: systemd_unit is changed
+
+- name: Create snaps storage folder
+  ansible.builtin.file:
+    state: directory
+    path: "{{ consul_snapshot_storage }}"
+    owner: "{{ consul_user }}"
+    group: "{{ consul_group }}"
+    mode: "0744"
diff --git a/automation/roles/consul/tasks/syslog.yml
b/automation/roles/consul/tasks/syslog.yml new file mode 100644 index 000000000..8bae90e7f --- /dev/null +++ b/automation/roles/consul/tasks/syslog.yml @@ -0,0 +1,42 @@ +--- +# File: syslog.yml - syslog config for Consul logging + +- name: Detect syslog program + ansible.builtin.stat: + path: /usr/sbin/syslog-ng + register: stat_syslogng + when: + - ansible_os_family != 'Windows' + - consul_configure_syslogd | bool + +- name: Install syslog-ng config + ansible.builtin.template: + src: syslogng_consul.conf.j2 + dest: /etc/syslog-ng/conf.d/consul.conf + owner: root + group: root + mode: "0444" + when: + - ansible_os_family != 'Windows' + - consul_syslog_enable | bool + - consul_configure_syslogd | bool + - stat_syslogng.stat.exists + notify: + - restart syslog-ng + - restart consul + +- name: Install rsyslogd config + ansible.builtin.template: + src: rsyslogd_00-consul.conf.j2 + dest: /etc/rsyslog.d/00-consul.conf + owner: root + group: root + mode: "0444" + when: + - ansible_os_family != 'Windows' + - consul_syslog_enable | bool + - consul_configure_syslogd | bool + - not stat_syslogng.stat.exists + notify: + - restart rsyslog + - restart consul diff --git a/automation/roles/consul/tasks/tls.yml b/automation/roles/consul/tasks/tls.yml new file mode 100644 index 000000000..60c1adc6c --- /dev/null +++ b/automation/roles/consul/tasks/tls.yml @@ -0,0 +1,87 @@ +--- +# File: tls.yml - TLS tasks for Consul + +# Enables TLS encryption with a self-signed certificate if 'tls_cert_generate' is 'true'. +- block: + # if 'consul_on_dedicated_nodes' is 'false' + - name: Copy Consul TLS certificate, key and CA from the master node + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "postgres_cluster" + copy_tls_dir: "{{ consul_tls_dir | default('/etc/consul/tls') }}" + copy_tls_owner: "{{ consul_user }}" + when: not consul_on_dedicated_nodes | default(false) | bool + + # if 'consul_on_dedicated_nodes' is 'true' + - name: Generate Consul TLS certificate + ansible.builtin.include_role: + name: ../roles/tls_certificate/generate + vars: + generate_tls_owner: "{{ consul_user }}" + generate_tls_common_name: "{{ consul_tls_common_name | default('Consul') }}" + generate_tls_dir: "{{ consul_tls_dir | default('/etc/consul/tls') }}" + tls_group_name: "consul_instances" + tls_cert_regenerate: "{{ consul_tls_cert_regenerate | default(false) }}" # Do not generate new certificates if they already exist. + when: consul_on_dedicated_nodes | default(false) | bool + + - name: Copy Consul TLS files to all nodes + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "consul_instances" + fetch_tls_dir: "{{ consul_tls_dir | default('/etc/consul/tls') }}" + copy_tls_dir: "{{ consul_tls_dir | default('/etc/consul/tls') }}" + copy_tls_owner: "{{ consul_user }}" + when: consul_on_dedicated_nodes | default(false) | bool + when: tls_cert_generate | default(false) | bool + +# Copy the existing TLS certificates from the role's files directory if 'tls_cert_generate' is 'false'. 
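+# A caller taking this path points the role at its own files; a sketch with
+# hypothetical file names (the variables are the ones consumed below):
+#
+#   tls_cert_generate: false
+#   consul_tls_copy_keys: true
+#   consul_tls_ca_crt: files/tls/ca.crt
+#   consul_tls_server_crt: files/tls/server.crt
+#   consul_tls_server_key: files/tls/server.key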
+- block: + - name: Create SSL directory + ansible.builtin.file: + dest: "{{ consul_tls_dir }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0755" + + - name: Copy CA certificate + ansible.builtin.copy: + remote_src: "{{ consul_tls_files_remote_src }}" + src: "{{ consul_tls_ca_crt }}" + dest: "{{ consul_tls_dir }}/{{ consul_tls_ca_crt | basename }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0644" + notify: restart consul + + when: + - not tls_cert_generate | default(false) | bool + - consul_tls_copy_keys | bool + +- block: + - name: Copy server certificate + ansible.builtin.copy: + remote_src: "{{ consul_tls_files_remote_src }}" + src: "{{ consul_tls_server_crt }}" + dest: "{{ consul_tls_dir }}/{{ consul_tls_server_crt | basename }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0644" + notify: restart consul + + - name: Copy server key + ansible.builtin.copy: + remote_src: "{{ consul_tls_files_remote_src }}" + src: "{{ consul_tls_server_key }}" + dest: "{{ consul_tls_dir }}/{{ consul_tls_server_key | basename }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: "0600" + notify: restart consul + + when: + - not tls_cert_generate | default(false) | bool + - consul_tls_copy_keys | bool + - auto_encrypt is not defined or (auto_encrypt is defined and not auto_encrypt.enabled | bool) or (consul_node_role != 'client') | bool diff --git a/automation/roles/consul/tasks/user_group.yml b/automation/roles/consul/tasks/user_group.yml new file mode 100644 index 000000000..ef80bac20 --- /dev/null +++ b/automation/roles/consul/tasks/user_group.yml @@ -0,0 +1,22 @@ +--- +# File: user_group.yml - User and group settings + +# Add group +- name: Add Consul group + ansible.builtin.group: + name: "{{ consul_group }}" + state: present + when: + - consul_manage_group | bool + - not consul_install_from_repo | bool + +# Add user +- name: Add Consul user + ansible.builtin.user: + name: "{{ consul_user }}" + comment: "Consul user" + group: "{{ consul_group }}" + system: true + when: + - consul_manage_user | bool + - not consul_install_from_repo | bool diff --git a/automation/roles/consul/tasks/windows.yml b/automation/roles/consul/tasks/windows.yml new file mode 100644 index 000000000..50da6ce13 --- /dev/null +++ b/automation/roles/consul/tasks/windows.yml @@ -0,0 +1,196 @@ +--- +# Gathers facts (bind address) from servers not currently targeted. +# 'delegate_facts' is currently rather buggy in Ansible so this might not +# always work. Hence 'consul_gather_server_facts' defaults to 'no'. 
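+# Windows targets are reached over WinRM rather than SSH; a minimal inventory
+# sketch for exercising this file (host name and settings are hypothetical):
+#
+#   [consul_instances]
+#   win-consul-1 ansible_connection=winrm ansible_port=5986 ansible_winrm_server_cert_validation=ignore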
+- name: (Windows) Gather facts from other servers
+  ansible.builtin.setup:
+  delegate_to: "{{ host_item }}"
+  delegate_facts: true
+  with_items: "{{ consul_servers | difference(play_hosts) }}"
+  loop_control:
+    loop_var: host_item
+  ignore_errors: true
+  when: consul_gather_server_facts | bool
+
+- name: (Windows) Expose bind_address, datacenter and node_role as facts
+  ansible.builtin.set_fact:
+    consul_bind_address: "{{ consul_bind_address }}"
+    consul_datacenter: "{{ consul_datacenter }}"
+    consul_node_role: "{{ consul_node_role }}"
+
+- name: (Windows) Read bootstrapped state
+  ansible.windows.win_stat:
+    path: "{{ consul_bootstrap_state }}"
+  register: bootstrap_state
+  ignore_errors: true
+  tags: always
+
+- name: (Windows) Include directory settings
+  ansible.builtin.import_tasks: dirs.yml
+
+- name: (Windows) Check for existing Consul binary
+  ansible.windows.win_stat:
+    path: "{{ consul_binary }}"
+  register: consul_binary_installed
+
+- name: (Windows) Get current Consul version
+  ansible.windows.win_command: "{{ consul_binary }} --version"
+  changed_when: false
+  when:
+    - not consul_force_install
+    - consul_binary_installed.stat.exists
+  register: consul_installed_version
+
+- name: (Windows) Calculate whether to install consul binary
+  ansible.builtin.set_fact:
+    consul_install_binary: "{{ consul_force_install or \
+      not consul_binary_installed.stat.exists or \
+      consul_installed_version.stdout_lines[0] != _consul_expected_version_string }}"
+
+- name: (Windows) Install OS packages and consul
+  ansible.builtin.include_tasks: install_windows.yml
+  when: consul_install_binary | bool
+
+- block:
+    - block:
+        - name: (Windows) Check for gossip encryption key on previously bootstrapped server
+          ansible.builtin.slurp:
+            src: "{{ consul_config_path }}/config.json"
+          register: consul_config_b64
+          ignore_errors: true
+
+        - name: (Windows) Deserialize existing configuration
+          ansible.builtin.set_fact:
+            consul_config: "{{ consul_config_b64.content | b64decode | from_json }}"
+          when: consul_config_b64.content is defined
+
+        - name: (Windows) Save gossip encryption key from existing configuration
+          ansible.builtin.set_fact:
+            consul_raw_key: "{{ consul_config.encrypt }}"
+          when: consul_config is defined
+
+      no_log: true
+      when:
+        - consul_raw_key is not defined
+        - bootstrap_state.stat.exists | bool
+        - inventory_hostname in consul_servers
+
+    # Key provided by extra vars or the above block
+    - name: (Windows) Write gossip encryption key locally for use with new servers
+      ansible.builtin.copy:
+        content: "{{ consul_raw_key }}"
+        dest: "/tmp/consul_raw.key"
+        mode: "0600"
+      become: false
+      vars:
+        ansible_become: false
+      no_log: true
+      run_once: true
+      register: consul_local_key
+      delegate_to: localhost
+      when: consul_raw_key is defined
+
+    # Generate new key if none was found
+    - block:
+        - name: (Windows) Generate gossip encryption key
+          ansible.windows.win_shell: "{{ consul_binary }} keygen"
+          register: consul_keygen
+
+        - name: (Windows) Write key locally to share with other nodes
+          ansible.builtin.copy:
+            content: "{{ consul_keygen.stdout }}"
+            dest: "/tmp/consul_raw.key"
+            mode: "0600"
+          become: false
+          vars:
+            ansible_become: false
+          delegate_to: localhost
+
+      no_log: true
+      run_once: true
+      when:
+        - not consul_local_key.changed
+        - not bootstrap_state.stat.exists | bool
+
+    - name: (Windows) Read gossip encryption key for servers that require it
+      ansible.builtin.set_fact:
+        consul_raw_key: "{{ lookup('file', '/tmp/consul_raw.key') }}"
+      no_log: true
+      when:
+        - consul_raw_key is
not defined + + - name: (Windows) Delete gossip encryption key file + ansible.builtin.file: + path: "/tmp/consul_raw.key" + state: absent + become: false + vars: + ansible_become: false + run_once: true + delegate_to: localhost + no_log: true + when: + - consul_encrypt_enable + +- name: (Windows) Create Consul configuration + ansible.builtin.import_tasks: config_windows.yml + +- name: (Windows) Ensure neither ACL nor TLS are requested + ansible.builtin.fail: + msg: "ACL and TLS are not supported on Windows hosts yet." + when: + - (consul_acl_enable | bool) or (consul_tls_enable | bool) + +- name: (Windows) Create ACL configuration + ansible.builtin.include_tasks: acl.yml + when: consul_acl_enable | bool + +- name: (Windows) Create TLS configuration + ansible.builtin.include_tasks: tls.yml + when: consul_tls_enable | bool + +- block: + - name: Convert consul_binary from Unix -> Windows + ansible.windows.win_stat: + path: "{{ consul_binary }}" + register: consul_binary_win + + - name: Convert consul_config_path from Unix -> Windows + ansible.windows.win_stat: + path: "{{ consul_config_path }}" + register: consul_config_path_win + + - name: Convert consul_configd_path from Unix -> Windows + ansible.windows.win_stat: + path: "{{ consul_configd_path }}" + register: consul_configd_path_win + + - name: Create Consul as a service + ansible.windows.win_service: + name: Consul + path: "{{ consul_binary_win.stat.path }} agent \ + -config-file={{ consul_config_path_win.stat.path }}\\config.json \ + -config-dir={{ consul_configd_path_win.stat.path }}" + display_name: Consul Service + description: Consul + start_mode: auto + state: started + + - name: (Windows) Check Consul HTTP API + ansible.windows.win_wait_for: + delay: 5 + port: 8500 + + - name: (Windows) Create bootstrapped state file + ansible.windows.win_file: + dest: "{{ consul_bootstrap_state }}" + state: touch + when: ansible_os_family == "Windows" + + - ansible.builtin.include_tasks: ../tasks/iptables.yml + when: consul_iptables_enable | bool + + when: not bootstrap_state.stat.exists + +- ansible.builtin.include_tasks: ../tasks/dnsmasq.yml + when: consul_dnsmasq_enable | bool diff --git a/automation/roles/consul/templates/config.json.j2 b/automation/roles/consul/templates/config.json.j2 new file mode 100644 index 000000000..200233eb7 --- /dev/null +++ b/automation/roles/consul/templates/config.json.j2 @@ -0,0 +1,357 @@ +{# This template will be passed through the 'to_nice_json' filter #} +{# The filter fixes whitespace, indentation and comma's on the last item #} +{ + {# Common Settings #} + + {## Node ##} + {% if consul_node_name is defined %} + "node_name": "{{ consul_node_name }}", + {% endif %} + "datacenter": "{{ consul_datacenter }}", + "domain": "{{ consul_domain }}", + {% if consul_alt_domain %} + "alt_domain": "{{ consul_alt_domain }}", + {% endif %} + {% if consul_version is version_compare('0.7.3', '>=') and consul_node_meta | length > 0 %} + "node_meta": {{ consul_node_meta | default({})| to_json }}, + {% endif %} + {# Performance Settings #} + "performance": {{ consul_performance | to_json }}, + + {## Addresses ##} + "bind_addr": "{{ consul_bind_address }}", + "advertise_addr": "{{ consul_advertise_address }}", + "advertise_addr_wan": "{{ consul_advertise_address_wan }}", + "translate_wan_addrs": {{ consul_translate_wan_address | bool | to_json }}, + "client_addr": "{{ consul_client_address }}", + "addresses": { + {% if consul_version is version_compare('0.8.0', '<') %} + "rpc": "{{ consul_addresses.rpc }}", + {% endif %} + 
"dns": "{{ consul_addresses.dns }}", + "http": "{{ consul_addresses.http }}", + "https": "{{ consul_addresses.https }}", + {% if consul_version is version_compare('1.3.0', '>=') %} + "grpc": "{{ consul_addresses.grpc }}", + {% endif %} + {% if consul_version is version_compare('1.14.0', '>=') and consul_tls_enable %} + "grpc_tls": "{{ consul_addresses.grpc_tls }}", + {% endif %} + }, + {## Ports Used ##} + "ports": { + {% if consul_version is version_compare('0.8.0', '<') %} + "rpc": {{ consul_ports.rpc}}, + {% endif %} + "dns": {{ consul_ports.dns }}, + "http": {{ consul_ports.http }}, + "https": {{ consul_ports.https }}, + "serf_lan": {{ consul_ports.serf_lan }}, + "serf_wan": {{ consul_ports.serf_wan }}, + "server": {{ consul_ports.server }}, + {% if consul_version is version_compare('1.3.0', '>=') %} + "grpc": {{ consul_ports.grpc }}, + {% endif %} + {% if consul_version is version_compare('1.14.0', '>=') and consul_tls_enable %} + "grpc_tls": {{ consul_ports.grpc_tls }}, + {% endif %} + }, + + {## Raft protocol ##} + "raft_protocol": {{ consul_raft_protocol }}, + + {## DNS ##} + {% if consul_recursors | length > 0 %} + "recursors": {{ consul_recursors | to_json }}, + {% endif %} + + {## Agent ##} + "data_dir": "{{ consul_data_path }}", + "log_level": "{{ consul_log_level }}", + {% if consul_syslog_enable | bool %} + "enable_syslog": {{ consul_syslog_enable | bool | to_json }}, + "syslog_facility": "{{ consul_syslog_facility }}", + {% else %} + "log_file": "{{ consul_log_path }}/{{ consul_log_file }}", + "log_rotate_bytes": {{ consul_log_rotate_bytes }}, + "log_rotate_duration": "{{ consul_log_rotate_duration }}", + {% if consul_version is version_compare('1.5.3', '>=') %} + "log_rotate_max_files": {{ consul_log_rotate_max_files }}, + {% endif %} + {% endif %} + "disable_update_check": {{ consul_disable_update_check | bool | to_json }}, + "enable_script_checks": {{ consul_enable_script_checks | bool | to_json }}, + "enable_local_script_checks": {{ consul_enable_local_script_checks | bool | to_json }}, + {% if leave_on_terminate is defined %} + "leave_on_terminate": {{ leave_on_terminate | bool | to_json }}, + {% endif %} + + {## Encryption and TLS ##} + {% if consul_encrypt_enable | bool %} + "encrypt": "{{ consul_raw_key }}", + "encrypt_verify_incoming": {{ consul_encrypt_verify_incoming | bool | to_json }}, + "encrypt_verify_outgoing": {{ consul_encrypt_verify_outgoing | bool | to_json }}, + {% endif %} + {% if consul_disable_keyring_file | bool %} + "disable_keyring_file": true, + {% endif %} + {% if consul_tls_enable | bool %} + {% if consul_version is version_compare('1.12.0', '>=') %} + "tls": { + "defaults": { + "ca_file": "{{ consul_tls_dir }}/{{ consul_tls_ca_crt | basename }}", + {% if auto_encrypt is not defined or (auto_encrypt is defined and not auto_encrypt.enabled | bool) + or (config_item.config_version != 'client') | bool %} + "cert_file": "{{ consul_tls_dir }}/{{ consul_tls_server_crt | basename }}", + "key_file": "{{ consul_tls_dir }}/{{ consul_tls_server_key | basename }}", + "verify_incoming": {{ consul_tls_verify_incoming | bool | to_json }}, + {% else %} + "verify_incoming": false, + {% endif %} + "verify_outgoing": {{ consul_tls_verify_outgoing | bool | to_json }}, + "tls_min_version": "{{ consul_tls_min_version }}", + {% if consul_tls_cipher_suites is defined and consul_tls_cipher_suites %} + "tls_cipher_suites": "{{ consul_tls_cipher_suites}}", + {% endif %} + }, + {% if consul_tls_verify_incoming_rpc is defined or consul_tls_verify_server_hostname is defined 
%} + "internal_rpc": { + "verify_incoming": {{consul_tls_verify_incoming_rpc | bool| to_json }}, + "verify_server_hostname": {{ consul_tls_verify_server_hostname | bool | to_json }}, + }, + {% endif %} + {% if consul_tls_verify_incoming_https is defined %} + "https": { + "verify_incoming": {{consul_tls_verify_incoming_https | bool| to_json }}, + }, + {% endif %} + }, + {% else %} + "ca_file": "{{ consul_tls_dir }}/{{ consul_tls_ca_crt | basename }}", + {% if auto_encrypt is not defined or (auto_encrypt is defined and not auto_encrypt.enabled | bool) + or (config_item.config_version != 'client') | bool %} + "cert_file": "{{ consul_tls_dir }}/{{ consul_tls_server_crt | basename }}", + "key_file": "{{ consul_tls_dir }}/{{ consul_tls_server_key | basename }}", + "verify_incoming": {{ consul_tls_verify_incoming | bool | to_json }}, + {% else %} + "verify_incoming": false, + {% endif %} + "verify_outgoing": {{ consul_tls_verify_outgoing | bool | to_json }}, + "verify_incoming_rpc": {{consul_tls_verify_incoming_rpc | bool| to_json }}, + "verify_incoming_https": {{consul_tls_verify_incoming_https | bool| to_json }}, + "verify_server_hostname": {{ consul_tls_verify_server_hostname | bool | to_json }}, + "tls_min_version": "{{ consul_tls_min_version }}", + {% if consul_tls_cipher_suites is defined and consul_tls_cipher_suites %} + "tls_cipher_suites": "{{ consul_tls_cipher_suites}}", + {% endif %} + {% if consul_version is version_compare('1.11.0', '<') %} + "tls_prefer_server_cipher_suites": {{ consul_tls_prefer_server_cipher_suites | bool | to_json }}, + {% endif %} + {% endif %} + {% if auto_encrypt is defined %} + "auto_encrypt": { + {% if auto_encrypt.enabled | bool and (config_item.config_version != 'client') | bool %} + "allow_tls": true, + {% endif %} + {% if auto_encrypt.enabled | bool and (config_item.config_version == 'client') | bool %} + "tls": true, + {% endif %} + {% if auto_encrypt.dns_san is defined %} + "dns_san": {{ auto_encrypt.dns_san | list | to_json }}, + {% endif %} + {% if auto_encrypt.ip_san is defined %} + "ip_san": {{ auto_encrypt.ip_san | list | to_json }}, + {% endif %} + }, + {% endif %} + {% endif %} + + {## LAN Join ##} + "retry_interval": "{{ consul_retry_interval }}", + "retry_max": {{ consul_retry_max | int }}, + + "retry_join": + {% if not consul_cloud_autodiscovery | bool %} + {% if not consul_retry_join_skip_hosts %} + {% for server in _consul_lan_servers %} + {% set _ = consul_join.append(hostvars[server]['consul_advertise_address'] | default(hostvars[server]['consul_bind_address']) | default(hostvars[server]['ansible_default_ipv4']['address']) | mandatory) %} + {% endfor %} + {% endif %} + {{ consul_join | map('ansible.utils.ipwrap') | list | to_json }}, + {% else %} + ["{{ consul_cloud_autodiscovery_string }}"], + {% endif %} + + {## Server/Client ##} + "server": {{ (config_item.config_version != 'client') | bool | to_json }}, + + {## Enable Connect on Server ##} + {% if consul_connect_enabled | bool %} + "connect": { + "enabled": true + }, + {% endif %} + + {# Client Settings #} + {% if (config_item.config_version == 'client') %} + {## ACLs ##} + {% if consul_acl_enable | bool %} + {% if consul_version is version_compare('1.4.0', '>=') %} + "primary_datacenter": "{{ consul_acl_datacenter }}", + "acl": { + "enabled": true, + "default_policy": "{{ consul_acl_default_policy }}", + "down_policy": "{{ consul_acl_down_policy }}", + "token_ttl": "{{ consul_acl_ttl }}", + "enable_token_persistence": {{ consul_acl_token_persistence | bool | to_json}}, + "tokens": { 
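+      {# Consul 1.11+ renamed the "agent_master" token to "agent_recovery"; the version check below emits whichever key matches the installed version. #}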
+      {% if consul_acl_token | trim != '' %}
+      "default": "{{ consul_acl_token }}",
+      {% endif %}
+      {% if consul_acl_agent_token | trim != '' %}
+      "agent": "{{ consul_acl_agent_token }}",
+      {% endif %}
+      {% if consul_acl_agent_master_token | trim != '' %}
+      {% if consul_version is version_compare('1.11.0', '>=') %}
+      "agent_recovery": "{{ consul_acl_agent_master_token }}",
+      {% else %}
+      "agent_master": "{{ consul_acl_agent_master_token }}",
+      {% endif %}
+      {% endif %}
+    }
+  },
+  {% else %}
+  {% if consul_acl_token | trim != '' %}
+  "acl_token": "{{ consul_acl_token }}",
+  {% endif %}
+  {% if consul_acl_agent_token | trim != '' %}
+  "acl_agent_token": "{{ consul_acl_agent_token }}",
+  {% endif %}
+  {% if consul_acl_agent_master_token | trim != '' %}
+  "acl_agent_master_token": "{{ consul_acl_agent_master_token }}",
+  {% endif %}
+  "acl_ttl": "{{ consul_acl_ttl }}",
+  "acl_datacenter": "{{ consul_acl_datacenter }}",
+  "acl_down_policy": "{{ consul_acl_down_policy }}",
+  {% endif %}
+  {% endif %}
+  {% endif %}
+
+  {# Server Settings #}
+  {% if (config_item.config_version == 'server') or (config_item.config_version == 'bootstrap') %}
+
+  {## Bootstrap settings ##}
+  "bootstrap": {{ (config_item.config_version == 'bootstrap') | bool | to_json }},
+  {% if consul_bootstrap_expect and not (config_item.config_version == 'bootstrap') %}
+  "bootstrap_expect": {{ consul_bootstrap_expect_value }},
+
+  {## AutoPilot ##}
+  {% if consul_autopilot_enable | bool %}
+  "autopilot": {
+    "cleanup_dead_servers": {{ consul_autopilot_cleanup_dead_servers | bool | to_json }},
+    "last_contact_threshold": "{{ consul_autopilot_last_contact_threshold }}",
+    "max_trailing_logs": {{ consul_autopilot_max_trailing_logs }},
+    "server_stabilization_time": "{{ consul_autopilot_server_stabilization_time }}"{{ ',' if consul_enterprise else '' }}
+    {% if consul_enterprise %}
+    "redundancy_zone_tag": "{{ consul_autopilot_redundancy_zone_tag }}",
+    "disable_upgrade_migration": {{ consul_autopilot_disable_upgrade_migration | bool | to_json }},
+    "upgrade_version_tag": "{{ consul_autopilot_upgrade_version_tag }}"
+    {% endif %}
+  },
+  {% endif %}
+
+  {% endif %}
+
+  {## WAN Join ##}
+  "retry_interval_wan": "{{ consul_retry_interval_wan }}",
+  "retry_max_wan": {{ consul_retry_max_wan | int }},
+
+  {% if _consul_wan_servercount | int > 0 %}
+  "retry_join_wan":
+  {% for server in _consul_wan_servers %}
+  {% set _ = consul_join_wan.append(hostvars[server]['consul_advertise_address_wan'] | default(hostvars[server]['consul_bind_address'])) %}
+  {% endfor %}
+  {{ consul_join_wan | map('ansible.utils.ipwrap') | list | to_json }},
+  {% endif %}
+
+  {## ACLs ##}
+  {% if consul_acl_enable | bool %}
+  {% if consul_acl_replication_enable | trim != '' %}
+  "enable_acl_replication": {{ consul_acl_replication_enable | bool | to_json }},
+  {% endif %}
+  {% if consul_version is version_compare('1.4.0', '>=') %}
+  "primary_datacenter": "{{ consul_acl_datacenter }}",
+  "acl": {
+    "enabled": true,
+    "default_policy": "{{ consul_acl_default_policy }}",
+    "down_policy": "{{ consul_acl_down_policy }}",
+    "token_ttl": "{{ consul_acl_ttl }}",
+    "enable_token_persistence": {{ consul_acl_token_persistence | bool | to_json }},
+    "tokens": {
+      {% if consul_acl_token | trim != '' %}
+      "default": "{{ consul_acl_token }}",
+      {% endif %}
+      {% if consul_acl_agent_token | trim != '' %}
+      "agent": "{{ consul_acl_agent_token }}",
+      {% endif %}
+      {% if consul_acl_agent_master_token | trim != '' %}
+      {% if consul_version is version_compare('1.11.0', '>=') %}
+      "agent_recovery": "{{ consul_acl_agent_master_token }}",
+      {% else %}
+      "agent_master": "{{ consul_acl_agent_master_token }}",
+      {% endif %}
+      {% endif %}
+      {% if consul_version is version_compare('0.9.1', '<') or consul_acl_master_token | trim != '' %}
+      {% if consul_version is version_compare('1.11.0', '>=') %}
+      "initial_management": "{{ consul_acl_master_token }}",
+      {% else %}
+      "master": "{{ consul_acl_master_token }}",
+      {% endif %}
+      {% endif %}
+      {% if consul_acl_replication_token | trim != '' %}
+      "replication": "{{ consul_acl_replication_token }}",
+      {% endif %}
+    }
+  },
+  {% else %}
+  {% if consul_acl_token | trim != '' %}
+  "acl_token": "{{ consul_acl_token }}",
+  {% endif %}
+  {% if consul_acl_agent_token | trim != '' %}
+  "acl_agent_token": "{{ consul_acl_agent_token }}",
+  {% endif %}
+  {% if consul_acl_agent_master_token | trim != '' %}
+  "acl_agent_master_token": "{{ consul_acl_agent_master_token }}",
+  {% endif %}
+  "acl_ttl": "{{ consul_acl_ttl }}",
+  "acl_datacenter": "{{ consul_acl_datacenter }}",
+  "acl_down_policy": "{{ consul_acl_down_policy }}",
+  {% if consul_version is version_compare('0.9.1', '<') or
+     consul_acl_master_token | trim != '' %}
+  "acl_master_token": "{{ consul_acl_master_token }}",
+  {% endif %}
+  {% if consul_acl_replication_enable | trim != '' %}
+  "enable_acl_replication": {{ consul_acl_replication_enable | bool | to_json }},
+  {% endif %}
+  {% if consul_acl_replication_token | trim != '' %}
+  "acl_replication_token": "{{ consul_acl_replication_token }}",
+  {% endif %}
+  "acl_default_policy": "{{ consul_acl_default_policy }}",
+  {% endif %}
+  {% endif %}
+  {% endif %}
+
+  {## Limits ##}
+  {% if consul_version is version_compare('0.9.3', '>=') and consul_limits | length > 0 %}
+  "limits": {{ consul_limits | default({}) | to_json }},
+  {% endif %}
+
+  {## UI ##}
+  {% if consul_version is version_compare('1.9.0', '>=') %}
+  "ui_config": {
+    "enabled": {{ consul_ui | bool | to_json }}
+  }
+  {% else %}
+  "ui": {{ consul_ui | bool | to_json }}
+  {% endif %}
+}
diff --git a/automation/roles/consul/templates/configd_50acl_policy.hcl.j2 b/automation/roles/consul/templates/configd_50acl_policy.hcl.j2
new file mode 100644
index 000000000..557a7859c
--- /dev/null
+++ b/automation/roles/consul/templates/configd_50acl_policy.hcl.j2
@@ -0,0 +1,44 @@
+# Default all keys to read-only
+key "" {
+  policy = "read"
+}
+key "foo/" {
+  policy = "write"
+}
+key "foo/private/" {
+  # Deny access to the dir "foo/private"
+  policy = "deny"
+}
+
+# Default all services to allow registration. Also permits all
+# services to be discovered.
+service "" {
+  policy = "write"
+}
+
+# Deny registration access to services prefixed "secure-".
+# Discovery of the service is still allowed in read mode.
+service "secure-" {
+  policy = "read"
+}
+
+# Allow firing any user event by default.
+event "" {
+  policy = "write"
+}
+
+# Deny firing events prefixed with "destroy-".
+event "destroy-" {
+  policy = "deny"
+}
+
+# Default prepared queries to read-only.
+query "" { + policy = "read" +} + +# Read-only mode for the encryption keyring by default (list only) +keyring = "read" + +# Read-only mode for Consul operator interfaces (list only) +operator = "read" \ No newline at end of file diff --git a/automation/roles/consul/templates/configd_50custom.json.j2 b/automation/roles/consul/templates/configd_50custom.json.j2 new file mode 100644 index 000000000..ed892645b --- /dev/null +++ b/automation/roles/consul/templates/configd_50custom.json.j2 @@ -0,0 +1,6 @@ +{# consul_config_custom variables are free-style, passed through a hash -#} +{% if consul_config_custom -%} +{{ consul_config_custom | to_nice_json }} +{% else %} +{} +{% endif %} \ No newline at end of file diff --git a/automation/roles/consul/templates/consul_bsdinit.j2 b/automation/roles/consul/templates/consul_bsdinit.j2 new file mode 100644 index 000000000..46b934fc8 --- /dev/null +++ b/automation/roles/consul/templates/consul_bsdinit.j2 @@ -0,0 +1,49 @@ +#!/bin/sh + +# PROVIDE: consul +# REQUIRE: LOGIN +# KEYWORD: shutdown + +# shellcheck disable=SC1091 +. /etc/rc.subr + +name="consul" +# shellcheck disable=2034 +rcvar=$(set_rcvar) + + +load_rc_config $name +# shellcheck disable=2154 +: "${consul_enable="NO"}" +# shellcheck disable=2154 +: "${consul_users="consul"}" + +# shellcheck disable=2034 +restart_cmd=consul_restart +# shellcheck disable=2034 +start_cmd=consul_start +# shellcheck disable=2034 +stop_cmd=consul_stop + +consul_start() { + echo "Starting ${name}." + for user in ${consul_users}; do + mkdir {{ consul_run_path }} + chown -R "{{ consul_user }}:{{ consul_group }}" {{ consul_run_path }} + su -m "${user}" -c "{{ consul_bin_path }}/consul agent -config-file={{ consul_config_path }}/config.json -config-dir={{ consul_configd_path }} -pid-file={{ consul_run_path }}/consul.pid&" + done +} + +consul_stop() { + echo "Stopping $name." + pids=$(pgrep consul) + pkill consul + wait_for_pids "${pids}" +} + +consul_restart() { + consul_stop + consul_start +} + +run_rc_command "$1" diff --git a/automation/roles/consul/templates/consul_debianinit.j2 b/automation/roles/consul/templates/consul_debianinit.j2 new file mode 100644 index 000000000..0537754b3 --- /dev/null +++ b/automation/roles/consul/templates/consul_debianinit.j2 @@ -0,0 +1,129 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: consul +# Required-Start: $local_fs $remote_fs +# Required-Stop: $local_fs $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: S 0 1 6 +# Short-Description: Distributed service discovery framework +# Description: Distributed service discovery / health check framework +### END INIT INFO + +# Do NOT "set -e" + +# PATH should only include /usr/* if it runs after the mountnfs.sh script + +PATH="{{ consul_bin_path }}:/usr/sbin:/usr/bin:/sbin:/bin" +DESC="Consul service discovery framework" +NAME="consul" +DAEMON="{{ consul_bin_path }}/${NAME}" +PIDFILE="{{ consul_run_path }}/${NAME}.pid" +DAEMON_ARGS="agent -config-file={{ consul_config_path }}/config.json -config-dir={{ consul_configd_path }}" +USER={{ consul_user }} +SCRIPTNAME=/etc/init.d/"${NAME}" + +# Exit if Consul is not installed +[ -x "${DAEMON}" ] || exit 0 + +# Read default variables file +[ -r /etc/default/"${NAME}" ] && . /etc/default/"${NAME}" + +# Source rcS variables +[ -f /etc/default/rcS ] && . /etc/default/rcS + +# Source LSB functions +. /lib/lsb/init-functions + +# Make sure PID dir exists +mkrundir() { + [ ! 
-d {{ consul_run_path }} ] && mkdir -p {{ consul_run_path }} + chown {{ consul_user }} {{ consul_run_path }} +} + +# Start the Consul service +do_start() { + echo "Starting consul and backgrounding" + mkrundir + start-stop-daemon --start --quiet --pidfile "${PIDFILE}" --exec "${DAEMON}" --chuid "${USER}" --background --make-pidfile --test > /dev/null \ + || return 1 + start-stop-daemon --start --quiet --pidfile "${PIDFILE}" --exec "${DAEMON}" --chuid "${USER}" --background --make-pidfile -- \ + ${DAEMON_ARGS} \ + || return 2 + + echo -n "Waiting for Consul service..." + for i in `seq 1 30`; do + if ! start-stop-daemon --quiet --stop --test --pidfile "${PIDFILE}" --exec "${DAEMON}" --user "${USER}"; then + echo " FAIL: consul process died" + return 2 + fi + if "${DAEMON}" info >/dev/null; then + echo " OK" + return 0 + fi + echo -n . + sleep 1 + done + echo " FAIL: consul process is alive, but is not listening." + return 2 +} + +# Stop the Consul service +do_stop() { + "${DAEMON}" leave + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile "${PIDFILE}" --name "${NAME}" + RETVAL="$?" + [ "${RETVAL}" = 2 ] && return 2 + start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec "${DAEMON}" + [ "$?" = 2 ] && return 2 + rm -f "${PIDFILE}" + return "${RETVAL}" +} + +# Reload Consul +do_reload() { + start-stop-daemon --stop --signal 1 --quiet --pidfile "${PIDFILE}" --name "${NAME}" + return 0 +} + +case "$1" in + start) + [ "${VERBOSE}" != no ] && log_daemon_msg "Starting ${DESC}" "${NAME}" + do_start + case "$?" in + 0|1) [ "${VERBOSE}" != no ] && log_end_msg 0 ;; + 2) [ "${VERBOSE}" != no ] && log_end_msg 1 ;; + esac + ;; + stop) + [ "${VERBOSE}" != no ] && log_daemon_msg "Stopping ${DESC}" "${NAME}" + do_stop + case "$?" in + 0|1) [ "${VERBOSE}" != no ] && log_end_msg 0 ;; + 2) [ "${VERBOSE}" != no ] && log_end_msg 1 ;; + esac + ;; + restart|force-reload) + log_daemon_msg "Restarting ${DESC}" "${NAME}" + do_stop + case "$?" in + 0|1) + do_start + case "$?" 
in
+      0) log_end_msg 0 ;;
+      1) log_end_msg 1 ;;
+      *) log_end_msg 1 ;;
+      esac
+      ;;
+    *)
+      # Stop failed
+      log_end_msg 1
+      ;;
+    esac
+    ;;
+  *)
+    echo "Usage: ${SCRIPTNAME} {start|stop|restart|force-reload}" >&2
+    exit 3
+    ;;
+esac
+
+:
diff --git a/automation/roles/consul/templates/consul_launchctl.plist.j2 b/automation/roles/consul/templates/consul_launchctl.plist.j2
new file mode 100644
index 000000000..ed3ddf625
--- /dev/null
+++ b/automation/roles/consul/templates/consul_launchctl.plist.j2
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+  <key>EnvironmentVariables</key>
+  <dict>
+    <key>PATH</key>
+    <string>{{ consul_bin_path }}</string>
+  </dict>
+  <key>KeepAlive</key>
+  <true/>
+  <key>Label</key>
+  <string>{{ consul_launchctl_ident }}</string>
+  <key>MachServices</key>
+  <dict>
+    <key>{{ consul_launchctl_ident }}</key>
+    <true/>
+  </dict>
+  <key>ProcessType</key>
+  <string>Background</string>
+  <key>ProgramArguments</key>
+  <array>
+    <string>{{ consul_bin_path }}/consul</string>
+    <string>agent</string>
+    <string>-config-file={{ consul_config_path }}/config.json</string>
+    <string>-config-dir={{ consul_configd_path }}</string>
+    <string>-pid-file={{ consul_run_path }}/consul.pid</string>
+  </array>
+  <key>RunAtLoad</key>
+  <true/>
+  <key>StandardErrorPath</key>
+  <string>{{ consul_log_path }}/log.stderr</string>
+  <key>StandardOutPath</key>
+  <string>{{ consul_log_path }}/log.stdout</string>
+  <key>UserName</key>
+  <string>{{ consul_user }}</string>
+</dict>
+</plist>
diff --git a/automation/roles/consul/templates/consul_smf_manifest.j2 b/automation/roles/consul/templates/consul_smf_manifest.j2
new file mode 100644
index 000000000..187c8dad1
--- /dev/null
+++ b/automation/roles/consul/templates/consul_smf_manifest.j2
@@ -0,0 +1,53 @@
+[53 lines of SMF service manifest XML; the markup was lost in extraction]
diff --git a/automation/roles/consul/templates/consul_snapshot.json.j2 b/automation/roles/consul/templates/consul_snapshot.json.j2
new file mode 100644
index 000000000..66ecebf33
--- /dev/null
+++ b/automation/roles/consul/templates/consul_snapshot.json.j2
@@ -0,0 +1,27 @@
+{
+  "snapshot_agent": {
+    "http_addr": "{% if consul_tls_enable | bool %}https://{% endif %}{{ consul_client_address }}:{% if consul_tls_enable | bool %}{{ consul_ports.https }}{% else %}{{ consul_ports.http }}{% endif %}",
+    {% if consul_tls_enable | bool -%}
+    "ca_file": "{{ consul_tls_dir }}/{{ consul_tls_ca_crt }}",
+    "cert_file": "{{ consul_tls_dir }}/{{ consul_tls_server_crt }}",
+    "key_file": "{{ consul_tls_dir }}/{{ consul_tls_server_key }}",
+    {% endif %}
+    "log": {
+      "level": "INFO",
+      "enable_syslog": true,
+      "syslog_facility": "LOCAL0"
+    },
+    "snapshot": {
+      "interval": "{{ consul_snapshot_interval }}",
+      "retain": {{ consul_snapshot_retain }},
+      "stale": false,
+      "service": "consul_snapshot",
+      "deregister_after": "72h",
+      "lock_key": "consul_snapshot/lock",
+      "max_failures": 3
+    },
+    "local_storage": {
+      "path": "{{ consul_snapshot_storage }}"
+    }
+  }
+}
diff --git a/automation/roles/consul/templates/consul_systemd.service.j2 b/automation/roles/consul/templates/consul_systemd.service.j2
new file mode 100644
index 000000000..0a4883bb8
--- /dev/null
+++ b/automation/roles/consul/templates/consul_systemd.service.j2
@@ -0,0 +1,44 @@
+### BEGIN INIT INFO
+# Provides: consul
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Consul agent
+# Description: Consul service discovery framework
+### END INIT INFO
+
+[Unit]
+Description=Consul agent
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+User={{ consul_user }}
+Group={{ consul_group }}
+PIDFile={{ consul_run_path }}/consul.pid
+PermissionsStartOnly=true
+{% if consul_ui_legacy %}
+Environment=CONSUL_UI_LEGACY=true
+{% endif %}
+ExecStartPre=-/bin/mkdir -m 0750 -p {{ consul_run_path }}
+ExecStartPre=/bin/chown -R {{ consul_user }}:{{ consul_group }} {{ consul_run_path }}
+ExecStart={{ consul_bin_path }}/consul agent \
+    -config-file={{ consul_config_path }}/config.json \
+    -config-dir={{ consul_configd_path }} \
+    -pid-file={{ consul_run_path }}/consul.pid
+ExecReload=/bin/kill -HUP $MAINPID
+KillMode=process
+KillSignal=SIGTERM
+Restart={{ consul_systemd_restart }}
+RestartSec={{ consul_systemd_restart_sec }}s
+StandardOutput=null
+StandardError=null
+{% for var in consul_env_vars %}
+Environment={{ var }}
+{% endfor %}
+LimitNOFILE={{ consul_systemd_limit_nofile }}
+AmbientCapabilities=CAP_NET_BIND_SERVICE
+
+[Install]
+WantedBy=multi-user.target
diff --git a/automation/roles/consul/templates/consul_systemd_service.override.j2 b/automation/roles/consul/templates/consul_systemd_service.override.j2
new file mode 100644
index 000000000..642704ab9
--- /dev/null
+++ b/automation/roles/consul/templates/consul_systemd_service.override.j2
@@ -0,0 +1,10 @@
+# WARNING!!! Ansible managed.
+
+[Unit]
+ConditionFileNotEmpty=
+ConditionFileNotEmpty={{ consul_config_path }}/config.json
+
+[Service]
+ExecStart=
+ExecStart=/usr/bin/consul agent -config-file={{ consul_config_path }}/config.json -config-dir={{ consul_configd_path }}
+
diff --git a/automation/roles/consul/templates/consul_systemd_snapshot.service.j2 b/automation/roles/consul/templates/consul_systemd_snapshot.service.j2
new file mode 100644
index 000000000..0fa70df8a
--- /dev/null
+++ b/automation/roles/consul/templates/consul_systemd_snapshot.service.j2
@@ -0,0 +1,33 @@
+### BEGIN INIT INFO
+# Provides: consul
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Consul snapshot agent
+# Description: Consul service snapshot agent
+### END INIT INFO
+
+[Unit]
+Description=Consul snapshot agent
+Requires=network-online.target
+Requisite=consul.service
+After=network-online.target
+
+[Service]
+User={{ consul_user }}
+Group={{ consul_group }}
+PIDFile={{ consul_run_path }}/consul_snapshot.pid
+PermissionsStartOnly=true
+ExecStart={{ consul_bin_path }}/consul snapshot agent \
+-config-file={{ consul_config_path }}/consul_snapshot.json
+ExecReload=/bin/kill -HUP $MAINPID
+KillSignal=SIGTERM
+Restart={{ consul_systemd_restart }}
+RestartSec=42s
+{% for var in consul_env_vars %}
+Environment={{ var }}
+{% endfor %}
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/automation/roles/consul/templates/consul_sysvinit.j2 b/automation/roles/consul/templates/consul_sysvinit.j2
new file mode 100644
index 000000000..895e2b538
--- /dev/null
+++ b/automation/roles/consul/templates/consul_sysvinit.j2
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# chkconfig: 2345 95 95
+# description: Consul service discovery framework
+# processname: consul
+# pidfile: {{ consul_run_path }}/consul.pid
+
+{% if ansible_distribution == "Ubuntu" %}
+. /lib/lsb/init-functions
+{% else %}
+. /etc/init.d/functions
+{% endif %}
+
+CONSUL={{ consul_bin_path }}/consul
+CONFIG={{ consul_config_path }}/config.json
+CONFIGD={{ consul_configd_path }}
+PID_FILE={{ consul_run_path }}/consul.pid
+LOCK_FILE=/var/lock/subsys/consul
+{% if consul_ui_legacy %}
+CONSUL_UI_LEGACY=true
+{% endif %}
+
+[ -e /etc/sysconfig/consul ] && . /etc/sysconfig/consul
+
+export GOMAXPROCS=$(nproc)
+
+mkrundir() {
+    [ ! -d {{ consul_run_path }} ] && mkdir -p {{ consul_run_path }}
+    chown {{ consul_user }} {{ consul_run_path }}
+}
+
+KILLPROC_OPT="-p ${PID_FILE}"
+mkpidfile() {
+    mkrundir
+    [ ! -f "${PID_FILE}" ] && pidofproc "${CONSUL}" > "${PID_FILE}"
+    chown -R {{ consul_user }} {{ consul_run_path }}
+    if [ $? -ne 0 ] ; then
+        rm "${PID_FILE}"
+        KILLPROC_OPT=""
+    fi
+}
+
+start() {
+    echo -n "Starting consul: "
+    mkrundir
+    mkpidfile
+    # [ -f "${PID_FILE}" ] && rm "${PID_FILE}"
+    daemon --user={{ consul_user }} \
+        --pidfile="${PID_FILE}" \
+        "${CONSUL}" agent -config-file="${CONFIG}" -config-dir="${CONFIGD}" -pid-file="${PID_FILE}" &
+    retcode=$?
+    touch ${LOCK_FILE}
+    return "${retcode}"
+}
+
+stop() {
+    echo -n "Shutting down consul: "
+    if ("${CONSUL}" info 2>/dev/null | grep -q 'server = false' 2>/dev/null) ; then
+        "${CONSUL}" leave
+    fi
+
+    mkpidfile
+    killproc "${KILLPROC_OPT}" "${CONSUL}" -SIGTERM
+
+    retcode=$?
+    rm -f "${LOCK_FILE}" "${PID_FILE}"
+    return "${retcode}"
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    status)
+        "${CONSUL}" info
+        ;;
+    restart)
+        stop
+        start
+        ;;
+    reload)
+        mkpidfile
+        killproc "${KILLPROC_OPT}" "${CONSUL}" -HUP
+        ;;
+    condrestart)
+        [ -f ${LOCK_FILE} ] && restart || :
+        ;;
+    *)
+        echo "Usage: consul {start|stop|status|reload|restart}"
+        exit 1
+        ;;
+esac
+exit $?
diff --git a/automation/roles/consul/templates/dnsmasq-10-consul.j2 b/automation/roles/consul/templates/dnsmasq-10-consul.j2
new file mode 100644
index 000000000..1aea4e5d1
--- /dev/null
+++ b/automation/roles/consul/templates/dnsmasq-10-consul.j2
@@ -0,0 +1,52 @@
+{# Enable forward lookups for the consul domain with conditional delegation -#}
+{% if consul_delegate_datacenter_dns | bool -%}
+server=/{{ consul_datacenter }}.{{ consul_domain }}/{{ consul_dnsmasq_consul_address }}#{{ consul_ports.dns }}
+{% if consul_alt_domain -%}
+server=/{{ consul_datacenter }}.{{ consul_alt_domain }}/{{ consul_dnsmasq_consul_address }}#{{ consul_ports.dns }}
+{% endif -%}
+{% else %}
+server=/{{ consul_domain }}/{{ consul_dnsmasq_consul_address }}#{{ consul_ports.dns }}
+{% if consul_alt_domain -%}
+server=/{{ consul_alt_domain }}/{{ consul_dnsmasq_consul_address }}#{{ consul_ports.dns }}
+{% endif -%}
+{% endif -%}
+
+{# Only bind to specific interfaces -#}
+{% if consul_dnsmasq_bind_interfaces | bool -%}
+bind-interfaces
+{% endif -%}
+
+{# Reverse DNS lookups -#}
+{% for revserver in consul_dnsmasq_revservers -%}
+ rev-server={{ revserver }},{{ consul_dnsmasq_consul_address }}#{{ consul_ports.dns }}
+{% endfor -%}
+
+{# Only accept DNS queries from hosts in the local subnet -#}
+{% if consul_dnsmasq_local_service | bool -%}
+ local-service
+{% endif -%}
+
+{# Don't poll /etc/resolv.conf for changes -#}
+{% if consul_dnsmasq_no_poll | bool -%}
+ no-poll
+{% endif -%}
+
+{# Don't use /etc/resolv.conf to get upstream servers -#}
+{% if consul_dnsmasq_no_resolv | bool -%}
+ no-resolv
+{% endif -%}
+
+{# Upstream DNS servers -#}
+{% for server in consul_dnsmasq_servers -%}
+ server={{ server }}
+{% endfor -%}
+
+{# Custom listen addresses -#}
+{% for address in consul_dnsmasq_listen_addresses -%}
+ listen-address={{ address }}
+{% endfor -%}
+
+{# Cache size -#}
+{% if consul_dnsmasq_cache > 0 -%}
+ cache-size={{ consul_dnsmasq_cache }}
+{% endif -%}
diff --git a/automation/roles/consul/templates/rsyslogd_00-consul.conf.j2 b/automation/roles/consul/templates/rsyslogd_00-consul.conf.j2
new file mode 100644
index 000000000..f51db5f1f
--- /dev/null
+++ b/automation/roles/consul/templates/rsyslogd_00-consul.conf.j2
@@ -0,0 +1 @@
+{{ consul_syslog_facility }}.* {{ consul_log_path }}/{{ consul_log_file }}
diff --git a/automation/roles/consul/templates/service.json.j2 b/automation/roles/consul/templates/service.json.j2
new file mode 100644
index 000000000..0db5cf89e
--- /dev/null
+++ b/automation/roles/consul/templates/service.json.j2
@@ -0,0 +1,39 @@
+{
+  "service": {
+    "name": "{{ service_item.name }}",
+    {% if service_item.id is defined -%}
+    "id": "{{ service_item.id }}",
+    {% endif -%}
+    {% if service_item.port is defined -%}
+    "port": {{ service_item.port }},
+    {% endif -%}
+    {% if service_item.address is defined -%}
+    "address": "{{ service_item.address }}",
+    {% endif -%}
+    {% if service_item.enable_tag_override is defined -%}
+    "enable_tag_override": {{ service_item.enable_tag_override | bool | to_json }},
+    {% endif -%}
+    {% if service_item.kind is defined -%}
+    "kind": "{{ service_item.kind }}",
+    {% endif -%}
+    {% if service_item.proxy is defined -%}
+    "proxy": {{ service_item.proxy | to_json(sort_keys=True) }},
+    {% endif -%}
+    {% if service_item.meta is defined -%}
+    "meta": {{ service_item.meta | to_json(sort_keys=True) }},
+    {% endif -%}
+    {% if service_item.checks is defined -%}
+    "checks": {{ service_item.checks | to_json(sort_keys=True) }},
+    {% endif -%}
+    {% if service_item.connect is defined -%}
+    "connect": {{ service_item.connect | to_json(sort_keys=True) }},
+    {% endif -%}
+    {% if service_item.weights is defined -%}
+    "weights": {{ service_item.weights | to_json(sort_keys=True) }},
+    {% endif -%}
+    {% if service_item.token is defined -%}
+    "token": {{ service_item.token | to_json }},
+    {% endif -%}
+    "tags": {{ service_item.tags | default([]) | to_json(sort_keys=True) }}
+  }
+}
diff --git a/automation/roles/consul/templates/syslogng_consul.conf.j2 b/automation/roles/consul/templates/syslogng_consul.conf.j2
new file mode 100644
index 000000000..861774464
--- /dev/null
+++ b/automation/roles/consul/templates/syslogng_consul.conf.j2
@@ -0,0 +1,3 @@
+destination d_consul { file("{{ consul_log_path }}/{{ consul_log_file }}"); };
+filter f_consul { facility({{ consul_syslog_facility }}); };
+log { source(s_sys); filter(f_consul); destination(d_consul); };
diff --git a/automation/roles/consul/vars/Amazon.yml b/automation/roles/consul/vars/Amazon.yml
new file mode 100644
index 000000000..cdf9e7fc3
--- /dev/null
+++ b/automation/roles/consul/vars/Amazon.yml
@@ -0,0 +1,12 @@
+---
+# File: Amazon.yml - Amazon Linux variables for Consul
+consul_os_packages:
+  - git
+  - unzip
+
+consul_syslog_enable: false
+
+consul_repo_prerequisites:
+  - yum-utils
+
+consul_repo_url: https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
diff --git a/automation/roles/consul/vars/Archlinux.yml b/automation/roles/consul/vars/Archlinux.yml
new file mode 100644
index 000000000..3fe0aa949
--- /dev/null
+++ b/automation/roles/consul/vars/Archlinux.yml
@@ -0,0 +1,7 @@
+---
+# File: Archlinux.yml - Archlinux variables for Consul
+
+consul_os_packages:
+  - unzip
+
+consul_syslog_enable: false
diff --git a/automation/roles/consul/vars/Darwin.yml b/automation/roles/consul/vars/Darwin.yml
new file mode 100644
index 000000000..efddcbd41
--- /dev/null
+++ b/automation/roles/consul/vars/Darwin.yml
@@ -0,0 +1,16 @@
+---
+# File: Darwin.yml - macOS (Darwin) variables for Consul
+
+consul_config_path: "/Users/{{ consul_user }}/Library/Preferences/io.consul"
+
+consul_configd_path: "{{ consul_config_path }}/consul.d"
+
+consul_launchctl_ident: "io.consul"
+
+consul_launchctl_plist: "/Library/LaunchAgents/{{ consul_launchctl_ident }}.plist"
+
+consul_log_path: "/Users/{{ consul_user }}/Library/Logs/consul"
+
+consul_os_packages: []
+
+consul_run_path: "/Users/{{ consul_user
}}/Library/Caches/io.consul" diff --git a/automation/roles/consul/vars/Debian.yml b/automation/roles/consul/vars/Debian.yml new file mode 100644 index 000000000..589562341 --- /dev/null +++ b/automation/roles/consul/vars/Debian.yml @@ -0,0 +1,12 @@ +--- +# File: Debian.yml - Debian OS variables for Consul + +consul_os_packages: + - unzip + +dnsmasq_package: dnsmasq + +consul_repo_prerequisites: + - gpg + +consul_repo_url: "https://apt.releases.hashicorp.com/" diff --git a/automation/roles/consul/vars/Flatcar.yml b/automation/roles/consul/vars/Flatcar.yml new file mode 100644 index 000000000..9337bdb05 --- /dev/null +++ b/automation/roles/consul/vars/Flatcar.yml @@ -0,0 +1,6 @@ +--- +# File: Flatcar.yml - Flatcar variables for Consul + +consul_os_packages: [] + +consul_systemd_unit_path: "/etc/systemd/system" diff --git a/automation/roles/consul/vars/FreeBSD.yml b/automation/roles/consul/vars/FreeBSD.yml new file mode 100644 index 000000000..4511196a7 --- /dev/null +++ b/automation/roles/consul/vars/FreeBSD.yml @@ -0,0 +1,7 @@ +--- +# File: FreeBSD.yml - FreeBSD OS variables for Consul + +consul_os_packages: + - unzip + +dnsmasq_package: dnsmasq diff --git a/automation/roles/consul/vars/RedHat.yml b/automation/roles/consul/vars/RedHat.yml new file mode 100644 index 000000000..a6871492f --- /dev/null +++ b/automation/roles/consul/vars/RedHat.yml @@ -0,0 +1,14 @@ +--- +# File: RedHat.yml - Red Hat OS variables for Consul + +consul_os_packages: + - python3-libselinux + - unzip + +consul_repo_prerequisites: + - yum-utils + +consul_repo_url: "{{ 'https://rpm.releases.hashicorp.com/fedora/hashicorp.repo' if ansible_distribution == 'Fedora' else + 'https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo' }}" + +dnsmasq_package: dnsmasq diff --git a/automation/roles/consul/vars/Solaris.yml b/automation/roles/consul/vars/Solaris.yml new file mode 100644 index 000000000..3a1369565 --- /dev/null +++ b/automation/roles/consul/vars/Solaris.yml @@ -0,0 +1,8 @@ +--- +# File: Solaris.yml - Solaris OS variables for Consul + +consul_os_packages: + - unzip + +consul_pkg: "consul_{{ consul_version }}_solaris_amd64.zip" +consul_smf_manifest: "/opt/local/lib/svc/manifest/consul.xml" diff --git a/automation/roles/consul/vars/VMware Photon OS.yml b/automation/roles/consul/vars/VMware Photon OS.yml new file mode 100644 index 000000000..eb10477ed --- /dev/null +++ b/automation/roles/consul/vars/VMware Photon OS.yml @@ -0,0 +1,3 @@ +--- +consul_os_packages: + - unzip diff --git a/automation/roles/consul/vars/Windows.yml b/automation/roles/consul/vars/Windows.yml new file mode 100644 index 000000000..eae3ae9a1 --- /dev/null +++ b/automation/roles/consul/vars/Windows.yml @@ -0,0 +1,17 @@ +--- +# File: Windows.yml - Windows OS variables for Consul + +# paths +consul_windows_path: /ProgramData/consul +consul_bin_path: "{{ consul_windows_path }}/bin" +consul_config_path: "{{ consul_windows_path }}/config" +consul_configd_path: "{{ consul_config_path }}.d/" +consul_bootstrap_state: "{{ consul_windows_path }}/.consul_bootstrapped" +consul_data_path: "{{ consul_windows_path }}/data" +consul_log_path: "{{ consul_windows_path }}/log" +consul_run_path: "{{ consul_windows_path }}" +consul_binary: "{{ consul_windows_path }}/bin/consul.exe" +consul_syslog_enable: false + +# users +consul_user: LocalSystem diff --git a/automation/roles/consul/vars/main.yml b/automation/roles/consul/vars/main.yml new file mode 100644 index 000000000..388ff8750 --- /dev/null +++ b/automation/roles/consul/vars/main.yml @@ 
-0,0 +1,37 @@ +--- +# Pure internal helper variables + +_consul_lan_servers: "\ + {% set __consul_lan_servers = [] %}\ + {% for server in consul_servers %}\ + {% set _consul_datacenter = hostvars[server]['consul_datacenter'] | default('dc1', true) %}\ + {% if _consul_datacenter == consul_datacenter %}\ + {% if __consul_lan_servers.append(server) %}{% endif %}\ + {% endif %}\ + {% endfor %}\ + {{ __consul_lan_servers }}" +_consul_lan_servercount: "{{ (_consul_lan_servers | length) + (consul_join | length) }}" + +_consul_wan_servers: "\ + {% set __consul_wan_servers = [] %}\ + {% for server in consul_servers %}\ + {% set _consul_datacenter = hostvars[server]['consul_datacenter'] | default('dc1', true) %}\ + {% if _consul_datacenter != consul_datacenter %}\ + {% if __consul_wan_servers.append(server) %}{% endif %}\ + {% endif %}\ + {% endfor %}\ + {{ __consul_wan_servers }}" +_consul_wan_servercount: "{{ (_consul_wan_servers | length) + (consul_join_wan | length) }}" + +_consul_bootstrap_servers: "\ + {% set __consul_bootstrap_servers = [] %}\ + {% for server in _consul_lan_servers %}\ + {% set _consul_node_role = hostvars[server]['consul_node_role'] | default('client', true) %}\ + {% if _consul_node_role == 'bootstrap' %}\ + {% if __consul_bootstrap_servers.append(server) %}{% endif %}\ + {% endif %}\ + {% endfor %}\ + {{ __consul_bootstrap_servers }}" +_consul_bootstrap_server: "{{ _consul_bootstrap_servers[0] }}" + +_consul_expected_version_string: "Consul v{{ consul_version }}" diff --git a/automation/roles/consul/version.txt b/automation/roles/consul/version.txt new file mode 100644 index 000000000..752d2e754 --- /dev/null +++ b/automation/roles/consul/version.txt @@ -0,0 +1 @@ +commit: 6251974 on Nov 15, 2022 diff --git a/automation/roles/copy/README.md b/automation/roles/copy/README.md new file mode 100644 index 000000000..0e105514b --- /dev/null +++ b/automation/roles/copy/README.md @@ -0,0 +1 @@ +# Ansible Role: copy diff --git a/automation/roles/copy/tasks/main.yml b/automation/roles/copy/tasks/main.yml new file mode 100644 index 000000000..2f6182d7a --- /dev/null +++ b/automation/roles/copy/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Fetch files from the master + become: true + become_user: root + run_once: true + ansible.builtin.fetch: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + flat: true + validate_checksum: true + loop: "{{ fetch_files_from_master }}" + delegate_to: "{{ groups.master[0] }}" + when: + - fetch_files_from_master is defined + - fetch_files_from_master | length > 0 + tags: fetch_files + +- name: Copy files to all servers + become: true + become_user: root + ansible.builtin.copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + loop: "{{ copy_files_to_all_server }}" + when: + - copy_files_to_all_server is defined + - copy_files_to_all_server | length > 0 + tags: copy_files diff --git a/automation/roles/cron/README.md b/automation/roles/cron/README.md new file mode 100644 index 000000000..302ac9052 --- /dev/null +++ b/automation/roles/cron/README.md @@ -0,0 +1 @@ +# Ansible Role: cron diff --git a/automation/roles/cron/defaults/main.yml b/automation/roles/cron/defaults/main.yml new file mode 100644 index 000000000..b96340247 --- /dev/null +++ b/automation/roles/cron/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for cron + +cron_jobs: [] diff --git a/automation/roles/cron/tasks/main.yml b/automation/roles/cron/tasks/main.yml new file mode 100644 index 
000000000..e37adc682 --- /dev/null +++ b/automation/roles/cron/tasks/main.yml @@ -0,0 +1,67 @@ +--- +# tasks file for cron + +- name: Make sure that the cronie package is installed + ansible.builtin.package: + name: cronie + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - cron_jobs is defined and cron_jobs | length > 0 + - ansible_os_family == "RedHat" + tags: cron + +- name: Make sure that the cron package is installed + ansible.builtin.apt: + name: cron + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - cron_jobs is defined and cron_jobs | length > 0 + - ansible_os_family == "Debian" + tags: cron + +- name: Add cron jobs + ansible.builtin.cron: + cron_file: "{{ item.file | default('') }}" + user: "{{ item.user | default('postgres') }}" + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + day: "{{ item.day | default('*') }}" + month: "{{ item.month | default('*') }}" + weekday: "{{ item.weekday | default('*') }}" + name: "{{ item.name }}" + disabled: "{{ item.disabled | default(False) }}" + state: "{{ item.state | default('present') }}" + job: "{{ item.job }}" + loop: "{{ cron_jobs }}" + when: + - cron_jobs is defined and cron_jobs | length > 0 + - remove_postgres is undefined + tags: cron + +- name: Uninstall cron jobs + ansible.builtin.cron: + cron_file: "{{ item.file | default('') }}" + user: "{{ item.user | default('postgres') }}" + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + day: "{{ item.day | default('*') }}" + month: "{{ item.month | default('*') }}" + weekday: "{{ item.weekday | default('*') }}" + name: "{{ item.name }}" + state: "absent" + job: "{{ item.job }}" + loop: "{{ cron_jobs }}" + when: + - cron_jobs is defined and cron_jobs | length > 0 + - remove_postgres is defined and remove_postgres | bool + tags: cron, uninstall diff --git a/automation/roles/deploy_finish/README.md b/automation/roles/deploy_finish/README.md new file mode 100644 index 000000000..2e00cbefb --- /dev/null +++ b/automation/roles/deploy_finish/README.md @@ -0,0 +1 @@ +# Ansible Role: deploy_finish diff --git a/automation/roles/deploy_finish/tasks/main.yml b/automation/roles/deploy_finish/tasks/main.yml new file mode 100644 index 000000000..5c9619bd0 --- /dev/null +++ b/automation/roles/deploy_finish/tasks/main.yml @@ -0,0 +1,303 @@ +# yamllint disable rule:line-length +--- +- name: Make sure handlers are flushed immediately + ansible.builtin.meta: flush_handlers + +# Get info +- name: Get Postgres users + run_once: true + become: true + become_user: postgres + ansible.builtin.command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -Xc '\\du'" + register: users_result + delegate_to: "{{ groups.master[0] }}" + changed_when: false + ignore_errors: true + tags: users, users_list, cluster_info, cluster_status, point_in_time_recovery + +- name: Get Postgres databases + run_once: true + become: true + become_user: postgres + ansible.builtin.command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -Xc '\\l'" + register: dbs_result + delegate_to: "{{ groups.master[0] }}" + changed_when: false + ignore_errors: true + tags: databases, db_list, cluster_info, cluster_status, point_in_time_recovery + +- name: Get 
Postgres cluster info + run_once: true + become: true + become_user: postgres + ansible.builtin.command: patronictl -c /etc/patroni/patroni.yml list + register: patronictl_result + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + changed_when: false + ignore_errors: true + tags: patroni_status, cluster_info, cluster_status, point_in_time_recovery + +# Print info +- name: Postgres list of users + run_once: true + ansible.builtin.debug: + msg: "{{ users_result.stdout_lines }}" + when: users_result.stdout_lines is defined + tags: users, users_list, cluster_info, cluster_status, point_in_time_recovery + +- name: Postgres list of databases + run_once: true + ansible.builtin.debug: + msg: "{{ dbs_result.stdout_lines }}" + when: dbs_result.stdout_lines is defined + tags: databases, db_list, cluster_info, cluster_status, point_in_time_recovery + +- name: Postgres Cluster info + run_once: true + ansible.builtin.debug: + msg: "{{ patronictl_result.stdout_lines }}" + when: patronictl_result.stdout_lines is defined + tags: patroni_status, cluster_info, cluster_status, point_in_time_recovery + +# Connection info +# Note: if the variable 'mask_password' is 'true', do not print the superuser password in connection info. + +# if 'cluster_vip' is defined +- block: + # if 'with_haproxy_load_balancing' is 'true' + - name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: "{{ cluster_vip }}" + port: + primary: "{{ haproxy_listen_port.master }}" + replica: "{{ haproxy_listen_port.replicas }}" + replica_sync: "{{ haproxy_listen_port.replicas_sync if synchronous_mode | bool else omit }}" + replica_async: "{{ haproxy_listen_port.replicas_async if synchronous_mode | bool else omit }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + when: with_haproxy_load_balancing | bool + + # if 'with_haproxy_load_balancing' is 'false' + - name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: "{{ cluster_vip }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + when: not with_haproxy_load_balancing | bool + ignore_errors: true + vars: + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: + - cluster_vip is defined and cluster_vip | length > 0 + - dcs_type == "etcd" + - (cloud_provider | default('') | length < 1 or not cloud_load_balancer | default(true) | bool) + tags: conn_info, cluster_info, cluster_status + +# if 'cluster_vip' is not defined +- block: + # if 'with_haproxy_load_balancing' is 'true' + - name: Connection info + run_once: true + ansible.builtin.debug: + msg: + public_address: "{{ public_haproxy_ip_addresses if database_public_access | default(false) | bool else omit }}" + address: "{{ haproxy_ip_addresses }}" + port: + primary: "{{ haproxy_listen_port.master }}" + replica: "{{ haproxy_listen_port.replicas }}" + replica_sync: "{{ haproxy_listen_port.replicas_sync if synchronous_mode | bool else omit }}" + replica_async: "{{ haproxy_listen_port.replicas_async if synchronous_mode | bool else omit }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + when: with_haproxy_load_balancing | bool + + # if 'with_haproxy_load_balancing' is 'false' and 'pgbouncer_install' is 'true' + - name: Connection info + run_once: true + ansible.builtin.debug: 
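+      # The rendered message uses the vars defined below; the password is shown as '********' when mask_password is true.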
+ msg: + public_address: "{{ public_postgres_ip_addresses if database_public_access | default(false) | bool else omit }}" + address: "{{ postgres_ip_addresses }}" + port: "{{ pgbouncer_listen_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + when: not with_haproxy_load_balancing | bool and pgbouncer_install | bool + + # if 'with_haproxy_load_balancing' is 'false' and 'pgbouncer_install' is 'false' + - name: Connection info + run_once: true + ansible.builtin.debug: + msg: + public_address: "{{ public_postgres_ip_addresses if database_public_access | default(false) | bool else omit }}" + address: "{{ postgres_ip_addresses }}" + port: "{{ postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + connection_string: + read_write: "postgresql://{{ superuser_username }}:{{ superuser_password }}@{{ libpq_postgres_host_port }}/postgres?target_session_attrs=read-write" + read_only: "postgresql://{{ superuser_username }}:{{ superuser_password }}@{{ libpq_postgres_host_port }}/postgres?target_session_attrs=read-only{{ libpq_load_balance }}" + when: not with_haproxy_load_balancing | bool and not pgbouncer_install | bool + ignore_errors: true + vars: + public_haproxy_ip_addresses: "{{ groups['balancers'] | default([]) | map('extract', hostvars, 'ansible_ssh_host') | join(',') }}" + public_postgres_ip_addresses: "{{ groups['postgres_cluster'] | default([]) | map('extract', hostvars, 'ansible_ssh_host') | join(',') }}" + haproxy_ip_addresses: "{{ groups['balancers'] | default([]) | map('extract', hostvars, 'inventory_hostname') | join(',') }}" + postgres_ip_addresses: "{{ groups['postgres_cluster'] | default([]) | map('extract', hostvars, 'inventory_hostname') | join(',') }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + libpq_postgres_host_port: "{{ postgres_ip_addresses.split(',') | map('regex_replace', '$', ':' + postgresql_port | string) | join(',') }}" + libpq_load_balance: "{{ '&load_balance_hosts=random' if postgresql_version | int >= 16 else '' }}" + when: + - (cluster_vip is not defined or cluster_vip | length < 1) + - dcs_type == "etcd" + - (cloud_provider | default('') | length < 1 or not cloud_load_balancer | default(true) | bool) + tags: conn_info, cluster_info, cluster_status + +# if dcs_type: "consul" +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "master.{{ patroni_cluster_name }}.service.consul" + replica: "replica.{{ patroni_cluster_name }}.service.consul" + replica_sync: "{{ 'sync-replica.' ~ patroni_cluster_name ~ '.service.consul' if synchronous_mode | bool else omit }}" + replica_async: "{{ 'async-replica.' 
~ patroni_cluster_name ~ '.service.consul' if synchronous_mode | bool else omit }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: + - dcs_type == "consul" + - (cloud_provider | default('') | length < 1 or not cloud_load_balancer | default(true) | bool) + tags: conn_info, cluster_info, cluster_status + +# if 'cloud_provider' and `cloud_load_balancer` is defined + +# AWS +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "{{ load_balancer_primary }}" + replica: "{{ load_balancer_replica if load_balancer_replica != 'N/A' else omit }}" + replica_sync: "{{ load_balancer_replica_sync if synchronous_mode | bool else omit }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + load_balancer_primary: "{{ (hostvars['localhost']['aws_elb_classic_lb']['results'] | selectattr('item', 'equalto', 'primary') | first).elb.dns_name | default('N/A') }}" + load_balancer_replica: "{{ (hostvars['localhost']['aws_elb_classic_lb']['results'] | selectattr('item', 'equalto', 'replica') | first).elb.dns_name | default('N/A') }}" + load_balancer_replica_sync: "{{ (hostvars['localhost']['aws_elb_classic_lb']['results'] | selectattr('item', 'equalto', 'sync') | first).elb.dns_name | default('N/A') }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: cloud_provider | default('') | lower == 'aws' and cloud_load_balancer | default(true) | bool + tags: conn_info, cluster_info, cluster_status + +# GCP +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "{{ load_balancer_primary }}" + replica: "{{ load_balancer_replica if load_balancer_replica != 'N/A' else omit }}" + replica_sync: "{{ load_balancer_replica_sync if synchronous_mode | bool else omit }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + load_balancer_primary: "{{ (hostvars['localhost']['gcp_load_balancer']['results'] | selectattr('item', 'equalto', 'primary') | first).IPAddress | default('N/A') }}" + load_balancer_replica: "{{ (hostvars['localhost']['gcp_load_balancer']['results'] | selectattr('item', 'equalto', 'replica') | first).IPAddress | default('N/A') }}" + load_balancer_replica_sync: "{{ (hostvars['localhost']['gcp_load_balancer']['results'] | selectattr('item', 'equalto', 'sync') | first).IPAddress | default('N/A') }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: cloud_provider | default('') | lower == 'gcp' and cloud_load_balancer | default(true) | bool + tags: conn_info, cluster_info, cluster_status + +# Azure +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "{{ lb_primary_public if database_public_access | default(false) | bool else 
lb_primary_private }}" + replica: "{{ (lb_replica_public if lb_replica_public != 'N/A' else omit) if database_public_access | default(false) | bool else (lb_replica_private if lb_replica_private != 'N/A' else omit) }}" + replica_sync: "{{ (lb_sync_public if database_public_access | default(false) | bool else lb_sync_private) if synchronous_mode | bool else omit }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + lb_primary_public: "{{ (hostvars['localhost']['azure_load_balancer_public_ip']['results'] | selectattr('item', 'equalto', 'primary') | first).state.ip_address | default('N/A') }}" + lb_replica_public: "{{ (hostvars['localhost']['azure_load_balancer_public_ip']['results'] | selectattr('item', 'equalto', 'replica') | first).state.ip_address | default('N/A') }}" + lb_sync_public: "{{ (hostvars['localhost']['azure_load_balancer_public_ip']['results'] | selectattr('item', 'equalto', 'sync') | first).state.ip_address | default('N/A') }}" + lb_primary_private: "{{ (hostvars['localhost']['azure_load_balancer']['results'] | selectattr('item', 'equalto', 'primary') | first).state.frontend_ip_configurations[0].private_ip_address | default('N/A') }}" + lb_replica_private: "{{ (hostvars['localhost']['azure_load_balancer']['results'] | selectattr('item', 'equalto', 'replica') | first).state.frontend_ip_configurations[0].private_ip_address | default('N/A') }}" + lb_sync_private: "{{ (hostvars['localhost']['azure_load_balancer']['results'] | selectattr('item', 'equalto', 'sync') | first).state.frontend_ip_configurations[0].private_ip_address | default('N/A') }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: cloud_provider | default('') | lower == 'azure' and cloud_load_balancer | default(true) | bool + tags: conn_info, cluster_info, cluster_status + +# DigitalOcean +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "{{ load_balancer_primary }}" + replica: "{{ load_balancer_replica if load_balancer_replica != 'N/A' else omit }}" + replica_sync: "{{ load_balancer_replica_sync if synchronous_mode | bool else omit }}" + port: "{{ digital_ocean_load_balancer_port | default(database_port) }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + load_balancer_primary: "{{ (hostvars['localhost']['digitalocean_load_balancer']['data'] | selectattr('name', 'equalto', patroni_cluster_name + '-primary') | first).ip | default('N/A') }}" + load_balancer_replica: "{{ (hostvars['localhost']['digitalocean_load_balancer']['data'] | selectattr('name', 'equalto', patroni_cluster_name + '-replica') | first).ip | default('N/A') }}" + load_balancer_replica_sync: "{{ (hostvars['localhost']['digitalocean_load_balancer']['data'] | selectattr('name', 'equalto', patroni_cluster_name + '-sync') | first).ip | default('N/A') }}" + database_port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: cloud_provider | default('') | lower == 'digitalocean' and cloud_load_balancer | default(true) | bool + tags: conn_info, cluster_info, cluster_status + +# 
Hetzner Cloud +- name: Connection info + run_once: true + ansible.builtin.debug: + msg: + address: + primary: "{{ lb_primary_public if database_public_access | default(false) | bool else lb_primary_private }}" + replica: "{{ (lb_replica_public if lb_replica_public != 'N/A' else omit) if database_public_access | default(false) | bool else (lb_replica_private if lb_replica_private != 'N/A' else omit) }}" + replica_sync: "{{ (lb_replica_sync_public if database_public_access | default(false) | bool else lb_replica_sync_private) if synchronous_mode | bool else omit }}" + port: "{{ hetzner_load_balancer_port | default(database_port) }}" + superuser: "{{ superuser_username }}" + password: "{{ superuser_password }}" + ignore_errors: true + vars: + lb_primary_public: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-primary') | first).ipv4_address | default('N/A') }}" + lb_primary_private: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-primary') | first).private_ipv4_address | default('N/A') }}" + lb_replica_public: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-replica') | first).ipv4_address | default('N/A') }}" + lb_replica_private: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-replica') | first).private_ipv4_address | default('N/A') }}" + lb_replica_sync_public: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-sync') | first).ipv4_address | default('N/A') }}" + lb_replica_sync_private: "{{ (hostvars['localhost']['hetzner_load_balancer']['hcloud_load_balancer_info'] | selectattr('name', 'equalto', patroni_cluster_name + '-sync') | first).private_ipv4_address | default('N/A') }}" + database_port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" + superuser_username: "{{ patroni_superuser_username }}" + superuser_password: "{{ '********' if mask_password | default(false) | bool else patroni_superuser_password }}" + when: cloud_provider | default('') | lower == 'hetzner' and cloud_load_balancer | default(true) | bool + tags: conn_info, cluster_info, cluster_status diff --git a/automation/roles/etc_hosts/README.md b/automation/roles/etc_hosts/README.md new file mode 100644 index 000000000..d133738fd --- /dev/null +++ b/automation/roles/etc_hosts/README.md @@ -0,0 +1 @@ +# Ansible Role: etc_hosts diff --git a/roles/etc_hosts/tasks/main.yml b/automation/roles/etc_hosts/tasks/main.yml similarity index 73% rename from roles/etc_hosts/tasks/main.yml rename to automation/roles/etc_hosts/tasks/main.yml index bdfd780d1..66d3fa75c 100644 --- a/roles/etc_hosts/tasks/main.yml +++ b/automation/roles/etc_hosts/tasks/main.yml @@ -1,15 +1,12 @@ --- - - name: Add entries into /etc/hosts file - lineinfile: + ansible.builtin.lineinfile: path: /etc/hosts regexp: "^{{ item }}" line: "{{ item }}" - unsafe_writes: true # to prevent failures in CI + unsafe_writes: true # to prevent failures in CI loop: "{{ etc_hosts }}" when: - etc_hosts is defined - etc_hosts | length > 0 tags: etc_hosts - -... 
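+# A hypothetical inventory example (values are illustrative, not defaults):
+# etc_hosts:
+#   - "10.128.64.140 pgnode01"
+#   - "10.128.64.142 pgnode02"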
diff --git a/automation/roles/etcd/README.md b/automation/roles/etcd/README.md new file mode 100644 index 000000000..cb9cedcf7 --- /dev/null +++ b/automation/roles/etcd/README.md @@ -0,0 +1 @@ +# Ansible Role: etcd diff --git a/automation/roles/etcd/defaults/main.yml b/automation/roles/etcd/defaults/main.yml new file mode 100644 index 000000000..a873d0ef7 --- /dev/null +++ b/automation/roles/etcd/defaults/main.yml @@ -0,0 +1,10 @@ +--- +etcd_architecture_map: + amd64: amd64 + x86_64: amd64 + armv6l: armhfv6 + armv7l: armhfv6 + aarch64: arm64 + arm64: arm64 + 32-bit: "386" + 64-bit: amd64 diff --git a/automation/roles/etcd/tasks/main.yml b/automation/roles/etcd/tasks/main.yml new file mode 100644 index 000000000..aea7b24d3 --- /dev/null +++ b/automation/roles/etcd/tasks/main.yml @@ -0,0 +1,190 @@ +--- +- name: Make sure handlers are flushed immediately + ansible.builtin.meta: flush_handlers + +- name: Make sure the unzip/tar packages are present + ansible.builtin.package: + name: + - unzip + - tar + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + tags: etcd, etcd_install + +- block: # install etcd package from repo + - name: Download "etcd" package + ansible.builtin.get_url: + url: "{{ item }}" + dest: /tmp/ + timeout: 60 + validate_certs: false + loop: + - "{{ etcd_package_repo }}" + environment: "{{ proxy_env | default({}) }}" + + - name: Extract "etcd" into /tmp + ansible.builtin.unarchive: + src: "/tmp/{{ etcd_package_repo | basename }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + remote_src: true + + - name: Copy "etcd" and "etcdctl" binary files to /usr/local/bin/ + ansible.builtin.copy: + src: "/tmp/{{ etcd_package_repo.split('.tar.gz')[0] | basename }}/{{ item }}" + dest: /usr/local/bin/ + mode: u+x,g+x,o+x + remote_src: true + loop: + - etcd + - etcdctl + when: + - installation_method == "repo" + - etcd_package_repo | length > 0 + - not ansible_check_mode + tags: etcd, etcd_install + +- block: # install etcd package from file + - name: Extract "etcd" into /tmp + ansible.builtin.unarchive: + src: "{{ etcd_package_file }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + + - name: Copy "etcd" and "etcdctl" binary files to /usr/local/bin/ + ansible.builtin.copy: + src: "/tmp/{{ etcd_package_file.split('.tar.gz')[0] | basename }}/{{ item }}" + dest: /usr/local/bin/ + mode: u+x,g+x,o+x + remote_src: true + loop: + - etcd + - etcdctl + when: + - installation_method == "file" + - etcd_package_file | length > 0 + - not ansible_check_mode + tags: etcd, etcd_install + +- name: Add etcd user + ansible.builtin.user: + name: etcd + shell: /usr/sbin/nologin + home: "{{ etcd_data_dir }}" + tags: etcd, etcd_conf + +- name: Create etcd conf directory + ansible.builtin.file: + path: /etc/etcd + state: directory + tags: etcd, etcd_conf + +# TLS +- block: + # if 'etcd_on_dedicated_nodes' is 'false' + - name: Copy etcd TLS certificate, key and CA from the master node + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "postgres_cluster" + copy_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + copy_tls_owner: "etcd" + when: not etcd_on_dedicated_nodes | default(false) | bool + + # if 'etcd_on_dedicated_nodes' is 'true' + - name: Generate etcd TLS certificate + ansible.builtin.include_role: + name: ../roles/tls_certificate/generate + vars: + tls_group_name: "etcd_cluster" + generate_tls_owner: "etcd" + generate_tls_common_name: "etcd" + 
generate_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + tls_cert_regenerate: "{{ etcd_tls_cert_regenerate | default(false) }}" # Do not generate new certificates if they already exist. + when: etcd_on_dedicated_nodes | default(false) | bool + + - name: Copy etcd TLS files to all etcd nodes + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "etcd_cluster" + fetch_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + copy_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + copy_tls_owner: "etcd" + when: etcd_on_dedicated_nodes | default(false) | bool + when: tls_cert_generate | default(true) | bool + tags: etcd, etcd_conf + +- name: Create etcd data directory + ansible.builtin.file: + path: "{{ etcd_data_dir }}" + state: directory + owner: etcd + mode: "0700" + tags: etcd, etcd_conf + +- name: Generate conf file "/etc/etcd/etcd.conf" + ansible.builtin.template: + src: templates/etcd.conf.j2 + dest: /etc/etcd/etcd.conf + tags: etcd, etcd_conf + +- name: Copy systemd service file + ansible.builtin.template: + src: templates/etcd.service.j2 + dest: /etc/systemd/system/etcd.service + tags: etcd, etcd_conf + +- name: Enable and start etcd service + ansible.builtin.systemd: + daemon_reload: true + name: etcd + enabled: true + state: started + tags: etcd, etcd_start + +- name: Wait for port 2379 to become open on the host + ansible.builtin.wait_for: + port: 2379 + host: 127.0.0.1 + state: started + timeout: 120 + delay: 10 + ignore_errors: false + tags: etcd, etcd_start + +- block: + - name: Wait until the etcd cluster is healthy + ansible.builtin.command: > + /usr/local/bin/etcdctl endpoint health + --endpoints={{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2379 + {% if etcd_tls_enable | default(false) | bool %} + --cacert={{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_ca_crt | default('ca.crt') }} + --cert={{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_crt | default('server.crt') }} + --key={{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_key | default('server.key') }} + {% endif %} + environment: + ETCDCTL_API: "3" + register: etcd_health_result + until: > + 'is healthy' in etcd_health_result.stdout or + 'is healthy' in etcd_health_result.stderr + retries: 10 + delay: 10 + changed_when: false + ignore_errors: false + + - name: cluster health + ansible.builtin.debug: + msg: > + {{ etcd_health_result.stdout + if etcd_health_result.stdout | length > 0 + else etcd_health_result.stderr }} + when: not ansible_check_mode + tags: etcd, etcd_start, etcd_status diff --git a/automation/roles/etcd/templates/etcd.conf.j2 b/automation/roles/etcd/templates/etcd.conf.j2 new file mode 100644 index 000000000..e6486dbe1 --- /dev/null +++ b/automation/roles/etcd/templates/etcd.conf.j2 @@ -0,0 +1,24 @@ +ETCD_NAME="{{ ansible_hostname }}" +ETCD_LISTEN_CLIENT_URLS="{{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2379,{{ patroni_etcd_protocol | default('http', true) }}://127.0.0.1:2379" +ETCD_ADVERTISE_CLIENT_URLS="{{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2379" +ETCD_LISTEN_PEER_URLS="{{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2380" +ETCD_INITIAL_ADVERTISE_PEER_URLS="{{ patroni_etcd_protocol | default('http', true) }}://{{ inventory_hostname }}:2380" +ETCD_INITIAL_CLUSTER_TOKEN="{{ etcd_cluster_name }}" +ETCD_INITIAL_CLUSTER="{% for host in groups['etcd_cluster'] %}{{ 
hostvars[host]['ansible_hostname'] }}={{ patroni_etcd_protocol | default('http', true) }}://{{ hostvars[host]['inventory_hostname'] }}:2380{% if not loop.last %},{% endif %}{% endfor %}" +ETCD_INITIAL_CLUSTER_STATE="new" +ETCD_DATA_DIR="{{ etcd_data_dir }}" +ETCD_ELECTION_TIMEOUT="5000" +ETCD_HEARTBEAT_INTERVAL="1000" +ETCD_INITIAL_ELECTION_TICK_ADVANCE="false" +ETCD_AUTO_COMPACTION_RETENTION="1" +{% if etcd_tls_enable | default(false) | bool %} +ETCD_CERT_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_crt | default('server.crt') }}" +ETCD_KEY_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_key | default('server.key') }}" +ETCD_TRUSTED_CA_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_ca_crt | default('ca.crt') }}" +ETCD_PEER_CERT_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_crt | default('server.crt') }}" +ETCD_PEER_KEY_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_server_key | default('server.key') }}" +ETCD_PEER_TRUSTED_CA_FILE="{{ etcd_tls_dir | default('/etc/etcd/tls') }}/{{ etcd_tls_ca_crt | default('ca.crt') }}" +ETCD_PEER_CLIENT_CERT_AUTH="{{ etcd_peer_client_cert_auth | default("true") }}" +ETCD_CLIENT_CERT_AUTH="{{ etcd_client_cert_auth | default("true") }}" +ETCD_TLS_MIN_VERSION="TLS1.2" +{% endif %} diff --git a/roles/etcd/templates/etcd.service.j2 b/automation/roles/etcd/templates/etcd.service.j2 similarity index 81% rename from roles/etcd/templates/etcd.service.j2 rename to automation/roles/etcd/templates/etcd.service.j2 index 6ad807c66..5c2cf971f 100644 --- a/roles/etcd/templates/etcd.service.j2 +++ b/automation/roles/etcd/templates/etcd.service.j2 @@ -3,7 +3,7 @@ Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target - + [Service] Type=notify WorkingDirectory={{ etcd_data_dir }} @@ -13,9 +13,12 @@ User=etcd ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd" Restart=on-failure LimitNOFILE=65536 + +{% if ansible_virtualization_type not in ['container', 'docker', 'lxc', 'podman'] %} IOSchedulingClass=realtime IOSchedulingPriority=0 Nice=-20 - +{% endif %} + [Install] WantedBy=multi-user.target diff --git a/roles/ansible-role-firewall/.gitignore b/automation/roles/firewall/.gitignore similarity index 100% rename from roles/ansible-role-firewall/.gitignore rename to automation/roles/firewall/.gitignore diff --git a/roles/ansible-role-firewall/.travis.yml b/automation/roles/firewall/.travis.yml similarity index 100% rename from roles/ansible-role-firewall/.travis.yml rename to automation/roles/firewall/.travis.yml diff --git a/roles/ansible-role-firewall/.yamllint b/automation/roles/firewall/.yamllint similarity index 100% rename from roles/ansible-role-firewall/.yamllint rename to automation/roles/firewall/.yamllint diff --git a/roles/ansible-role-firewall/LICENSE b/automation/roles/firewall/LICENSE similarity index 100% rename from roles/ansible-role-firewall/LICENSE rename to automation/roles/firewall/LICENSE diff --git a/roles/ansible-role-firewall/README.md b/automation/roles/firewall/README.md similarity index 96% rename from roles/ansible-role-firewall/README.md rename to automation/roles/firewall/README.md index ac7dbdc14..28acfd30c 100644 --- a/roles/ansible-role-firewall/README.md +++ b/automation/roles/firewall/README.md @@ -48,7 +48,7 @@ Any additional (custom) rules to be added to the firewall (in the same format yo # Allow only the IP 167.89.89.18 to access port 4949 (Munin). 
firewall_additional_rules: - "iptables -A INPUT -p tcp --dport 4949 -s 167.89.89.18 -j ACCEPT" - + # Allow only the IP 214.192.48.21 to access port 3306 (MySQL). firewall_additional_rules: - "iptables -A INPUT -p tcp --dport 3306 -s 214.192.48.21 -j ACCEPT" @@ -80,7 +80,7 @@ None. roles: - { role: geerlingguy.firewall } -*Inside `vars/main.yml`*: +_Inside `vars/main.yml`_: firewall_allowed_tcp_ports: - "22" @@ -89,8 +89,8 @@ None. ## TODO - - Make outgoing ports more configurable. - - Make other firewall features (like logging) configurable. +- Make outgoing ports more configurable. +- Make other firewall features (like logging) configurable. ## License diff --git a/roles/ansible-role-firewall/defaults/main.yml b/automation/roles/firewall/defaults/main.yml similarity index 100% rename from roles/ansible-role-firewall/defaults/main.yml rename to automation/roles/firewall/defaults/main.yml diff --git a/automation/roles/firewall/handlers/main.yml b/automation/roles/firewall/handlers/main.yml new file mode 100644 index 000000000..f94b6656d --- /dev/null +++ b/automation/roles/firewall/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart firewall + ansible.builtin.service: + name: firewall + state: restarted diff --git a/roles/ansible-role-firewall/tasks/disable-other-firewalls.yml b/automation/roles/firewall/tasks/disable-other-firewalls.yml similarity index 85% rename from roles/ansible-role-firewall/tasks/disable-other-firewalls.yml rename to automation/roles/firewall/tasks/disable-other-firewalls.yml index 3c381d8c4..63fbf6268 100644 --- a/roles/ansible-role-firewall/tasks/disable-other-firewalls.yml +++ b/automation/roles/firewall/tasks/disable-other-firewalls.yml @@ -1,8 +1,6 @@ --- - name: Check if firewalld package is installed (on RHEL). - command: yum list installed firewalld - args: - warn: false + ansible.builtin.command: yum list installed firewalld register: firewalld_installed failed_when: false changed_when: false @@ -12,7 +10,7 @@ check_mode: false - name: Disable the firewalld service (on RHEL, if configured). - service: + ansible.builtin.service: name: firewalld state: stopped enabled: false @@ -22,9 +20,7 @@ - firewalld_installed.rc == 0 - name: Check if ufw package is installed (on Ubuntu). - command: service ufw status - args: - warn: false + ansible.builtin.command: service ufw status register: ufw_installed failed_when: false changed_when: false @@ -34,7 +30,7 @@ check_mode: false - name: Disable the ufw firewall (on Ubuntu, if configured). - service: + ansible.builtin.service: name: ufw state: stopped enabled: false @@ -44,9 +40,7 @@ - ufw_installed.rc == 0 - name: Check if ufw package is installed (on Archlinux). - command: pacman -Q ufw - args: - warn: false + ansible.builtin.command: pacman -Q ufw register: ufw_installed ignore_errors: true changed_when: false @@ -56,7 +50,7 @@ check_mode: false - name: Disable the ufw firewall (on Archlinux, if configured). - service: + ansible.builtin.service: name: ufw state: stopped enabled: false @@ -64,5 +58,3 @@ - ansible_distribution == "Archlinux" - firewall_disable_ufw - ufw_installed.rc == 0 - -... 
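Pulling the README fragments above together, here is a hedged sketch of how the firewall role might be parameterized for a database node. The firewall_state and firewall_enabled_at_boot variables are consumed by tasks/main.yml below; every port and subnet value in the sketch is illustrative rather than a default shipped with this role:

firewall_state: started
firewall_enabled_at_boot: true
firewall_allowed_tcp_ports:
  - "22"   # SSH
  - "5432" # PostgreSQL (illustrative)
firewall_additional_rules:
  # Allow a trusted subnet to reach the Patroni REST API (port and subnet are illustrative).
  - "iptables -A INPUT -p tcp --dport 8008 -s 10.0.1.0/24 -j ACCEPT"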
diff --git a/roles/ansible-role-firewall/tasks/main.yml b/automation/roles/firewall/tasks/main.yml similarity index 66% rename from roles/ansible-role-firewall/tasks/main.yml rename to automation/roles/firewall/tasks/main.yml index 275423036..6461b5389 100644 --- a/roles/ansible-role-firewall/tasks/main.yml +++ b/automation/roles/firewall/tasks/main.yml @@ -1,74 +1,76 @@ --- -# yamllint disable rule:line-length - - name: Ensure iptables is present. - package: name=iptables state=present + ansible.builtin.package: + name: iptables + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 - name: Flush iptables the first time playbook runs. - command: > - iptables -F - creates=/etc/firewall.bash + ansible.builtin.command: + cmd: iptables -F + creates: /etc/firewall.bash - name: Copy firewall script into place. - template: + ansible.builtin.template: src: firewall.bash.j2 dest: /etc/firewall.bash owner: root group: root - mode: 0744 + mode: "0744" notify: restart firewall - name: Copy firewall init script into place. - template: + ansible.builtin.template: src: firewall.init.j2 dest: /etc/init.d/firewall owner: root group: root - mode: 0755 + mode: "0755" when: "ansible_service_mgr != 'systemd'" - name: Copy firewall systemd unit file into place (for systemd systems). - template: + ansible.builtin.template: src: firewall.unit.j2 dest: /etc/systemd/system/firewall.service owner: root group: root - mode: 0644 + mode: "0644" when: "ansible_service_mgr == 'systemd'" # prevents firewall initialization from severing the SSH connection - block: - name: Load the nf_conntrack_ipv4 module - modprobe: + community.general.modprobe: name: nf_conntrack_ipv4 state: present when: ansible_kernel is version('4.19', '<') and - not (ansible_os_family == "RedHat" and ansible_distribution_version is version('8.3', '>=')) + not (ansible_os_family == "RedHat" and ansible_distribution_version is version('8.3', '>=')) - name: Load the nf_conntrack module - modprobe: + community.general.modprobe: name: nf_conntrack state: present when: ansible_kernel is version('4.19', '>=') or - (ansible_os_family == "RedHat" and ansible_distribution_version is version('8.3', '>=')) + (ansible_os_family == "RedHat" and ansible_distribution_version is version('8.3', '>=')) - name: sysctl | keep connections alive when enabling the firewall - sysctl: + ansible.posix.sysctl: name: net.netfilter.nf_conntrack_tcp_be_liberal value: "1" state: present sysctl_set: true reload: true - failed_when: false # to prevent failures in CI + failed_when: false # to prevent failures in CI ignore_errors: true - name: Configure the firewall service. - service: + ansible.builtin.service: name: firewall state: "{{ firewall_state }}" enabled: "{{ firewall_enabled_at_boot }}" -- import_tasks: disable-other-firewalls.yml +- ansible.builtin.import_tasks: disable-other-firewalls.yml when: firewall_disable_firewalld|bool or firewall_disable_ufw|bool - -... 
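The conntrack block above exists because flushing iptables mid-play can sever the controller's established SSH session; with net.netfilter.nf_conntrack_tcp_be_liberal set to 1, conntrack re-adopts packets from connections it lost track of during the flush. A small verification task that could be appended after the firewall restart; this is a sketch, not part of the role:

- name: Verify that nf_conntrack_tcp_be_liberal took effect
  ansible.builtin.command: sysctl -n net.netfilter.nf_conntrack_tcp_be_liberal
  register: be_liberal
  changed_when: false
  failed_when: be_liberal.stdout | trim != "1"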
diff --git a/roles/ansible-role-firewall/templates/firewall.bash.j2 b/automation/roles/firewall/templates/firewall.bash.j2 similarity index 100% rename from roles/ansible-role-firewall/templates/firewall.bash.j2 rename to automation/roles/firewall/templates/firewall.bash.j2 diff --git a/roles/ansible-role-firewall/templates/firewall.init.j2 b/automation/roles/firewall/templates/firewall.init.j2 similarity index 100% rename from roles/ansible-role-firewall/templates/firewall.init.j2 rename to automation/roles/firewall/templates/firewall.init.j2 diff --git a/roles/ansible-role-firewall/templates/firewall.unit.j2 b/automation/roles/firewall/templates/firewall.unit.j2 similarity index 100% rename from roles/ansible-role-firewall/templates/firewall.unit.j2 rename to automation/roles/firewall/templates/firewall.unit.j2 diff --git a/automation/roles/haproxy/README.md b/automation/roles/haproxy/README.md new file mode 100644 index 000000000..ef920849c --- /dev/null +++ b/automation/roles/haproxy/README.md @@ -0,0 +1 @@ +# Ansible Role: haproxy diff --git a/roles/haproxy/handlers/main.yml b/automation/roles/haproxy/handlers/main.yml similarity index 78% rename from roles/haproxy/handlers/main.yml rename to automation/roles/haproxy/handlers/main.yml index a883441a3..e013261ba 100644 --- a/roles/haproxy/handlers/main.yml +++ b/automation/roles/haproxy/handlers/main.yml @@ -1,7 +1,6 @@ --- - - name: Restart haproxy service - systemd: + ansible.builtin.systemd: daemon_reload: true name: haproxy enabled: true @@ -9,13 +8,11 @@ listen: "restart haproxy" - name: Check HAProxy is started and accepting connections - wait_for: + ansible.builtin.wait_for: port: "{{ haproxy_listen_port.stats }}" - host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" + host: "{{ inventory_hostname }}" state: started timeout: 120 delay: 10 ignore_errors: false listen: "restart haproxy" - -... 
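The handler above reads haproxy_listen_port.stats, and the tasks and haproxy.cfg.j2 template that follow expect the same dictionary to carry master, replicas, replicas_sync and replicas_async ports (plus optional *_direct keys), alongside haproxy_maxconn and haproxy_timeout dictionaries. A sketch of the expected shape; the numbers are placeholders, since the real defaults live in the inventory defaults rather than in this diff:

haproxy_listen_port:
  master: 5000
  replicas: 5001
  replicas_sync: 5002
  replicas_async: 5003
  stats: 7000
haproxy_maxconn:
  global: 100000
  master: 10000
  replica: 10000
haproxy_timeout:
  client: 60m
  server: 60m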
diff --git a/automation/roles/haproxy/tasks/main.yml b/automation/roles/haproxy/tasks/main.yml new file mode 100644 index 000000000..6d5d207f8 --- /dev/null +++ b/automation/roles/haproxy/tasks/main.yml @@ -0,0 +1,392 @@ +--- +- name: Gather facts from postgres_cluster hosts + ansible.builtin.setup: + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups['postgres_cluster'] }}" + when: hostvars[groups['postgres_cluster'][0]].ansible_hostname is not defined + +# Install HAProxy from rpm/deb packages + +# RedHat +- name: Install HAProxy package + ansible.builtin.package: + name: haproxy + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - ansible_os_family == "RedHat" + - installation_method == "repo" + - haproxy_installation_method == "rpm" + tags: haproxy, load_balancing + +# Debian +- name: Install HAProxy package + ansible.builtin.apt: + name: haproxy + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - ansible_os_family == "Debian" + - installation_method == "repo" + - haproxy_installation_method == "deb" + tags: haproxy, load_balancing + +# from file (rpm/deb packages) +- block: + - name: Copy packages into /tmp + ansible.builtin.copy: + src: "{{ item }}" + dest: /tmp/ + loop: "{{ haproxy_package_file }}" + register: copy_packages_result + + - name: Install packages + ansible.builtin.apt: + force_apt_get: true + deb: "/tmp/{{ item }}" + state: present + loop: "{{ haproxy_package_file | map('basename') | list }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and copy_packages_result.changed + + - name: Install packages + ansible.builtin.package: + name: "/tmp/{{ item }}" + state: present + loop: "{{ haproxy_package_file | map('basename') | list }}" + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "RedHat" and copy_packages_result.changed + when: haproxy_package_file is defined and haproxy_package_file | length > 0 + tags: haproxy, load_balancing + +# Build and install HAProxy from source +- name: Setting facts + ansible.builtin.set_fact: + target_linux: "{% if haproxy_major is version('2.0', '>=') %}linux-glibc{% else %}linux2628{% endif %}" + when: haproxy_installation_method == "src" + tags: haproxy, load_balancing + +# from repo +- block: + - name: "Download HAProxy and lua source files" + ansible.builtin.get_url: + url: "{{ item }}" + dest: /tmp/ + timeout: 120 + validate_certs: false + loop: + - "{{ haproxy_src_repo }}" + - "{{ lua_src_repo }}" + environment: "{{ proxy_env | default({}) }}" + + - name: "Extract HAProxy source files into /tmp" + ansible.builtin.unarchive: + src: "/tmp/{{ haproxy_src_repo | basename }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + remote_src: true + when: haproxy_src_repo | length > 0 + + - name: "Extract lua source files into /tmp" + ansible.builtin.unarchive: + src: "/tmp/{{ lua_src_repo | basename }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + remote_src: true + when: lua_src_repo | length > 0 + tags: lua + when: installation_method == "repo" and haproxy_installation_method == "src" + tags: haproxy, load_balancing + +# from file +- block: + - name: "Extract HAProxy source files into /tmp" + ansible.builtin.unarchive: + src: "{{ haproxy_src_file }}" + dest: /tmp/ + extra_opts: + -
--no-same-owner + when: haproxy_src_file | length > 0 + + - name: "Extract lua source files into /tmp" + ansible.builtin.unarchive: + src: "{{ lua_src_file }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + when: lua_src_file | length > 0 + tags: lua + when: installation_method == "file" and haproxy_installation_method == "src" + tags: haproxy, load_balancing + +- name: Install the prerequisite packages to compile HAProxy + ansible.builtin.package: + name: "{{ haproxy_compile_requirements }}" + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: haproxy_installation_method == "src" + tags: haproxy, haproxy_requirements, load_balancing + +- block: + - name: Build and install lua (required for haproxy) + become: true + become_user: root + ansible.builtin.shell: "make INSTALL_TOP=/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }} linux install" + args: + chdir: "/tmp/{{ lua_src_repo.split('.tar.gz')[0] | basename }}" + tags: lua + + - name: Build HAProxy + become: true + become_user: root + community.general.make: + chdir: "/tmp/{{ haproxy_src_repo.split('.tar.gz')[0] | basename }}" + params: + TARGET: "{{ target_linux }}" + USE_GETADDRINFO: 1 + USE_ZLIB: 1 + USE_REGPARM: 1 + USE_OPENSSL: 1 + USE_LIBCRYPT: 1 + USE_SYSTEMD: 1 + USE_PCRE: 1 + USE_NS: 1 + USE_TFO: 1 + USE_LUA: 1 + LUA_INC: "/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }}/include" + LUA_LIB: "/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }}/lib" + + - name: Install HAProxy + become: true + become_user: root + community.general.make: + chdir: "/tmp/{{ haproxy_src_repo.split('.tar.gz')[0] | basename }}" + target: install + when: installation_method == "repo" and haproxy_installation_method == "src" + tags: haproxy, load_balancing + +# installation_method: "file" +- block: + - name: Build and install lua (required for haproxy) + become: true + become_user: root + ansible.builtin.shell: "make INSTALL_TOP=/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }} linux install" + args: + chdir: "/tmp/{{ lua_src_file.split('.tar.gz')[0] | basename }}" + tags: lua + + - name: Build HAProxy + become: true + become_user: root + community.general.make: + chdir: "/tmp/{{ haproxy_src_file.split('.tar.gz')[0] | basename }}" + params: + TARGET: "{{ target_linux }}" + USE_GETADDRINFO: 1 + USE_ZLIB: 1 + USE_REGPARM: 1 + USE_OPENSSL: 1 + USE_LIBCRYPT: 1 + USE_SYSTEMD: 1 + USE_PCRE: 1 + USE_NS: 1 + USE_TFO: 1 + USE_LUA: 1 + LUA_INC: "/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }}/include" + LUA_LIB: "/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }}/lib" + + - name: Install HAProxy + become: true + become_user: root + community.general.make: + chdir: "/tmp/{{ haproxy_src_file.split('.tar.gz')[0] | basename }}" + target: install + when: installation_method == "file" and haproxy_installation_method == "src" + tags: haproxy, load_balancing + +# Configure +- name: Make sure the kernel parameter "net.ipv4.ip_nonlocal_bind" is enabled + ansible.posix.sysctl: + name: "net.ipv4.ip_nonlocal_bind" + value: "1" + sysctl_set: true + state: present + reload: true + ignore_errors: true # to prevent test failures in CI + tags: haproxy, load_balancing + +- name: Add haproxy group + ansible.builtin.group: + name: haproxy + state: present + tags: haproxy, load_balancing + +- name: Add haproxy user + ansible.builtin.user: + name: haproxy + comment: "HAProxy user" + group: haproxy + shell: /usr/sbin/nologin + tags: haproxy,
load_balancing + +- name: Create directories + ansible.builtin.file: + dest: "{{ item }}" + state: directory + owner: haproxy + group: haproxy + loop: + - /etc/haproxy + - /run/haproxy + - /var/lib/haproxy/dev + tags: haproxy, load_balancing + +- name: Generate conf file "/etc/haproxy/haproxy.cfg" + ansible.builtin.template: + src: templates/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + owner: haproxy + group: haproxy + notify: "restart haproxy" + when: (add_balancer is not defined or not add_balancer|bool) and + (postgresql_cluster_maintenance is not defined or not postgresql_cluster_maintenance|bool) + tags: haproxy, haproxy_conf, load_balancing + +- name: Generate systemd service file "/etc/systemd/system/haproxy.service" + ansible.builtin.template: + src: templates/haproxy.service.j2 + dest: /etc/systemd/system/haproxy.service + owner: haproxy + group: haproxy + notify: "restart haproxy" + tags: haproxy, haproxy_service, load_balancing + +- block: # for add_balancer.yml + - name: "Fetch haproxy.cfg file from {{ groups.balancers[0] }}" + run_once: true + ansible.builtin.fetch: + src: /etc/haproxy/haproxy.cfg + dest: files/haproxy.cfg + validate_checksum: true + flat: true + notify: "restart haproxy" + delegate_to: "{{ groups.balancers[0] }}" + + - name: Copy haproxy.cfg file to replica + ansible.builtin.copy: + src: files/haproxy.cfg + dest: /etc/haproxy/haproxy.cfg + owner: haproxy + group: haproxy + notify: "restart haproxy" + + - name: Remove haproxy.cfg file from localhost + become: false + run_once: true + ansible.builtin.file: + path: files/haproxy.cfg + state: absent + delegate_to: localhost + + - name: Prepare haproxy.cfg conf file (replace "bind") + ansible.builtin.lineinfile: + path: /etc/haproxy/haproxy.cfg + regexp: "{{ bind_config_without_vip_item.regexp }}" + line: "{{ bind_config_without_vip_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: bind_config_without_vip_item + label: "{{ bind_config_without_vip_item.line }}" + notify: "restart haproxy" + when: cluster_vip is not defined or cluster_vip | length < 1 + + - name: Prepare haproxy.cfg conf file (replace "bind" for stats) + ansible.builtin.lineinfile: + path: /etc/haproxy/haproxy.cfg + regexp: "{{ bind_config_with_vip_item.regexp }}" + line: "{{ bind_config_with_vip_item.line }}" + backrefs: true + loop: + - regexp: "^.*bind.*:{{ haproxy_listen_port.stats }}$" + line: " bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.master }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas }}$" + line: " bind {{ cluster_vip }}:{{ 
haproxy_listen_port.replicas }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}" + - regexp: "^.*bind.*:{{ haproxy_listen_port.replicas_async }}$" + line: " bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}" + loop_control: + loop_var: bind_config_with_vip_item + label: "{{ bind_config_with_vip_item.line }}" + notify: "restart haproxy" + when: cluster_vip is defined and cluster_vip | length > 0 + when: add_balancer is defined and add_balancer|bool + tags: haproxy, haproxy_conf, load_balancing + +- block: + - name: Selinux | Make sure the python3-libsemanage, python3-policycoreutils packages are present + ansible.builtin.package: + name: "{{ packages }}" + state: present + update_cache: true + vars: + packages: + - python3-libsemanage + - python3-policycoreutils + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - ansible_os_family == "RedHat" + - installation_method == "repo" + - haproxy_installation_method == "rpm" + + - name: Selinux | Set the haproxy_connect_any flag to enable TCP connections + ansible.posix.seboolean: + name: haproxy_connect_any + state: true + persistent: true + + - name: Selinux | Change the haproxy_t domain to permissive + community.general.selinux_permissive: + name: haproxy_t + permissive: true + when: ansible_selinux.status is defined and ansible_selinux.status == 'enabled' + ignore_errors: true + tags: haproxy, load_balancing, haproxy_selinux diff --git a/automation/roles/haproxy/templates/haproxy.cfg.j2 b/automation/roles/haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 000000000..d7299e347 --- /dev/null +++ b/automation/roles/haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,209 @@ +global + maxconn {{ haproxy_maxconn.global }} + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + +defaults + mode tcp + log global + option tcplog +{% if haproxy_log_format is defined %} + log-format '{{ haproxy_log_format }}' +{% endif %} + retries 2 + timeout queue 5s + timeout connect 5s + timeout client {{ haproxy_timeout.client }} + timeout server {{ haproxy_timeout.server }} + timeout check 15s + +listen stats + mode http + bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }} + stats enable + stats uri / + +listen master +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option httpchk OPTIONS /primary + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +{% if pgbouncer_install|bool
and haproxy_listen_port.master_direct is defined %} +listen master_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option httpchk OPTIONS /primary + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +listen replicas +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.replicas_direct is defined %} +listen replicas_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /replica?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +listen replicas_sync +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /sync{{ '?' 
+ balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /sync + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.replicas_sync_direct is defined %} +listen replicas_sync_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /sync{{ '?' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /sync + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +listen replicas_async +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions +{% if pgbouncer_install|bool %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} +{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + +{% if pgbouncer_install|bool and haproxy_listen_port.replicas_async_direct is defined %} +listen replicas_async_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ 
haproxy_listen_port.replicas_async_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + {% if balancer_tags | default('') | length > 0 %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }}{{ '&' + balancer_tags.split(',') | map('trim') | map('regex_replace', '([^=]+)=(.*)', 'tag_\\1=\\2') | join('&') }} + {% else %} + option httpchk OPTIONS /async?lag={{ patroni_maximum_lag_on_replica }} + {% endif %} + balance roundrobin + http-check expect status 200 + default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions + {% for host in groups['postgres_cluster'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port {{ patroni_restapi_port }} + {% endfor %} +{% endif %} + diff --git a/roles/haproxy/templates/haproxy.service.j2 b/automation/roles/haproxy/templates/haproxy.service.j2 similarity index 91% rename from roles/haproxy/templates/haproxy.service.j2 rename to automation/roles/haproxy/templates/haproxy.service.j2 index 66d3f753d..1b4fb0a81 100644 --- a/roles/haproxy/templates/haproxy.service.j2 +++ b/automation/roles/haproxy/templates/haproxy.service.j2 @@ -3,8 +3,8 @@ Description=HAProxy Load Balancer After=network.target [Service] -Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/var/run/haproxy/haproxy.pid" -ExecStartPre=/bin/mkdir -p /var/run/haproxy +Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy/haproxy.pid" +ExecStartPre=/bin/mkdir -p /run/haproxy {% if haproxy_installation_method == "src" %} ExecStartPre=/usr/local/sbin/haproxy -f $CONFIG -c -q ExecStart=/usr/local/sbin/haproxy -Ws -f $CONFIG -p $PIDFILE diff --git a/automation/roles/hostname/README.md b/automation/roles/hostname/README.md new file mode 100644 index 000000000..fcc69a5e9 --- /dev/null +++ b/automation/roles/hostname/README.md @@ -0,0 +1 @@ +# Ansible Role: hostname diff --git a/roles/hostname/tasks/main.yml b/automation/roles/hostname/tasks/main.yml similarity index 52% rename from roles/hostname/tasks/main.yml rename to automation/roles/hostname/tasks/main.yml index 3e3c63e48..39501d90a 100644 --- a/roles/hostname/tasks/main.yml +++ b/automation/roles/hostname/tasks/main.yml @@ -1,22 +1,17 @@ --- -# yamllint disable rule:line-length - - block: - name: Change hostname - hostname: + ansible.builtin.hostname: name: "{{ hostname }}" - name: Change hostname in /etc/hosts - lineinfile: + ansible.builtin.lineinfile: dest: /etc/hosts regexp: "{{ item.regexp }}" line: "{{ item.line }}" state: present no_log: true loop: - - {regexp: '^127\.0\.0\.1[ \t]+localhost', line: '127.0.0.1 localhost {{ ansible_hostname }}'} - - {regexp: '^{{ ansible_default_ipv4.address }}.+$', line: '{{ ansible_default_ipv4.address }} {{ ansible_hostname }}'} + - { regexp: '^127\.0\.0\.1[ \t]+localhost', line: "127.0.0.1 localhost {{ ansible_hostname }}" } when: hostname is defined and hostname | length > 0 tags: hostname - -... 
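The hostname role above takes a single hostname variable per host and, after this change, rewrites only the 127.0.0.1 line of /etc/hosts instead of also pinning the default IPv4 address. A sketch of the per-host inventory input it consumes, with illustrative addresses and names:

all:
  hosts:
    10.0.1.1:
      hostname: pgnode01
    10.0.1.2:
      hostname: pgnode02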
diff --git a/automation/roles/io_scheduler/README.md b/automation/roles/io_scheduler/README.md new file mode 100644 index 000000000..c4a7301ce --- /dev/null +++ b/automation/roles/io_scheduler/README.md @@ -0,0 +1 @@ +# Ansible Role: io_scheduler diff --git a/roles/io-scheduler/handlers/main.yml b/automation/roles/io_scheduler/handlers/main.yml similarity index 85% rename from roles/io-scheduler/handlers/main.yml rename to automation/roles/io_scheduler/handlers/main.yml index f2717d7c7..18145b145 100644 --- a/roles/io-scheduler/handlers/main.yml +++ b/automation/roles/io_scheduler/handlers/main.yml @@ -1,11 +1,8 @@ --- - - name: Start io-scheduler service - systemd: + ansible.builtin.systemd: daemon_reload: true name: io-scheduler state: restarted enabled: true listen: "restart io-scheduler" - -... diff --git a/roles/io-scheduler/tasks/main.yml b/automation/roles/io_scheduler/tasks/main.yml similarity index 82% rename from roles/io-scheduler/tasks/main.yml rename to automation/roles/io_scheduler/tasks/main.yml index e91e9365c..8570b34eb 100644 --- a/roles/io-scheduler/tasks/main.yml +++ b/automation/roles/io_scheduler/tasks/main.yml @@ -1,7 +1,6 @@ --- - - name: Create systemd unit file io-scheduler.service - template: + ansible.builtin.template: src: templates/io-scheduler.service.j2 dest: /etc/systemd/system/io-scheduler.service notify: "restart io-scheduler" @@ -9,6 +8,4 @@ tags: scheduler, io_scheduler - name: Make sure handlers are flushed immediately - meta: flush_handlers - -... + ansible.builtin.meta: flush_handlers diff --git a/roles/io-scheduler/templates/io-scheduler.service.j2 b/automation/roles/io_scheduler/templates/io-scheduler.service.j2 similarity index 100% rename from roles/io-scheduler/templates/io-scheduler.service.j2 rename to automation/roles/io_scheduler/templates/io-scheduler.service.j2 diff --git a/automation/roles/keepalived/README.md b/automation/roles/keepalived/README.md new file mode 100644 index 000000000..f3bda7e06 --- /dev/null +++ b/automation/roles/keepalived/README.md @@ -0,0 +1 @@ +# Ansible Role: keepalived diff --git a/automation/roles/keepalived/defaults/main.yml b/automation/roles/keepalived/defaults/main.yml new file mode 100644 index 000000000..60fb6d0d6 --- /dev/null +++ b/automation/roles/keepalived/defaults/main.yml @@ -0,0 +1,14 @@ +--- +keepalived_instances: + - name: VI_1 + state: BACKUP + interface: "{{ vip_interface }}" + virtual_router_id: "{{ keepalived_virtual_router_id | default(123) }}" + priority: 100 + advert_int: 2 + check_status_command: /usr/libexec/keepalived/haproxy_check.sh + authentication: + auth_type: PASS + auth_pass: "1ce24b6e" + virtual_ipaddresses: + - "{{ cluster_vip }}" diff --git a/automation/roles/keepalived/handlers/main.yml b/automation/roles/keepalived/handlers/main.yml new file mode 100644 index 000000000..03292a4f4 --- /dev/null +++ b/automation/roles/keepalived/handlers/main.yml @@ -0,0 +1,30 @@ +--- +- name: Restart keepalived service + ansible.builtin.systemd: + daemon_reload: true + name: keepalived + enabled: true + state: restarted + listen: "restart keepalived" + +# This task checks the cluster VIP's availability, selecting the appropriate port: +# - during maintenance, it targets the database through PgBouncer or directly. +# - otherwise, it defaults to SSH, applicable during deployment when the database is not yet available. 
+- name: Wait for the cluster IP address (VIP) "{{ cluster_vip }}" to become available + ansible.builtin.wait_for: + host: "{{ cluster_vip }}" + port: "{{ target_port }}" + state: started + timeout: 15 # max wait time: 15 seconds + delay: 2 + vars: + target_port: >- + {{ + (postgresql_cluster_maintenance | default(false) | bool) | + ternary( + pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port, + ansible_ssh_port | default(22) + ) + }} + ignore_errors: true # noqa ignore-errors # show the error and continue the playbook execution + listen: "restart keepalived" diff --git a/roles/keepalived/tasks/main.yml b/automation/roles/keepalived/tasks/main.yml similarity index 63% rename from roles/keepalived/tasks/main.yml rename to automation/roles/keepalived/tasks/main.yml index 3df42dc67..a79ae1f92 100644 --- a/roles/keepalived/tasks/main.yml +++ b/automation/roles/keepalived/tasks/main.yml @@ -1,13 +1,16 @@ --- - - name: Install keepalived packages - package: + ansible.builtin.package: name: keepalived + register: package_status + until: package_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" tags: keepalived_install, keepalived -- name: Make sure the kernel parameters "net.ipv4.ip_nonlocal_bind", "net.ipv4.ip_forward" are enabled # yamllint disable rule:line-length - sysctl: +- name: Make sure the kernel parameters "net.ipv4.ip_nonlocal_bind", "net.ipv4.ip_forward" are enabled + ansible.posix.sysctl: name: "{{ item }}" value: "1" sysctl_set: true @@ -19,70 +22,80 @@ tags: keepalived_conf, keepalived - name: Make sure the "/usr/libexec/keepalived" directory exists - file: + ansible.builtin.file: dest: /usr/libexec/keepalived state: directory owner: root group: root + mode: "0750" tags: keepalived_conf, keepalived -- name: Create vrrp_script "/usr/libexec/keepalived/haproxy_check.sh" # yamllint disable rule:line-length - copy: +- name: Create vrrp_script "/usr/libexec/keepalived/haproxy_check.sh" + ansible.builtin.copy: content: | #!/bin/bash - /bin/kill -0 `cat /var/run/haproxy/haproxy.pid` + /bin/kill -0 `cat /run/haproxy/haproxy.pid` dest: /usr/libexec/keepalived/haproxy_check.sh owner: root group: root - mode: 0700 + mode: "0700" notify: "restart keepalived" tags: keepalived_conf, keepalived - name: Generate conf file "/etc/keepalived/keepalived.conf" - template: + ansible.builtin.template: src: templates/keepalived.conf.j2 dest: /etc/keepalived/keepalived.conf + owner: root + group: root + mode: "0644" notify: "restart keepalived" when: add_balancer is not defined or not add_balancer|bool tags: keepalived_conf, keepalived -- block: # for add_balancer.yml - - name: Fetch keepalived.conf conf file from master +- block: # for add_balancer.yml + - name: "Fetch keepalived.conf conf file from {{ groups.balancers[0] }}" run_once: true - fetch: + ansible.builtin.fetch: src: /etc/keepalived/keepalived.conf dest: files/keepalived.conf validate_checksum: true flat: true - delegate_to: "{{ groups.master[0] }}" + delegate_to: "{{ groups.balancers[0] }}" - name: Copy keepalived.conf conf file to replica - copy: + ansible.builtin.copy: src: files/keepalived.conf dest: /etc/keepalived/keepalived.conf notify: "restart keepalived" + - name: Remove keepalived.conf file from localhost + become: false + run_once: true + ansible.builtin.file: + path: files/keepalived.conf + state: absent + delegate_to: localhost + - name: Prepare keepalived.conf conf file (replace "interface") - lineinfile: + ansible.builtin.lineinfile: path: /etc/keepalived/keepalived.conf regexp:
"{{ item.regexp }}" line: "{{ item.line }}" backrefs: true loop: - - {regexp: '^.*interface', line: ' interface {{ vip_interface }}'} + - { regexp: "^.*interface", line: " interface {{ vip_interface }}" } loop_control: label: "{{ item.line }}" notify: "restart keepalived" when: add_balancer is defined and add_balancer|bool tags: keepalived_conf, keepalived -- name: selinux | change the keepalived_t domain to permissive - selinux_permissive: +- name: Selinux | Change the keepalived_t domain to permissive + community.general.selinux_permissive: name: keepalived_t permissive: true when: ansible_selinux.status is defined and - ansible_selinux.status == 'enabled' - ignore_errors: true + ansible_selinux.status == 'enabled' + ignore_errors: true # noqa ignore-errors tags: keepalived, keepalived_selinux - -... diff --git a/automation/roles/keepalived/templates/keepalived.conf.j2 b/automation/roles/keepalived/templates/keepalived.conf.j2 new file mode 100644 index 000000000..80d524e9e --- /dev/null +++ b/automation/roles/keepalived/templates/keepalived.conf.j2 @@ -0,0 +1,37 @@ +global_defs { + router_id ocp_vrrp + enable_script_security + script_user root +} + +{% for instance in keepalived_instances %} +{% if instance.check_status_command is defined %} +vrrp_script chk_command_{{ instance.virtual_router_id }} { + script "{{ instance.check_status_command }}" + interval 2 + weight 2 +} +{% endif %} + +vrrp_instance {{ instance.name }} { + interface {{ instance.interface }} + virtual_router_id {{ instance.virtual_router_id }} + priority {{ instance.priority }} + advert_int {{ instance.advert_int }} + state {{ instance.state }} + virtual_ipaddress { + {% for ip in instance.virtual_ipaddresses %} + {{ ip }} + {% endfor %} + } + {% if instance.check_status_command is defined %} + track_script { + chk_command_{{ instance.virtual_router_id }} + } + {% endif %} + authentication { + auth_type {{ instance.authentication.auth_type }} + auth_pass {{ instance.authentication.auth_pass }} + } +} +{% endfor %} diff --git a/automation/roles/locales/README.md b/automation/roles/locales/README.md new file mode 100644 index 000000000..0c16c19bf --- /dev/null +++ b/automation/roles/locales/README.md @@ -0,0 +1 @@ +# Ansible Role: locales diff --git a/roles/locales/tasks/main.yml b/automation/roles/locales/tasks/main.yml similarity index 55% rename from roles/locales/tasks/main.yml rename to automation/roles/locales/tasks/main.yml index 8d9d0bfe3..29d7fc18e 100644 --- a/roles/locales/tasks/main.yml +++ b/automation/roles/locales/tasks/main.yml @@ -1,98 +1,76 @@ --- - -# for OracleLinux 7 -- name: Reinstall glibc-common to avoid problem when generating locales - command: yum reinstall glibc-common -y - args: - warn: false - environment: "{{ proxy_env | default({}) }}" - when: (ansible_distribution == "OracleLinux" and - ansible_distribution_major_version == '7') - - block: + # Debian - name: Generate locales become: true - locale_gen: + community.general.locale_gen: name: "{{ item.language_country }}.{{ item.encoding }}" state: present loop: "{{ locale_gen | flatten(1) }}" when: ansible_os_family == "Debian" - - name: Generate locales - become: true - command: > - localedef -c - -i {{ item.language_country }} - -f {{ item.encoding }} {{ item.language_country }}.{{ item.encoding }} - changed_when: false - loop: "{{ locale_gen | flatten(1) }}" - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version == '7' - + # RedHat - name: Install glibc-langpack - dnf: + ansible.builtin.dnf: name: "{{ item }}" 
loop: "{{ glibc_langpack }}" environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version >= '8' + when: ansible_os_family == "RedHat" when: locale_gen is defined and locale_gen | length > 0 tags: locales, locale_gen - block: - name: Set locale "{{ locale }}" into /etc/default/locale - lineinfile: + ansible.builtin.lineinfile: dest: "/etc/default/locale" regexp: "{{ item.regexp }}" line: "{{ item.line }}" owner: root group: root - mode: '0644' + mode: "0644" loop: - - {regexp: '^LANG=', line: 'LANG={{ locale }}'} - - {regexp: '^LANGUAGE=', line: 'LANGUAGE={{ locale }}'} - - {regexp: '^LC_ALL=', line: 'LC_ALL={{ locale }}'} + - { regexp: "^LANG=", line: "LANG={{ locale }}" } + - { regexp: "^LANGUAGE=", line: "LANGUAGE={{ locale }}" } + - { regexp: "^LC_ALL=", line: "LC_ALL={{ locale }}" } loop_control: label: "{{ item.line }}" when: ansible_os_family == "Debian" - name: Check that the /etc/locale.conf exists - stat: + ansible.builtin.stat: path: /etc/locale.conf register: locale_conf when: ansible_os_family == "RedHat" - name: Set locale "{{ locale }}" into /etc/locale.conf - lineinfile: + ansible.builtin.lineinfile: dest: "/etc/locale.conf" regexp: "{{ item.regexp }}" line: "{{ item.line }}" owner: root group: root - mode: '0644' + mode: "0644" loop: - - {regexp: '^LANG=', line: 'LANG={{ locale }}'} - - {regexp: '^LC_ALL=', line: 'LC_ALL={{ locale }}'} + - { regexp: "^LANG=", line: "LANG={{ locale }}" } + - { regexp: "^LC_ALL=", line: "LC_ALL={{ locale }}" } loop_control: label: "{{ item.line }}" when: ansible_os_family == "RedHat" and locale_conf.stat.exists - name: Set locale "{{ locale }}" into /etc/environment - lineinfile: + ansible.builtin.lineinfile: dest: "/etc/environment" regexp: "{{ item.regexp }}" line: "{{ item.line }}" owner: root group: root - mode: '0644' + mode: "0644" loop: - - {regexp: 'LANG=', line: 'export LANG={{ locale }}'} - - {regexp: 'LC_ALL=', line: 'export LC_ALL={{ locale }}'} + - { regexp: "LANG=", line: "export LANG={{ locale }}" } + - { regexp: "LC_ALL=", line: "export LC_ALL={{ locale }}" } loop_control: label: "{{ item.line }}" when: ansible_os_family == "RedHat" ignore_errors: true when: locale is defined and locale | length > 0 tags: locales, locale_env - -... 
diff --git a/automation/roles/mount/README.md b/automation/roles/mount/README.md new file mode 100644 index 000000000..994794307 --- /dev/null +++ b/automation/roles/mount/README.md @@ -0,0 +1 @@ +# Ansible Role: mount diff --git a/automation/roles/mount/defaults/main.yml b/automation/roles/mount/defaults/main.yml new file mode 100644 index 000000000..2ed0a0856 --- /dev/null +++ b/automation/roles/mount/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# Example for --extra-vars +# '{"mount": [{"path": "/pgdata", "src": "UUID=83304ebb-d942-4093-975b-8253be2aabe1", "fstype": "ext4", "opts": "defaults,noatime", "state": "mounted"}]}' + +mount: + - path: "" + src: "" + fstype: "" + opts: "" + state: "" diff --git a/automation/roles/mount/tasks/main.yml b/automation/roles/mount/tasks/main.yml new file mode 100644 index 000000000..0261008bf --- /dev/null +++ b/automation/roles/mount/tasks/main.yml @@ -0,0 +1,146 @@ +--- +- block: + # Try to detect an empty disk (if 'cloud_provider' is defined) + - name: Detect empty volume + ansible.builtin.shell: | + set -o pipefail; + lsblk -e7 --output NAME,FSTYPE,TYPE --json \ + | jq -r '.blockdevices[] | select(.children == null and .fstype == null and .type == "disk") | .name' + args: + executable: /bin/bash + register: lsblk_disk + changed_when: false + when: (cloud_provider | default('') | length > 0) and mount[0].src | length < 1 + + # Show an error message if no empty volume is detected + - name: No empty volume detected + ansible.builtin.fail: + msg: "Whoops! No empty volume was detected. Skipping the mount." + ignore_errors: true + when: lsblk_disk.stdout is defined and lsblk_disk.stdout | length < 1 + + # Filesystem + - name: Create "{{ pg_data_mount_fstype | default('ext4') }}" filesystem on the disk "/dev/{{ lsblk_disk.stdout }}" + community.general.filesystem: + dev: "/dev/{{ lsblk_disk.stdout }}" + fstype: "{{ pg_data_mount_fstype | default('ext4') }}" + when: + - (lsblk_disk.stdout is defined and lsblk_disk.stdout | length > 0) + - ((pg_data_mount_fstype is defined and pg_data_mount_fstype != 'zfs') or + (pg_data_mount_fstype is not defined and mount[0].fstype != 'zfs')) + + # UUID + - name: Get UUID of the disk "/dev/{{ lsblk_disk.stdout }}" + ansible.builtin.shell: | + set -o pipefail; + lsblk -no UUID /dev/{{ lsblk_disk.stdout }} | tr -d '\n' + args: + executable: /bin/bash + register: lsblk_uuid + changed_when: false + when: + - (lsblk_disk.stdout is defined and lsblk_disk.stdout | length > 0) + - ((pg_data_mount_fstype is defined and pg_data_mount_fstype != 'zfs') or + (pg_data_mount_fstype is not defined and mount[0].fstype != 'zfs')) + + - name: "Set mount variables" + ansible.builtin.set_fact: + mount: + - src: "UUID={{ lsblk_uuid.stdout }}" + path: "{{ pg_data_mount_path | default('/pgdata', true) }}" + fstype: "{{ pg_data_mount_fstype | default('ext4', true) }}" + when: lsblk_uuid.stdout is defined + + # Mount + - name: Mount the filesystem + ansible.posix.mount: + path: "{{ item.path }}" + src: "{{ item.src }}" + fstype: "{{ item.fstype | default(pg_data_mount_fstype | default('ext4', true), true) }}" + opts: "{{ item.opts | default('defaults,noatime') }}" + state: "{{ item.state | default('mounted') }}" + loop: "{{ mount }}" + when: + - (item.src | length > 0 and item.path | length > 0) + - ((pg_data_mount_fstype is defined and pg_data_mount_fstype != 'zfs') or + (pg_data_mount_fstype is not defined and item.fstype != 'zfs')) + + # ZFS Pool (if fstype is 'zfs') + - block: + - name: Install zfs + ansible.builtin.package: + name:
zfsutils-linux + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_distribution == 'Ubuntu' + + - name: Install zfs + ansible.builtin.package: + name: + - "linux-headers-{{ ansible_kernel }}" + - dpkg-dev + - zfs-dkms + - zfsutils-linux + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_distribution == 'Debian' + + - block: # RedHat based + - name: Download zfs-release + ansible.builtin.get_url: + url: "https://zfsonlinux.org/epel/zfs-release-2-3.el%7B%7B%20ansible_distribution_major_version%20%7D%7D.noarch.rpm" + dest: /tmp/zfs-release.rpm + + - name: Install zfs-release + ansible.builtin.package: + name: /tmp/zfs-release.rpm + state: present + disable_gpg_check: true + register: package_status + until: package_status is success + delay: 5 + retries: 3 + + - name: Install zfs + ansible.builtin.package: + name: + - kernel-devel + - zfs + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_os_family == 'RedHat' + + - name: Load the ZFS module + community.general.modprobe: + name: zfs + state: present + + - name: Ensure zfs is loaded at boot + ansible.builtin.lineinfile: + path: /etc/modules-load.d/zfs.conf + line: zfs + create: true + + - name: Create zpool (use {{ mount[0].src | default("/dev/" + lsblk_disk.stdout, true) }}) + ansible.builtin.command: >- + zpool create -f + -O compression=on + -O atime=off + -O recordsize=128k + -O logbias=throughput + -m {{ pg_data_mount_path | default(mount[0].path | default('/pgdata', true), true) }} + pgdata {{ mount[0].src | default("/dev/" + lsblk_disk.stdout, true) }} + when: + - (mount[0].src | length > 0 or lsblk_disk.stdout | default('') | length > 0) + - ((pg_data_mount_fstype is defined and pg_data_mount_fstype == 'zfs') or + (pg_data_mount_fstype is not defined and mount[0].fstype == 'zfs')) + tags: mount, zpool diff --git a/automation/roles/netdata/README.md b/automation/roles/netdata/README.md new file mode 100644 index 000000000..f4afa80e7 --- /dev/null +++ b/automation/roles/netdata/README.md @@ -0,0 +1 @@ +# Ansible Role: netdata diff --git a/automation/roles/netdata/tasks/main.yml b/automation/roles/netdata/tasks/main.yml new file mode 100644 index 000000000..b9563ee43 --- /dev/null +++ b/automation/roles/netdata/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- block: + - name: Download Netdata installation script + ansible.builtin.get_url: + url: "{{ netdata_kickstart_url | default('https://get.netdata.cloud/kickstart.sh') }}" + dest: /tmp/netdata-kickstart.sh + mode: +x + register: get_url_status + until: get_url_status is success + delay: 10 + retries: 3 + + - name: Install Netdata + ansible.builtin.command: /tmp/netdata-kickstart.sh {{ netdata_install_options | default('--dont-wait') }} + register: install_status + until: install_status is success + delay: 10 + retries: 3 + + - name: Configure Netdata + ansible.builtin.template: + src: templates/netdata.conf.j2 + dest: /etc/netdata/netdata.conf + owner: root + group: root + mode: u=wrx,g=rx,o=r,+x + + - name: Restart Netdata + ansible.builtin.service: + name: netdata + state: restarted + environment: "{{ proxy_env | default({}) }}" + ignore_errors: "{{ netdata_install_ignore_errors | default(true) }}" # show the error and continue the playbook execution + when: netdata_install | default(false)| bool + tags: netdata diff --git a/automation/roles/netdata/templates/netdata.conf.j2 
b/automation/roles/netdata/templates/netdata.conf.j2 new file mode 100644 index 000000000..1cc247c45 --- /dev/null +++ b/automation/roles/netdata/templates/netdata.conf.j2 @@ -0,0 +1,695 @@ +# netdata configuration +# +# You can download the latest version of this file, using: +# +# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# or +# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf +# +# You can uncomment and change any of the options below. +# The value shown in the commented settings, is the default value. +# + +# global netdata configuration + +[global] + # run as user = netdata + # host access prefix = + hostname = {{ ansible_hostname }} + # profile = standalone + # glibc malloc arena max for plugins = 1 + # glibc malloc arena max for netdata = 1 + # cpu cores = 2 + # libuv worker threads = 16 + # timezone = Etc/UTC + # OOM score = 0 + # process scheduling policy = batch + # process nice level = 0 + # pthread stack size = 8MiB + # is ephemeral node = no + # has unstable connection = no + +[db] + # enable replication = yes + # replication period = 1d + # replication step = 10m + # cleanup orphan hosts after = 1h + # update every = 1s + db = {{ netdata_conf.db_mode | default('dbengine') }} + # memory deduplication (ksm) = auto + # cleanup ephemeral hosts after = 1d + # cleanup obsolete charts after = 1h + # gap when lost iterations above = 1 + # dbengine page type = gorilla + dbengine page cache size = {{ netdata_conf.dbengine_page_cache_size | default('32MiB') }} + # dbengine extent cache size = off + # dbengine enable journal integrity check = no + # dbengine use all ram for caches = no + # dbengine out of memory protection = 191.07MiB + # dbengine use direct io = yes + # dbengine pages per extent = 109 + # storage tiers = 3 + # dbengine tier backfill = new + # dbengine tier 1 update every iterations = 60 + # dbengine tier 2 update every iterations = 60 + dbengine tier 0 retention size = {{ dbengine_tier_0_retention_size | default('1024MiB') }} + dbengine tier 0 retention time = {{ dbengine_tier_0_retention_time | default('14d') }} + dbengine tier 1 retention size = {{ dbengine_tier_1_retention_size | default('1024MiB') }} + dbengine tier 1 retention time = {{ dbengine_tier_1_retention_time | default('3mo') }} + dbengine tier 2 retention size = {{ dbengine_tier_2_retention_size | default('1024MiB') }} + dbengine tier 2 retention time = {{ dbengine_tier_2_retention_time | default('1y') }} + # replication threads = 1 + +[directories] + # config = /etc/netdata + # stock config = /usr/lib/netdata/conf.d + # log = /var/log/netdata + # web = /usr/share/netdata/web + # cache = /var/cache/netdata + # lib = /var/lib/netdata + # lock = /var/lib/netdata/lock + # cloud.d = /var/lib/netdata/cloud.d + # plugins = "/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d" + # home = /var/lib/netdata + # registry = /var/lib/netdata/registry + # stock health config = /usr/lib/netdata/conf.d/health.d + # health config = /etc/netdata/health.d + +[logs] + # facility = daemon + # logs flood protection period = 1m + # logs to trigger flood protection = 1000 + # level = info + # debug = /var/log/netdata/debug.log + # daemon = journal + # collector = journal + # access = /var/log/netdata/access.log + # health = journal + # debug flags = 0x0000000000000000 + +[environment variables] + # TZ = :/etc/localtime + # PATH = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/snap/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin + # PYTHONPATH = + +[host labels] 
+ # name = value + +[cloud] + # conversation log = no + # scope = full + # query threads = 6 + # proxy = env + +[ml] + # enabled = auto + # maximum num samples to train = 21600 + # minimum num samples to train = 900 + # train every = 3h + # number of models per dimension = 18 + # delete models older than = 7d + # num samples to diff = 1 + # num samples to smooth = 3 + # num samples to lag = 5 + # random sampling ratio = 0.20000 + # maximum number of k-means iterations = 1000 + # dimension anomaly score threshold = 0.99000 + # host anomaly rate threshold = 1.00000 + # anomaly detection grouping method = average + # anomaly detection grouping duration = 5m + # num training threads = 1 + # flush models batch size = 256 + # dimension anomaly rate suppression window = 15m + # dimension anomaly rate suppression threshold = 450 + # enable statistics charts = yes + # hosts to skip from training = !* + # charts to skip from training = netdata.* + # stream anomaly detection charts = yes + +[health] + # silencers file = /var/lib/netdata/health.silencers.json + # enabled = yes + # enable stock health configuration = yes + # use summary for notifications = yes + # default repeat warning = off + # default repeat critical = off + # in memory max health log entries = 1000 + # health log retention = 5d + # script to execute on alarm = /usr/libexec/netdata/plugins.d/alarm-notify.sh + # enabled alarms = * + # run at least every = 10s + # postpone alarms during hibernation for = 1m + +[web] + # ssl key = /etc/netdata/ssl/key.pem + # ssl certificate = /etc/netdata/ssl/cert.pem + # tls version = 1.3 + # tls ciphers = none + # ses max tg_des_window = 15 + # des max tg_des_window = 15 + # mode = static-threaded + # listen backlog = 4096 + default port = {{ netdata_port | default(netdata_conf.web_default_port | default(19999)) }} + bind to = {{ netdata_conf.web_bind_to | default('localhost') }} + # bearer token protection = no + # disconnect idle clients after = 1m + # timeout for first request = 1m + # accept a streaming request every = off + # respect do not track policy = no + # x-frame-options response header = + # allow connections from = localhost * + # allow connections by dns = heuristic + # allow dashboard from = localhost * + # allow dashboard by dns = heuristic + # allow badges from = * + # allow badges by dns = heuristic + # allow streaming from = * + # allow streaming by dns = heuristic + # allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* UNKNOWN + # allow netdata.conf by dns = no + # allow management from = localhost + # allow management by dns = heuristic + # enable gzip compression = yes + # gzip compression strategy = default + # gzip compression level = 3 + # ssl skip certificate verification = no + # web server threads = 6 + # web server max sockets = 131072 + +[registry] + # enabled = no + # netdata unique id file = /var/lib/netdata/registry/netdata.public.unique.id + # registry db file = /var/lib/netdata/registry/registry.db + # registry log file = /var/lib/netdata/registry/registry-log.db + # registry save db every new entries = 1000000 + # registry expire idle persons = 1y + # registry domain = + # registry to announce = https://registry.my-netdata.io + # registry hostname = ip-172-31-32-178 + # verify browser cookies support = yes + # enable cookies SameSite and Secure = yes + # max URL length = 1024 + # max URL name length = 50 + # use mmap = 
no + # netdata management api key file = /var/lib/netdata/netdata.api.key + # allow from = * + # allow by dns = heuristic + +[pulse] + # extended = no + # update every = 1s + +[plugins] + # idlejitter = yes + # netdata pulse = yes + # profile = no + # tc = yes + # diskspace = yes + # proc = yes + # cgroups = yes + # timex = yes + # enable running new plugins = yes + # check for new plugins every = 1m + # slabinfo = no + # nfacct = yes + # statsd = yes + # perf = yes + # network-viewer = yes + # systemd-journal = yes + # ioping = yes + # charts.d = yes + # debugfs = yes + # go.d = yes + # ebpf = yes + # apps = yes + # python.d = yes + +[statsd] + # update every (flushInterval) = 1s + # udp messages to process at once = 10 + # create private charts for metrics matching = * + # max private charts hard limit = 1000 + # set charts as obsolete after = off + # decimal detail = 1000 + # disconnect idle tcp clients after = 10m + # private charts hidden = no + # histograms and timers percentile (percentThreshold) = 95.00000 + # dictionaries max unique dimensions = 200 + # add dimension for number of events received = no + # gaps on gauges (deleteGauges) = no + # gaps on counters (deleteCounters) = no + # gaps on meters (deleteMeters) = no + # gaps on sets (deleteSets) = no + # gaps on histograms (deleteHistograms) = no + # gaps on timers (deleteTimers) = no + # gaps on dictionaries (deleteDictionaries) = no + # statsd server max TCP sockets = 131072 + # listen backlog = 4096 + # default port = 8125 + # bind to = udp:localhost tcp:localhost + +[plugin:idlejitter] + # loop time = 20ms + +[plugin:nfacct] + # update every = 1s + # command options = + +[plugin:perf] + # update every = 1s + # command options = + +[plugin:network-viewer] + # update every = 1s + # command options = + +[plugin:systemd-journal] + # update every = 1s + # command options = + +[plugin:ioping] + # update every = 1s + # command options = + +[plugin:charts.d] + # update every = 1s + # command options = + +[plugin:debugfs] + # update every = 1s + # command options = + +[plugin:tc] + # script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh + # enable tokens charts for all interfaces = no + # enable ctokens charts for all interfaces = no + # enable show all classes and qdiscs for all interfaces = no + # cleanup unused classes every = 120 + +[plugin:proc:diskspace] + # remove charts of unmounted disks = yes + # update every = 1s + # check for new mount points every = 15s + # exclude space metrics on paths = /dev /dev/shm /proc/* /sys/* /var/run/user/* /run/lock /run/user/* /snap/* /var/lib/docker/* /var/lib/containers/storage/* /run/credentials/* /run/containerd/* /rpool /rpool/* + # exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs cgroup cgroup2 hugetlbfs devtmpfs fuse.lxcfs + # exclude inode metrics on filesystems = msdosfs msdos vfat overlayfs aufs* *unionfs + # space usage for all disks = auto + # inodes usage for all disks = auto + +[plugin:proc] + # /proc/net/dev = yes + # /proc/pagetypeinfo = no + # /proc/stat = yes + # /proc/uptime = yes + # /proc/loadavg = yes + # /proc/sys/fs/file-nr = yes + # /proc/sys/kernel/random/entropy_avail = yes + # /run/reboot_required = yes + # /proc/pressure = yes + # /proc/interrupts = yes + # /proc/softirqs = yes + # /proc/vmstat = yes + # /proc/meminfo = yes + # /sys/kernel/mm/ksm = yes + # /sys/block/zram = yes + # /sys/devices/system/edac/mc = yes + # /sys/devices/pci/aer = yes + # 
/sys/devices/system/node = yes + # /proc/net/wireless = yes + # /proc/net/sockstat = yes + # /proc/net/sockstat6 = yes + # /proc/net/netstat = yes + # /proc/net/sctp/snmp = yes + # /proc/net/softnet_stat = yes + # /proc/net/ip_vs/stats = yes + # /sys/class/infiniband = yes + # /proc/net/stat/conntrack = yes + # /proc/net/stat/synproxy = yes + # /proc/diskstats = yes + # /proc/mdstat = yes + # /proc/net/rpc/nfsd = yes + # /proc/net/rpc/nfs = yes + # /proc/spl/kstat/zfs/arcstats = yes + # /sys/fs/btrfs = yes + # ipc = yes + # /sys/class/power_supply = yes + # /sys/class/drm = yes + +[plugin:cgroups] + # update every = 1s + # check for new cgroups every = 10s + # use unified cgroups = auto + # max cgroups to allow = 1000 + # max cgroups depth to monitor = 0 + # enable by default cgroups matching = !*/init.scope !/system.slice/run-*.scope *user.slice/docker-* !*user.slice* *.scope /machine.slice/*.service */kubepods/pod*/* */kubepods/*/pod*/* */*-kubepods-pod*/* */*-kubepods-*-pod*/* !*kubepods* !*kubelet* !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.service/udev !*.socket !*.slice !*.swap !*.user !/ !/docker !*/libvirt !/lxc !/lxc/*/* !/lxc.monitor* !/lxc.pivot !/lxc.payload !*lxcfs.service/.control !/machine !/qemu !/system !/systemd !/user * + # enable by default cgroups names matching = * + # search for cgroups in subpaths matching = !*/init.scope !*-qemu !*.libvirt-qemu !/init.scope !/system !/systemd !/user !/lxc/*/* !/lxc.monitor !/lxc.payload/*/* !/lxc.payload.* * + # script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh + # script to get cgroup network interfaces = /usr/libexec/netdata/plugins.d/cgroup-network + # run script to rename cgroups matching = !/ !*.mount !*.socket !*.partition /machine.slice/*.service !*.service !*.slice !*.swap !*.user !init.scope !*.scope/vcpu* !*.scope/emulator *.scope *docker* *lxc* *qemu* */kubepods/pod*/* */kubepods/*/pod*/* */*-kubepods-pod*/* */*-kubepods-*-pod*/* !*kubepods* !*kubelet* *.libvirt-qemu * + # cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service + +[plugin:timex] + # update every = 10s + # clock synchronization state = yes + # time offset = yes + +[plugin:go.d] + # update every = 1s + # command options = + +[plugin:ebpf] + # update every = 1s + # command options = + +[plugin:apps] + # update every = 1s + # command options = + +[plugin:python.d] + # update every = 1s + # command options = + +[plugin:proc:/proc/stat] + # cpu utilization = yes + # per cpu core utilization = no + # cpu interrupts = yes + # context switches = yes + # processes started = yes + # processes running = yes + # keep per core files open = yes + # keep cpuidle files open = yes + # core_throttle_count = auto + # package_throttle_count = no + # cpu frequency = yes + # cpu idle states = no + # core_throttle_count filename to monitor = /sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count + # package_throttle_count filename to monitor = /sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count + # scaling_cur_freq filename to monitor = /sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq + # time_in_state filename to monitor = /sys/devices/system/cpu/%s/cpufreq/stats/time_in_state + # schedstat filename to monitor = /proc/schedstat + # cpuidle name filename to monitor = /sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name + # cpuidle time filename to monitor = /sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time + # filename to monitor = /proc/stat + +[plugin:proc:/proc/uptime] + # 
filename to monitor = /proc/uptime + +[plugin:proc:/proc/loadavg] + # filename to monitor = /proc/loadavg + # enable load average = yes + # enable total processes = yes + +[plugin:proc:/proc/sys/fs/file-nr] + # filename to monitor = /proc/sys/fs/file-nr + +[plugin:proc:/proc/sys/kernel/random/entropy_avail] + # filename to monitor = /proc/sys/kernel/random/entropy_avail + +[plugin:proc:/proc/pressure] + # base path of pressure metrics = /proc/pressure + # enable cpu some pressure = yes + # enable cpu full pressure = no + # enable memory some pressure = yes + # enable memory full pressure = yes + # enable io some pressure = yes + # enable io full pressure = yes + # enable irq some pressure = no + # enable irq full pressure = yes + +[plugin:proc:/proc/interrupts] + # interrupts per core = no + # filename to monitor = /proc/interrupts + +[plugin:proc:/proc/softirqs] + # interrupts per core = no + # filename to monitor = /proc/softirqs + +[plugin:proc:/proc/vmstat] + # filename to monitor = /proc/vmstat + # swap i/o = auto + # disk i/o = yes + # memory page faults = yes + # out of memory kills = yes + # system-wide numa metric summary = auto + # transparent huge pages = auto + # zswap i/o = auto + # memory ballooning = auto + # kernel same memory = auto + +[plugin:proc:/sys/devices/system/node] + # directory to monitor = /sys/devices/system/node + # enable per-node numa metrics = auto + +[plugin:proc:/proc/meminfo] + # system ram = yes + # system swap = auto + # hardware corrupted ECC = auto + # committed memory = yes + # writeback memory = yes + # kernel memory = yes + # slab memory = yes + # hugepages = auto + # transparent hugepages = auto + # memory reclaiming = yes + # high low memory = yes + # cma memory = auto + # direct maps = yes + # filename to monitor = /proc/meminfo + +[plugin:proc:/sys/kernel/mm/ksm] + # /sys/kernel/mm/ksm/pages_shared = /sys/kernel/mm/ksm/pages_shared + # /sys/kernel/mm/ksm/pages_sharing = /sys/kernel/mm/ksm/pages_sharing + # /sys/kernel/mm/ksm/pages_unshared = /sys/kernel/mm/ksm/pages_unshared + # /sys/kernel/mm/ksm/pages_volatile = /sys/kernel/mm/ksm/pages_volatile + +[plugin:proc:/sys/devices/system/edac/mc] + # directory to monitor = /sys/devices/system/edac/mc + +[plugin:proc:/sys/class/pci/aer] + # enable root ports = no + # enable pci slots = no + +[plugin:proc:/proc/net/wireless] + # filename to monitor = /proc/net/wireless + # status for all interfaces = auto + # quality for all interfaces = auto + # discarded packets for all interfaces = auto + # missed beacon for all interface = auto + +[plugin:proc:/proc/net/sockstat] + # ipv4 sockets = auto + # ipv4 TCP sockets = auto + # ipv4 TCP memory = auto + # ipv4 UDP sockets = auto + # ipv4 UDP memory = auto + # ipv4 UDPLITE sockets = auto + # ipv4 RAW sockets = auto + # ipv4 FRAG sockets = auto + # ipv4 FRAG memory = auto + # update constants every = 1m + # filename to monitor = /proc/net/sockstat + +[plugin:proc:/proc/net/sockstat6] + # ipv6 TCP sockets = auto + # ipv6 UDP sockets = auto + # ipv6 UDPLITE sockets = auto + # ipv6 RAW sockets = auto + # ipv6 FRAG sockets = auto + # filename to monitor = /proc/net/sockstat6 + +[plugin:proc:/proc/net/netstat] + # bandwidth = auto + # input errors = auto + # multicast bandwidth = auto + # broadcast bandwidth = auto + # multicast packets = auto + # broadcast packets = auto + # ECN packets = auto + # TCP reorders = auto + # TCP SYN cookies = auto + # TCP out-of-order queue = auto + # TCP connection aborts = auto + # TCP memory pressures = auto + # TCP SYN queue = 
auto + # TCP accept queue = auto + # filename to monitor = /proc/net/netstat + +[plugin:proc:/proc/net/snmp] + # ipv4 packets = auto + # ipv4 fragments sent = auto + # ipv4 fragments assembly = auto + # ipv4 errors = auto + # ipv4 TCP connections = auto + # ipv4 TCP packets = auto + # ipv4 TCP errors = auto + # ipv4 TCP opens = auto + # ipv4 TCP handshake issues = auto + # ipv4 UDP packets = auto + # ipv4 UDP errors = auto + # ipv4 ICMP packets = auto + # ipv4 ICMP messages = auto + # ipv4 UDPLite packets = auto + # filename to monitor = /proc/net/snmp + +[plugin:proc:/proc/net/snmp6] + # ipv6 packets = auto + # ipv6 fragments sent = auto + # ipv6 fragments assembly = auto + # ipv6 errors = auto + # ipv6 UDP packets = auto + # ipv6 UDP errors = auto + # ipv6 UDPlite packets = auto + # ipv6 UDPlite errors = auto + # bandwidth = auto + # multicast bandwidth = auto + # broadcast bandwidth = auto + # multicast packets = auto + # icmp = auto + # icmp redirects = auto + # icmp errors = auto + # icmp echos = auto + # icmp group membership = auto + # icmp router = auto + # icmp neighbor = auto + # icmp mldv2 = auto + # icmp types = auto + # ect = auto + # filename to monitor = /proc/net/snmp6 + +[plugin:proc:/proc/net/sctp/snmp] + # established associations = auto + # association transitions = auto + # fragmentation = auto + # packets = auto + # packet errors = auto + # chunk types = auto + # filename to monitor = /proc/net/sctp/snmp + +[plugin:proc:/proc/net/softnet_stat] + # softnet_stat per core = no + # filename to monitor = /proc/net/softnet_stat + +[plugin:proc:/proc/net/ip_vs_stats] + # IPVS bandwidth = yes + # IPVS connections = yes + # IPVS packets = yes + # filename to monitor = /proc/net/ip_vs_stats + +[plugin:proc:/sys/class/infiniband] + # dirname to monitor = /sys/class/infiniband + # bandwidth counters = yes + # packets counters = yes + # errors counters = yes + # hardware packets counters = auto + # hardware errors counters = auto + # monitor only active ports = auto + # disable by default interfaces matching = + # refresh ports state every = 30s + +[plugin:proc:/proc/net/stat/nf_conntrack] + # filename to monitor = /proc/net/stat/nf_conntrack + # netfilter new connections = no + # netfilter connection changes = no + # netfilter connection expectations = no + # netfilter connection searches = no + # netfilter errors = no + # netfilter connections = yes + +[plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max] + # filename to monitor = /proc/sys/net/netfilter/nf_conntrack_max + # read every seconds = 10 + +[plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count] + # filename to monitor = /proc/sys/net/netfilter/nf_conntrack_count + +[plugin:proc:/proc/net/stat/synproxy] + # SYNPROXY cookies = auto + # SYNPROXY SYN received = auto + # SYNPROXY connections reopened = auto + # filename to monitor = /proc/net/stat/synproxy + +[plugin:proc:/proc/diskstats] + # enable new disks detected at runtime = yes + # performance metrics for physical disks = auto + # performance metrics for virtual disks = auto + # performance metrics for partitions = no + # bandwidth for all disks = auto + # operations for all disks = auto + # merged operations for all disks = auto + # i/o time for all disks = auto + # queued operations for all disks = auto + # utilization percentage for all disks = auto + # extended operations for all disks = auto + # backlog for all disks = auto + # bcache for all disks = auto + # bcache priority stats update every = off + # remove charts of removed disks = yes + # path to get 
block device = /sys/block/%s + # path to get block device bcache = /sys/block/%s/bcache + # path to get virtual block device = /sys/devices/virtual/block/%s + # path to get block device infos = /sys/dev/block/%lu:%lu/%s + # path to device mapper = /dev/mapper + # path to /dev/disk = /dev/disk + # path to /sys/block = /sys/block + # path to /dev/disk/by-label = /dev/disk/by-label + # path to /dev/disk/by-id = /dev/disk/by-id + # path to /dev/vx/dsk = /dev/vx/dsk + # name disks by id = no + # preferred disk ids = * + # exclude disks = loop* ram* + # filename to monitor = /proc/diskstats + # performance metrics for disks with major 259 = yes + +[plugin:proc:/proc/mdstat] + # faulty devices = yes + # nonredundant arrays availability = yes + # mismatch count = auto + # disk stats = yes + # operation status = yes + # make charts obsolete = yes + # filename to monitor = /proc/mdstat + # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt + +[plugin:proc:/proc/net/rpc/nfsd] + # filename to monitor = /proc/net/rpc/nfsd + +[plugin:proc:/proc/net/rpc/nfs] + # filename to monitor = /proc/net/rpc/nfs + +[plugin:proc:/proc/spl/kstat/zfs/arcstats] + # filename to monitor = /proc/spl/kstat/zfs/arcstats + +[plugin:proc:/sys/fs/btrfs] + # path to monitor = /sys/fs/btrfs + # check for btrfs changes every = 1m + # physical disks allocation = auto + # data allocation = auto + # metadata allocation = auto + # system allocation = auto + # commit stats = auto + # error stats = auto + +[plugin:proc:ipc] + # message queues = yes + # semaphore totals = yes + # shared memory totals = yes + # msg filename to monitor = /proc/sysvipc/msg + # shm filename to monitor = /proc/sysvipc/shm + # max dimensions in memory allowed = 50 + +[plugin:proc:/sys/class/power_supply] + # battery capacity = yes + # battery power = yes + # battery charge = no + # battery energy = no + # power supply voltage = no + # keep files open = auto + # directory to monitor = /sys/class/power_supply + +[plugin:proc:/sys/class/drm] + # directory to monitor = /sys/class/drm + +[plugin:proc:/proc/net/dev] + # compressed packets for all interfaces = no + # disable by default interfaces matching = lo fireqos* *-ifb fwpr* fwbr* fwln* ifb4* diff --git a/automation/roles/ntp/README.md b/automation/roles/ntp/README.md new file mode 100644 index 000000000..9ec6e3321 --- /dev/null +++ b/automation/roles/ntp/README.md @@ -0,0 +1 @@ +# Ansible Role: ntp diff --git a/roles/ntp/handlers/main.yml b/automation/roles/ntp/handlers/main.yml similarity index 80% rename from roles/ntp/handlers/main.yml rename to automation/roles/ntp/handlers/main.yml index 8c30b96a1..3f40e9db9 100644 --- a/roles/ntp/handlers/main.yml +++ b/automation/roles/ntp/handlers/main.yml @@ -1,24 +1,21 @@ --- - - name: Restart ntp service - systemd: + ansible.builtin.systemd: name: ntp enabled: true state: restarted listen: "restart ntp" - name: Restart ntpd service - systemd: + ansible.builtin.systemd: name: ntpd enabled: true state: restarted listen: "restart ntpd" - name: Restart chronyd service - systemd: + ansible.builtin.systemd: name: chronyd enabled: true state: restarted listen: "restart chronyd" - -... 
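The three ntp handlers above are keyed by `listen` topics, so tasks notify the topic string rather than the handler's `name`. The tasks file that follows uses exactly this pattern; a minimal sketch:

```yaml
# Notifying a handler via its `listen` topic (handler defined above).
- name: Copy the chrony.conf template file
  ansible.builtin.template:
    src: chrony.conf.j2
    dest: /etc/chrony.conf
  notify: "restart chronyd" # matches the handler's listen:, not its name:
```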
diff --git a/roles/ntp/tasks/main.yml b/automation/roles/ntp/tasks/main.yml similarity index 51% rename from roles/ntp/tasks/main.yml rename to automation/roles/ntp/tasks/main.yml index fbc155d64..8c4a07477 100644 --- a/roles/ntp/tasks/main.yml +++ b/automation/roles/ntp/tasks/main.yml @@ -1,51 +1,45 @@ --- - - block: + # Debian - name: Install ntp package - package: + ansible.builtin.package: name: ntp state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" or - (ansible_os_family == "RedHat" and - ansible_distribution_major_version == '7') + when: ansible_os_family == "Debian" tags: ntp_install - name: Copy the ntp.conf file - template: + ansible.builtin.template: src: ntp.conf.j2 dest: /etc/ntp.conf notify: "restart ntp" when: ansible_os_family == "Debian" tags: ntp_conf - - name: Copy the ntp.conf file - template: - src: ntp.conf.j2 - dest: /etc/ntp.conf - notify: "restart ntpd" - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version == '7' - tags: ntp_conf - + # RedHat - name: Install chrony package - package: + ansible.builtin.package: name: chrony state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 environment: "{{ proxy_env | default({}) }}" tags: ntp_install - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version >= '8' + when: ansible_os_family == "RedHat" - name: Copy the chrony.conf template file - template: + ansible.builtin.template: src: chrony.conf.j2 dest: /etc/chrony.conf notify: "restart chronyd" tags: ntp_conf - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version >= '8' + when: ansible_os_family == "RedHat" when: ntp_enabled is defined and ntp_enabled|bool tags: ntp - -... 
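The whole block above is gated on `ntp_enabled`, installing `ntp` on Debian and `chrony` on RedHat. A hedged example of enabling it (only `ntp_enabled` appears in the tasks; `ntp_servers` is assumed to be consumed by the `ntp.conf.j2`/`chrony.conf.j2` templates):

```yaml
# Illustrative inventory vars for the ntp role.
ntp_enabled: true
ntp_servers: # assumed template variable; values are examples
  - "0.pool.ntp.org"
  - "1.pool.ntp.org"
```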
diff --git a/roles/ntp/templates/chrony.conf.j2 b/automation/roles/ntp/templates/chrony.conf.j2 similarity index 100% rename from roles/ntp/templates/chrony.conf.j2 rename to automation/roles/ntp/templates/chrony.conf.j2 diff --git a/roles/ntp/templates/ntp.conf.j2 b/automation/roles/ntp/templates/ntp.conf.j2 similarity index 100% rename from roles/ntp/templates/ntp.conf.j2 rename to automation/roles/ntp/templates/ntp.conf.j2 diff --git a/automation/roles/packages/README.md b/automation/roles/packages/README.md new file mode 100644 index 000000000..0882d7b9d --- /dev/null +++ b/automation/roles/packages/README.md @@ -0,0 +1 @@ +# Ansible Role: packages diff --git a/automation/roles/packages/defaults/main.yml b/automation/roles/packages/defaults/main.yml new file mode 100644 index 000000000..8e5e255ed --- /dev/null +++ b/automation/roles/packages/defaults/main.yml @@ -0,0 +1,24 @@ +--- +pgdg_architecture_map: + amd64: x86_64 + x86_64: x86_64 + aarch64: aarch64 + +# Extension Auto-Setup: +pgvectorscale_architecture_map: + x86_64: amd64 + amd64: amd64 + aarch64: arm64 + arm64: arm64 + +paradedb_architecture_map_dep: + x86_64: amd64 + amd64: amd64 + aarch64: arm64 + arm64: arm64 + +paradedb_architecture_map_rpm: + x86_64: x86_64 + amd64: x86_64 + aarch64: aarch64 + arm64: aarch64 diff --git a/automation/roles/packages/tasks/extensions.yml b/automation/roles/packages/tasks/extensions.yml new file mode 100644 index 000000000..600eadb11 --- /dev/null +++ b/automation/roles/packages/tasks/extensions.yml @@ -0,0 +1,383 @@ +--- +# Extension Auto-Setup: packages + +# TimescaleDB (if 'enable_timescale' is 'true') +- name: Install TimescaleDB package + ansible.builtin.package: + name: "{{ item }}" + state: present + loop: "{{ timescaledb_package }}" + vars: + timescaledb_package: >- + [{% if pg_version | default(postgresql_version) | int >= 11 %} + "timescaledb-2-postgresql-{{ pg_version | default(postgresql_version) }}" + {% else %} + "timescaledb-postgresql-{{ pg_version | default(postgresql_version) }}" + {% endif %}] + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: (enable_timescale | default(false) | bool) or (enable_timescaledb | default(false) | bool) + tags: timescaledb, timescale + +# Citus (if 'enable_citus' is 'true') +- name: Install Citus package + ansible.builtin.package: + name: "{{ item }}" + state: present + loop: "{{ citus_package }}" + vars: + citus_package: >- + [{% if ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int >= 15 %} + "postgresql-{{ pg_version | default(postgresql_version) }}-citus-{{ citus_version | default('13.0') }}" + {% elif ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int == 14 %} + "postgresql-{{ pg_version | default(postgresql_version) }}-citus-12.1" + {% elif ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int == 13 %} + "postgresql-{{ pg_version | default(postgresql_version) }}-citus-11.3" + {% elif ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int == 12 %} + "postgresql-{{ pg_version | default(postgresql_version) }}-citus-10.2" + {% elif ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int == 11 %} + "postgresql-{{ pg_version | default(postgresql_version) }}-citus-10.0" + {% else %} + "citus_{{ pg_version | default(postgresql_version) }}" + {% endif %}] + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: + - enable_citus | 
default(false) | bool + - (ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int >= 11) or + (ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int >= 12) + - ansible_architecture in ["x86_64", "amd64"] # no arm64 support + tags: citus + +# pg_repack (if 'enable_pg_repack' is 'true') +- name: Install pg_repack package + ansible.builtin.package: + name: "{{ pg_repack_package }}" + state: present + vars: + pg_repack_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-repack + {% else %} + pg_repack_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pg_repack | default(false) | bool + tags: pg_repack + +# pg_cron (if 'enable_pg_cron' is 'true') +- name: Install pg_cron package + ansible.builtin.package: + name: "{{ pg_cron_package }}" + state: present + vars: + pg_cron_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-cron + {% else %} + pg_cron_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pg_cron | default(false) | bool + tags: pg_cron + +# pgaudit (if 'enable_pgaudit' is 'true') +- name: Install pgaudit package + ansible.builtin.package: + name: "{{ pgaudit_package }}" + state: present + vars: + pgaudit_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-pgaudit + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int >= 16 %} + pgaudit_{{ pg_version | default(postgresql_version) }} + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 15 %} + pgaudit17_{{ pg_version | default(postgresql_version) }} + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 14 %} + pgaudit16_{{ pg_version | default(postgresql_version) }} + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 13 %} + pgaudit15_{{ pg_version | default(postgresql_version) }} + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 12 %} + pgaudit14_{{ pg_version | default(postgresql_version) }} + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 11 %} + pgaudit13_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pgaudit | default(false) | bool + tags: pgaudit + +# postgis (if 'enable_postgis' is 'true') +- name: Install postgis package + ansible.builtin.package: + name: "{{ postgis_package }}" + state: present + vars: + postgis_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-postgis-3 + {% elif ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int == 16 %} + postgis34_{{ pg_version | default(postgresql_version) }} + {% else %} + postgis33_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_postgis | default(false) | bool + tags: postgis + +# pgrouting (if 'enable_pgrouting' is 'true') +- name: Install pgrouting package + 
ansible.builtin.package: + name: "{{ pgrouting_package }}" + state: present + vars: + pgrouting_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-pgrouting + {% else %} + pgrouting_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pgrouting | default(false) | bool and + not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('20.04', '<=')) + tags: pgrouting + +# pg_stat_kcache (if 'enable_pg_stat_kcache' is 'true') +- name: Install pg_stat_kcache package + ansible.builtin.package: + name: "{{ pg_stat_kcache_package }}" + state: present + vars: + pg_stat_kcache_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-pg-stat-kcache + {% else %} + pg_stat_kcache_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pg_stat_kcache | default(false) | bool + tags: pg_stat_kcache + +# pg_wait_sampling (if 'enable_pg_wait_sampling' is 'true') +- name: Install pg_wait_sampling package + ansible.builtin.package: + name: "{{ pg_wait_sampling_package }}" + state: present + vars: + pg_wait_sampling_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-pg-wait-sampling + {% else %} + pg_wait_sampling_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pg_wait_sampling | default(false) | bool + tags: pg_wait_sampling + +# pg_partman (if 'enable_pg_partman' is 'true') +- name: Install pg_partman package + ansible.builtin.package: + name: "{{ pg_partman_package }}" + state: present + vars: + pg_partman_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-partman + {% else %} + pg_partman_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: enable_pg_partman | default(false) | bool + tags: pg_partman + +# pgvector (if 'enable_pgvector' or 'enable_paradedb' is 'true') +- name: Install pgvector package + ansible.builtin.package: + name: "{{ pgvector_package }}" + state: present + vars: + pgvector_package: >- + {% if ansible_os_family == 'Debian' %} + postgresql-{{ pg_version | default(postgresql_version) }}-pgvector + {% else %} + pgvector_{{ pg_version | default(postgresql_version) }} + {% endif %} + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: + - (enable_pgvector | default(false)| bool) or + (enable_pgvectorscale | default(false) | bool) or + (enable_paradedb | default(false) | bool) + - (ansible_os_family == 'Debian' and pg_version | default(postgresql_version) | int >= 11) or + (ansible_os_family == 'RedHat' and pg_version | default(postgresql_version) | int >= 12) + tags: pgvector + +# pgvectorscale - https://github.com/timescale/pgvectorscale +# (if 'enable_pgvectorscale' is 'true') +- block: + - name: Looking up the latest version of pgvectorscale + ansible.builtin.set_fact: + pgvectorscale_version: >- + {{ + (lookup('url', 'https://api.github.com/repos/timescale/pgvectorscale/releases/latest', split_lines=False) + | from_json).get('tag_name') + | replace('v', 
'') + }} + check_mode: false + when: pgvectorscale_version | default('latest') == 'latest' + + - name: Download pgvectorscale archive + ansible.builtin.get_url: + url: "{{ pgvectorscale_repo }}/{{ pgvectorscale_archive }}" + dest: "/tmp/{{ pgvectorscale_archive }}" + timeout: 60 + validate_certs: false + check_mode: false + + - name: Extract pgvectorscale package + ansible.builtin.unarchive: + src: "/tmp/{{ pgvectorscale_archive }}" + dest: "/tmp/" + remote_src: true + check_mode: false + + # Debian (only deb packages are available) + - name: "Install pgvectorscale v{{ pgvectorscale_version }} package" + ansible.builtin.apt: + deb: "/tmp/{{ pgvectorscale_package }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + vars: + pgvectorscale_repo: "https://github.com/timescale/pgvectorscale/releases/download/%7B%7B%20pgvectorscale_version%20%7D%7D" + pgvectorscale_archive: "pgvectorscale-{{ pgvectorscale_version }}-pg{{ pg_version | default(postgresql_version) }}-{{ pgvectorscale_architecture_map[ansible_architecture] }}.zip" # yamllint disable rule:line-length + pgvectorscale_package: "pgvectorscale-postgresql-{{ pg_version | default(postgresql_version) }}_{{ pgvectorscale_version }}-Linux_{{ pgvectorscale_architecture_map[ansible_architecture] }}.deb" # yamllint disable rule:line-length + when: + - enable_pgvectorscale | default(false) | bool + - ansible_os_family == "Debian" + - ansible_distribution_release in ['bookworm', 'jammy', 'noble'] + +# ParadeDB (pg_search, pg_analytics) - https://github.com/paradedb/paradedb +# (if 'enable_paradedb' or 'enable_pg_search', 'enable_pg_analytics' is 'true') +- block: + # pg_search + - block: + - name: Looking up the latest version of pg_search + ansible.builtin.set_fact: + pg_search_version: >- + {{ + (lookup('url', 'https://api.github.com/repos/paradedb/paradedb/releases/latest', split_lines=False) + | from_json).get('tag_name') + | replace('v', '') + }} + when: pg_search_version | default('latest') == 'latest' + + # Debian + - name: "Install pg_search v{{ pg_search_version }} package" + ansible.builtin.apt: + deb: "{{ pg_search_package }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + vars: + pg_search_repo: "https://github.com/paradedb/paradedb/releases/download/v%7B%7B%20pg_search_version%20%7D%7D/" + pg_search_package: "{{ pg_search_repo }}/postgresql-{{ pg_version | default(postgresql_version) }}-pg-search_{{ pg_search_version }}-1PARADEDB-{{ ansible_distribution_release }}_{{ paradedb_architecture_map_dep[ansible_architecture] }}.deb" # yamllint disable rule:line-length + when: + - ansible_os_family == "Debian" + - ansible_distribution_release in ['bookworm', 'jammy', 'noble'] + + # RedHat + - name: "Install pg_search v{{ pg_search_version }} package" + ansible.builtin.dnf: + name: "{{ pg_search_package }}" + disable_gpg_check: true + register: dnf_status + until: dnf_status is success + delay: 5 + retries: 3 + vars: + pg_search_repo: "https://github.com/paradedb/paradedb/releases/download/v%7B%7B%20pg_search_version%20%7D%7D/" + pg_search_package: "{{ pg_search_repo }}/pg_search_{{ pg_version | default(postgresql_version) }}-{{ pg_search_version }}-1PARADEDB.el{{ ansible_distribution_major_version }}.{{ paradedb_architecture_map_rpm[ansible_architecture] }}.rpm" # yamllint disable rule:line-length + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version in ['8', '9'] + when: (enable_paradedb | default(false) | bool) or 
(enable_pg_search | default(false) | bool) + + # pg_analytics + - block: + - name: Looking up the latest version of pg_analytics + ansible.builtin.set_fact: + pg_analytics_version: >- + {{ + (lookup('url', 'https://api.github.com/repos/paradedb/pg_analytics/releases/latest', split_lines=False) + | from_json).get('tag_name') + | replace('v', '') + }} + when: pg_analytics_version | default('latest') == 'latest' + + # Debian + - name: "Install pg_analytics v{{ pg_analytics_version }} package" + ansible.builtin.apt: + deb: "{{ pg_analytics_package }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + vars: + pg_analytics_repo: "https://github.com/paradedb/pg_analytics/releases/download/v%7B%7B%20pg_analytics_version%20%7D%7D/" + pg_analytics_package: "{{ pg_analytics_repo }}/postgresql-{{ pg_version | default(postgresql_version) }}-pg-analytics_{{ pg_analytics_version }}-1PARADEDB-{{ ansible_distribution_release }}_{{ paradedb_architecture_map_dep[ansible_architecture] }}.deb" # yamllint disable rule:line-length + when: + - ansible_os_family == "Debian" + - ansible_distribution_release in ['bookworm', 'jammy', 'noble'] + + # RedHat + - name: "Install pg_analytics v{{ pg_analytics_version }} package" + ansible.builtin.dnf: + name: "{{ pg_analytics_package }}" + disable_gpg_check: true + register: dnf_status + until: dnf_status is success + delay: 5 + retries: 3 + vars: + pg_analytics_repo: "https://github.com/paradedb/pg_analytics/releases/download/v%7B%7B%20pg_analytics_version%20%7D%7D/" + pg_analytics_package: "{{ pg_analytics_repo }}/pg_analytics_{{ pg_version | default(postgresql_version) }}-{{ pg_analytics_version }}-1PARADEDB.el{{ ansible_distribution_major_version }}.{{ paradedb_architecture_map_rpm[ansible_architecture] }}.rpm" # yamllint disable rule:line-length + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version in ['8', '9'] + when: (enable_paradedb | default(false) | bool) or (enable_pg_analytics | default(false) | bool) + when: (enable_paradedb | default(false) | bool) or (enable_pg_search | default(false) | bool) or (enable_pg_analytics | default(false) | bool) + tags: paradedb, pg_search, pg_analytics +# Note: We use the 'pg_version' variable to be able to reuse this code in the upgrade role. 
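Since every task in extensions.yml is gated on an `enable_*` flag that defaults to `false`, deployments opt in per extension. An illustrative vars snippet (flag names are taken from the tasks above; version values are examples, and `latest` triggers the GitHub releases lookup shown earlier):

```yaml
postgresql_version: 16
enable_timescale: true # installs timescaledb-2-postgresql-16
enable_pg_cron: true
enable_pgvector: true
enable_paradedb: true # also pulls in pgvector (see the 'when' condition above)
pg_search_version: "latest" # resolved via api.github.com/.../releases/latest
```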
diff --git a/automation/roles/packages/tasks/main.yml b/automation/roles/packages/tasks/main.yml new file mode 100644 index 000000000..896082f3a --- /dev/null +++ b/automation/roles/packages/tasks/main.yml @@ -0,0 +1,249 @@ +--- +# Install packages from files +- block: + - name: Copy packages into /tmp + ansible.builtin.copy: + src: "{{ item }}" + dest: /tmp/ + loop: "{{ packages_from_file }}" + register: copy_packages_result + + - name: Install packages + ansible.builtin.apt: + force_apt_get: true + deb: "/tmp/{{ item }}" + state: present + loop: "{{ packages_from_file | map('basename') | list }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and copy_packages_result.changed + + - name: Install packages + ansible.builtin.package: + name: "/tmp/{{ item }}" + state: present + loop: "{{ packages_from_file | map('basename') | list }}" + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "RedHat" and copy_packages_result.changed + when: packages_from_file is defined and packages_from_file | length > 0 + tags: install_packages_from_file + +# Install packages from repository + +# RedHat +- name: Update dnf cache + ansible.builtin.shell: dnf clean all && dnf -y makecache + args: + executable: /bin/bash + register: dnf_status + until: dnf_status is success + delay: 5 + retries: 3 + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + environment: "{{ proxy_env | default({}) }}" + tags: install_packages, install_postgres + +- name: Install system packages + ansible.builtin.dnf: + name: "{{ item }}" + state: present + disablerepo: "pgdg*" + loop: "{{ system_packages | list }}" + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + - system_packages | default('') | length > 0 + tags: install_packages + +- name: Set Python alternative + community.general.alternatives: + name: python3 + path: /usr/bin/python{{ python_version }} + link: /usr/bin/python3 + priority: 1100 + state: selected + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + - python_version | default('') | length > 1 + tags: install_packages + +- name: Set Pip alternative + community.general.alternatives: + name: pip3 + path: /usr/bin/pip{{ python_version }} + link: /usr/bin/pip3 + priority: 1100 + state: selected + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + - python_version | default('') | length > 1 + - pip_package in system_packages + vars: + pip_package: "python{{ python_version | default('3') }}-pip" + tags: install_packages + +# Debian +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "Debian" + +- name: Install system packages + ansible.builtin.apt: + name: "{{ item }}" + state: present + loop: "{{ system_packages | list }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "Debian" + - system_packages | default('') | length > 0 + tags: install_packages + +# Install PostgreSQL from repository + +# RedHat +- block: # 
Preparing to install PostgreSQL
+    - name: PostgreSQL | check if appstream module is enabled
+      ansible.builtin.command: "dnf -y -C module list postgresql"
+      register: postgresql_module_result
+      changed_when: false
+
+    - name: PostgreSQL | disable appstream module
+      ansible.builtin.command: "dnf -y -C module disable postgresql"
+      when: "'[x] ' not in postgresql_module_result.stdout"
+  when:
+    - installation_method == "repo"
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_major_version >= '8'
+  ignore_errors: true
+  tags: install_postgres
+
+- name: Install PostgreSQL packages
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: present
+  loop: "{{ postgresql_packages | list }}"
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  environment: "{{ proxy_env | default({}) }}"
+  when:
+    - installation_method == "repo"
+    - ansible_os_family == "RedHat"
+    - postgresql_packages | default('') | length > 0
+  tags: install_packages, install_postgres
+
+# Debian
+- block: # Preparing to install PostgreSQL
+    - name: PostgreSQL | ensure postgresql database-cluster manager package
+      ansible.builtin.package:
+        name: postgresql-common
+        state: present
+      register: package_status
+      until: package_status is success
+      delay: 5
+      retries: 3
+      environment: "{{ proxy_env | default({}) }}"
+
+    - name: PostgreSQL | disable initializing of a default postgresql cluster
+      ansible.builtin.replace:
+        path: /etc/postgresql-common/createcluster.conf
+        replace: create_main_cluster = false
+        regexp: ^#?create_main_cluster.*$
+
+    - name: PostgreSQL | disable log rotation with logrotate for postgresql
+      ansible.builtin.file:
+        dest: /etc/logrotate.d/postgresql-common
+        state: absent
+  when:
+    - installation_method == "repo"
+    - ansible_os_family == "Debian"
+  tags: install_postgres
+
+- name: Install PostgreSQL packages
+  ansible.builtin.apt:
+    name: "{{ item }}"
+    state: present
+  loop: "{{ postgresql_packages | list }}"
+  environment: "{{ proxy_env | default({}) }}"
+  register: apt_status
+  until: apt_status is success
+  delay: 5
+  retries: 3
+  when:
+    - installation_method == "repo"
+    - ansible_os_family == "Debian"
+    - postgresql_packages | default('') | length > 0
+  tags: install_packages, install_postgres
+
+# Extensions
+- name: Install PostgreSQL Extensions
+  ansible.builtin.import_tasks: extensions.yml
+  when: installation_method == "repo"
+  tags: install_packages, install_postgres, install_extensions
+
+# Install perf (if 'install_perf' is 'true')
+- name:
Install perf
+ ansible.builtin.import_tasks: perf.yml
+ when: install_perf | bool
+ tags: install_packages, install_perf, perf
diff --git a/automation/roles/packages/tasks/perf.yml b/automation/roles/packages/tasks/perf.yml
new file mode 100644
index 000000000..42bae7333
--- /dev/null
+++ b/automation/roles/packages/tasks/perf.yml
@@ -0,0 +1,173 @@
+---
+# Install "perf" (Linux profiling with performance counters) and "FlameGraph".
+
+# RedHat
+- name: Install perf
+ ansible.builtin.dnf:
+ name: perf
+ state: present
+ disablerepo: "pgdg*"
+ register: package_status
+ until: package_status is success
+ delay: 5
+ retries: 3
+ when: ansible_os_family == "RedHat"
+
+# Debian
+- name: Install perf
+ ansible.builtin.apt:
+ name: "{{ 'linux-tools-common' if ansible_distribution == 'Ubuntu' else 'linux-perf' }}"
+ state: present
+ register: apt_status
+ until: apt_status is success
+ delay: 5
+ retries: 3
+ when: ansible_os_family == "Debian"
+
+# Check if perf is installed correctly, or build perf from source
+- name: Check if perf is installed
+ ansible.builtin.command: perf --version
+ register: perf_result
+ failed_when: false
+ changed_when: false
+ tags: perf
+
+# Build perf from source (if perf is not installed)
+- block:
+ - name: Extract kernel version
+ ansible.builtin.set_fact:
+ kernel_version: >-
+ {{ ansible_kernel.split('-')[0]
+ if not ansible_kernel.split('-')[0].endswith('.0')
+ else ansible_kernel.split('-')[0][:-2] }}
+ kernel_major_version: "{{ ansible_kernel.split('.')[0] }}"
+
+ - name: Download kernel source
+ ansible.builtin.get_url:
+ url: "https://mirrors.edge.kernel.org/pub/linux/kernel/v%7B%7B%20kernel_major_version%20%7D%7D.x/linux-%7B%7B%20kernel_version%20%7D%7D.tar.gz"
+ dest: "/tmp/linux-source-{{ kernel_version }}.tar.gz"
+ register: get_url_result
+
+ - name: Extract kernel source
+ ansible.builtin.unarchive:
+ src: "/tmp/linux-source-{{ kernel_version }}.tar.gz"
+ dest: /tmp/
+ remote_src: true
+ when:
+ - get_url_result is defined
+ - get_url_result is success
+
+ - name: Install basic build tools
+ ansible.builtin.package:
+ name:
+ - make
+ - gcc
+ - flex
+ - bison
+ state: present
+ register: build_tools_result
+ when:
+ - get_url_result is defined
+ - get_url_result is success
+
+ - name: Install required libraries
+ ansible.builtin.package:
+ name:
+ - pkg-config
+ - libzstd1
+ - libdwarf-dev
+ - libdw-dev
+ - binutils-dev
+ - libcap-dev
+ - libelf-dev
+ - libnuma-dev
+ - python3-dev
+ - libssl-dev
+ - libunwind-dev
+ - zlib1g-dev
+ - liblzma-dev
+ - libaio-dev
+ - libtraceevent-dev
+ - debuginfod
+ - libpfm4-dev
+ - libslang2-dev
+ - systemtap-sdt-dev
+ - libperl-dev
+ - libbabeltrace-dev
+ - libiberty-dev
+ - libzstd-dev
+ state: present
+ when:
+ - ansible_os_family == "Debian"
+ - build_tools_result is defined
+ - build_tools_result is success
+
+ - name: Install required libraries
+ ansible.builtin.package:
+ name:
+ - pkgconf
+ - libzstd
+ - libdwarf-devel
+ - elfutils-libelf-devel
+ - binutils-devel
+ - libcap-devel
+ - numactl-devel
+ - python3-devel
+ - openssl-devel
+ - libunwind-devel
+ - zlib-devel
+ - xz-devel
+ - libaio-devel
+ - libtraceevent-devel
+ - slang-devel
+ - systemtap-sdt-devel
+ - perl-devel
+ - libbabeltrace-devel
+ - libzstd-devel
+ state: present
+ when:
+ - ansible_os_family == "RedHat"
+ - build_tools_result is defined
+ - build_tools_result is success
+
+ - name: Build perf from source
+ become: true
+ become_user: root
+ community.general.make:
+ chdir: "/tmp/linux-{{ kernel_version }}/tools/perf"
+ jobs: "{{ ansible_processor_vcpus }}" # use all CPU cores
+ register: build_perf_result
+ when:
+ - build_tools_result is defined
+ - build_tools_result is success
+
+ - name: Copy perf to /usr/local/bin
+ ansible.builtin.copy:
+ src: "/tmp/linux-{{ kernel_version }}/tools/perf/perf"
+ dest: "/usr/local/bin/perf"
+ mode: "0755"
+ remote_src: true
+ when:
+ - build_perf_result is defined
+ - build_perf_result is success
+ ignore_errors: true # do not stop the playbook if perf could not be installed
+ when:
+ - perf_result.rc is defined
+ - perf_result.rc != 0
+ tags: perf
+
+# FlameGraph
+- block:
+ - name: Make sure git is present
+ ansible.builtin.package:
+ name: git
+ state: present
+
+ - name: "Download 'FlameGraph' to /var/opt/FlameGraph"
+ ansible.builtin.git:
+ repo: https://github.com/brendangregg/FlameGraph.git
+ dest: "/var/opt/FlameGraph"
+ single_branch: true
+ version: master
+ update: false
+ tags: flamegraph
diff --git a/automation/roles/pam_limits/README.md b/automation/roles/pam_limits/README.md
new file mode 100644
index 000000000..8f7515131
--- /dev/null
+++ b/automation/roles/pam_limits/README.md
@@ -0,0 +1 @@
+# Ansible Role: pam_limits
diff --git a/roles/pam_limits/tasks/main.yml b/automation/roles/pam_limits/tasks/main.yml
similarity index 60%
rename from roles/pam_limits/tasks/main.yml
rename to automation/roles/pam_limits/tasks/main.yml
index c18025640..13e784f55 100644
--- a/roles/pam_limits/tasks/main.yml
+++ b/automation/roles/pam_limits/tasks/main.yml
@@ -1,15 +1,12 @@
---
-
- name: Linux PAM limits | add or modify nofile limits
- pam_limits:
+ community.general.pam_limits:
domain: "{{ limits_user }}"
limit_type: "{{ item.limit_type }}"
limit_item: "{{ item.limit_item }}"
value: "{{ item.value }}"
loop:
- - {limit_type: 'soft', limit_item: 'nofile', value: "{{ soft_nofile }}"}
- - {limit_type: 'hard', limit_item: 'nofile', value: "{{ hard_nofile }}"}
+ - { limit_type: "soft", limit_item: "nofile", value: "{{ soft_nofile }}" }
+ - { limit_type: "hard", limit_item: "nofile", value: "{{ hard_nofile }}" }
when: set_limits is defined and set_limits|bool
tags: limits, pam_limits
-
-...
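# Aside (illustrative, not part of the diff): the pam_limits task above reads
# `limits_user`, `soft_nofile` and `hard_nofile` from the role defaults or inventory.
# A hypothetical set of values, and the /etc/security/limits.conf entries that
# community.general.pam_limits would render from them:
#
# limits_user: "postgres"
# soft_nofile: 65536
# hard_nofile: 200000
#
# postgres soft nofile 65536
# postgres hard nofile 200000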
diff --git a/automation/roles/patroni/README.md b/automation/roles/patroni/README.md
new file mode 100644
index 000000000..40f640742
--- /dev/null
+++ b/automation/roles/patroni/README.md
@@ -0,0 +1 @@
+# Ansible Role: patroni
diff --git a/automation/roles/patroni/config/tasks/main.yml b/automation/roles/patroni/config/tasks/main.yml
new file mode 100644
index 000000000..acdb70123
--- /dev/null
+++ b/automation/roles/patroni/config/tasks/main.yml
@@ -0,0 +1,105 @@
+---
+# Patroni configure
+- name: Make sure the conf directory exists
+ ansible.builtin.file:
+ path: /etc/patroni
+ state: directory
+ owner: postgres
+ group: postgres
+ mode: "0750"
+ tags: patroni, patroni_conf
+
+- name: Make sure the patroni log directory exists
+ ansible.builtin.file:
+ path: "{{ patroni_log_dir }}"
+ owner: postgres
+ group: postgres
+ state: directory
+ mode: "0750"
+ when: patroni_log_destination == 'logfile'
+ tags: patroni
+
+- name: Make sure the postgresql log directory "{{ postgresql_log_dir }}" exists
+ ansible.builtin.file:
+ path: "{{ postgresql_log_dir }}"
+ owner: postgres
+ group: postgres
+ state: directory
+ mode: "0700"
+ tags: patroni
+
+- name: Update conf file "/etc/patroni/patroni.yml"
+ ansible.builtin.template:
+ src: ../templates/patroni.yml.j2
+ dest: /etc/patroni/patroni.yml
+ owner: postgres
+ group: postgres
+ mode: "0640"
+ notify: "reload patroni"
+ tags: patroni, patroni_conf
+
+- block:
+ - name: Update postgresql parameters in DCS
+ ansible.builtin.uri:
+ url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/config
+ method: PATCH
+ user: "{{ patroni_restapi_username | default(omit) }}"
+ password: "{{ patroni_restapi_password | default(omit) }}"
+ body: '{"postgresql":{"parameters":{"{{ item.option }}":"{{ item.value }}"}}}'
+ body_format: json
+ loop: "{{ postgresql_parameters }}"
+ when: item.value != "null"
+
+ - name: Delete postgresql parameters from DCS
+ ansible.builtin.uri:
+ url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/config
+ method: PATCH
+ user: "{{ patroni_restapi_username | default(omit) }}"
+ password: "{{ patroni_restapi_password | default(omit) }}"
+ body: '{"postgresql":{"parameters":{"{{ item.option }}":null}}}'
+ body_format: json
+ loop: "{{ postgresql_parameters }}"
+ when: item.value == "null"
+ environment:
+ no_proxy: "{{ inventory_hostname }}"
+ when: inventory_hostname in groups['primary']
+ tags: patroni, patroni_conf
+
+- name: Delete postgresql parameters from "/etc/patroni/patroni.yml"
+ ansible.builtin.lineinfile:
+ path: /etc/patroni/patroni.yml
+ regexp: "^.*{{ item.option }}: {{ item.value }}$"
+ state: absent
+ loop: "{{ postgresql_parameters }}"
+ when: item.value == "null"
+ tags: patroni, patroni_conf
+
+# Update pg_hba.conf
+- ansible.builtin.import_tasks: pg_hba.yml
+ tags: patroni, patroni_conf, pg_hba, pg_hba_generate
+
+# pending_restart
+- block:
+ - name: "Check if there are any changed parameters that require a restart"
+ become: true
+ become_user: postgres
+ community.postgresql.postgresql_query:
+ login_host: "127.0.0.1"
+ login_port: "{{ postgresql_port }}"
+ login_user: "{{ patroni_superuser_username }}"
+ login_password: "{{ patroni_superuser_password }}"
+ login_db: "postgres"
+ query: "select name from pg_settings where pending_restart is true"
+ register: pending_restart_result
+
+ - name: "Set pg_pending_restart_settings variable"
+ ansible.builtin.set_fact:
+ pg_pending_restart_settings: "{{ pending_restart_result.query_result }}"
+
+ - name: "Display parameters requiring PostgreSQL
restart" + ansible.builtin.debug: + msg: + - "On server {{ ansible_hostname }}, the following parameters have changed and require PostgreSQL to restart:" + - "{{ pg_pending_restart_settings | map(attribute='name') | list }}" + when: pg_pending_restart_settings | length > 0 + tags: patroni, patroni_conf diff --git a/automation/roles/patroni/config/tasks/pg_hba.yml b/automation/roles/patroni/config/tasks/pg_hba.yml new file mode 100644 index 000000000..5acaab1f8 --- /dev/null +++ b/automation/roles/patroni/config/tasks/pg_hba.yml @@ -0,0 +1,10 @@ +--- +- name: Update pg_hba.conf + ansible.builtin.template: + src: ../templates/pg_hba.conf.j2 + dest: "{{ postgresql_conf_dir }}/pg_hba.conf" + owner: postgres + group: postgres + mode: "0640" + notify: "reload postgres" + tags: pg_hba, pg_hba_generate diff --git a/automation/roles/patroni/handlers/main.yml b/automation/roles/patroni/handlers/main.yml new file mode 100644 index 000000000..a8cf0b9b0 --- /dev/null +++ b/automation/roles/patroni/handlers/main.yml @@ -0,0 +1,17 @@ +--- +- name: Reload patroni service + ansible.builtin.systemd: + daemon_reload: true + name: patroni + enabled: true + state: reloaded + listen: "reload patroni" + +- name: Reload postgres + become: true + become_user: postgres + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl reload -D {{ postgresql_data_dir }}" + register: pg_ctl_reload_result + changed_when: pg_ctl_reload_result.rc == 0 + failed_when: false # exec 'reload' on all running postgres (to re-run with --tag pg_hba). + listen: "reload postgres" diff --git a/roles/patroni/library/yedit.py b/automation/roles/patroni/library/yedit.py similarity index 95% rename from roles/patroni/library/yedit.py rename to automation/roles/patroni/library/yedit.py index 9eb79e365..6bc0f5d64 100644 --- a/roles/patroni/library/yedit.py +++ b/automation/roles/patroni/library/yedit.py @@ -1,7 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # pylint: disable=wrong-import-order,wrong-import-position,unused-import @@ -67,7 +68,7 @@ aliases: [] edits: description: - - A list of edits to perform. These follow the same format as a single edit + - A list of edits to perform. 
These follow the same format as a single edit required: false aliases: [] value_type: @@ -298,8 +299,7 @@ def remove_entry(data, key, index=None, value=None, sep='.'): for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key) - elif (arr_ind and isinstance(data, list) and - int(arr_ind) <= len(data) - 1): + elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None @@ -326,8 +326,7 @@ def add_entry(data, key, item=None, sep='.'): ''' if key == '': pass - elif (not (key and Yedit.valid_key(key, sep)) and - isinstance(data, (list, dict))): + elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) @@ -338,14 +337,12 @@ def add_entry(data, key, item=None, sep='.'): continue elif data and not isinstance(data, dict): - raise YeditException("Unexpected item type found while going through key " + - "path: {0} (at key: {1})".format(key, dict_key)) + raise YeditException("Unexpected item type found while going through key " + "path: {0} (at key: {1})".format(key, dict_key)) data[dict_key] = {} data = data[dict_key] - elif (arr_ind and isinstance(data, list) and - int(arr_ind) <= len(data) - 1): + elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: raise YeditException("Unexpected item type found while going through key path: {0}".format(key)) @@ -357,7 +354,7 @@ def add_entry(data, key, item=None, sep='.'): # expected list entry elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data): # noqa: E501 # key is next element in array so append - if int(key_indexes[-1][0]) > len(data)-1: + if int(key_indexes[-1][0]) > len(data) - 1: data.append(item) else: data[int(key_indexes[-1][0])] = item @@ -383,16 +380,14 @@ def get_entry(data, key, sep='.'): ''' if key == '': pass - elif (not (key and Yedit.valid_key(key, sep)) and - isinstance(data, (list, dict))): + elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes: if dict_key and isinstance(data, dict): data = data.get(dict_key) - elif (arr_ind and isinstance(data, list) and - int(arr_ind) <= len(data) - 1): + elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None @@ -447,8 +442,7 @@ def write(self): elif self.content_type == 'json': Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True)) else: - raise YeditException('Unsupported content_type: {0}.'.format(self.content_type) + - 'Please specify a content_type of yaml or json.') + raise YeditException('Unsupported content_type: {0}.'.format(self.content_type) + 'Please specify a content_type of yaml or json.') return (True, self.yaml_dict) @@ -629,8 +623,7 @@ def update(self, path, value, index=None, curr_value=None): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if not isinstance(value, dict): - raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' + - 'value=[{0}] type=[{1}]'.format(value, type(value))) + raise YeditException('Cannot replace key, value entry in dict with non-dict type. 
' + 'value=[{0}] type=[{1}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -771,8 +764,7 @@ def parse_value(inc_value, vtype=''):
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming value. ' +
- 'value=[{0}] vtype=[{1}]'.format(type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' + 'value=[{0}] vtype=[{1}]'.format(type(inc_value), vtype))
return inc_value
@@ -821,8 +813,8 @@ def run_ansible(params):
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [{0}]. Verify that the '.format(params['src']) +
- 'file exists, that it is has correct permissions, and is valid yaml.'}
+ 'msg': 'Error opening file [{0}]. Verify that the '.format(params['src']) +
+ 'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
@@ -905,11 +897,13 @@
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
+
def json_roundtrip_clean(js):
''' Clean-up any non-string keys from a Python object, to ensure it can be serialized as JSON '''
cleaned_json = json.dumps(js, skipkeys=True)
return json.loads(cleaned_json)
+
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
diff --git a/automation/roles/patroni/tasks/custom_wal_dir.yml b/automation/roles/patroni/tasks/custom_wal_dir.yml
new file mode 100644
index 000000000..a2f73585d
--- /dev/null
+++ b/automation/roles/patroni/tasks/custom_wal_dir.yml
@@ -0,0 +1,153 @@
+---
+# 🔄 Determine base pg_wal_dir name
+- name: Roles.patroni.custom_wal_dir | Set pg_wal_dir based on postgresql_version
+ ansible.builtin.set_fact:
+ pg_wal_dir: "{{ 'pg_wal' if postgresql_version | int >= 10 else 'pg_xlog' }}"
+
+- name: "Make sure {{ postgresql_data_dir }}/{{ pg_wal_dir }} is a symlink"
+ ansible.builtin.stat:
+ path: "{{ postgresql_data_dir }}/{{ pg_wal_dir }}"
+ register: sym
+
+# Synchronize WALs (if the WAL dir is not a symlink)
+- block:
+ - name: Make sure the custom WAL directory "{{ postgresql_wal_dir }}" exists and is empty
+ ansible.builtin.file:
+ path: "{{ postgresql_wal_dir }}"
+ state: "{{ item }}"
+ owner: postgres
+ group: postgres
+ mode: "0700"
+ loop:
+ - absent
+ - directory
+
+ - name: Make sure rsync is installed (to synchronize the WAL dir)
+ ansible.builtin.package:
+ name:
+ - rsync
+ - sshpass
+ state: present
+ register: package_status
+ until: package_status is success
+ delay: 5
+ retries: 3
+ environment: "{{ proxy_env | default({}) }}"
+
+ - name: Execute CHECKPOINT before stopping PostgreSQL
+ become: true
+ become_user: postgres
+ ansible.builtin.command: >
+ {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "CHECKPOINT"
+
+ - name: Stop patroni service on the Replica (to create the symlink)
+ become: true
+ become_user: root
+ ansible.builtin.systemd:
+ name: patroni
+ state: stopped
+ when: inventory_hostname in groups['replica']
+
+ - name: Stop patroni service on the Leader (to create the symlink)
+ become: true
+ become_user: root
+ ansible.builtin.systemd:
+ name: patroni
+ state: stopped
+ when: inventory_hostname == groups['master'][0]
+
+ - name: Make sure PostgreSQL is stopped
+ become: true
+ become_user: postgres
+ ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}"
+ register: stop_result
+ changed_when: false
+ failed_when: false
+ until: stop_result.rc == 3
+ retries: 100
+ delay: 6
+
+ - name: "Synchronize {{ postgresql_data_dir }}/{{ pg_wal_dir }} to {{ postgresql_wal_dir }}"
+ become: true
+ become_user: postgres
+ ansible.posix.synchronize:
+ src: "{{ postgresql_data_dir }}/{{ pg_wal_dir }}/"
+ dest: "{{ postgresql_wal_dir }}/"
+ delegate_to: "{{ inventory_hostname }}"
+
+ - name: "Rename {{ pg_wal_dir }} to {{ pg_wal_dir }}_old"
+ ansible.builtin.command: mv {{ postgresql_data_dir }}/{{ pg_wal_dir }} {{ postgresql_data_dir }}/{{ pg_wal_dir }}_old
+
+ - name: "Create symlink {{ postgresql_data_dir }}/{{ pg_wal_dir }} -> {{ postgresql_wal_dir }}"
+ become: true
+ become_user: postgres
+ ansible.builtin.file:
+ src: "{{ postgresql_wal_dir }}"
+ dest: "{{ postgresql_data_dir }}/{{ pg_wal_dir }}"
+ state: link
+
+ - name: Start patroni service on the Leader
+ become: true
+ become_user: root
+ ansible.builtin.systemd:
+ name: patroni
+ state: started
+ when: inventory_hostname == groups['master'][0]
+
+ - name: "Wait for port {{ patroni_restapi_port }} to become open on the host"
+ ansible.builtin.wait_for:
+ port: "{{ patroni_restapi_port }}"
+ host: "{{ inventory_hostname }}"
+ state: started
+ timeout: 120
+ delay: 10
+ ignore_errors: false
+ when: inventory_hostname == groups['master'][0]
+
+ - name: Check that patroni is healthy (the leader with the lock)
+ ansible.builtin.uri:
+ url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader"
+ status_code: 200
+ register: patroni_result
+ until: patroni_result.status == 200
+ retries: 120
+ delay: 10
+ environment:
+ no_proxy: "{{ inventory_hostname }}"
+ when: inventory_hostname == groups['master'][0] and not ansible_check_mode
+
+ - name: Start patroni service on the Replica
+ become: true
+ become_user: root
+ ansible.builtin.systemd:
+ name: patroni
+ state: started
+ when: inventory_hostname in groups['replica']
+
+ - name: "Wait for port {{ patroni_restapi_port }} to become open on the host"
+ ansible.builtin.wait_for:
+ port: "{{ patroni_restapi_port }}"
+ host: "{{ inventory_hostname }}"
+ state: started
+ timeout: 120
+ delay: 10
+ ignore_errors: false
+ when: inventory_hostname in groups['replica']
+
+ - name: Check that patroni is healthy on the Replica
+ ansible.builtin.uri:
+ url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/health"
+ status_code: 200
+ register: patroni_result
+ until: patroni_result.status == 200
+ retries: 120
+ delay: 10
+ environment:
+ no_proxy: "{{ inventory_hostname }}"
+ when: inventory_hostname in groups['replica'] and not ansible_check_mode
+
+ - name: "Remove {{ pg_wal_dir }}_old directory"
+ ansible.builtin.file:
+ path: "{{ postgresql_data_dir }}/{{ pg_wal_dir }}_old"
+ state: absent
+ when: sym.stat.exists and not sym.stat.islnk | bool
diff --git a/roles/patroni/tasks/main.yml b/automation/roles/patroni/tasks/main.yml
similarity index 52%
rename from roles/patroni/tasks/main.yml
rename to automation/roles/patroni/tasks/main.yml
index 3595087a3..ddd0dff83 100644
--- a/roles/patroni/tasks/main.yml
+++ b/automation/roles/patroni/tasks/main.yml
@@ -1,46 +1,49 @@
---
-# yamllint disable rule:line-length
-# yamllint disable rule:comments-indentation
-
- name: Make sure handlers are flushed immediately
- meta: flush_handlers
+ ansible.builtin.meta: flush_handlers
# pip install
-- import_tasks: pip.yml
- when: patroni_installation_method == "pip"
+- ansible.builtin.import_tasks: pip.yml
+ when:
+ - patroni_installation_method == "pip"
+ 
- pip_package not in system_packages + vars: + pip_package: "python{{ python_version | default('3') }}-pip" tags: patroni, patroni_install, pip # Patroni install -- block: # installation_method: "repo" and patroni_installation_method: "pip" +- block: # installation_method: "repo" and patroni_installation_method: "pip" - name: Copy patroni requirements.txt file - copy: + ansible.builtin.copy: src: requirements.txt dest: /tmp/requirements.txt when: patroni_pip_requirements_repo | length < 1 - name: Install setuptools - pip: - name: setuptools + ansible.builtin.pip: + name: setuptools<66.0.0 # https://github.com/pypa/setuptools/issues/3772 state: latest executable: pip3 extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" umask: "0022" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_requirements_repo | length < 1 - name: Install requirements - pip: + ansible.builtin.pip: requirements: /tmp/requirements.txt executable: pip3 extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" umask: "0022" environment: PATH: "{{ ansible_env.PATH }}:{{ postgresql_bin_dir }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_requirements_repo | length < 1 - name: Install patroni - pip: + ansible.builtin.pip: name: patroni state: latest executable: pip3 @@ -48,26 +51,26 @@ umask: "0022" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_package_repo | length < 1 and patroni_install_version == "latest" - name: "Install patroni {{ patroni_install_version }}" - pip: + ansible.builtin.pip: name: "patroni=={{ patroni_install_version }}" executable: pip3 extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" umask: "0022" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_package_repo | length < 1 and patroni_install_version != "latest" when: installation_method == "repo" and patroni_installation_method == "pip" environment: "{{ proxy_env | default({}) }}" - vars: - ansible_python_interpreter: /usr/bin/python3 tags: patroni, patroni_install -- block: # when "patroni_pip_requirements_repo" and "patroni_pip_package_repo" is defined +- block: # when "patroni_pip_requirements_repo" and "patroni_pip_package_repo" is defined - name: Download patroni requirements - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 120 @@ -76,7 +79,7 @@ when: patroni_pip_requirements_repo | length > 0 - name: Download patroni package - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 60 @@ -85,7 +88,7 @@ when: patroni_pip_package_repo | length > 0 - name: Install requirements - pip: + ansible.builtin.pip: name: "file:///tmp/{{ item }}" executable: pip3 extra_args: "--no-index --find-links=file:///tmp --ignore-installed" @@ -93,10 +96,11 @@ loop: "{{ patroni_pip_requirements_repo | map('basename') | list }}" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_requirements_repo | length > 0 - name: Install patroni - pip: + ansible.builtin.pip: name: "file:///tmp/{{ item }}" executable: pip3 extra_args: "--no-index --find-links=file:///tmp --ignore-installed" @@ -104,29 +108,28 @@ loop: "{{ patroni_pip_package_repo | map('basename') | list }}" 
environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_package_repo | length > 0 when: installation_method == "repo" and patroni_installation_method == "pip" - vars: - ansible_python_interpreter: /usr/bin/python3 tags: patroni, patroni_install -- block: # installation_method: "file" and patroni_installation_method: "pip" +- block: # installation_method: "file" and patroni_installation_method: "pip" - name: Copy patroni requirements - copy: + ansible.builtin.copy: src: "{{ item }}" dest: /tmp/ loop: "{{ patroni_pip_requirements_file }}" when: patroni_pip_requirements_file | length > 0 - name: Copy patroni package - copy: + ansible.builtin.copy: src: "{{ item }}" dest: /tmp/ loop: "{{ patroni_pip_package_file }}" when: patroni_pip_package_file | length > 0 - name: Install requirements - pip: + ansible.builtin.pip: name: "file:///tmp/{{ item }}" executable: pip3 extra_args: "--no-index --find-links=file:///tmp --ignore-installed" @@ -134,10 +137,11 @@ loop: "{{ patroni_pip_requirements_file | map('basename') | list }}" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_requirements_file | length > 0 - name: Install patroni - pip: + ansible.builtin.pip: name: "file:///tmp/{{ item }}" executable: pip3 extra_args: "--no-index --find-links=file:///tmp --ignore-installed" @@ -145,31 +149,38 @@ loop: "{{ patroni_pip_package_file | map('basename') | list }}" environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" when: patroni_pip_package_file | length > 0 when: installation_method == "file" and patroni_installation_method == "pip" - vars: - ansible_python_interpreter: /usr/bin/python3 tags: patroni, patroni_install -- block: # installation_method: "repo" and patroni_installation_method: "rpm/deb" +- block: # installation_method: "repo" and patroni_installation_method: "rpm/deb" # Debian - name: Install patroni package - package: - name: patroni + ansible.builtin.package: + name: "{{ patroni_packages | default('patroni') }}" state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "Debian" and patroni_deb_package_repo | length < 1 # RedHat - name: Install patroni package - package: - name: patroni + ansible.builtin.package: + name: "{{ patroni_packages | default('patroni') }}" state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length < 1 # when patroni_deb_package_repo or patroni_rpm_package_repo URL is defined # Debian - name: Download patroni deb package - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 60 @@ -178,16 +189,20 @@ when: ansible_os_family == "Debian" and patroni_deb_package_repo | length > 0 - name: Install patroni from deb package - apt: + ansible.builtin.apt: force_apt_get: true deb: "/tmp/{{ item }}" state: present loop: "{{ patroni_deb_package_repo | map('basename') | list }}" + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 when: ansible_os_family == "Debian" and patroni_deb_package_repo | length > 0 # RedHat - name: Download patroni rpm package - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 60 @@ -196,10 +211,14 @@ when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length > 0 - name: Install patroni from rpm package - 
package: + ansible.builtin.package: name: "/tmp/{{ item }}" state: present loop: "{{ patroni_rpm_package_repo | map('basename') | list }}" + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length > 0 environment: "{{ proxy_env | default({}) }}" when: installation_method == "repo" and (patroni_installation_method == "rpm" or patroni_installation_method == "deb") @@ -209,67 +228,105 @@ - block: # Debian - name: Copy patroni deb package into /tmp - copy: + ansible.builtin.copy: src: "{{ patroni_deb_package_file }}" dest: /tmp/ when: ansible_os_family == "Debian" - name: Install patroni from deb package - apt: + ansible.builtin.apt: force_apt_get: true deb: "/tmp/{{ patroni_deb_package_file | basename }}" state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 when: ansible_os_family == "Debian" # RedHat - name: Copy patroni rpm package into /tmp - copy: + ansible.builtin.copy: src: "{{ patroni_rpm_package_file }}" dest: /tmp/ when: ansible_os_family == "RedHat" - name: Install patroni from rpm package - package: + ansible.builtin.package: name: "/tmp/{{ patroni_rpm_package_file | basename }}" state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "RedHat" when: installation_method == "file" and (patroni_installation_method == "rpm" or patroni_installation_method == "deb") tags: patroni, patroni_install # Patroni configure - name: Create conf directory - file: + ansible.builtin.file: path: /etc/patroni state: directory owner: postgres group: postgres - mode: 0750 + mode: "0750" + tags: patroni, patroni_conf + +# TLS (etcd) +- name: Copy etcd cert files for patroni + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "etcd_cluster" + fetch_tls_dir: "{{ etcd_tls_dir | default('/etc/etcd/tls') }}" + copy_tls_dir: "{{ patroni_etcd_tls_dir | default('/etc/patroni/tls/etcd') }}" + copy_tls_owner: "postgres" + when: + - tls_cert_generate | bool + - dcs_type == "etcd" + - not dcs_exists | bool + tags: patroni, patroni_conf + +# TLS (consul) +- name: Copy consul cert files for patroni + ansible.builtin.include_role: + name: ../roles/tls_certificate/copy + vars: + tls_group_name: "consul_instances" + fetch_tls_dir: "{{ consul_tls_dir | default('/etc/consul/tls') }}" + copy_tls_dir: "{{ patroni_consul_tls_dir | default('/etc/patroni/tls/consul') }}" + copy_tls_owner: "postgres" + when: + - tls_cert_generate | bool + - dcs_type == "consul" + - not dcs_exists | bool tags: patroni, patroni_conf - name: Generate conf file "/etc/patroni/patroni.yml" - template: + ansible.builtin.template: src: templates/patroni.yml.j2 dest: /etc/patroni/patroni.yml owner: postgres group: postgres - mode: 0640 + mode: "0640" when: existing_pgcluster is not defined or not existing_pgcluster|bool tags: patroni, patroni_conf - name: Create patroni log directory - file: + ansible.builtin.file: path: "{{ patroni_log_dir }}" owner: postgres group: postgres state: directory - mode: 0750 + mode: "0750" when: patroni_log_destination == 'logfile' tags: patroni, patroni_conf -- block: # for add_pgnode.yml +- block: # for add_pgnode.yml - name: Fetch patroni.yml conf file from master run_once: true - fetch: + ansible.builtin.fetch: src: /etc/patroni/patroni.yml dest: files/patroni.yml validate_checksum: true @@ -277,68 +334,89 @@ delegate_to: "{{ groups.master[0] }}" - name: Copy 
patroni.yml conf file to replica - copy: + ansible.builtin.copy: src: files/patroni.yml dest: /etc/patroni/patroni.yml owner: postgres group: postgres - mode: 0640 + mode: "0640" + + - name: Remove patroni.yml conf files from localhost + become: false + run_once: true + ansible.builtin.file: + path: files/patroni.yml + state: absent + delegate_to: localhost - name: Prepare patroni.yml conf file (replace "name","listen","connect_address") - lineinfile: + ansible.builtin.lineinfile: path: /etc/patroni/patroni.yml - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" + regexp: "{{ patroni_config_without_cluster_vip_item.regexp }}" + line: "{{ patroni_config_without_cluster_vip_item.line }}" backrefs: true loop: - - {regexp: '^name:', line: 'name: {{ ansible_hostname }}'} - - {regexp: '^ listen: .*:8008$', line: ' listen: {{ hostvars[inventory_hostname].inventory_hostname }}:8008'} - - {regexp: '^ connect_address: .*:8008$', line: ' connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:8008'} - - {regexp: '^ listen: ((?!8008).)*$', line: ' listen: {{ hostvars[inventory_hostname].inventory_hostname }},127.0.0.1:{{ postgresql_port }}'} - - {regexp: '^ connect_address: ((?!8008).)*$', line: ' connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ postgresql_port }}'} + - regexp: "^name:" + line: "name: {{ ansible_hostname }}" + - regexp: "^ listen: .*:{{ patroni_restapi_port }}$" + line: " listen: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ patroni_restapi_port }}" + - regexp: "^ connect_address: .*:{{ patroni_restapi_port }}$" + line: " connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ patroni_restapi_port }}" + - regexp: "^ listen: ((?!{{ patroni_restapi_port }}).)*$" + line: " listen: {{ hostvars[inventory_hostname].inventory_hostname }},127.0.0.1:{{ postgresql_port }}" + - regexp: "^ connect_address: ((?!{{ patroni_restapi_port }}).)*$" + line: " connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ postgresql_port }}" loop_control: - label: "{{ item.line }}" + loop_var: patroni_config_without_cluster_vip_item + label: "{{ patroni_config_without_cluster_vip_item.line }}" when: with_haproxy_load_balancing|bool or pgbouncer_install|bool or (cluster_vip is not defined or cluster_vip | length < 1) - name: Prepare patroni.yml conf file (replace "name","listen","connect_address") - lineinfile: + ansible.builtin.lineinfile: path: /etc/patroni/patroni.yml - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" + regexp: "{{ patroni_config_with_cluster_vip_item.regexp }}" + line: "{{ patroni_config_with_cluster_vip_item.line }}" backrefs: true loop: - - {regexp: '^name:', line: 'name: {{ ansible_hostname }}'} - - {regexp: '^ listen: .*:8008$', line: ' listen: {{ hostvars[inventory_hostname].inventory_hostname }}:8008'} - - {regexp: '^ connect_address: .*:8008$', line: ' connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:8008'} - - {regexp: '^ listen: ((?!8008).)*$', line: ' listen: {{ hostvars[inventory_hostname].inventory_hostname }},{{ cluster_vip }},127.0.0.1:{{ postgresql_port }}'} # noqa 204 - - {regexp: '^ connect_address: ((?!8008).)*$', line: ' connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ postgresql_port }}'} + - regexp: "^name:" + line: "name: {{ ansible_hostname }}" + - regexp: "^ listen: .*:{{ patroni_restapi_port }}$" + line: " listen: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ patroni_restapi_port }}" + - regexp: "^ connect_address: 
.*:{{ patroni_restapi_port }}$" + line: " connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ patroni_restapi_port }}" + - regexp: "^ listen: ((?!{{ patroni_restapi_port }}).)*$" + line: " listen: {{ hostvars[inventory_hostname].inventory_hostname }},{{ cluster_vip }},127.0.0.1:{{ postgresql_port }}" + - regexp: "^ connect_address: ((?!{{ patroni_restapi_port }}).)*$" + line: " connect_address: {{ hostvars[inventory_hostname].inventory_hostname }}:{{ postgresql_port }}" loop_control: - label: "{{ item.line }}" + loop_var: patroni_config_with_cluster_vip_item + label: "{{ patroni_config_with_cluster_vip_item.line }}" when: not with_haproxy_load_balancing|bool and not pgbouncer_install|bool and (cluster_vip is defined and cluster_vip | length > 0) when: existing_pgcluster is defined and existing_pgcluster|bool tags: patroni, patroni_conf - name: Copy systemd service file "/etc/systemd/system/patroni.service" - template: + ansible.builtin.template: src: templates/patroni.service.j2 dest: /etc/systemd/system/patroni.service owner: postgres group: postgres - mode: 0644 + mode: "0644" tags: patroni, patroni_conf, patroni_service - name: Prepare PostgreSQL | create statistics directory (if not already exists) - file: + ansible.builtin.file: path: "{{ postgresql_stats_temp_directory_path }}" state: directory - mode: 01777 + mode: "01777" when: - postgresql_stats_temp_directory_path is defined - postgresql_stats_temp_directory_path != 'none' + - postgresql_version | int <= 14 tags: patroni, pgsql_stats_tmp - name: Prepare PostgreSQL | mount the statistics directory in memory (tmpfs) - mount: + ansible.posix.mount: path: "{{ postgresql_stats_temp_directory_path }}" src: tmpfs fstype: tmpfs @@ -347,62 +425,60 @@ when: - postgresql_stats_temp_directory_path is defined - postgresql_stats_temp_directory_path != 'none' + - postgresql_version | int <= 14 tags: patroni, pgsql_stats_tmp - name: Prepare PostgreSQL | make sure the postgresql log directory "{{ postgresql_log_dir }}" exists - file: + ansible.builtin.file: path: "{{ postgresql_log_dir }}" owner: postgres group: postgres state: directory - mode: 0700 + mode: "0700" tags: patroni - name: Prepare PostgreSQL | make sure the custom WAL directory "{{ postgresql_wal_dir }}" exists - file: + ansible.builtin.file: path: "{{ postgresql_wal_dir }}" owner: postgres group: postgres state: directory - mode: 0700 + mode: "0700" when: postgresql_wal_dir is defined and postgresql_wal_dir | length > 0 tags: patroni, custom_wal_dir -- block: # wheh postgresql NOT exists or PITR +- block: # when postgresql NOT exists or PITR - name: Prepare PostgreSQL | make sure PostgreSQL data directory "{{ postgresql_data_dir }}" exists - file: + ansible.builtin.file: path: "{{ postgresql_data_dir }}" owner: postgres group: postgres state: directory - mode: 0700 - - - name: Prepare PostgreSQL | check that data directory "{{ postgresql_data_dir }}" is not initialized - stat: - path: "{{ postgresql_data_dir }}/PG_VERSION" - register: pgdata_initialized - when: patroni_cluster_bootstrap_method == "initdb" - - - name: Prepare PostgreSQL | data directory check result - fail: - msg: "Whoops! 
data directory {{ postgresql_data_dir }} is already initialized"
- when: pgdata_initialized.stat.exists is defined and
- pgdata_initialized.stat.exists
- tags: patroni, patroni_check_init
-
- # for Debian based distros only
- # patroni bootstrap failure is possible if the postgresql config files are missing
- - name: Prepare PostgreSQL | make sure the postgresql config files exists
- stat:
+ mode: "0700"
+
+ # for Debian based distros only
+ # patroni bootstrap failure is possible if the PostgreSQL config files are missing
+ - name: Prepare PostgreSQL | make sure PostgreSQL config directory exists
+ ansible.builtin.file:
+ path: /etc/postgresql
+ state: directory
+ owner: postgres
+ group: postgres
+ recurse: true
+ when: ansible_os_family == "Debian" and
+ postgresql_packages|join(" ") is not search("postgrespro")
+
+ - name: Prepare PostgreSQL | make sure PostgreSQL config files exist
+ ansible.builtin.stat:
path: "{{ postgresql_conf_dir }}/postgresql.conf"
register: postgresql_conf_file
when: ansible_os_family == "Debian" and
- postgresql_packages|join(" ") is not search("postgrespro")
+ postgresql_packages|join(" ") is not search("postgrespro")

- - name: Prepare PostgreSQL | generate default postgresql config files
+ - name: Prepare PostgreSQL | generate default PostgreSQL config files
become: true
become_user: postgres
- command: >
+ ansible.builtin.command: >
/usr/bin/pg_createcluster {{ postgresql_version }} {{ postgresql_cluster_name }}
-d {{ postgresql_data_dir }}
-p {{ postgresql_port }}
@@ -411,81 +487,66 @@
register: pg_createcluster_result
failed_when: pg_createcluster_result.rc != 0
when: (ansible_os_family == "Debian" and
- postgresql_packages|join(" ") is not search("postgrespro")) and
- not postgresql_conf_file.stat.exists
+ postgresql_packages|join(" ") is not search("postgrespro")) and
+ not postgresql_conf_file.stat.exists
+
+ # When performing PITR, we do not clear the data directory if pgbackrest is used, so that the '--delta restore' option can be used.
+ # In all other cases, we must clear the data directory before restore.
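+ # (Illustrative aside: the actual restore command is taken from 'pgbackrest_patroni_cluster_restore_command';
+ # a delta restore has the general shape "pgbackrest --stanza=<stanza-name> --delta restore",
+ # which reuses files already present in the data directory and only syncs the differences.)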
+ - block: + - name: Prepare PostgreSQL | Make sure the patroni service is stopped + ansible.builtin.systemd: + name: patroni + state: stopped + + - name: Prepare PostgreSQL | make sure the data directory "{{ postgresql_data_dir }}" is empty + ansible.builtin.file: + path: "{{ postgresql_data_dir }}" + state: "{{ item }}" + owner: postgres + group: postgres + mode: "0700" + loop: + - absent + - directory - - name: Prepare PostgreSQL | make sure the data directory "{{ postgresql_data_dir }}" is empty - file: - path: "{{ postgresql_data_dir }}" - state: "{{ item }}" - owner: postgres - group: postgres - mode: 0700 - loop: - - absent - - directory - when: patroni_cluster_bootstrap_method != "pgbackrest" # --delta restore - - - name: Prepare PostgreSQL | make sure the custom WAL directory "{{ postgresql_wal_dir }}" is empty - file: - path: "{{ postgresql_wal_dir }}" - state: "{{ item }}" - owner: postgres - group: postgres - mode: 0700 - loop: - - absent - - directory - when: (postgresql_wal_dir is defined and postgresql_wal_dir | length > 0) and - patroni_cluster_bootstrap_method != "pgbackrest" # --delta restore - when: postgresql_exists != "true" or patroni_cluster_bootstrap_method != "initdb" + - name: Prepare PostgreSQL | make sure the custom WAL directory "{{ postgresql_wal_dir }}" is empty + ansible.builtin.file: + path: "{{ postgresql_wal_dir }}" + state: "{{ item }}" + owner: postgres + group: postgres + mode: "0700" + loop: + - absent + - directory + when: postgresql_wal_dir | default('') | length > 0 + when: (inventory_hostname == groups['master'][0] and patroni_cluster_bootstrap_method != "pgbackrest") + or (inventory_hostname in groups['replica'] and ('pgbackrest' not in patroni_create_replica_methods or new_node | default(false) | bool)) + when: not postgresql_exists | default(false) | bool or patroni_cluster_bootstrap_method != "initdb" tags: patroni, point_in_time_recovery -- block: # when postgresql exists - - name: Prepare PostgreSQL | check that data directory "{{ postgresql_data_dir }}" is initialized - stat: - path: "{{ postgresql_data_dir }}/PG_VERSION" - register: pgdata_initialized - - - name: Prepare PostgreSQL | data directory check result - fail: - msg: "Whoops! 
data directory {{ postgresql_data_dir }} is not initialized" - when: not pgdata_initialized.stat.exists - tags: patroni, patroni_check_init - - - block: # for master only +- block: # when postgresql exists + - block: # for master only - name: Prepare PostgreSQL | check PostgreSQL is started on Master become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" register: pg_ctl_status_result changed_when: false failed_when: - pg_ctl_status_result.rc != 0 - pg_ctl_status_result.rc != 3 - # "Debian" - - name: Prepare PostgreSQL | start PostgreSQL on Master - become: true - become_user: postgres - command: "/usr/bin/pg_ctlcluster {{ postgresql_version }} {{ postgresql_cluster_name }} start" - register: pg_start_on_master - when: pg_ctl_status_result.rc == 3 and - (ansible_os_family == "Debian" and postgresql_packages|join(" ") is not search("postgrespro")) - - # "RedHat" or PostgresPro - name: Prepare PostgreSQL | start PostgreSQL on Master become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_ctl start -D {{ postgresql_data_dir }}" - register: pg_start_on_master - when: pg_ctl_status_result.rc == 3 and - (ansible_os_family == "RedHat" or postgresql_packages|join(" ") is search("postgrespro")) + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl start -D {{ postgresql_data_dir }} -w -t {{ pg_ctl_timeout | default(3600) }}" + when: pg_ctl_status_result.rc == 3 - name: Prepare PostgreSQL | check PostgreSQL is accepting connections become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" register: pg_isready_result until: pg_isready_result.rc == 0 retries: 30 @@ -493,23 +554,21 @@ changed_when: false - name: Prepare PostgreSQL | generate pg_hba.conf on Master - template: + ansible.builtin.template: src: templates/pg_hba.conf.j2 dest: "{{ postgresql_conf_dir }}/pg_hba.conf" owner: postgres group: postgres - mode: 0640 + mode: "0640" - name: Prepare PostgreSQL | reload for apply the pg_hba.conf become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -c 'SELECT pg_reload_conf()'" - register: psql_reload_result - failed_when: psql_reload_result.rc != 0 + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl reload -D {{ postgresql_data_dir }}" - name: Prepare PostgreSQL | make sure the user "{{ patroni_superuser_username }}" are present, and password does not differ from the specified - postgresql_user: - db: postgres + community.postgresql.postgresql_user: + login_db: postgres name: "{{ patroni_superuser_username }}" password: "{{ patroni_superuser_password }}" encrypted: true @@ -521,8 +580,8 @@ become_user: postgres - name: Prepare PostgreSQL | make sure the user "{{ patroni_replication_username }}" are present, and password does not differ from the specified - postgresql_user: - db: postgres + community.postgresql.postgresql_user: + login_db: postgres name: "{{ patroni_replication_username }}" password: "{{ patroni_replication_password }}" encrypted: true @@ -532,12 +591,12 @@ state: present become: true become_user: postgres - when: is_master == "true" + when: inventory_hostname == groups['master'][0] - name: Prepare PostgreSQL | check PostgreSQL is started become: true become_user: postgres - command: "{{ postgresql_bin_dir 
}}/pg_ctl status -D {{ postgresql_data_dir }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" register: pg_ctl_status_result changed_when: false failed_when: @@ -547,110 +606,112 @@ - name: Prepare PostgreSQL | waiting for CHECKPOINT to complete before stopping postgresql become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -c 'CHECKPOINT'" + ansible.builtin.command: > + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "CHECKPOINT" register: checkpoint_result until: checkpoint_result.rc == 0 retries: 300 delay: 10 when: pg_ctl_status_result.rc == 0 - # "Debian" - - name: Prepare PostgreSQL | stop PostgreSQL (will be managed by patroni) - become: true - become_user: postgres - command: "/usr/bin/pg_ctlcluster {{ postgresql_version }} {{ postgresql_cluster_name }} stop -m fast -- -w -t 600" - when: (checkpoint_result.rc is defined and checkpoint_result.rc == 0) and - (ansible_os_family == "Debian" and postgresql_packages|join(" ") is not search("postgrespro")) - - # "RedHat" or PostgresPro - name: Prepare PostgreSQL | stop PostgreSQL (will be managed by patroni) become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_ctl stop -D {{ postgresql_data_dir }} -m fast -w -t 600" - when: (checkpoint_result.rc is defined and checkpoint_result.rc == 0) and - (ansible_os_family == "RedHat" or postgresql_packages|join(" ") is search("postgrespro")) + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl stop -D {{ postgresql_data_dir }} -m fast -w -t {{ pg_ctl_timeout | default(3600) }}" + when: checkpoint_result.rc is defined and checkpoint_result.rc == 0 - name: Prepare PostgreSQL | check PostgreSQL is stopped become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" register: pg_ctl_stop_result failed_when: pg_ctl_stop_result.rc != 3 changed_when: false - when: postgresql_exists == "true" and patroni_cluster_bootstrap_method == "initdb" + when: postgresql_exists | default(false) | bool and patroni_cluster_bootstrap_method == "initdb" tags: patroni, patroni_start_master -- block: # PITR (custom bootstrap) -# Prepare (install pexpect, ruamel.yaml) +- block: # PITR (custom bootstrap) + # Prepare (install pexpect, ruamel.yaml) - name: Prepare | Make sure the ansible required python library is exist - pip: + ansible.builtin.pip: name: "{{ item }}" state: present executable: pip3 extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" umask: "0022" loop: - - pexpect - - ruamel.yaml + - pexpect==4.9.0 + - ruamel.yaml==0.17.40 environment: PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" - vars: - ansible_python_interpreter: /usr/bin/python3 -# Run PITR + PIP_BREAK_SYSTEM_PACKAGES: "1" + + # Run PITR - name: Stop patroni service on the Replica servers (if running) - systemd: + ansible.builtin.systemd: name: patroni state: stopped - when: is_master != "true" + when: inventory_hostname in groups['replica'] - name: Stop patroni service on the Master server (if running) - systemd: + ansible.builtin.systemd: name: patroni state: stopped - when: is_master == "true" + when: inventory_hostname == groups['master'][0] + + - name: Check that PostgreSQL is stopped + become: true + become_user: postgres + ansible.builtin.command: "{{ 
postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" + register: pg_ctl_status_result + changed_when: false + failed_when: false + + - name: Stop PostgreSQL + become: true + become_user: postgres + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/pg_ctl stop -D {{ postgresql_data_dir }} -m fast -w -t {{ pg_ctl_timeout | default(3600) }} + when: pg_ctl_status_result.rc is defined and (pg_ctl_status_result.rc != 3 and pg_ctl_status_result.rc != 4) - name: Remove patroni cluster "{{ patroni_cluster_name }}" from DCS (if exist) become: true become_user: postgres - expect: + ansible.builtin.expect: command: "patronictl -c /etc/patroni/patroni.yml remove {{ patroni_cluster_name }}" responses: - 'Please confirm the cluster name to remove': '{{ patroni_cluster_name }}' - 'You are about to remove all information in DCS': 'Yes I am aware' + "Please confirm the cluster name to remove": "{{ patroni_cluster_name }}" + "You are about to remove all information in DCS": "Yes I am aware" register: patronictl_remove_result - changed_when: - patronictl_remove_result.rc == 0 and + changed_when: patronictl_remove_result.rc == 0 and patronictl_remove_result.stdout|lower is not search("key not found") - failed_when: - patronictl_remove_result.rc != 0 + failed_when: patronictl_remove_result.rc != 0 environment: PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" - when: is_master == "true" - vars: - ansible_python_interpreter: /usr/bin/python3 + when: inventory_hostname == groups['master'][0] - - block: # for pgbackrest only (for use --delta restore) + - block: # for pgbackrest only (for use --delta restore) - name: Run "{{ pgbackrest_patroni_cluster_restore_command }}" on Master - command: > + ansible.builtin.command: > {{ pgbackrest_patroni_cluster_restore_command }} {{ '--target-action=promote' if pgbackrest_patroni_cluster_restore_command is search('--type=') else '' }} - async: 86400 # timeout 24 hours + async: "{{ cluster_restore_timeout | default(86400) }}" # timeout 24 hours poll: 0 register: pgbackrest_restore_master - when: is_master == "true" + when: inventory_hostname == groups['master'][0] # if patroni_create_replica_methods: "pgbackrest" - name: Run "{{ pgbackrest_patroni_cluster_restore_command }}" on Replica - command: > + ansible.builtin.command: > {{ pgbackrest_patroni_cluster_restore_command }} - {{ '--target-action=shutdown' if pgbackrest_patroni_cluster_restore_command is search('--type=') else '' }} - async: 86400 # timeout 24 hours + {{ '--target-action=pause' if pgbackrest_patroni_cluster_restore_command is search('--type=') else '' }} + async: "{{ cluster_restore_timeout | default(86400) }}" # timeout 24 hours poll: 0 register: pgbackrest_restore_replica - when: is_master != "true" and 'pgbackrest' in patroni_create_replica_methods + when: inventory_hostname in groups['replica'] and 'pgbackrest' in patroni_create_replica_methods - name: Waiting for restore from backup - async_status: + ansible.builtin.async_status: jid: "{{ item.ansible_job_id }}" loop: - "{{ pgbackrest_restore_master }}" @@ -659,119 +720,149 @@ label: "{{ item.changed }}" register: pgbackrest_restore_jobs_result until: pgbackrest_restore_jobs_result.finished - retries: 2880 # timeout 24 hours + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours delay: 30 when: item.ansible_job_id is defined - - name: Start PostgreSQL for Recovery # Debian - command: "/usr/bin/pg_ctlcluster {{ postgresql_version }} {{ postgresql_cluster_name }} start -o '-c hot_standby=off'" - 
when: ansible_os_family == "Debian" and - (is_master == "true" or - (is_master != "true" and 'pgbackrest' in patroni_create_replica_methods)) - - - name: Start PostgreSQL for Recovery # RedHat or PostgresPro - command: "{{ postgresql_bin_dir }}/pg_ctl start -D {{ postgresql_data_dir }} -o '-c hot_standby=off'" - when: (ansible_os_family == "RedHat" or postgresql_packages|join(" ") is search("postgrespro")) and - (is_master == "true" or - (is_master != "true" and 'pgbackrest' in patroni_create_replica_methods)) + - name: Remove patroni.dynamic.json file + ansible.builtin.file: + path: "{{ postgresql_data_dir }}/patroni.dynamic.json" + state: absent + failed_when: false + when: not keep_patroni_dynamic_json|bool + + - name: Start PostgreSQL for Recovery + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/pg_ctl start -D {{ postgresql_data_dir }} -w -t {{ pg_ctl_timeout | default(3600) }} + -o '--config-file={{ postgresql_conf_dir }}/postgresql.conf' + -o '-c hot_standby=off' + {% if postgresql_version | int >= 12 %} + -o '-c restore_command="pgbackrest --stanza={{ pgbackrest_stanza }} archive-get %f %p"' + {% endif %} + -o '-c archive_command=/bin/true' + -l /tmp/pg_recovery_{{ ansible_date_time.date }}.log + async: "{{ pg_ctl_timeout | default(3600) }}" # run the command asynchronously + poll: 0 + register: pg_ctl_start_result + when: inventory_hostname == groups['master'][0] or (inventory_hostname in groups['replica'] and 'pgbackrest' in patroni_create_replica_methods) + + - name: Wait for the PostgreSQL start command to complete + ansible.builtin.async_status: + jid: "{{ pg_ctl_start_result.ansible_job_id }}" + register: pg_ctl_start_job_result + until: pg_ctl_start_job_result.finished + retries: "{{ (pg_ctl_timeout | default(3600) | int) // 10 }}" + delay: 10 + when: inventory_hostname == groups['master'][0] or (inventory_hostname in groups['replica'] and 'pgbackrest' in patroni_create_replica_methods) - - name: Waiting for PostgreSQL Recovery to complete (WAL apply) - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -tAc 'SELECT pg_is_in_recovery()'" + - name: Wait for PostgreSQL recovery to complete (WAL apply) + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres + -tAXc "select pg_is_in_recovery()" register: pg_is_in_recovery - until: pg_is_in_recovery.stdout != "t" - retries: 1200 # timeout 10 hours + until: pg_is_in_recovery.stdout == "f" + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours delay: 30 changed_when: false failed_when: false - when: is_master == "true" or - (is_master != "true" and 'pgbackrest' in patroni_create_replica_methods) + when: inventory_hostname == groups['master'][0] + + - name: Check PostgreSQL recovery log + ansible.builtin.command: "grep -A2 'recovery stopping' /tmp/pg_recovery_{{ ansible_date_time.date }}.log" + register: pg_recovery_result + changed_when: false + failed_when: false + when: inventory_hostname == groups['master'][0] + + - name: PostgreSQL recovery details + ansible.builtin.debug: + msg: "{{ pg_recovery_result.stdout_lines }}" + when: pg_recovery_result.stdout_lines is defined - name: Check that PostgreSQL is stopped - command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" register: pg_ctl_status_result changed_when: false failed_when: false - - name: Stop PostgreSQL # "Debian" - 
command: "/usr/bin/pg_ctlcluster {{ postgresql_version }} {{ postgresql_cluster_name }} stop -m fast -- -w -t 600" - when: ansible_os_family == "Debian" and - (pg_ctl_status_result.rc is defined and pg_ctl_status_result.rc != 3) - - - name: Stop PostgreSQL # "RedHat" or PostgresPro - command: "{{ postgresql_bin_dir }}/pg_ctl stop -D {{ postgresql_data_dir }} -m fast -w -t 600" - when: (ansible_os_family == "RedHat" or postgresql_packages|join(" ") is search("postgrespro")) and - (pg_ctl_status_result.rc is defined and pg_ctl_status_result.rc != 3) + - name: Stop PostgreSQL + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/pg_ctl stop -D {{ postgresql_data_dir }} -m fast -w -t {{ pg_ctl_timeout | default(3600) }} + when: pg_ctl_status_result.rc is defined and (pg_ctl_status_result.rc != 3 and pg_ctl_status_result.rc != 4) when: patroni_cluster_bootstrap_method == "pgbackrest" become: true become_user: postgres environment: "{{ proxy_env | default({}) }}" when: patroni_cluster_bootstrap_method != "initdb" and - (pgbackrest_install|bool or wal_g_install|bool) + (pgbackrest_install|bool or wal_g_install|bool) tags: patroni, point_in_time_recovery -- block: # PITR (custom bootstrap) - disable archive_command - - name: Check the patroni.dynamic.json exists - stat: +- block: # PITR (custom bootstrap) - disable archive_command + - name: Check if patroni.dynamic.json exists + ansible.builtin.stat: path: "{{ postgresql_data_dir }}/patroni.dynamic.json" register: patroni_dynamic_json - name: Remove patroni.dynamic.json file - file: + ansible.builtin.file: path: "{{ postgresql_data_dir }}/patroni.dynamic.json" state: absent - when: not keep_patroni_dynamic_json|bool + when: + - patroni_dynamic_json is defined + - patroni_dynamic_json.stat is defined + - patroni_dynamic_json.stat.exists + - not keep_patroni_dynamic_json | bool - name: Edit patroni.dynamic.json | disable archive_command (if enabled) yedit: src: "{{ postgresql_data_dir }}/patroni.dynamic.json" key: postgresql.parameters.archive_command - value: "cd ." # not doing anything yet with WAL-s + value: "cd ." # not doing anything yet with WAL-s content_type: json - vars: - ansible_python_interpreter: /usr/bin/python3 - when: patroni_dynamic_json.stat.exists and - keep_patroni_dynamic_json|bool and disable_archive_command|bool + when: + - patroni_dynamic_json is defined + - patroni_dynamic_json.stat is defined + - patroni_dynamic_json.stat.exists + - disable_archive_command | bool - name: Edit patroni.yml | disable archive_command (if enabled) yedit: src: /etc/patroni/patroni.yml key: bootstrap.dcs.postgresql.parameters.archive_command - value: "cd ." # not doing anything yet with WAL-s - vars: - ansible_python_interpreter: /usr/bin/python3 - when: disable_archive_command|bool + value: "cd ." 
# not doing anything yet with WAL-s + when: disable_archive_command | bool when: patroni_cluster_bootstrap_method != "initdb" and - (pgbackrest_install|bool or wal_g_install|bool) and - (existing_pgcluster is not defined or not existing_pgcluster|bool) + (pgbackrest_install | bool or wal_g_install | bool) and + (existing_pgcluster is not defined or not existing_pgcluster | bool) become: true become_user: postgres tags: patroni, point_in_time_recovery -- block: # PITR (custom bootstrap) - generate pg_hba.conf +- block: # PITR (custom bootstrap) - generate pg_hba.conf - name: Generate pg_hba.conf (before start patroni) - template: + ansible.builtin.template: src: templates/pg_hba.conf.j2 dest: "{{ postgresql_conf_dir }}/pg_hba.conf" owner: postgres group: postgres - mode: 0640 + mode: "0640" when: patroni_cluster_bootstrap_method != "initdb" and - postgresql_conf_dir != postgresql_data_dir and - (existing_pgcluster is not defined or not existing_pgcluster|bool) + postgresql_conf_dir != postgresql_data_dir and + (existing_pgcluster is not defined or not existing_pgcluster|bool) tags: patroni -- block: # start patroni on master +- block: # start patroni on master - name: Start patroni service on the Master server - systemd: + ansible.builtin.systemd: daemon_reload: true name: patroni state: restarted enabled: true - - name: Wait for port 8008 to become open on the host - wait_for: - port: 8008 - host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" + - name: "Wait for port {{ patroni_restapi_port }} to become open on the host" + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" state: started timeout: 120 delay: 10 @@ -781,19 +872,35 @@ - name: Wait for PostgreSQL Recovery to complete (WAL apply) become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -tAc 'SELECT pg_is_in_recovery()'" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pg_is_in_recovery()" register: pg_is_in_recovery until: pg_is_in_recovery.stdout == "f" - retries: 1200 # timeout 10 hours + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours delay: 30 changed_when: false failed_when: false when: patroni_cluster_bootstrap_method == "wal-g" - - name: Check PostgreSQL is started and accepting connections on Master + - name: Wait for the Standby cluster initialization to complete + ansible.builtin.uri: + url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/standby-leader" + status_code: 200 + register: standby_leader_result + until: standby_leader_result.status == 200 + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours + delay: 30 + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - (patroni_standby_cluster.host is defined and patroni_standby_cluster.host | length > 0) + - not ansible_check_mode + + - name: Check PostgreSQL is started and accepting connections become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" register: pg_isready_result until: pg_isready_result.rc == 0 retries: 1000 @@ -801,65 +908,93 @@ changed_when: false - name: Wait for the cluster to initialize (master is the leader with the lock) - uri: - url: "http://{{ hostvars[inventory_hostname]['inventory_hostname'] 
}}:8008/leader" + ansible.builtin.uri: + url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader" status_code: 200 register: result until: result.status == 200 retries: 10 delay: 2 - when: is_master == "true" + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - (patroni_standby_cluster.host is not defined or patroni_standby_cluster.host | length < 1) + - not ansible_check_mode + when: inventory_hostname == groups['master'][0] tags: patroni, patroni_start_master, point_in_time_recovery -- block: # pg_hba (using a templates/pg_hba.conf.j2) +- block: # pg_hba (using a templates/pg_hba.conf.j2) - name: Prepare PostgreSQL | generate pg_hba.conf - template: + ansible.builtin.template: src: templates/pg_hba.conf.j2 dest: "{{ postgresql_conf_dir }}/pg_hba.conf" owner: postgres group: postgres - mode: 0640 + mode: "0640" register: generate_pg_hba - when: is_master == "true" or - ((is_master != "true" and postgresql_conf_dir != postgresql_data_dir) - or postgresql_exists == "true") + when: inventory_hostname == groups['master'][0] or + ((inventory_hostname in groups['replica'] and postgresql_conf_dir != postgresql_data_dir) + or postgresql_exists | default(false) | bool) - name: Prepare PostgreSQL | reload for apply the pg_hba.conf become: true become_user: postgres - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -c 'SELECT pg_reload_conf()'" - register: psql_reload_result - changed_when: psql_reload_result.rc == 0 - failed_when: false # exec pg_reload_conf on all running postgres (to re-run with --tag pg_hba). + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_ctl reload -D {{ postgresql_data_dir }}" + register: pg_ctl_reload_result + changed_when: pg_ctl_reload_result.rc == 0 + failed_when: false # exec 'reload' on all running postgres (to re-run with --tag pg_hba). 
when: generate_pg_hba is changed when: existing_pgcluster is not defined or not existing_pgcluster|bool tags: patroni, pg_hba, pg_hba_generate -- block: # PITR (custom bootstrap) - superuser and replication +- block: # PITR (custom bootstrap) - password reset for PostgreSQL users - name: Make sure the Master is not in recovery mode - command: "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -tAc 'SELECT pg_is_in_recovery()'" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pg_is_in_recovery()" register: pg_is_in_recovery - until: pg_is_in_recovery.stdout != "t" - retries: 1200 # timeout 10 hours + until: pg_is_in_recovery.stdout == "f" + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours delay: 30 changed_when: false - when: is_master == "true" - - - name: Make sure the postgresql users are present, and password does not differ from the specified - postgresql_user: - db: postgres - name: "{{ item.role }}" - password: "{{ item.pass }}" - role_attr_flags: "{{ item.role_attr }}" + when: inventory_hostname == groups['master'][0] + + - name: Make sure the superuser and replication users are present, and password does not differ from the specified + community.postgresql.postgresql_user: + login_db: postgres + name: "{{ item.name }}" + password: "{{ item.password }}" + role_attr_flags: "{{ item.flags }}" login_unix_socket: "{{ postgresql_unix_socket_dir }}" port: "{{ postgresql_port }}" register: postgresql_user_result loop: - - {role: '{{ patroni_superuser_username }}', pass: '{{ patroni_superuser_password }}', role_attr: 'SUPERUSER'} - - {role: '{{ patroni_replication_username }}', pass: '{{ patroni_replication_password }}', role_attr: 'LOGIN,REPLICATION'} + - { name: "{{ patroni_superuser_username }}", password: "{{ patroni_superuser_password }}", flags: "SUPERUSER" } + - { name: "{{ patroni_replication_username }}", password: "{{ patroni_replication_password }}", flags: "LOGIN,REPLICATION" } + loop_control: + label: "{{ item.name }}" + when: + - inventory_hostname == groups['master'][0] + - (patroni_superuser_username and patroni_superuser_password) is defined + - (patroni_superuser_username and patroni_superuser_password) | length > 0 + - (patroni_replication_username and patroni_replication_password) is defined + - (patroni_replication_username and patroni_replication_password) | length > 0 + + - name: Make sure the postgresql users are present, and password does not differ from the specified + community.postgresql.postgresql_user: + login_db: postgres + name: "{{ item.name }}" + password: "{{ item.password }}" + role_attr_flags: "{{ item.flags }}" + login_unix_socket: "{{ postgresql_unix_socket_dir }}" + port: "{{ postgresql_port }}" + loop: "{{ postgresql_users }}" loop_control: - label: "{{ item.role }}" - when: is_master == "true" + label: "{{ item.name }}" + ignore_errors: true + when: + - inventory_hostname == groups['master'][0] + - (postgresql_users is defined and postgresql_users | length > 0) - name: Update postgresql authentication in patroni.yml yedit: @@ -874,20 +1009,18 @@ - key: postgresql.authentication.superuser.password value: "{{ patroni_superuser_password }}" state: present - vars: - ansible_python_interpreter: /usr/bin/python3 when: hostvars[groups['master'][0]]['postgresql_user_result'] is changed when: patroni_cluster_bootstrap_method != "initdb" and - (pgbackrest_install|bool or wal_g_install|bool) and - 
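After the password resets above, a quick TCP connectivity check confirms the new credentials actually work before the Patroni configuration is updated; a sketch using community.postgresql.postgresql_ping (loopback host choice illustrative):

- name: Verify the superuser password over TCP (sketch)
  become: true
  become_user: postgres
  community.postgresql.postgresql_ping:
    login_host: 127.0.0.1
    port: "{{ postgresql_port }}"
    login_user: "{{ patroni_superuser_username }}"
    login_password: "{{ patroni_superuser_password }}"
    login_db: postgres
  when: inventory_hostname == groups['master'][0]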
(existing_pgcluster is not defined or not existing_pgcluster|bool) + (pgbackrest_install|bool or wal_g_install|bool) and + (existing_pgcluster is not defined or not existing_pgcluster|bool) become: true become_user: postgres tags: patroni, point_in_time_recovery -- block: # for add_pgnode.yml +- block: # for add_pgnode.yml - name: Prepare PostgreSQL | fetch pg_hba.conf file from master run_once: true - fetch: + ansible.builtin.fetch: src: "{{ postgresql_conf_dir }}/pg_hba.conf" dest: files/pg_hba.conf validate_checksum: true @@ -895,81 +1028,84 @@ delegate_to: "{{ groups.master[0] }}" - name: Prepare PostgreSQL | copy pg_hba.conf file to replica - copy: + ansible.builtin.copy: src: files/pg_hba.conf dest: "{{ postgresql_conf_dir }}/pg_hba.conf" owner: postgres group: postgres - mode: 0640 + mode: "0640" when: existing_pgcluster is defined and existing_pgcluster|bool - and postgresql_conf_dir != postgresql_data_dir + and postgresql_conf_dir != postgresql_data_dir tags: patroni, pg_hba, pg_hba_generate -- block: # start patroni on replica +- block: # start patroni on replica - name: Start patroni service on Replica servers - systemd: + ansible.builtin.systemd: daemon_reload: true name: patroni state: restarted enabled: true - - name: Wait for port 8008 to become open on the host - wait_for: - port: 8008 - host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" + - name: "Wait for port {{ patroni_restapi_port }} to become open on the host" + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" state: started timeout: 120 delay: 10 ignore_errors: false - name: Check that the patroni is healthy on the replica server - uri: - url: "http://{{ hostvars[inventory_hostname]['inventory_hostname'] }}:8008/health" + ansible.builtin.uri: + url: "http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/health" status_code: 200 register: replica_result until: replica_result.status == 200 - retries: 1200 # timeout 10 hours + retries: "{{ (cluster_restore_timeout | default(86400)) | int // 30 }}" # timeout 24 hours delay: 30 - when: is_master != "true" + environment: + no_proxy: "{{ inventory_hostname }}" + when: not ansible_check_mode + when: inventory_hostname in groups['replica'] tags: patroni, patroni_start_replica, point_in_time_recovery # create symlink pg_xlog/pg_wal to custom WAL dir -- import_tasks: custom_wal_dir.yml +- ansible.builtin.import_tasks: custom_wal_dir.yml when: postgresql_wal_dir is defined and postgresql_wal_dir | length > 0 - tags: patroni, custom_wal_dir + tags: patroni, custom_wal_dir, point_in_time_recovery # disable postgresql from autostart -- block: # "Debian" +- block: # "Debian" - name: Turning off postgresql autostart from config "start.conf" (will be managed by patroni) - copy: + ansible.builtin.copy: dest: "{{ postgresql_conf_dir }}/start.conf" content: "manual" owner: postgres group: postgres - mode: 0644 + mode: "0644" - name: Disable "postgresql@{{ postgresql_version }}-{{ postgresql_cluster_name }}" service - systemd: + ansible.builtin.systemd: name: "postgresql@{{ postgresql_version }}-{{ postgresql_cluster_name }}" enabled: false daemon_reload: true when: ansible_os_family == "Debian" and - postgresql_packages|join(" ") is not search("postgrespro") + postgresql_packages|join(" ") is not search("postgrespro") tags: patroni, postgresql_disable # "RedHat" - name: Disable "postgresql-{{ postgresql_version }}" service (will be managed by patroni) - systemd: + ansible.builtin.systemd: name: "postgresql-{{ 
postgresql_version }}" enabled: false daemon_reload: true when: ansible_os_family == "RedHat" and - postgresql_packages|join(" ") is not search("postgrespro") + postgresql_packages|join(" ") is not search("postgrespro") tags: patroni, postgresql_disable # PostgresPro - name: Disable "postgrespro-std-{{ postgresql_version }}" service (will be managed by patroni) - systemd: + ansible.builtin.systemd: name: "postgrespro-std-{{ postgresql_version }}" enabled: false daemon_reload: true @@ -978,15 +1114,13 @@ # PATRONICTL_CONFIG_FILE (patroni v1.6.1 and higher) - name: Add PATRONICTL_CONFIG_FILE environment variable into /etc/environment - lineinfile: + ansible.builtin.lineinfile: dest: "/etc/environment" state: present - regexp: "^export PATRONICTL_CONFIG_FILE" - line: "export PATRONICTL_CONFIG_FILE=/etc/patroni/patroni.yml" + regexp: "^PATRONICTL_CONFIG_FILE" + line: "PATRONICTL_CONFIG_FILE=/etc/patroni/patroni.yml" owner: root group: root - mode: '0644' + mode: "0644" ignore_errors: true tags: patroni, patroni_env - -... diff --git a/roles/patroni/tasks/pip.yml b/automation/roles/patroni/tasks/pip.yml similarity index 66% rename from roles/patroni/tasks/pip.yml rename to automation/roles/patroni/tasks/pip.yml index 453be4a20..ffec0640b 100644 --- a/roles/patroni/tasks/pip.yml +++ b/automation/roles/patroni/tasks/pip.yml @@ -5,20 +5,30 @@ # Installing latest version pip3 for python3 # install pip package from repo (installation_method: "repo") -- block: # with get-pip.py +- block: # with get-pip.py - name: pip | get-pip.py - get_url: - url: "{{ item }}" + ansible.builtin.get_url: + url: "{{ pip_package_repo }}" + dest: /tmp/ + timeout: 60 + validate_certs: false + register: get_pip_result + when: ansible_python_version is version('3.7', '>=') + + - name: pip | get-pip.py + ansible.builtin.get_url: + url: "https://bootstrap.pypa.io/pip/3.6/get-pip.py" dest: /tmp/ timeout: 60 validate_certs: false - loop: - - "{{ pip_package_repo }}" register: get_pip_result + when: + - ansible_python_version is version('3.6', '>=') + - ansible_python_version is version('3.7', '<') - name: pip | install pip - command: > - python3 {{ pip_package_repo | basename }} + ansible.builtin.command: > + python3 get-pip.py --trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org @@ -27,13 +37,13 @@ when: get_pip_result.changed environment: "{{ proxy_env | default({}) }}" when: installation_method == "repo" and - pip_package_repo is search("get-pip.py") and - proxy_env is defined and proxy_env | length > 0 + patroni_installation_method == "pip" and + pip_package_repo is search("get-pip.py") tags: pip_install, pip -- block: # with tar.gz +- block: # with tar.gz - name: pip | download pip package - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 60 @@ -43,7 +53,7 @@ register: pip_package_result - name: pip | extract pip package into /tmp - unarchive: + ansible.builtin.unarchive: src: "/tmp/{{ pip_package_repo | basename }}" dest: /tmp/ extra_opts: @@ -51,18 +61,18 @@ remote_src: true - name: pip | install pip - command: "python3 setup.py install" + ansible.builtin.command: "python3 setup.py install" args: chdir: "/tmp/{{ pip_package_repo.split('.tar.gz')[0] | basename }}" when: pip_package_result.changed when: installation_method == "repo" and - pip_package_repo is search("tar.gz") + pip_package_repo is search("tar.gz") tags: pip_install, pip # install pip package from file (installation_method: "file") - block: - name: pip | extract pip package into /tmp 
- unarchive: + ansible.builtin.unarchive: src: "{{ pip_package_file }}" dest: /tmp/ extra_opts: @@ -70,12 +80,10 @@ register: pip_package_result - name: pip | install pip - command: python3 setup.py install + ansible.builtin.command: python3 setup.py install args: chdir: "/tmp/{{ pip_package_file.split('.tar.gz')[0] | basename }}" when: pip_package_result.changed when: installation_method == "file" and - pip_package_file | length > 0 + pip_package_file | length > 0 tags: pip_install, pip - -... diff --git a/roles/patroni/templates/patroni.service.j2 b/automation/roles/patroni/templates/patroni.service.j2 similarity index 62% rename from roles/patroni/templates/patroni.service.j2 rename to automation/roles/patroni/templates/patroni.service.j2 index 5f6e1377b..a6bf5630a 100644 --- a/roles/patroni/templates/patroni.service.j2 +++ b/automation/roles/patroni/templates/patroni.service.j2 @@ -1,26 +1,32 @@ [Unit] -Description=Runners to orchestrate a high-availability PostgreSQL - patroni +Description=Runners to orchestrate a high-availability PostgreSQL - Patroni After=syslog.target network.target - + [Service] Type=simple - + User=postgres Group=postgres # Read in configuration file if it exists, otherwise proceed EnvironmentFile=-/etc/patroni_env.conf +# The default is the user's home directory, and if you want to change it, you must provide an absolute path. # WorkingDirectory=~ - + # Where to send early-startup messages from the server # This is normally controlled by the global default set by systemd # StandardOutput=syslog # Pre-commands to start watchdog device # Uncomment if watchdog is part of your patroni setup +{% if patroni_watchdog_mode in ['automatic', 'required'] %} +ExecStartPre=-/usr/bin/sudo /sbin/modprobe softdog +ExecStartPre=-/usr/bin/sudo /bin/chown postgres {{ patroni_watchdog_device }} +{% else %} #ExecStartPre=-/usr/bin/sudo /sbin/modprobe softdog -#ExecStartPre=-/usr/bin/sudo /bin/chown postgres /dev/watchdog +#ExecStartPre=-/usr/bin/sudo /bin/chown postgres {{ patroni_watchdog_device }} +{% endif %} # Start the patroni process {% if patroni_installation_method == 'pip' %} @@ -31,15 +37,17 @@ ExecStart=/usr/bin/patroni /etc/patroni/patroni.yml # Send HUP to reload from patroni.yml ExecReload=/bin/kill -s HUP $MAINPID - -# only kill the patroni process, not it's children, so it will gracefully stop postgres + +# Only kill the patroni process, not its children, so it will gracefully stop postgres KillMode=process - + # Give a reasonable amount of time for the server to start up/shut down TimeoutSec=60 - -# Do not restart the service if it crashes, we want to manually inspect database on failure -Restart=no - + +# Restart the service if it crashes +Restart=on-failure + +LimitNOFILE={{ patroni_systemd_limit_nofile | default(65536) }} + [Install] WantedBy=multi-user.target diff --git a/roles/patroni/templates/patroni.yml.j2 b/automation/roles/patroni/templates/patroni.yml.j2 similarity index 53% rename from roles/patroni/templates/patroni.yml.j2 rename to automation/roles/patroni/templates/patroni.yml.j2 index a585b62fa..a3f4cd251 100644 --- a/roles/patroni/templates/patroni.yml.j2 +++ b/automation/roles/patroni/templates/patroni.yml.j2 @@ -3,7 +3,7 @@ scope: {{ patroni_cluster_name }} name: {{ ansible_hostname }} -namespace: /service/ +namespace: /{{ patroni_etcd_namespace | default('service') }} {% if patroni_log_destination == 'logfile' %} log: @@ -21,23 +21,57 @@ log: {% endif %} restapi: - listen: {{ hostvars[inventory_hostname]['inventory_hostname'] }}:8008 -
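The unit template above now renders the watchdog ExecStartPre lines only when the mode warrants it; a group_vars sketch for the two variables it assumes (values illustrative):

patroni_watchdog_mode: automatic # off | automatic | required
patroni_watchdog_device: /dev/watchdog # softdog provides this device when no hardware watchdog exists

With mode "automatic" or "required", the pre-start commands load the softdog module and hand the device to the postgres user before Patroni starts, matching the watchdog section rendered later in patroni.yml.j2.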
connect_address: {{ hostvars[inventory_hostname]['inventory_hostname'] }}:8008 + listen: {{ patroni_restapi_listen_addr }}:{{ patroni_restapi_port }} + connect_address: {{ inventory_hostname }}:{{ patroni_restapi_port }} # certfile: /etc/ssl/certs/ssl-cert-snakeoil.pem # keyfile: /etc/ssl/private/ssl-cert-snakeoil.key +{% if patroni_restapi_password | default('') | length > 0 %} + authentication: + username: {{ patroni_restapi_username | default('patroni') }} + password: {{ patroni_restapi_password }} +{% else %} # authentication: # username: username # password: password +{% endif %} +{% if patroni_restapi_request_queue_size is defined %} + request_queue_size: {{ patroni_restapi_request_queue_size |int }} +{% endif %} -{% if not dcs_exists|bool and dcs_type == 'etcd' %} -etcd: +{% if dcs_type == 'etcd' %} +etcd3: + {% if not dcs_exists|bool %} hosts: {% for host in groups['etcd_cluster'] %}{{ hostvars[host]['inventory_hostname'] }}:2379{% if not loop.last %},{% endif %}{% endfor %} -{% endif %} -{% if dcs_exists|bool and dcs_type == 'etcd' %} -etcd: + {% endif %} + {% if dcs_exists|bool %} hosts: {% for etcd_hosts in patroni_etcd_hosts %}{{etcd_hosts.host}}:{{etcd_hosts.port}}{% if not loop.last %},{% endif %}{% endfor %} + {% endif %} + + {% if etcd_tls_enable | default(false) | bool %} + protocol: https + cacert: {{ patroni_etcd_cacert | default('/etc/patroni/tls/etcd/ca.crt') }} + cert: {{ patroni_etcd_cert | default('/etc/patroni/tls/etcd/server.crt') }} + key: {{ patroni_etcd_key | default('/etc/patroni/tls/etcd/server.key') }} + {% endif %} + {% if patroni_etcd_username | default('') | length > 0 %} + username: {{ patroni_etcd_username | default('') }} + {% endif %} + {% if patroni_etcd_password | default('') | length > 0 %} + password: {{ patroni_etcd_password }} + {% endif %} {% endif %} +{% if dcs_type == 'consul' %} +consul: + host: 127.0.0.1:8500 + checks: [] + {% if consul_tls_enable | default(false) | bool %} + scheme: https + cacert: {{ patroni_consul_cacert | default('/etc/patroni/tls/consul/ca.crt') }} + cert: {{ patroni_consul_cert | default('/etc/patroni/tls/consul/server.crt') }} + key: {{ patroni_consul_key | default('/etc/patroni/tls/consul/server.key') }} + {% endif %} +{% endif %} bootstrap: method: {{ patroni_cluster_bootstrap_method }} @@ -46,68 +80,79 @@ bootstrap: command: {{ wal_g_patroni_cluster_bootstrap_command }} no_params: True recovery_conf: - recovery_target_action: promote - recovery_target_timeline: latest - restore_command: wal-g wal-fetch %f %p + {% for item in wal_g_patroni_cluster_bootstrap_recovery_conf %} + {% for key, value in item.items() %} + {{ key }}: {{ value }} + {% endfor %} + {% endfor %} +{% endif %} +{% if patroni_cluster_bootstrap_method == 'pg_probackup' %} + pg_probackup: + command: {{ pg_probackup_patroni_cluster_bootstrap_command }} + no_params: true {% endif %} dcs: ttl: {{ patroni_ttl |d(30, true) |int }} loop_wait: {{ patroni_loop_wait |d(10, true) |int }} retry_timeout: {{ patroni_retry_timeout |d(10, true) |int }} - maximum_lag_on_failover: {{ patroni_maximum_lag_on_failover |d(1048576, true) |int }} + maximum_lag_on_failover: {{ patroni_maximum_lag_on_failover | d(1048576) | int }} master_start_timeout: {{ patroni_master_start_timeout |d(300, true) |int }} synchronous_mode: {{ synchronous_mode |string |d(false, true) |lower }} synchronous_mode_strict: {{ synchronous_mode_strict |string |d(false, true) |lower }} synchronous_node_count: {{ synchronous_node_count |d(1, true) |int }} - {% if patroni_standby_cluster.host is 
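For the dcs_exists=true branch above, patroni_etcd_hosts is expected to be a list of host/port mappings iterated by the template loop; an inventory sketch (addresses illustrative):

patroni_etcd_hosts:
  - { host: "10.128.64.140", port: 2379 }
  - { host: "10.128.64.142", port: 2379 }
  - { host: "10.128.64.143", port: 2379 }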
defined and patroni_standby_cluster.host | length > 0 %} + postgresql: + use_pg_rewind: {{ patroni_postgresql_use_pg_rewind |string |d(false, true) |lower }} + use_slots: true + parameters: + {% for parameter in postgresql_parameters %} + {{ parameter.option }}: "{{ parameter.value }}" + {% endfor %} + {% if patroni_standby_cluster.host is defined and patroni_standby_cluster.host | length > 0 %} standby_cluster: host: {{ patroni_standby_cluster.host }} port: {{ patroni_standby_cluster.port }} - {% if patroni_standby_cluster.primary_slot_name is defined and patroni_standby_cluster.primary_slot_name | length > 0 %} + {% if patroni_standby_cluster.primary_slot_name is defined and patroni_standby_cluster.primary_slot_name | length > 0 %} primary_slot_name: {{ patroni_standby_cluster.primary_slot_name }} - {% endif %} - {% if patroni_standby_cluster.restore_command is defined and patroni_standby_cluster.restore_command | length > 0 %} + {% endif %} + {% if patroni_standby_cluster.restore_command is defined and patroni_standby_cluster.restore_command | length > 0 %} restore_command: {{ patroni_standby_cluster.restore_command }} - {% endif %} - {% if patroni_standby_cluster.recovery_min_apply_delay is defined and patroni_standby_cluster.recovery_min_apply_delay | length > 0 %} + {% endif %} + {% if patroni_standby_cluster.recovery_min_apply_delay is defined and patroni_standby_cluster.recovery_min_apply_delay | length > 0 %} recovery_min_apply_delay: {{ patroni_standby_cluster.recovery_min_apply_delay }} + {% endif %} {% endif %} - {% endif %} - postgresql: - use_pg_rewind: {{ patroni_postgresql_use_pg_rewind |string |d(false, true) |lower }} - use_slots: true - parameters: - {% for parameter in postgresql_parameters %} - {{ parameter.option }}: {{ parameter.value }} + {% if patroni_slots is defined and patroni_slots | length > 0 %} + slots: + {% for slot in patroni_slots %} + {{ slot.slot }}: + type: {{ slot.type }} + {% if slot.plugin | default('') | length > 0 %} + plugin: {{ slot.plugin }} + {% endif %} + {% if slot.database | default('') | length > 0 %} + database: {{ slot.database }} + {% endif %} {% endfor %} + {% endif %} -{% if postgresql_exists == 'true' %} -# initdb: # List options to be passed on to initdb -# - encoding: UTF8 -# - data-checksums -{% endif %} -{% if postgresql_exists == 'false' %} initdb: # List options to be passed on to initdb - encoding: {{ postgresql_encoding }} - locale: {{ postgresql_locale }} {% if postgresql_data_checksums|bool %} - data-checksums {% endif %} -{% endif %} pg_hba: # Add following lines to pg_hba.conf after running 'initdb' - - host replication {{ patroni_replication_username }} 127.0.0.1/32 md5 - - host all all 0.0.0.0/0 md5 + - host replication {{ patroni_replication_username }} 127.0.0.1/32 {{ postgresql_password_encryption_algorithm }} + - host all all 0.0.0.0/0 {{ postgresql_password_encryption_algorithm }} postgresql: -{% if not with_haproxy_load_balancing|bool and not pgbouncer_install|bool and (cluster_vip is defined and cluster_vip | length > 0) %} - listen: {{ hostvars[inventory_hostname]['inventory_hostname'] }},{{ cluster_vip }},127.0.0.1:{{ postgresql_port }} -{% else %} - listen: {{ hostvars[inventory_hostname]['inventory_hostname'] }},127.0.0.1:{{ postgresql_port }} -{% endif %} - connect_address: {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ postgresql_port }} + listen: {{ postgresql_listen_addr }}:{{ postgresql_port }} + connect_address: {{ inventory_hostname }}:{{ postgresql_port }} +{% if 
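The permanent-slots block above emits plugin and database only when they are set, which lines up with Patroni's slot schema: logical slots need both, physical slots need neither. A shape patroni_slots would accept (slot and database names illustrative):

patroni_slots:
  - slot: cdc_pgoutput
    type: logical
    plugin: pgoutput
    database: app_db
  - slot: standby_dr
    type: physical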
patroni_superuser_username == 'postgres' %} use_unix_socket: true +{% endif %} data_dir: {{ postgresql_data_dir }} bin_dir: {{ postgresql_bin_dir }} config_dir: {{ postgresql_conf_dir }} @@ -124,7 +169,7 @@ postgresql: # password: rewind_password parameters: unix_socket_directories: {{ postgresql_unix_socket_dir }} -{% if postgresql_stats_temp_directory_path is defined and postgresql_stats_temp_directory_path != 'none' %} +{% if postgresql_stats_temp_directory_path is defined and postgresql_stats_temp_directory_path != 'none' and postgresql_version | int <= 14 %} stats_temp_directory: {{ postgresql_stats_temp_directory_path }} {% endif %} @@ -138,12 +183,14 @@ postgresql: remove_data_directory_on_rewind_failure: {{ patroni_remove_data_directory_on_rewind_failure |string |d(false, true) |lower }} remove_data_directory_on_diverged_timelines: {{ patroni_remove_data_directory_on_diverged_timelines |string |d(false, true) |lower }} -# callbacks: -# on_start: -# on_stop: -# on_restart: -# on_reload: -# on_role_change: +{% if patroni_callbacks is defined and patroni_callbacks | length > 0 %} + callbacks: + {% for callback in patroni_callbacks %} + {% if callback.script | length > 0 %} + {{ callback.action }}: '{{ callback.script }}' + {% endif %} + {% endfor %} +{% endif %} create_replica_methods: {% if patroni_create_replica_methods is defined and patroni_create_replica_methods | length > 0 %} @@ -167,9 +214,12 @@ postgresql: {% for item in basebackup %} {{ item.option }}: '{{ item.value }}' {% endfor %} - {% if postgresql_version is version('10', '>=') and postgresql_wal_dir is defined and postgresql_wal_dir | length > 0 %} - waldir: {{ postgresql_wal_dir }} - {% endif %} + {% endif %} + {% if 'pg_probackup' in patroni_create_replica_methods %} + pg_probackup: + {% for item in pg_probackup %} + {{ item.option }}: {{ item.value }} + {% endfor %} {% endif %} {% else %} - basebackup @@ -183,16 +233,27 @@ postgresql: {% endif %} watchdog: - mode: off # Allowed values: off, automatic, required - device: /dev/watchdog + mode: {{ patroni_watchdog_mode if patroni_watchdog_mode in ['off', 'automatic', 'required'] else 'off'}} # Allowed values: off, automatic, required + device: {{ patroni_watchdog_device }} # Path to the watchdog device safety_margin: 5 tags: - nofailover: false +{% if patroni_tags is defined and patroni_tags | length > 0 %} + {{ patroni_tags | replace(" ", "") | replace("=", ": ") | replace(",", "\n ") }} +{% endif %} +{% set normalized_tags = patroni_tags | default('') | replace(" ", "") %} +{% if 'nosync=' not in normalized_tags %} + nosync: false +{% endif %} +{% if 'noloadbalance=' not in normalized_tags %} noloadbalance: false +{% endif %} +{% if 'nofailover=' not in normalized_tags %} + nofailover: false +{% endif %} +{% if 'clonefrom=' not in normalized_tags %} clonefrom: false - nosync: false - - # specify a node to replicate from (cascading replication) -# replicatefrom: (node name) - +{% endif %} +{% if 'replicatefrom=' not in normalized_tags and patroni_replicatefrom | default('') | length > 0 %} + replicatefrom: {{ patroni_replicatefrom | default('') }} +{% endif %} diff --git a/roles/patroni/templates/pg_hba.conf.j2 b/automation/roles/patroni/templates/pg_hba.conf.j2 similarity index 80% rename from roles/patroni/templates/pg_hba.conf.j2 rename to automation/roles/patroni/templates/pg_hba.conf.j2 index 84ab48684..53187ecb1 100644 --- a/roles/patroni/templates/pg_hba.conf.j2 +++ b/automation/roles/patroni/templates/pg_hba.conf.j2 @@ -85,14 +85,14 @@ # TYPE DATABASE USER 
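The tags rewrite above turns a comma-separated patroni_tags string into YAML pairs and emits the false defaults only for keys the string never mentions. A worked example (input illustrative):

# patroni_tags: "nofailover=true, noloadbalance=false"
# renders under tags: as
#   nofailover: true
#   noloadbalance: false
#   nosync: false      # default kept: 'nosync=' absent from the input
#   clonefrom: false   # default kept: 'clonefrom=' absent from the input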
ADDRESS METHOD {% for client in postgresql_pg_hba %} - {{ client.type.ljust(10) |default('host') }}{{ client.database.ljust(25) |default('all') }}{{ client.user.ljust(25) |default('all') }}{{ client.address.ljust(25) |default('') }}{{ client.method |default('md5') }} {{ client.options |default(None) }} + {{ client.type.ljust(10) |default('{% if tls_cert_generate | default(false) | bool %}hostssl{% else %}host{% endif %}') }}{{ client.database.ljust(25) |default('all') }}{{ client.user.ljust(25) |default('all') }}{{ client.address.ljust(25) |default('') }}{{ client.method |default('md5') }} {{ client.options |default(None) }} {% endfor %} {% for patroni in groups['postgres_cluster'] %} - host all all {{ hostvars[patroni]['inventory_hostname'] }}/32 md5 + {% if tls_cert_generate | default(false) | bool %}hostssl{% else %}host{% endif %} all all {{ hostvars[patroni]['inventory_hostname'] }}/32 {{ postgresql_password_encryption_algorithm }} {% endfor %} # Allow replication connections from localhost, by a user with the # replication privilege. - host replication {{ patroni_replication_username }} localhost trust + {% if tls_cert_generate | default(false) | bool %}hostssl{% else %}host{% endif %} replication {{ patroni_replication_username }} localhost trust {% for host in groups['postgres_cluster'] %} - host replication {{ patroni_replication_username }} {{ hostvars[host]['inventory_hostname'] }}/32 md5 + {% if tls_cert_generate | default(false) | bool %}hostssl{% else %}host{% endif %} replication {{ patroni_replication_username }} {{ hostvars[host]['inventory_hostname'] }}/32 {{ postgresql_password_encryption_algorithm }} {% endfor %} diff --git a/automation/roles/pg_probackup/README.md b/automation/roles/pg_probackup/README.md new file mode 100644 index 000000000..00df54ed7 --- /dev/null +++ b/automation/roles/pg_probackup/README.md @@ -0,0 +1 @@ +# Ansible Role: pg_probackup diff --git a/automation/roles/pg_probackup/tasks/main.yml b/automation/roles/pg_probackup/tasks/main.yml new file mode 100644 index 000000000..e25e755b0 --- /dev/null +++ b/automation/roles/pg_probackup/tasks/main.yml @@ -0,0 +1,107 @@ +--- +- block: # Debian family + - name: Make sure the gnupg, apt-transport-https and python3-debian packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + - python3-debian + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + + - name: Add pgdg repository and signing key + ansible.builtin.deb822_repository: + name: "{{ pg_probackup_repo_name | default('pg_probackup') }}" + types: [deb] + uris: "http://repo.postgrespro.ru/pg_probackup/deb/" + signed_by: "https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP" + suites: "{{ ansible_distribution_release }}" + components: "main-{{ ansible_distribution_release }}" + state: present + enabled: true + when: ansible_os_family == "Debian" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + + - name: Install pg_probackup + ansible.builtin.package: + name: + - pg-probackup-{{ pg_probackup_version }} + - pg-probackup-{{ pg_probackup_version }}-dbg + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "Debian" + - pg_probackup_install_from_postgrespro_repo|bool + tags: pg_probackup, 
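For reference, one entry shape the pg_hba template loop above consumes (values illustrative); when tls_cert_generate is true, the per-entry type default flips from host to hostssl:

postgresql_pg_hba:
  - { type: "host", database: "all", user: "all", address: "10.0.0.0/24", method: "scram-sha-256" }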
pg_probackup_repo, pg_probackup_install + +- block: # RedHat family + # RPM Centos Packages + - name: Get pg_probackup-repo-centos.noarch.rpm + ansible.builtin.get_url: + url: "https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm" + dest: "/tmp/pg_probackup-repo.rpm" + timeout: 30 + validate_certs: false + when: ansible_distribution == 'CentOS' + # RPM Oracle Linux Packages + - name: Get pg_probackup-repo-oraclelinux.noarch.rpm + ansible.builtin.get_url: + url: "https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm" + dest: "/tmp/pg_probackup-repo.rpm" + timeout: 30 + validate_certs: false + when: ansible_distribution == 'OracleLinux' + # RPM RHEL Packages + - name: Get pg_probackup-repo-rhel.noarch.rpm + ansible.builtin.get_url: + url: "https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm" + dest: "/tmp/pg_probackup-repo.rpm" + timeout: 30 + validate_certs: false + when: + - ansible_distribution != 'CentOS' + - ansible_distribution != 'OracleLinux' + + - name: Make sure the pg_probackup repository is installed + ansible.builtin.package: + name: /tmp/pg_probackup-repo.rpm + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + + - name: Install pg_probackup + ansible.builtin.package: + name: + - pg_probackup-{{ pg_probackup_version }} + - pg_probackup-{{ pg_probackup_version }}-debuginfo + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + - pg_probackup_install_from_postgrespro_repo|bool + tags: pg_probackup, pg_probackup_repo, pg_probackup_install diff --git a/automation/roles/pgbackrest/README.md b/automation/roles/pgbackrest/README.md new file mode 100644 index 000000000..9526ca207 --- /dev/null +++ b/automation/roles/pgbackrest/README.md @@ -0,0 +1 @@ +# Ansible Role: pgbackrest diff --git a/automation/roles/pgbackrest/stanza-create/tasks/main.yml b/automation/roles/pgbackrest/stanza-create/tasks/main.yml new file mode 100644 index 000000000..877baa25d --- /dev/null +++ b/automation/roles/pgbackrest/stanza-create/tasks/main.yml @@ -0,0 +1,68 @@ +--- +# Create a stanza locally (if "pgbackrest_repo_host" is not set) +- block: + - name: Get repo1-path and repo1_type value + ansible.builtin.set_fact: + repo1_path: "{{ pgbackrest_conf['global'] | selectattr('option', 'equalto', 'repo1-path') | map(attribute='value') | list | first }}" + repo1_type: "{{ pgbackrest_conf['global'] | selectattr('option', 'equalto', 'repo1-type') | map(attribute='value') | list | first }}" + + - name: "Make sure the {{ repo1_path }} directory exists" + ansible.builtin.file: + path: "{{ repo1_path }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + when: repo1_type | lower == 'posix' + + - name: Create stanza "{{ pgbackrest_stanza }}" + become: true + become_user: postgres + ansible.builtin.command: "pgbackrest --stanza={{ pgbackrest_stanza }} --no-online stanza-create" + register: stanza_create_result + changed_when: + - stanza_create_result.rc == 0 + - stanza_create_result.stdout is not search("already exists") + when: repo1_type | lower == 'posix' or + (repo1_type | lower != 'posix' and inventory_hostname == groups['master'][0]) # run only on master if it's not posix + when: + -
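The selectattr lookups above expect pgbackrest_conf (and, on a dedicated repo host, pgbackrest_server_conf) to store options as a list of option/value mappings; a minimal local posix shape for orientation (paths illustrative):

pgbackrest_conf:
  global:
    - { option: "repo1-type", value: "posix" }
    - { option: "repo1-path", value: "/var/lib/pgbackrest" }
    - { option: "log-path", value: "/var/log/pgbackrest" }
  stanza:
    - { option: "pg1-path", value: "{{ postgresql_data_dir }}" }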
pgbackrest_repo_host | length < 1 + - "'postgres_cluster' in group_names" + tags: pgbackrest, pgbackrest_stanza_create + +# Create a stanza on the dedicated repository host +- block: + # It will create a stanza on the dedicated repository host only when pgbackrest_repo_host is defined and has a value. + # The delegate_to parameter is used to execute the task on a different host than the one specified in the play's hosts parameter. + # In this case, the task is delegated to the first host in the pgbackrest group in the inventory. + - name: Get repo1-path value + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.set_fact: + repo1_path: "{{ pgbackrest_server_conf['global'] | selectattr('option', 'equalto', 'repo1-path') | map(attribute='value') | list | first }}" + when: pgbackrest_repo_type | lower == 'posix' + + - name: "Make sure the {{ repo1_path }} directory exists" + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.file: + path: "{{ repo1_path }}" + state: directory + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0750" + when: repo1_path | default('') | length > 0 + + - name: Create stanza "{{ pgbackrest_stanza }}" + become: true + become_user: "{{ pgbackrest_repo_user }}" + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.command: "pgbackrest --stanza={{ pgbackrest_stanza }} --no-online stanza-create" + register: stanza_create_result + changed_when: + - stanza_create_result.rc == 0 + - stanza_create_result.stdout is not search("already exists") + when: + - pgbackrest_repo_host | length > 0 + tags: pgbackrest, pgbackrest_stanza_create diff --git a/automation/roles/pgbackrest/tasks/auto_conf.yml b/automation/roles/pgbackrest/tasks/auto_conf.yml new file mode 100644 index 000000000..9fa426cce --- /dev/null +++ b/automation/roles/pgbackrest/tasks/auto_conf.yml @@ -0,0 +1,225 @@ +# yamllint disable rule:line-length +--- +# AWS S3 bucket (if 'cloud_provider=aws') +- name: "Set variable 'pgbackrest_conf' for backup in AWS S3 bucket" + ansible.builtin.set_fact: + pgbackrest_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "s3" } + - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" } + - { option: "repo1-s3-key", value: "{{ PGBACKREST_S3_KEY | default(lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID')) }}" } + - { option: "repo1-s3-key-secret", value: "{{ PGBACKREST_S3_KEY_SECRET | default(lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY')) }}" } + - { option: "repo1-s3-bucket", value: "{{ PGBACKREST_S3_BUCKET | default(aws_s3_bucket_name | default(patroni_cluster_name + '-backup')) }}" } + - { + option: "repo1-s3-endpoint", + value: "{{ PGBACKREST_S3_ENDPOINT | default('s3.'
+ (aws_s3_bucket_region | default(server_location)) + '.amazonaws.com') }}", + } + - { option: "repo1-s3-region", value: "{{ PGBACKREST_S3_REGION | default(aws_s3_bucket_region | default(server_location)) }}" } + - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" } + - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" } + - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "archive-async", value: "y" } + - { option: "archive-get-queue-max", value: "1GiB" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" } + stanza: + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'aws' + +# GCS Bucket (if 'cloud_provider=gcp') +- block: + - name: "Set variable 'pgbackrest_conf' for backup in GCS Bucket" + ansible.builtin.set_fact: + pgbackrest_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "gcs" } + - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" } + - { option: "repo1-gcs-key", value: "{{ PGBACKREST_GCS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" } + - { option: "repo1-gcs-bucket", value: "{{ PGBACKREST_GCS_BUCKET | default(gcp_bucket_name | default(patroni_cluster_name + '-backup')) }}" } + - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" } + - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" } + - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "archive-async", value: "y" } + - { option: "archive-get-queue-max", value: "1GiB" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" } + stanza: + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } + no_log: true # do not output contents to the ansible log + + # if 'gcs_key_file' is not defined, copy GCS key file from GCP_SERVICE_ACCOUNT_CONTENTS environment variable. 
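Every cloud profile in this file reads the same PGBACKREST_* override variables before falling back to computed defaults, so individual options can be pinned without redefining the whole pgbackrest_conf fact; an extra-vars sketch (values illustrative):

PGBACKREST_RETENTION_FULL: "7"
PGBACKREST_PROCESS_MAX: "4"
PGBACKREST_S3_BUCKET: "my-cluster-backup"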
+ - block: + - name: "Get GCP service account contents from localhost" + ansible.builtin.set_fact: + gcp_service_account_contents: "{{ lookup('ansible.builtin.env', 'GCP_SERVICE_ACCOUNT_CONTENTS') }}" + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output GCP service account contents to the ansible log + + - name: "Copy GCP service account contents to {{ PGBACKREST_GCS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + ansible.builtin.copy: + content: "{{ gcp_service_account_contents }}" + dest: "{{ PGBACKREST_GCS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + mode: "0600" + owner: "postgres" + group: "postgres" + no_log: true # do not output GCP service account contents to the ansible log + when: gcs_key_file is not defined + + # if 'gcs_key_file' is defined, copy this GCS key file. + - name: "Copy GCS key file to {{ PGBACKREST_GCS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + ansible.builtin.copy: + src: "{{ gcs_key_file }}" + dest: "{{ PGBACKREST_GCS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + mode: "0600" + owner: "postgres" + group: "postgres" + no_log: true # do not output GCP service account contents to the ansible log + when: gcs_key_file is defined and gcs_key_file | length > 0 + when: cloud_provider | default('') | lower == 'gcp' + +# Azure Blob Storage (if 'cloud_provider=azure') +- name: "Set variable 'pgbackrest_conf' for backup in Azure Blob Storage" + ansible.builtin.set_fact: + pgbackrest_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "azure" } + - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" } + - { option: "repo1-azure-key", value: "{{ PGBACKREST_AZURE_KEY | default(hostvars['localhost']['azure_storage_account_key'] | default('')) }}" } + - { option: "repo1-azure-key-type", value: "{{ PGBACKREST_AZURE_KEY_TYPE | default('shared') }}" } + - { + option: "repo1-azure-account", + value: "{{ PGBACKREST_AZURE_ACCOUNT | default(azure_blob_storage_account_name | default(patroni_cluster_name | lower | replace('-', '') | truncate(24, true, ''))) }}", + } + - { + option: "repo1-azure-container", + value: "{{ PGBACKREST_AZURE_CONTAINER | default(azure_blob_storage_name | default(patroni_cluster_name + '-backup')) }}", + } + - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" } + - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" } + - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "archive-async", value: "y" } + - { option: "archive-get-queue-max", value: "1GiB" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" } + stanza: + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } + no_log: 
true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'azure' + +# DigitalOcean Spaces Object Storage (if 'cloud_provider=digitalocean') +# Note: requires the Spaces access keys "AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY" (https://cloud.digitalocean.com/account/api/spaces) +- name: "Set variable 'pgbackrest_conf' for backup in DigitalOcean Spaces Object Storage" + ansible.builtin.set_fact: + pgbackrest_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "s3" } + - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" } + - { option: "repo1-s3-key", value: "{{ PGBACKREST_S3_KEY | default(AWS_ACCESS_KEY_ID | default('')) }}" } + - { option: "repo1-s3-key-secret", value: "{{ PGBACKREST_S3_KEY_SECRET | default(AWS_SECRET_ACCESS_KEY | default('')) }}" } + - { option: "repo1-s3-bucket", value: "{{ PGBACKREST_S3_BUCKET | default(digital_ocean_spaces_name | default(patroni_cluster_name + '-backup')) }}" } + - { + option: "repo1-s3-endpoint", + value: "{{ PGBACKREST_S3_ENDPOINT | default('https://' + (digital_ocean_spaces_region | default(server_location)) + '.digitaloceanspaces.com') }}", + } + - { option: "repo1-s3-region", value: "{{ PGBACKREST_S3_REGION | default(digital_ocean_spaces_region | default(server_location)) }}" } + - { option: "repo1-s3-uri-style", value: "{{ PGBACKREST_S3_URI_STYLE | default('path') }}" } + - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" } + - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" } + - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "archive-async", value: "y" } + - { option: "archive-get-queue-max", value: "1GiB" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" } + stanza: + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'digitalocean' + +# Hetzner Object Storage (if 'cloud_provider=hetzner') +- name: "Set variable 'pgbackrest_conf' for backup in Hetzner Object Storage (S3 bucket)" + ansible.builtin.set_fact: + pgbackrest_conf: + global: + - { option: "log-level-file", value: "detail" } + - { option: "log-path", value: "/var/log/pgbackrest" } + - { option: "repo1-type", value: "s3" } + - { option: "repo1-path", value: "{{ PGBACKREST_REPO_PATH | default('/pgbackrest') }}" } + - { option: "repo1-s3-key", value: "{{ PGBACKREST_S3_KEY | default(hetzner_object_storage_access_key | default('')) }}" } + - { option: "repo1-s3-key-secret", value: "{{ PGBACKREST_S3_KEY_SECRET | default(hetzner_object_storage_secret_key | default('')) }}" } + - { option: "repo1-s3-bucket", value: "{{ 
PGBACKREST_S3_BUCKET | default(hetzner_object_storage_name | default(patroni_cluster_name + '-backup')) }}" } + - { + option: "repo1-s3-endpoint", + value: "{{ PGBACKREST_S3_ENDPOINT | default(hetzner_object_storage_endpoint | default('https://' + (hetzner_object_storage_region | default(server_location)) + '.your-objectstorage.com')) }}", + } + - { option: "repo1-s3-region", value: "{{ PGBACKREST_S3_REGION | default(hetzner_object_storage_region | default(server_location)) }}" } + - { option: "repo1-s3-uri-style", value: "{{ PGBACKREST_S3_URI_STYLE | default('path') }}" } + - { option: "repo1-retention-full", value: "{{ PGBACKREST_RETENTION_FULL | default('4') }}" } + - { option: "repo1-retention-archive", value: "{{ PGBACKREST_RETENTION_ARCHIVE | default('4') }}" } + - { option: "repo1-retention-archive-type", value: "{{ PGBACKREST_RETENTION_ARCHIVE_TYPE | default('full') }}" } + - { option: "repo1-bundle", value: "y" } + - { option: "repo1-block", value: "y" } + - { option: "start-fast", value: "y" } + - { option: "stop-auto", value: "y" } + - { option: "link-all", value: "y" } + - { option: "resume", value: "n" } + - { option: "archive-async", value: "y" } + - { option: "archive-get-queue-max", value: "1GiB" } + - { option: "spool-path", value: "/var/spool/pgbackrest" } + - { option: "process-max", value: "{{ PGBACKREST_PROCESS_MAX | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "backup-standby", value: "{{ 'y' if groups['postgres_cluster'] | length > 1 else 'n' }}" } + stanza: + - { option: "log-level-console", value: "info" } + - { option: "recovery-option", value: "recovery_target_action=promote" } + - { option: "pg1-path", value: "{{ postgresql_data_dir }}" } + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'hetzner' diff --git a/roles/pgbackrest/tasks/bootstrap_script.yml b/automation/roles/pgbackrest/tasks/bootstrap_script.yml similarity index 82% rename from roles/pgbackrest/tasks/bootstrap_script.yml rename to automation/roles/pgbackrest/tasks/bootstrap_script.yml index b69c68873..09d3a8ed5 100644 --- a/roles/pgbackrest/tasks/bootstrap_script.yml +++ b/automation/roles/pgbackrest/tasks/bootstrap_script.yml @@ -1,24 +1,21 @@ --- - -- block: # patroni cluster bootstrap script +- block: # patroni cluster bootstrap script - name: Make sure the pgbackrest bootstrap script directory exist - file: + ansible.builtin.file: dest: /etc/patroni state: directory owner: postgres group: postgres - name: Create /etc/patroni/pgbackrest_bootstrap.sh script - template: + ansible.builtin.template: src: templates/pgbackrest_bootstrap.sh.j2 dest: /etc/patroni/pgbackrest_bootstrap.sh owner: postgres group: postgres - mode: 0775 + mode: "0775" when: - patroni_cluster_bootstrap_method is defined - patroni_cluster_bootstrap_method == "pgbackrest" - "'postgres_cluster' in group_names" tags: pgbackrest, pgbackrest_bootstrap_script - -... 
diff --git a/automation/roles/pgbackrest/tasks/cron.yml b/automation/roles/pgbackrest/tasks/cron.yml new file mode 100644 index 000000000..d4af17e20 --- /dev/null +++ b/automation/roles/pgbackrest/tasks/cron.yml @@ -0,0 +1,63 @@ +--- +- name: Make sure that the cronie package is installed + ansible.builtin.package: + name: cronie + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "RedHat" + tags: pgbackrest_cron + +- name: Make sure that the cron package is installed + ansible.builtin.apt: + name: cron + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "Debian" + tags: pgbackrest_cron + +- name: Add pgbackrest cron jobs on database server + ansible.builtin.cron: + cron_file: "{{ item.file | default('') }}" + user: "{{ item.user | default('postgres') }}" + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + day: "{{ item.day | default('*') }}" + month: "{{ item.month | default('*') }}" + weekday: "{{ item.weekday | default('*') }}" + name: "{{ item.name }}" + disabled: "{{ item.disabled | default(False) }}" + state: "{{ item.state | default('present') }}" + job: "{{ item.job }}" + loop: "{{ pgbackrest_cron_jobs }}" + when: + - "'postgres_cluster' in group_names" + - pgbackrest_repo_host | default('') | length < 1 + tags: pgbackrest_cron + +# Dedicated pgbackrest server (if "repo_host" is set) +- name: Add pgbackrest cron jobs on pgbackrest server + ansible.builtin.cron: + cron_file: "{{ item.file | default('') }}" + user: "{{ item.user | default('postgres') }}" + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + day: "{{ item.day | default('*') }}" + month: "{{ item.month | default('*') }}" + weekday: "{{ item.weekday | default('*') }}" + name: "{{ item.name }}" + disabled: "{{ item.disabled | default(False) }}" + state: "{{ item.state | default('present') }}" + job: "{{ item.job }}" + loop: "{{ pgbackrest_cron_jobs }}" + when: + - "'pgbackrest' in group_names" + - pgbackrest_repo_host | default('') | length > 0 + tags: pgbackrest_cron diff --git a/automation/roles/pgbackrest/tasks/main.yml b/automation/roles/pgbackrest/tasks/main.yml new file mode 100644 index 000000000..190c38449 --- /dev/null +++ b/automation/roles/pgbackrest/tasks/main.yml @@ -0,0 +1,224 @@ +--- +# Automatic setup of the backup configuration based on the selected cloud provider. +# if 'cloud_provider' is 'aws', 'gcp', 'azure', 'digitalocean'. 
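Both cron loops in the file above consume the same pgbackrest_cron_jobs structure, where every field except name and job has a default; a sketch of two typical entries (schedule illustrative):

pgbackrest_cron_jobs:
  - name: "pgBackRest: Full Backup"
    file: /etc/cron.d/pgbackrest
    user: postgres
    minute: "30"
    hour: "3"
    weekday: "0"
    job: "pgbackrest --stanza={{ pgbackrest_stanza }} --type=full backup"
  - name: "pgBackRest: Diff Backup"
    file: /etc/cron.d/pgbackrest
    user: postgres
    minute: "30"
    hour: "3"
    weekday: "1-6"
    job: "pgbackrest --stanza={{ pgbackrest_stanza }} --type=diff backup"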
+- ansible.builtin.import_tasks: auto_conf.yml + when: + - cloud_provider | default('') | length > 0 + - pgbackrest_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: pgbackrest, pgbackrest_conf + +- block: + # Debian pgdg repo + - name: Make sure the gnupg, apt-transport-https and python3-debian packages are present + ansible.builtin.apt: + pkg: + - gnupg + - apt-transport-https + - python3-debian + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + + - name: Add pgdg repository and signing key + ansible.builtin.deb822_repository: + name: "{{ pgbackrest_repo_name | default('apt-postgresql-org') }}" + types: [deb] + uris: "https://apt.postgresql.org/pub/repos/apt/" + signed_by: "https://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc" + suites: "{{ ansible_distribution_release }}-pgdg" + components: [main] + state: present + enabled: true + when: ansible_os_family == "Debian" + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "Debian" + - pgbackrest_install_from_pgdg_repo|bool + tags: pgbackrest, pgbackrest_repo, pgbackrest_install + +- block: + # RedHat pgdg repo + - name: Get pgdg-redhat-repo-latest.noarch.rpm + ansible.builtin.get_url: + url: "https://download.postgresql.org/pub/repos/yum/reporpms/EL-%7B%7B%20ansible_distribution_major_version%20%7D%7D-%7B%7B%20pgdg_architecture_map[ansible_architecture]%20%7D%7D/pgdg-redhat-repo-latest.noarch.rpm" # yamllint disable rule:line-length + dest: /tmp/ + timeout: 30 + validate_certs: false + + - name: Make sure pgdg repository is installed + ansible.builtin.package: + name: /tmp/pgdg-redhat-repo-latest.noarch.rpm + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + + - name: Clean dnf cache + ansible.builtin.command: dnf clean all + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - ansible_os_family == "RedHat" + - pgbackrest_install_from_pgdg_repo|bool + tags: pgbackrest, pgbackrest_repo, pgbackrest_install + +- name: Install pgbackrest + ansible.builtin.package: + name: pgbackrest + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + tags: pgbackrest, pgbackrest_install + +- block: + - name: Ensure log directory exists + ansible.builtin.file: + path: "{{ item.value }}" + state: directory + owner: postgres + group: postgres + mode: "0755" + loop: "{{ pgbackrest_conf.global }}" + when: item.option == 'log-path' + loop_control: + label: "{{ item.value }}" + + - name: Ensure repo directory exists + ansible.builtin.file: + path: "{{ item.value }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + loop: "{{ pgbackrest_conf.global }}" + when: item.option == 'repo1-path' and pgbackrest_repo_host | length < 1 + loop_control: + label: "{{ item.value }}" + + - name: Ensure spool directory exists + ansible.builtin.file: + path: "{{ item.value }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + loop: "{{ pgbackrest_conf.global }}" + when: item.option == 'spool-path' + loop_control: + label: "{{ item.value }}" + + - name: Ensure config directory exists + ansible.builtin.file: + path: "{{ pgbackrest_conf_file 
| dirname }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + + - name: "Generate conf file {{ pgbackrest_conf_file }}" + ansible.builtin.template: + src: pgbackrest.conf.j2 + dest: "{{ pgbackrest_conf_file }}" + owner: postgres + group: postgres + mode: "0644" + when: "'postgres_cluster' in group_names" + tags: pgbackrest, pgbackrest_conf + +# Dedicated pgbackrest server (if "repo_host" is set) +- block: + - name: Ensure log directory exists + ansible.builtin.file: + path: "{{ item.value }}" + state: directory + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0755" + loop: "{{ pgbackrest_server_conf.global }}" + when: item.option == 'log-path' + loop_control: + label: "{{ item.value }}" + + - name: Ensure repo directory exists + ansible.builtin.file: + path: "{{ item.value }}" + state: directory + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0750" + loop: "{{ pgbackrest_server_conf.global }}" + when: item.option == 'repo1-path' + loop_control: + label: "{{ item.value }}" + + - name: Ensure config directory exists + ansible.builtin.file: + path: "{{ pgbackrest_conf_file | dirname }}" + state: directory + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0750" + + - name: Ensure stanza config directory exists + ansible.builtin.file: + path: "{{ pgbackrest_conf_file | dirname }}/conf.d" + state: directory + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0750" + + - name: "Generate global conf file {{ pgbackrest_conf_file }}" + ansible.builtin.template: + src: pgbackrest.server.conf.j2 + dest: "{{ pgbackrest_conf_file }}" + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0644" + + - name: "Generate stanza conf file {{ pgbackrest_conf_file | dirname }}/conf.d/{{ pgbackrest_stanza }}.conf" + ansible.builtin.template: + src: pgbackrest.server.stanza.conf.j2 + dest: "{{ pgbackrest_conf_file | dirname }}/conf.d/{{ pgbackrest_stanza }}.conf" + owner: "{{ pgbackrest_repo_user }}" + group: "{{ pgbackrest_repo_user }}" + mode: "0644" + when: + - "'pgbackrest' in group_names" + - pgbackrest_repo_host is defined + - pgbackrest_repo_host | length > 0 + tags: pgbackrest, pgbackrest_conf + +# if 'pgbackrest_repo_host' or 'backup-standby' are specified +- ansible.builtin.import_tasks: ssh_keys.yml + when: + - (pgbackrest_repo_host is defined and pgbackrest_repo_host | length > 0) or + (pgbackrest_conf.global | selectattr('option', 'equalto', 'backup-standby') | map(attribute='value') | list | last | default('') == 'y') + - not ansible_check_mode + tags: pgbackrest, pgbackrest_ssh_keys + +- ansible.builtin.import_tasks: cron.yml + when: + - pgbackrest_cron_jobs is defined + - pgbackrest_cron_jobs | length > 0 + tags: pgbackrest, pgbackrest_cron +# - import_tasks: bootstrap_script.yml +# when: +# - patroni_cluster_bootstrap_method is defined +# - patroni_cluster_bootstrap_method == "pgbackrest" +# - "'postgres_cluster' in group_names" +# tags: pgbackrest, pgbackrest_bootstrap_script diff --git a/roles/pgbackrest/tasks/ssh_keys.yml b/automation/roles/pgbackrest/tasks/ssh_keys.yml similarity index 58% rename from roles/pgbackrest/tasks/ssh_keys.yml rename to automation/roles/pgbackrest/tasks/ssh_keys.yml index e3cb209d8..061d8872c 100644 --- a/roles/pgbackrest/tasks/ssh_keys.yml +++ b/automation/roles/pgbackrest/tasks/ssh_keys.yml @@ -1,8 +1,28 @@ --- -# yamllint disable rule:line-length +- name: Ensure that the 
openssh-client package is installed + become: true + become_user: root + ansible.builtin.package: + name: openssh-client + state: present + when: ansible_os_family == "Debian" + +- name: Ensure that the openssh-clients package is installed + become: true + become_user: root + ansible.builtin.package: + name: openssh-clients + state: present + when: ansible_os_family == "RedHat" + +- name: Ensure "{{ pgbackrest_repo_user }}" exists on pgbackrest server + ansible.builtin.user: + name: "{{ pgbackrest_repo_user }}" + state: present + when: "'pgbackrest' in group_names" - name: ssh_keys | Ensure ssh key are created for "{{ pgbackrest_repo_user }}" user on pgbackrest server - user: + ansible.builtin.user: name: "{{ pgbackrest_repo_user }}" generate_ssh_key: true ssh_key_bits: 2048 @@ -10,7 +30,7 @@ when: "'pgbackrest' in group_names" - name: ssh_keys | Ensure ssh key are created for "postgres" user on database servers - user: + ansible.builtin.user: name: "postgres" generate_ssh_key: true ssh_key_bits: 2048 @@ -18,37 +38,48 @@ when: "'postgres_cluster' in group_names" - name: ssh_keys | Get public ssh key from pgbackrest server - slurp: + ansible.builtin.slurp: src: "~{{ pgbackrest_repo_user }}/.ssh/id_rsa.pub" - register: pgbackrest_sshkey + register: pgbackrest_server_sshkey changed_when: false when: "'pgbackrest' in group_names" - name: ssh_keys | Get public ssh key from database servers - slurp: + ansible.builtin.slurp: src: "~postgres/.ssh/id_rsa.pub" register: postgres_cluster_sshkey changed_when: false when: "'postgres_cluster' in group_names" - name: ssh_keys | Add pgbackrest ssh key in "~postgres/.ssh/authorized_keys" on database servers - authorized_key: + ansible.posix.authorized_key: user: postgres state: present - key: "{{ hostvars[item].pgbackrest_sshkey['content'] | b64decode }}" - loop: "{{ groups['pgbackrest'] }}" + key: "{{ hostvars[item].pgbackrest_server_sshkey['content'] | b64decode }}" + loop: "{{ groups['pgbackrest'] | default([]) }}" when: "'postgres_cluster' in group_names" - name: ssh_keys | Add database ssh keys in "~{{ pgbackrest_repo_user }}/.ssh/authorized_keys" on pgbackrest server - authorized_key: + ansible.posix.authorized_key: user: "{{ pgbackrest_repo_user }}" state: present key: "{{ hostvars[item].postgres_cluster_sshkey['content'] | b64decode }}" loop: "{{ groups['postgres_cluster'] }}" when: "'pgbackrest' in group_names" +# if 'backup-standby' are specified in pgbackrest_conf.global +- name: ssh_keys | Add ssh keys in "~postgres/.ssh/authorized_keys" on database servers + ansible.posix.authorized_key: + user: postgres + state: present + key: "{{ hostvars[item].postgres_cluster_sshkey['content'] | b64decode }}" + loop: "{{ groups['postgres_cluster'] }}" + when: + - "'postgres_cluster' in group_names" + - pgbackrest_conf.global | selectattr('option', 'equalto', 'backup-standby') | map(attribute='value') | list | last | default('') == 'y' + - name: known_hosts | Get public ssh keys of hosts (ssh-keyscan) - command: "ssh-keyscan -trsa -p {{ ansible_ssh_port | default(22) }} {{ item }}" + ansible.builtin.command: "ssh-keyscan -trsa -p {{ ansible_ssh_port | default(22) }} {{ item }}" loop: "{{ groups['all'] }}" register: ssh_known_host_keyscan changed_when: false @@ -56,7 +87,7 @@ - name: known_hosts | add ssh public keys in "~postgres/.ssh/known_hosts" on database servers become: true become_user: postgres - known_hosts: + ansible.builtin.known_hosts: host: "{{ item.item }}" key: "{{ item.stdout }}" path: "~postgres/.ssh/known_hosts" @@ -67,12 +98,10 @@ - name: 
known_hosts | add ssh public keys in "~{{ pgbackrest_repo_user }}/.ssh/known_hosts" on pgbackrest server become: true become_user: "{{ pgbackrest_repo_user }}" - known_hosts: + ansible.builtin.known_hosts: host: "{{ item.item }}" key: "{{ item.stdout }}" path: "~{{ pgbackrest_repo_user }}/.ssh/known_hosts" no_log: true loop: "{{ ssh_known_host_keyscan.results }}" when: "'pgbackrest' in group_names" - -... diff --git a/automation/roles/pgbackrest/templates/pgbackrest.conf.j2 b/automation/roles/pgbackrest/templates/pgbackrest.conf.j2 new file mode 100644 index 000000000..db3b97565 --- /dev/null +++ b/automation/roles/pgbackrest/templates/pgbackrest.conf.j2 @@ -0,0 +1,17 @@ +[global] +{% for global in pgbackrest_conf.global %} +{{ global.option }}={{ global.value }} +{% endfor %} + +[{{ pgbackrest_stanza }}] +{% for stanza in pgbackrest_conf.stanza %} +{{ stanza.option }}={{ stanza.value }} +{% endfor %} +{% if pgbackrest_conf.global | selectattr('option', 'equalto', 'backup-standby') | map(attribute='value') | list | last | default('') == 'y' %} +{% set pg_standby_hosts = groups['postgres_cluster'] | reject('equalto', inventory_hostname) | list %} +{% for host in pg_standby_hosts %} +pg{{ loop.index + 1 }}-host={{ host }} +pg{{ loop.index + 1 }}-port={{ postgresql_port }} +pg{{ loop.index + 1 }}-path={{ postgresql_data_dir }} +{% endfor %} +{% endif %} diff --git a/automation/roles/pgbackrest/templates/pgbackrest.server.conf.j2 b/automation/roles/pgbackrest/templates/pgbackrest.server.conf.j2 new file mode 100644 index 000000000..c4a1f5388 --- /dev/null +++ b/automation/roles/pgbackrest/templates/pgbackrest.server.conf.j2 @@ -0,0 +1,8 @@ +[global] +{% for global in pgbackrest_server_conf.global %} +{{ global.option }}={{ global.value }} +{% endfor %} + +# Include stanzas configuration files +repo1-host-config-include-path = {{ pgbackrest_conf_file | dirname }}/conf.d + diff --git a/automation/roles/pgbackrest/templates/pgbackrest.server.stanza.conf.j2 b/automation/roles/pgbackrest/templates/pgbackrest.server.stanza.conf.j2 new file mode 100644 index 000000000..c2de1194e --- /dev/null +++ b/automation/roles/pgbackrest/templates/pgbackrest.server.stanza.conf.j2 @@ -0,0 +1,8 @@ +[{{ pgbackrest_stanza }}] +{% for host in groups['postgres_cluster'] %} +pg{{ loop.index }}-host={{ host }} +pg{{ loop.index }}-port={{ postgresql_port }} +pg{{ loop.index }}-socket-path={{ postgresql_unix_socket_dir }} +pg{{ loop.index }}-path={{ postgresql_data_dir }} +{% endfor %} + diff --git a/roles/pgbackrest/templates/pgbackrest_bootstrap.sh.j2 b/automation/roles/pgbackrest/templates/pgbackrest_bootstrap.sh.j2 similarity index 100% rename from roles/pgbackrest/templates/pgbackrest_bootstrap.sh.j2 rename to automation/roles/pgbackrest/templates/pgbackrest_bootstrap.sh.j2 diff --git a/automation/roles/pgbouncer/README.md b/automation/roles/pgbouncer/README.md new file mode 100644 index 000000000..607ff4620 --- /dev/null +++ b/automation/roles/pgbouncer/README.md @@ -0,0 +1 @@ +# Ansible Role: pgbouncer diff --git a/automation/roles/pgbouncer/config/README.md b/automation/roles/pgbouncer/config/README.md new file mode 100644 index 000000000..fe5758ed8 --- /dev/null +++ b/automation/roles/pgbouncer/config/README.md @@ -0,0 +1 @@ +# Ansible Role: pgbouncer/config diff --git a/automation/roles/pgbouncer/config/tasks/main.yml b/automation/roles/pgbouncer/config/tasks/main.yml new file mode 100644 index 000000000..79e1e998c --- /dev/null +++ b/automation/roles/pgbouncer/config/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- 
name: Ensure config directory "{{ pgbouncer_conf_dir }}" exists + ansible.builtin.file: + path: "{{ pgbouncer_conf_dir }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + tags: pgbouncer, pgbouncer_conf + +- name: Update pgbouncer.ini + ansible.builtin.template: + src: ../templates/pgbouncer.ini.j2 + dest: "{{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + owner: postgres + group: postgres + mode: "0640" + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + notify: "reload pgbouncer" + when: existing_pgcluster is not defined or not existing_pgcluster | bool + tags: pgbouncer, pgbouncer_conf + +# if pgbouncer_auth_user is 'false' +- block: + - name: Get users and password md5 from pg_shadow + run_once: true + become: true + become_user: postgres + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "SELECT concat('\"', usename, '\" \"', passwd, '\"') FROM pg_shadow where usename != '{{ patroni_replication_username }}'" + register: pg_shadow_result + changed_when: false + delegate_to: "{{ groups.master[0] }}" + + - name: "Generate {{ pgbouncer_conf_dir }}/userlist.txt" + become: true + become_user: postgres + ansible.builtin.copy: + content: | + {{ pg_shadow_result.stdout }} + dest: "{{ pgbouncer_conf_dir }}/userlist.txt" + notify: "reload pgbouncer" + when: + - pg_shadow_result.rc == 0 + - pg_shadow_result.stdout is defined + - pg_shadow_result.stdout | length > 0 + when: not pgbouncer_auth_user | bool + tags: pgbouncer, pgbouncer_conf, pgbouncer_generate_userlist + +# if pgbouncer_auth_user is 'true' +- block: + - name: "Check if 'user_search' function exists" + become: true + become_user: postgres + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pgbouncer_auth_dbname }} -tAXc + "select exists(select proname from pg_proc where proname='user_search')" + register: exists_func_user + changed_when: false + + - name: "Create 'user_search' function for pgbouncer 'auth_query' option" + become: true + become_user: postgres + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pgbouncer_auth_dbname }} -tAXc + "CREATE FUNCTION user_search(uname TEXT) RETURNS TABLE (usename name, passwd text) AS + $$ + SELECT usename, passwd FROM pg_shadow WHERE usename=$1; + $$ + LANGUAGE sql SECURITY DEFINER; + REVOKE ALL ON FUNCTION user_search(uname TEXT) FROM public; + GRANT EXECUTE ON FUNCTION user_search(uname TEXT) TO {{ pgbouncer_auth_username }}" + when: exists_func_user.stdout == "f" + when: + - pgbouncer_auth_user | bool + - patroni_standby_cluster.host | default('') | length < 1 # do not perform on the Standby Cluster leader + - inventory_hostname == groups['master'][0] or (groups['primary'] is defined and inventory_hostname in groups['primary']) + tags: pgbouncer, pgbouncer_conf, pgbouncer_auth_query diff --git a/automation/roles/pgbouncer/handlers/main.yml b/automation/roles/pgbouncer/handlers/main.yml new file mode 100644 index 000000000..a3d6c48d0 --- /dev/null +++ b/automation/roles/pgbouncer/handlers/main.yml @@ -0,0 +1,36 @@ +--- +- name: Restart pgbouncer service + ansible.builtin.systemd: + name: pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }} + enabled: true + state:
restarted + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + register: pgbouncer_restart_result + retries: 3 + delay: 5 + until: pgbouncer_restart_result is success + listen: "restart pgbouncer" + +- name: Wait for port "{{ pgbouncer_listen_port }}" to become open on the host + ansible.builtin.wait_for: + port: "{{ pgbouncer_listen_port }}" + host: "{{ inventory_hostname }}" + state: started + timeout: 120 + delay: 5 + ignore_errors: false + listen: "restart pgbouncer" + +- name: Reload pgbouncer service + ansible.builtin.systemd: + name: pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }} + state: reloaded + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + listen: "reload pgbouncer" + ignore_errors: true # Added to prevent test failures in CI. diff --git a/automation/roles/pgbouncer/tasks/main.yml b/automation/roles/pgbouncer/tasks/main.yml new file mode 100644 index 000000000..78101da6d --- /dev/null +++ b/automation/roles/pgbouncer/tasks/main.yml @@ -0,0 +1,235 @@ +--- +# Debian +- name: Install pgbouncer package + ansible.builtin.package: + name: pgbouncer + state: latest + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "Debian" + tags: pgbouncer_install, pgbouncer + +# RedHat +- name: Install pgbouncer package + ansible.builtin.dnf: + name: pgbouncer + state: latest + disablerepo: AppStream + register: dnf_status + until: dnf_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "RedHat" + tags: pgbouncer_install, pgbouncer + +- name: Ensure config directory "{{ pgbouncer_conf_dir }}" exists + ansible.builtin.file: + path: "{{ pgbouncer_conf_dir }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + tags: pgbouncer_conf, pgbouncer + +- name: Ensure log directory "{{ pgbouncer_log_dir }}" exists + ansible.builtin.file: + path: "{{ pgbouncer_log_dir }}" + state: directory + owner: postgres + group: postgres + mode: "0750" + tags: pgbouncer_conf, pgbouncer + +- name: Check if pgbouncer systemd service file exists + ansible.builtin.stat: + path: /etc/systemd/system/pgbouncer.service + register: pgbouncer_systemd_service + tags: pgbouncer_service, pgbouncer + +- name: Stop and disable standard init script + ansible.builtin.service: + name: pgbouncer + state: stopped + enabled: false + when: + - ansible_os_family == "Debian" + - not pgbouncer_systemd_service.stat.exists + tags: pgbouncer_service, pgbouncer + +- name: Configure pgbouncer systemd service file + ansible.builtin.template: + src: templates/pgbouncer.service.j2 + dest: "/etc/systemd/system/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.service" + owner: postgres + group: postgres + mode: "0644" + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + notify: "restart pgbouncer" + tags: pgbouncer_service, pgbouncer + +- name: Ensure pgbouncer service is enabled + ansible.builtin.systemd: + daemon_reload: true + name: "pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}" + enabled: true + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list
}}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + tags: pgbouncer_service, pgbouncer + +- block: + # workaround for pgbouncer from postgrespro repo + - name: Check that /usr/bin/pgbouncer exists + ansible.builtin.stat: + path: /usr/bin/pgbouncer + register: pgbouncer_bin + + - name: Create a symlink to /usr/sbin/pgbouncer + ansible.builtin.file: + src: /usr/sbin/pgbouncer + dest: /usr/bin/pgbouncer + owner: root + group: root + state: link + when: not pgbouncer_bin.stat.exists + when: + - ansible_os_family == "RedHat" + - postgresql_packages|join(" ") is search("postgrespro") + tags: pgbouncer_service, pgbouncer + +- name: Enable log rotation with logrotate + ansible.builtin.copy: + content: | + {{ pgbouncer_log_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.log { + daily + rotate 7 + copytruncate + delaycompress + compress + notifempty + missingok + su root root + } + dest: "/etc/logrotate.d/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}" + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + tags: pgbouncer_logrotate, pgbouncer + +- name: Configure pgbouncer.ini + ansible.builtin.template: + src: templates/pgbouncer.ini.j2 + dest: "{{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + owner: postgres + group: postgres + mode: "0640" + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + notify: "reload pgbouncer" + when: existing_pgcluster is not defined or not existing_pgcluster|bool + tags: pgbouncer_conf, pgbouncer + +- name: Create userlist.txt + ansible.builtin.template: + src: templates/userlist.txt.j2 + dest: "{{ pgbouncer_conf_dir }}/userlist.txt" + owner: postgres + group: postgres + mode: "0640" + when: + - (existing_pgcluster is not defined or not existing_pgcluster|bool) + - not pgbouncer_auth_user|bool + tags: pgbouncer + +- block: + # for add_pgnode.yml + - name: Fetch pgbouncer.ini file from master + run_once: true + ansible.builtin.fetch: + src: "{{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + dest: files/ + validate_checksum: true + flat: true + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + delegate_to: "{{ groups.master[0] }}" + + - name: Fetch userlist.txt conf file from master + run_once: true + ansible.builtin.fetch: + src: "{{ pgbouncer_conf_dir }}/userlist.txt" + dest: files/ + validate_checksum: true + flat: true + delegate_to: "{{ groups.master[0] }}" + when: not pgbouncer_auth_user|bool + + - name: Copy pgbouncer.ini file to replica + ansible.builtin.copy: + src: "files/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + dest: "{{ pgbouncer_conf_dir }}" + owner: postgres + group: postgres + mode: "0640" + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + + - name: Copy userlist.txt conf file to replica + ansible.builtin.copy: + src: files/userlist.txt + dest: "{{ pgbouncer_conf_dir }}" + owner: postgres + group: postgres + mode: "0640" + when: not pgbouncer_auth_user|bool + + - name: Remove
pgbouncer.ini file from localhost + become: false + run_once: true + ansible.builtin.file: + path: "files/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + state: absent + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + delegate_to: localhost + + - name: Remove userlist.txt conf file from localhost + become: false + run_once: true + ansible.builtin.file: + path: files/userlist.txt + state: absent + delegate_to: localhost + when: not pgbouncer_auth_user|bool + + - name: Prepare pgbouncer.ini conf file (replace "listen_addr") + ansible.builtin.lineinfile: + path: "{{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini" + regexp: "^listen_addr =" + line: "listen_addr = {{ pgbouncer_listen_addr }}" + backrefs: true + loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}" + loop_control: + index_var: idx + label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}" + notify: "reload pgbouncer" + when: pgbouncer_listen_addr != "0.0.0.0" + when: existing_pgcluster is defined and existing_pgcluster|bool + tags: pgbouncer_conf, pgbouncer diff --git a/automation/roles/pgbouncer/templates/pgbouncer.ini.j2 b/automation/roles/pgbouncer/templates/pgbouncer.ini.j2 new file mode 100644 index 000000000..95dd5b267 --- /dev/null +++ b/automation/roles/pgbouncer/templates/pgbouncer.ini.j2 @@ -0,0 +1,59 @@ +[databases] +{% for pool in pgbouncer_pools %} +{{ pool.name }} = host={{ postgresql_unix_socket_dir }} port={{ postgresql_port }} dbname={{ pool.dbname }} {{ pool.pool_parameters }} +{% endfor %} + +* = host={{ postgresql_unix_socket_dir }} port={{ postgresql_port }} + +[pgbouncer] +logfile = {{ pgbouncer_log_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.log +pidfile = /run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}/pgbouncer.pid +listen_addr = {{ pgbouncer_listen_addr | default('0.0.0.0') }} +listen_port = {{ pgbouncer_listen_port | default(6432) }} +unix_socket_dir = /var/run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }} +auth_type = {{ pgbouncer_auth_type }} +{% if pgbouncer_auth_user | bool %} +auth_user = {{ pgbouncer_auth_username }} +auth_dbname = {{ pgbouncer_auth_dbname }} +auth_query = SELECT usename, passwd FROM user_search($1) +{% else %} +auth_file = {{ pgbouncer_conf_dir }}/userlist.txt +{% endif %} +admin_users = {{ pgbouncer_admin_users }} +stats_users = {{ pgbouncer_stats_users }} +ignore_startup_parameters = {{ pgbouncer_ignore_startup_parameters }} + +pool_mode = {{ pgbouncer_default_pool_mode }} +server_reset_query = DISCARD ALL +max_client_conn = {{ pgbouncer_max_client_conn }} +default_pool_size = {{ pgbouncer_default_pool_size }} +query_wait_timeout = {{ pgbouncer_query_wait_timeout }} +reserve_pool_size = 1 +reserve_pool_timeout = 1 +max_db_connections = {{ pgbouncer_max_db_connections }} +pkt_buf = 8192 +listen_backlog = 4096 +max_prepared_statements = {{ pgbouncer_max_prepared_statements }} +so_reuseport = 1 +{% if tls_cert_generate | default(false) | bool %} +{% if pgbouncer_client_tls_sslmode | default('require') != 'disable' %} +client_tls_sslmode = {{ pgbouncer_client_tls_sslmode | default('require') }} +client_tls_key_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_client_tls_key_file | default(tls_privatekey | default('server.key')) }} +client_tls_cert_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_client_tls_cert_file 
| default(tls_cert | default('server.crt')) }} +client_tls_ca_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_client_tls_ca_file | default(tls_ca_cert | default('ca.crt')) }} +client_tls_protocols = {{ pgbouncer_client_tls_protocols | default('secure') }} +client_tls_ciphers = {{ pgbouncer_client_tls_ciphers | default('secure') }} +{% endif %} +{% if pgbouncer_server_tls_sslmode | default('require') != 'disable' %} +server_tls_sslmode = {{ pgbouncer_server_tls_sslmode | default('require') }} +server_tls_key_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_server_tls_key_file | default(tls_privatekey | default('server.key')) }} +server_tls_cert_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_server_tls_cert_file | default(tls_cert | default('server.crt')) }} +server_tls_ca_file = {{ pgbouncer_tls_dir | default('/etc/tls') }}/{{ pgbouncer_server_tls_ca_file | default(tls_ca_cert | default('ca.crt')) }} +server_tls_protocols = {{ pgbouncer_server_tls_protocols | default('secure') }} +server_tls_ciphers = {{ pgbouncer_server_tls_ciphers | default('secure') }} +{% endif %} +{% endif %} +log_connections = 0 +log_disconnections = 0 + +# Documentation https://pgbouncer.github.io/config.html diff --git a/roles/pgbouncer/templates/pgbouncer.service.j2 b/automation/roles/pgbouncer/templates/pgbouncer.service.j2 similarity index 64% rename from roles/pgbouncer/templates/pgbouncer.service.j2 rename to automation/roles/pgbouncer/templates/pgbouncer.service.j2 index 59b8866ac..c69eb1ee5 100644 --- a/roles/pgbouncer/templates/pgbouncer.service.j2 +++ b/automation/roles/pgbouncer/templates/pgbouncer.service.j2 @@ -8,17 +8,17 @@ Type=forking User=postgres Group=postgres -PermissionsStartOnly=true -ExecStartPre=-/bin/mkdir -p /var/run/pgbouncer {{ pgbouncer_log_dir }} -ExecStartPre=/bin/chown -R postgres:postgres /var/run/pgbouncer {{ pgbouncer_log_dir }} +RuntimeDirectory=pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }} +RuntimeDirectoryMode=0755 + {% if ansible_os_family == "Debian" %} -ExecStart=/usr/sbin/pgbouncer -d {{ pgbouncer_conf_dir }}/pgbouncer.ini +ExecStart=/usr/sbin/pgbouncer -d {{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini {% endif %} {% if ansible_os_family == "RedHat" %} -ExecStart=/usr/bin/pgbouncer -d {{ pgbouncer_conf_dir }}/pgbouncer.ini +ExecStart=/usr/bin/pgbouncer -d {{ pgbouncer_conf_dir }}/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}.ini {% endif %} ExecReload=/bin/kill -SIGHUP $MAINPID -PIDFile=/var/run/pgbouncer/pgbouncer.pid +PIDFile=/run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}/pgbouncer.pid Restart=on-failure LimitNOFILE=100000 diff --git a/roles/pgbouncer/templates/userlist.txt.j2 b/automation/roles/pgbouncer/templates/userlist.txt.j2 similarity index 100% rename from roles/pgbouncer/templates/userlist.txt.j2 rename to automation/roles/pgbouncer/templates/userlist.txt.j2 diff --git a/automation/roles/pgpass/README.md b/automation/roles/pgpass/README.md new file mode 100644 index 000000000..f6a07ac35 --- /dev/null +++ b/automation/roles/pgpass/README.md @@ -0,0 +1 @@ +# Ansible Role: pgpass diff --git a/automation/roles/pgpass/tasks/main.yml b/automation/roles/pgpass/tasks/main.yml new file mode 100644 index 000000000..ee47fb30e --- /dev/null +++ b/automation/roles/pgpass/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: "Configure a password file ({{ postgresql_home_dir }}/.pgpass)" + become: true + become_user: root + ansible.builtin.copy: + content: | + {% for pgpass in 
postgresql_pgpass %} + {{ pgpass }} + {% endfor %} + dest: "{{ postgresql_home_dir }}/.pgpass" + owner: postgres + group: postgres + mode: "0600" + no_log: true + when: + - postgresql_pgpass is defined + - postgresql_pgpass | length > 0 + tags: pgpass diff --git a/automation/roles/postgresql_databases/README.md b/automation/roles/postgresql_databases/README.md new file mode 100644 index 000000000..8a18d895c --- /dev/null +++ b/automation/roles/postgresql_databases/README.md @@ -0,0 +1 @@ +# Ansible Role: postgresql_databases diff --git a/roles/postgresql-databases/tasks/main.yml b/automation/roles/postgresql_databases/tasks/main.yml similarity index 58% rename from roles/postgresql-databases/tasks/main.yml rename to automation/roles/postgresql_databases/tasks/main.yml index fe9b11947..528a772db 100644 --- a/roles/postgresql-databases/tasks/main.yml +++ b/automation/roles/postgresql_databases/tasks/main.yml @@ -1,21 +1,21 @@ --- - - name: Make sure the PostgreSQL databases are present become: true become_user: postgres - postgresql_db: + community.postgresql.postgresql_db: name: "{{ item.db }}" owner: "{{ item.owner }}" encoding: "{{ item.encoding }}" lc_collate: "{{ item.lc_collate }}" lc_ctype: "{{ item.lc_ctype }}" - login_unix_socket: "{{ postgresql_unix_socket_dir }}" - port: "{{ postgresql_port }}" - template: "template0" + template: "{{ item.template | default('template0') }}" + login_host: "127.0.0.1" + login_port: "{{ postgresql_port }}" + login_user: "{{ patroni_superuser_username }}" + login_password: "{{ patroni_superuser_password }}" + conn_limit: "{{ item.conn_limit | default(omit) }}" state: present ignore_errors: true loop: "{{ postgresql_databases | flatten(1) }}" when: postgresql_databases is defined and postgresql_databases | length > 0 tags: postgresql_databases - -... 
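# Editor's note: an illustrative sketch (not part of this patch) of the
# `postgresql_databases` variable the task above loops over; the database name,
# owner and locale values here are assumptions:
#   postgresql_databases:
#     - db: app_db
#       owner: app_user
#       encoding: UTF8
#       lc_collate: en_US.UTF-8
#       lc_ctype: en_US.UTF-8
#       template: template0  # optional, defaults to 'template0'
#       conn_limit: 100      # optional, omitted when not set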
diff --git a/automation/roles/postgresql_extensions/README.md b/automation/roles/postgresql_extensions/README.md new file mode 100644 index 000000000..7a41a8384 --- /dev/null +++ b/automation/roles/postgresql_extensions/README.md @@ -0,0 +1 @@ +# Ansible Role: postgresql_extensions diff --git a/automation/roles/postgresql_extensions/tasks/main.yml b/automation/roles/postgresql_extensions/tasks/main.yml new file mode 100644 index 000000000..35f051ac4 --- /dev/null +++ b/automation/roles/postgresql_extensions/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Add extensions to the databases + become: true + become_user: postgres + community.postgresql.postgresql_ext: + name: "{{ item.ext }}" + schema: "{{ item.schema | default('public') }}" + login_db: "{{ item.db }}" + login_host: "127.0.0.1" + login_port: "{{ postgresql_port }}" + login_user: "{{ patroni_superuser_username }}" + login_password: "{{ patroni_superuser_password }}" + state: present + ignore_errors: true + loop: "{{ postgresql_extensions | flatten(1) }}" + when: + - postgresql_extensions is defined + - postgresql_extensions | length > 0 + tags: postgresql_extensions diff --git a/automation/roles/postgresql_privs/README.md b/automation/roles/postgresql_privs/README.md new file mode 100644 index 000000000..64a6954a6 --- /dev/null +++ b/automation/roles/postgresql_privs/README.md @@ -0,0 +1 @@ +# Ansible Role: postgresql_privs diff --git a/automation/roles/postgresql_privs/tasks/main.yml b/automation/roles/postgresql_privs/tasks/main.yml new file mode 100644 index 000000000..37008a3b7 --- /dev/null +++ b/automation/roles/postgresql_privs/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Grant/revoke privileges on objects + community.postgresql.postgresql_privs: + roles: "{{ item.role }}" + privs: "{{ item.privs }}" + type: "{{ item.type }}" + objs: "{{ item.objs }}" + schema: "{{ item.schema | default(omit) }}" + login_db: "{{ item.db }}" + login_host: "127.0.0.1" + login_port: "{{ postgresql_port }}" + login_user: "{{ patroni_superuser_username }}" + login_password: "{{ patroni_superuser_password }}" + state: "{{ item.state | default('present') }}" + ignore_errors: true # noqa ignore-errors + loop: "{{ postgresql_privs | flatten(1) }}" + when: + - postgresql_privs | default('') | length > 0 + - item.role | default('') | length > 0 + - item.db | default('') | length > 0 + - patroni_standby_cluster.host | default('') | length < 1 # do not perform on the Standby Cluster leader + tags: postgresql_privs diff --git a/automation/roles/postgresql_schemas/README.md b/automation/roles/postgresql_schemas/README.md new file mode 100644 index 000000000..bc9d01442 --- /dev/null +++ b/automation/roles/postgresql_schemas/README.md @@ -0,0 +1 @@ +# Ansible Role: postgresql_schemas diff --git a/automation/roles/postgresql_schemas/tasks/main.yml b/automation/roles/postgresql_schemas/tasks/main.yml new file mode 100644 index 000000000..46e3dfa6f --- /dev/null +++ b/automation/roles/postgresql_schemas/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Make sure the PostgreSQL schemas are present + become: true + become_user: postgres + community.postgresql.postgresql_schema: + name: "{{ item.schema }}" + database: "{{ item.db }}" + owner: "{{ item.owner }}" + login_unix_socket: "{{ postgresql_unix_socket_dir }}" + port: "{{ postgresql_port }}" + state: present + ignore_errors: true + loop: "{{ postgresql_schemas | flatten(1) }}" + when: postgresql_schemas is defined and postgresql_schemas | length > 0 + tags: postgresql_schemas diff --git 
a/automation/roles/postgresql_users/README.md b/automation/roles/postgresql_users/README.md new file mode 100644 index 000000000..ecf59b091 --- /dev/null +++ b/automation/roles/postgresql_users/README.md @@ -0,0 +1 @@ +# Ansible Role: postgresql_users diff --git a/automation/roles/postgresql_users/tasks/main.yml b/automation/roles/postgresql_users/tasks/main.yml new file mode 100644 index 000000000..e69bde212 --- /dev/null +++ b/automation/roles/postgresql_users/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Make sure the PostgreSQL users are present + become: true + become_user: postgres + community.postgresql.postgresql_user: + name: "{{ item.name }}" + password: "{{ item.password }}" + encrypted: true + role_attr_flags: "{{ item.flags }}" + login_host: "127.0.0.1" + login_port: "{{ postgresql_port }}" + login_user: "{{ patroni_superuser_username }}" + login_password: "{{ patroni_superuser_password }}" + login_db: "postgres" + state: present + ignore_errors: true + loop: "{{ postgresql_users | flatten(1) }}" + loop_control: + label: "{{ item.name }}" + when: + - postgresql_users | default('') | length > 0 + - item.password | default('') | length > 0 # Ensure the password is not empty + - patroni_standby_cluster.host | default('') | length < 1 # do not perform on the Standby Cluster leader + tags: postgresql_users + +- name: Grant roles to users + community.postgresql.postgresql_membership: + group: "{{ item.role | default('') }}" + target_role: "{{ item.name }}" + login_host: "127.0.0.1" + login_port: "{{ postgresql_port }}" + login_user: "{{ patroni_superuser_username }}" + login_password: "{{ patroni_superuser_password }}" + state: present + ignore_errors: true + loop: "{{ postgresql_users | flatten(1) }}" + loop_control: + label: "{{ item.name }}" + when: + - postgresql_users | default('') | length > 0 + - item.role | default('') | length > 0 + - patroni_standby_cluster.host | default('') | length < 1 # do not perform on the Standby Cluster leader + tags: postgresql_users diff --git a/automation/roles/pre_checks/README.md b/automation/roles/pre_checks/README.md new file mode 100644 index 000000000..e91a99052 --- /dev/null +++ b/automation/roles/pre_checks/README.md @@ -0,0 +1 @@ +# Ansible Role: pre_checks diff --git a/automation/roles/pre_checks/tasks/extensions.yml b/automation/roles/pre_checks/tasks/extensions.yml new file mode 100644 index 000000000..64657988f --- /dev/null +++ b/automation/roles/pre_checks/tasks/extensions.yml @@ -0,0 +1,103 @@ +# yamllint disable rule:line-length +--- +# Extension Auto-Setup: pre-checks and shared_preload_libraries + +# pre-checks +- name: TimescaleDB | Checking PostgreSQL version + ansible.builtin.fail: + msg: + - "The current PostgreSQL version ({{ postgresql_version }}) is not supported by the TimescaleDB." + - "PostgreSQL version must be {{ timescale_minimal_pg_version | default(12) }} or higher." + when: + - (enable_timescale | default(false) | bool) or (enable_timescaledb | default(false) | bool) + - postgresql_version | string is version(timescale_minimal_pg_version | default(12) | string, '<') + +- name: Timescale (pgvectorscale) | Checking PostgreSQL version + ansible.builtin.fail: + msg: + - "The current PostgreSQL version ({{ postgresql_version }}) is not supported by the pgvectorscale." + - "PostgreSQL version must be {{ pgvectorscale_minimal_pg_version | default(13) }} or higher." 
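# Editor's note: the guard below relies on Jinja's `version` test; an
# illustrative evaluation (values assumed):
#   "12" is version("13", "<")  -> true  (the check fails the play)
#   "16" is version("13", "<")  -> false (the check passes)
# both operands are cast with `| string` to avoid mixed int/string comparisons.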
+ when: + - enable_pgvectorscale | default(false) | bool + - postgresql_version | string is version(pgvectorscale_minimal_pg_version | default(13) | string, '<') + +- name: Timescale (pgvectorscale) | Checking supported operating system and version + ansible.builtin.fail: + msg: + - "pgvectorscale is not supported on {{ ansible_distribution }} {{ ansible_distribution_release }}." + - "Supported OS: Debian (bookworm), Ubuntu (jammy, noble)." + when: + - enable_pgvectorscale | default(false) | bool + - not (ansible_os_family == "Debian" and ansible_distribution_release in ['bookworm', 'jammy', 'noble']) + +- name: ParadeDB | Checking PostgreSQL version + ansible.builtin.fail: + msg: + - "The current PostgreSQL version ({{ postgresql_version }}) is not supported by the ParadeDB (pg_search, pg_analytics)." + - "PostgreSQL version must be {{ paradedb_minimal_pg_version | default(14) }} or higher." + when: + - (enable_paradedb | default(false) | bool) or (enable_pg_search | default(false) | bool) or (enable_pg_analytics | default(false) | bool) + - postgresql_version | string is version(paradedb_minimal_pg_version | default(14) | string, '<') + +- name: ParadeDB | Checking supported operating system and version + ansible.builtin.fail: + msg: + - "ParadeDB (pg_search, pg_analytics) is not supported on {{ ansible_distribution }} {{ ansible_distribution_release }}." + - "Supported OS: Debian (bookworm), Ubuntu (jammy, noble) or RedHat (8, 9)." + when: + - (enable_paradedb | default(false) | bool) or (enable_pg_search | default(false) | bool) or (enable_pg_analytics | default(false) | bool) + - not ( + (ansible_os_family == "Debian" and ansible_distribution_release in ['bookworm', 'jammy', 'noble']) or + (ansible_os_family == "RedHat" and ansible_distribution_major_version in ['8', '9']) + ) + +# shared_preload_libraries +- name: Create a list of extensions + ansible.builtin.set_fact: + extensions: >- + {{ + (extensions | default([])) + + (['timescaledb'] if ((enable_timescale | default(false) | bool) or (enable_timescaledb | default(false) | bool)) else []) + + (['citus'] if (enable_citus | default(false) | bool and postgresql_version | int >= 11 and ansible_architecture in ["x86_64", "amd64"]) else []) + + (['pg_cron'] if (enable_pg_cron | default(false) | bool) else []) + + (['pgaudit'] if (enable_pgaudit | default(false) | bool) else []) + + (['pg_stat_statements'] if (enable_pg_stat_kcache | default(false) | bool) else []) + + (['pg_stat_kcache'] if (enable_pg_stat_kcache | default(false) | bool) else []) + + (['pg_wait_sampling'] if (enable_pg_wait_sampling | default(false) | bool) else []) + + (['pg_partman_bgw'] if (enable_pg_partman | default(false) | bool) else []) + + (['pg_search'] if ((enable_paradedb | default(false) | bool) or (enable_pg_search | default(false) | bool)) else []) + + (['pg_analytics'] if ((enable_paradedb | default(false) | bool) or (enable_pg_analytics | default(false) | bool)) else []) + }} + +- name: Add required extensions to 'shared_preload_libraries' (if missing) + ansible.builtin.set_fact: + # This complex line does several things: + # 1. It takes the current list of PostgreSQL parameters, + # 2. Removes any item where the option is 'shared_preload_libraries', + # 3. Then appends a new 'shared_preload_libraries' item at the end. + # The new value of this item is based on whether extension is already present in the old value. + # If it is not present, it appends ',' to the old value. Otherwise, it leaves the value unchanged. 
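# Editor's note: a worked example of the merge logic described above (values
# are illustrative). Given shared_preload_libraries = "pg_stat_statements,pgaudit":
#   adding 'pg_cron'  -> "pg_stat_statements,pgaudit,pg_cron"  (appended)
#   adding 'citus'    -> "citus,pg_stat_statements,pgaudit"    (prepended, as Citus must load first)
#   adding 'pgaudit'  -> value left unchanged (already present)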
+ postgresql_parameters: >- + {{ postgresql_parameters | rejectattr('option', 'equalto', 'shared_preload_libraries') | list + + [{'option': 'shared_preload_libraries', 'value': new_value}] }} + vars: + # Find the last item in postgresql_parameters where the option is 'shared_preload_libraries' + shared_preload_libraries_item: >- + {{ + postgresql_parameters + | selectattr('option', 'equalto', 'shared_preload_libraries') + | list | last | default({'value': ''}) + }} + # Ensure that all required extensions are added to the 'shared_preload_libraries' parameter. + # 1. If the 'citus' extension is not yet added to 'shared_preload_libraries', it's added to the beginning of the list. + # This is necessary as 'citus' needs to be first in the list as per Citus documentation. + # 2. For all other extensions: if they are not yet added, they are appended to the end of the list. + new_value: >- + {{ + (item ~ ',' ~ shared_preload_libraries_item.value if item == 'citus' and item not in shared_preload_libraries_item.value.split(',') else + (shared_preload_libraries_item.value ~ (',' if shared_preload_libraries_item.value else '') + if item not in shared_preload_libraries_item.value.split(',') else shared_preload_libraries_item.value)) + ~ (item if item not in shared_preload_libraries_item.value.split(',') and item != 'citus' else '') + }} + loop: "{{ extensions | default([]) | unique }}" + when: extensions | default([]) | length > 0 diff --git a/automation/roles/pre_checks/tasks/huge_pages.yml b/automation/roles/pre_checks/tasks/huge_pages.yml new file mode 100644 index 000000000..9f30e5164 --- /dev/null +++ b/automation/roles/pre_checks/tasks/huge_pages.yml @@ -0,0 +1,140 @@ +--- +# Automatically configure "vm.nr_hugepages" for shared_buffers of 8GB or more, +# if 'sysctl_set' is 'true', "vm.nr_hugepages" is undefined or insufficient in sysctl_conf, +# and "huge_pages" is not 'off' in postgresql_parameters. + +- block: + - name: "HugePages | Get shared_buffers value from postgresql_parameters variable" + ansible.builtin.set_fact: + shared_buffers: "{{ postgresql_parameters_shared_buffers }}" + shared_buffers_value: "{{ postgresql_parameters_shared_buffers | regex_search('[0-9]+') | int }}" + shared_buffers_unit: "{{ postgresql_parameters_shared_buffers | regex_search('[A-Za-z]+') | lower }}" + vars: + postgresql_parameters_shared_buffers: >- + {{ + (postgresql_parameters + | selectattr('option', 'equalto', 'shared_buffers') + | map(attribute='value') + | first | default('128MB')) + }} + + - name: "HugePages | Set variable: shared_buffers_gb" + ansible.builtin.set_fact: + shared_buffers_gb: "{{ (shared_buffers_value | int) // 1024 }}" + when: shared_buffers_unit == 'mb' + + - name: "HugePages | Set variable: shared_buffers_gb" + ansible.builtin.set_fact: + shared_buffers_gb: "{{ shared_buffers_value }}" + when: shared_buffers_unit == 'gb' + + - name: "HugePages | No configuration is required" + ansible.builtin.debug: + msg: >- + Current shared_buffers size: {{ shared_buffers }} (less than {{ min_shared_buffers_gb | default(8) }}GB). + No HugePages configuration is required. 
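# Editor's note: a worked example of the sizing performed below (numbers are
# illustrative). With shared_buffers = 16GB, additional_huge_pages_gb = 1 and a
# 2 MiB huge page size (Hugepagesize = 2048 kB in /proc/meminfo):
#   huge_pages_required = (16 + 1) * 1024 * 1024 / 2048 = 8704 pages
# i.e. shared_buffers plus 1GB of headroom, expressed in 2 MiB pages.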
+ when: + - inventory_hostname == groups['master'][0] # display only once + - (shared_buffers_gb | default(0) | int) < (min_shared_buffers_gb | default(8) | int) + + - name: "HugePages | Get Hugepagesize value from /proc/meminfo" + ansible.builtin.command: "awk '/Hugepagesize/ {print $2}' /proc/meminfo" + changed_when: false + check_mode: false + register: huge_page_size + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + + - name: "HugePages | Get HugePages_Total value from /proc/meminfo" + ansible.builtin.command: "awk '/HugePages_Total/ {print $2}' /proc/meminfo" + changed_when: false + check_mode: false + register: huge_pages_total + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + + - name: "HugePages | Calculate required HugePages" + ansible.builtin.set_fact: + huge_pages_required: >- + {{ + ( + (shared_buffers_gb | default(0) | int + + additional_huge_pages_gb | default(1)) + * 1024 * 1024 + ) + // (huge_page_size.stdout | int if huge_page_size.stdout | int > 0 else 2048) + }} + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + + - name: "HugePages | Check if vm.nr_hugepages is already set sufficiently in sysctl_conf variable" + ansible.builtin.set_fact: + sysctl_conf_vm_nr_hugepages: >- + {{ + (sysctl_conf.postgres_cluster + | selectattr('name', 'equalto', 'vm.nr_hugepages') + | map(attribute='value') + | first | default('0') | int) + }} + sysctl_conf_vm_nr_hugepages_sufficient: >- + {{ + (sysctl_conf.postgres_cluster + | selectattr('name', 'equalto', 'vm.nr_hugepages') + | map(attribute='value') + | first | default('0') | int) >= huge_pages_required | int + }} + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + - sysctl_set + + - name: "HugePages | info" + ansible.builtin.debug: + msg: + shared_buffers_gb: "{{ shared_buffers_gb }}" + huge_page_size_kb: "{{ huge_page_size.stdout | default(2048) | int }}" + huge_pages_total: "{{ huge_pages_total.stdout | default(0) | int }}" + huge_pages_required: "{{ huge_pages_required }}" + huge_pages_sufficient: "{{ sysctl_conf_vm_nr_hugepages_sufficient | default(omit) }}" + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + + # if huge_pages_auto_conf is true + - name: "HugePages | Specify vm.nr_hugepages with value {{ huge_pages_required }} in sysctl_conf variable" + ansible.builtin.set_fact: + sysctl_conf: + postgres_cluster: >- + {{ + (sysctl_conf.postgres_cluster + | rejectattr('name', 'equalto', 'vm.nr_hugepages') + | list) + + [ { 'name': 'vm.nr_hugepages', 'value': huge_pages_required } ] + }} + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + - huge_pages_total.stdout | default(0) | int < huge_pages_required | int + - not sysctl_conf_vm_nr_hugepages_sufficient | default(false) + - huge_pages_auto_conf | bool + - sysctl_set | bool + + # Stop, if the current vm.nr_hugepages value is insufficient in sysctl_conf variable and huge_pages_auto_conf is false + - name: "HugePages | The current HugePages setting is insufficient" + ansible.builtin.fail: + msg: >- + Insufficient HugePages. Current: {{ huge_pages_current }}. Required: {{ huge_pages_required }}. + Please adjust the vm.nr_hugepages kernel parameter. 
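# Editor's note: if this check stops the play, the kernel parameter can be
# raised by hand before re-running (the value 8704 is an assumed example):
#   sysctl -w vm.nr_hugepages=8704                                   # apply now
#   echo "vm.nr_hugepages = 8704" > /etc/sysctl.d/30-hugepages.conf  # persist
# or add an entry to sysctl_conf.postgres_cluster so the playbook applies it.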
+ vars: + huge_pages_current: >- + {{ + (sysctl_conf_vm_nr_hugepages | default(0) + if sysctl_conf_vm_nr_hugepages | default(0) | int > huge_pages_total.stdout | default(0) | int + else huge_pages_total.stdout | default(0) | int) + }} + when: + - shared_buffers_gb | default(0) | int >= (min_shared_buffers_gb | default(8)) + - huge_pages_total.stdout | default(0) | int < huge_pages_required | int + - not sysctl_conf_vm_nr_hugepages_sufficient | default(false) + - (not huge_pages_auto_conf | bool or (not sysctl_set | bool and + (postgresql_parameters | selectattr('option', 'equalto', 'huge_pages') | map(attribute='value') | first | default('try')) == 'on')) + when: + - (postgresql_parameters | selectattr('option', 'equalto', 'huge_pages') | map(attribute='value') | first | default('try')) != 'off' + - (sysctl_set | bool or (postgresql_parameters | selectattr('option', 'equalto', 'huge_pages') | map(attribute='value') | first | default('try')) == 'on') diff --git a/automation/roles/pre_checks/tasks/main.yml b/automation/roles/pre_checks/tasks/main.yml new file mode 100644 index 000000000..8a6cda3c8 --- /dev/null +++ b/automation/roles/pre_checks/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: Checking ansible version + ansible.builtin.fail: + msg: "Ansible version must be {{ minimal_ansible_version }} or higher" + delegate_to: localhost + when: + - ansible_version.full is version(minimal_ansible_version, '<') + +- name: Checking Linux distribution + ansible.builtin.fail: + msg: "{{ ansible_distribution }} is not supported" + when: ansible_distribution not in os_valid_distributions + +- name: Checking version of OS Linux + ansible.builtin.fail: + msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" + when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') + +- name: Perform pre-checks for pgbouncer + ansible.builtin.import_tasks: pgbouncer.yml + when: + - pgbouncer_install is defined + - pgbouncer_install | bool + - inventory_hostname in groups['postgres_cluster'] + +- name: Perform pre-checks for patroni + ansible.builtin.import_tasks: patroni.yml + when: + - inventory_hostname in groups['postgres_cluster'] + +- name: Perform pre-checks for huge_pages + ansible.builtin.import_tasks: huge_pages.yml + when: + - inventory_hostname in groups['postgres_cluster'] + +- name: Perform pre-checks for pgbackrest + ansible.builtin.import_tasks: pgbackrest.yml + when: + - pgbackrest_install is defined + - pgbackrest_install | bool + - inventory_hostname in groups['postgres_cluster'] + +- name: Perform pre-checks for WAL-G + ansible.builtin.import_tasks: wal_g.yml + when: + - wal_g_install is defined + - wal_g_install | bool + - inventory_hostname in groups['postgres_cluster'] + +- name: Generate passwords + ansible.builtin.import_tasks: passwords.yml + when: inventory_hostname in groups['postgres_cluster'] + +- name: Perform pre-checks for extensions + ansible.builtin.import_tasks: extensions.yml + when: inventory_hostname == groups['master'][0] diff --git a/automation/roles/pre_checks/tasks/passwords.yml b/automation/roles/pre_checks/tasks/passwords.yml new file mode 100644 index 000000000..c6fbc582f --- /dev/null +++ b/automation/roles/pre_checks/tasks/passwords.yml @@ -0,0 +1,79 @@ +--- +# Generate passwords (if not defined) +- block: + - name: Generate the missing passwords + ansible.builtin.set_fact: + "{{ item }}": "{{ lookup('password', '/dev/null chars=ascii_letters,digits length=32') }}" + loop: + - 
patroni_superuser_password + - patroni_replication_password + - patroni_restapi_password + - pgbouncer_auth_password + when: + - inventory_hostname == groups['master'][0] # generate on master host + - vars[item] | default('') | length < 1 # if password is not set + + - name: Set password variables for all hosts + ansible.builtin.set_fact: + patroni_superuser_password: "{{ hostvars[groups['master'][0]]['patroni_superuser_password'] }}" + patroni_replication_password: "{{ hostvars[groups['master'][0]]['patroni_replication_password'] }}" + patroni_restapi_password: "{{ hostvars[groups['master'][0]]['patroni_restapi_password'] }}" + pgbouncer_auth_password: "{{ hostvars[groups['master'][0]]['pgbouncer_auth_password'] }}" + when: not (postgresql_cluster_maintenance | default(false) | bool) # exclude for config_pgcluster.yml and add_pgnode.yml + +# Get current passwords (if not defined) - for config_pgcluster.yml and add_pgnode.yml +- block: + - name: Get patroni superuser password + ansible.builtin.shell: | + set -o pipefail; + grep -A10 "authentication:" /etc/patroni/patroni.yml | \ + grep -A3 "superuser" | grep "password:" | awk '{ print $2 }' | tail -n 1 + args: + executable: /bin/bash + register: superuser_password_result + changed_when: false + when: + - inventory_hostname == groups['master'][0] + - patroni_superuser_password | default('') | length < 1 + + - name: Get patroni replication user password + ansible.builtin.shell: | + set -o pipefail; + grep -A10 "authentication:" /etc/patroni/patroni.yml | \ + grep -A3 "replication" | grep "password:" | awk '{ print $2 }' | tail -n 1 + args: + executable: /bin/bash + register: replication_password_result + changed_when: false + when: + - inventory_hostname == groups['master'][0] + - patroni_replication_password | default('') | length < 1 + + - name: Get patroni restapi password + ansible.builtin.shell: | + set -o pipefail; + grep -A10 "restapi:" /etc/patroni/patroni.yml | \ + grep -A3 "authentication" | grep "password:" | awk '{ print $2 }' | tail -n 1 + args: + executable: /bin/bash + register: patroni_restapi_password_result + changed_when: false + when: + - inventory_hostname == groups['master'][0] + - patroni_restapi_password | default('') | length < 1 + + - name: "Set variable: patroni_superuser_password" + ansible.builtin.set_fact: + patroni_superuser_password: "{{ hostvars[groups['master'][0]]['superuser_password_result']['stdout'] }}" + when: hostvars[groups['master'][0]]['superuser_password_result']['stdout'] is defined + + - name: "Set variable: patroni_replication_password" + ansible.builtin.set_fact: + patroni_replication_password: "{{ hostvars[groups['master'][0]]['replication_password_result']['stdout'] }}" + when: hostvars[groups['master'][0]]['replication_password_result']['stdout'] is defined + + - name: "Set variable: patroni_restapi_password" + ansible.builtin.set_fact: + patroni_restapi_password: "{{ hostvars[groups['master'][0]]['patroni_restapi_password_result']['stdout'] }}" + when: hostvars[groups['master'][0]]['patroni_restapi_password_result']['stdout'] is defined + when: postgresql_cluster_maintenance | default(false) | bool diff --git a/automation/roles/pre_checks/tasks/patroni.yml b/automation/roles/pre_checks/tasks/patroni.yml new file mode 100644 index 000000000..a75ae13bf --- /dev/null +++ b/automation/roles/pre_checks/tasks/patroni.yml @@ -0,0 +1,35 @@ +--- +# when postgresql NOT exists +- block: + - name: PostgreSQL | check that data directory "{{ postgresql_data_dir }}" is not initialized + 
ansible.builtin.stat: + path: "{{ postgresql_data_dir }}/PG_VERSION" + register: pgdata_initialized + when: patroni_cluster_bootstrap_method == "initdb" + + - name: PostgreSQL | data directory check result + ansible.builtin.fail: + msg: "Whoops! data directory {{ postgresql_data_dir }} is already initialized" + when: + - pgdata_initialized.stat.exists is defined + - pgdata_initialized.stat.exists + when: + - not postgresql_exists | default(false) | bool + - not (postgresql_cluster_maintenance|default(false)|bool) # exclude for config_pgcluster.yml and add_pgnode.yml + +# when postgresql exists +- block: + - name: PostgreSQL | check that data directory "{{ postgresql_data_dir }}" is initialized + ansible.builtin.stat: + path: "{{ postgresql_data_dir }}/PG_VERSION" + register: pgdata_initialized + + - name: PostgreSQL | data directory check result + ansible.builtin.fail: + msg: "Whoops! data directory {{ postgresql_data_dir }} is not initialized" + when: + - pgdata_initialized.stat.exists is defined + - not pgdata_initialized.stat.exists + when: + - postgresql_exists | default(false) | bool + - not (postgresql_cluster_maintenance|default(false)|bool) # exclude for config_pgcluster.yml and add_pgnode.yml diff --git a/automation/roles/pre_checks/tasks/pgbackrest.yml b/automation/roles/pre_checks/tasks/pgbackrest.yml new file mode 100644 index 000000000..f18556171 --- /dev/null +++ b/automation/roles/pre_checks/tasks/pgbackrest.yml @@ -0,0 +1,45 @@ +--- +- name: "pgBackRest | Ensure 'archive_command' is set to '{{ pgbackrest_archive_command }}'" + ansible.builtin.set_fact: + # Create a new list from postgresql_parameters, excluding 'archive_command' option if it exists + # Then, append a new 'archive_command' item with the value of 'pgbackrest_archive_command' + postgresql_parameters: >- + {{ postgresql_parameters | rejectattr('option', 'equalto', 'archive_command') | list + + [{'option': 'archive_command', 'value': pgbackrest_archive_command}] }} + vars: + # Find the last item in postgresql_parameters where the option is 'archive_command' + archive_command_item: "{{ postgresql_parameters | selectattr('option', 'equalto', 'archive_command') | list | last }}" + when: + - pgbackrest_install is defined + - pgbackrest_install | bool + # Execute the task only when 'archive_command' is undefined or its value is not equal to 'pgbackrest_archive_command' + - archive_command_item is undefined or archive_command_item.value != pgbackrest_archive_command + +# Checking parameters for working with a dedicated pgbackrest server +- name: pgBackRest | Ensure pgbackrest host is in inventory + ansible.builtin.fail: + msg: + - "The 'pgbackrest_repo_host' variable is set but the 'pgbackrest' group in your inventory is empty." + - "Please add the necessary host to the 'pgbackrest' group in your inventory." 
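# Editor's note: an illustrative before/after for the set_fact task below, with
# pgbackrest_repo_host = "backup01" and pgbackrest_repo_user = "pgbackrest" (assumed values):
#   before: global: [{option: log-path, value: /var/log/pgbackrest}]
#   after:  global: [{option: log-path, value: /var/log/pgbackrest},
#                    {option: repo1-host, value: backup01},
#                    {option: repo1-host-user, value: pgbackrest}]
# any pre-existing repo1-host / repo1-host-user entries are replaced; the
# stanza section is kept unchanged.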
+ when: + - inventory_hostname == groups['master'][0] # display only once + - pgbackrest_repo_host | length > 0 + - groups['pgbackrest'] is undefined or groups['pgbackrest'] | length == 0 + +- name: "pgBackRest | Ensure 'repo1-host' and 'repo1-host-user' are set correctly" + ansible.builtin.set_fact: + pgbackrest_conf: + global: >- + {{ pgbackrest_conf.global | rejectattr('option', 'equalto', 'repo1-host') | rejectattr('option', 'equalto', 'repo1-host-user') | list + + [{ 'option': 'repo1-host', 'value': pgbackrest_repo_host }] + + [{ 'option': 'repo1-host-user', 'value': pgbackrest_repo_user }] }} + stanza: "{{ pgbackrest_conf.stanza }}" # keep 'stanza' section unchanged + vars: + repo_host_item: "{{ pgbackrest_conf.global | selectattr('option', 'equalto', 'repo1-host') | list | last }}" + repo_host_user_item: "{{ pgbackrest_conf.global | selectattr('option', 'equalto', 'repo1-host-user') | list | last }}" + when: + - pgbackrest_repo_host is defined and pgbackrest_repo_host | length > 0 + # Execute the task only when 'repo1-host' or 'repo1-host-user' is undefined + # Or its value is not equal to 'pgbackrest_repo_host' and 'pgbackrest_repo_user' + - (repo_host_item is undefined or repo_host_item.value != pgbackrest_repo_host) or + (repo_host_user_item is undefined or repo_host_user_item.value != pgbackrest_repo_user) diff --git a/automation/roles/pre_checks/tasks/pgbouncer.yml b/automation/roles/pre_checks/tasks/pgbouncer.yml new file mode 100644 index 000000000..dda05c875 --- /dev/null +++ b/automation/roles/pre_checks/tasks/pgbouncer.yml @@ -0,0 +1,71 @@ +--- +# This task sets the value for max_connections from the provided variables, defaulting to 100 if not explicitly set. +# It loops through the 'postgresql_parameters' list and, if 'max_connections' is found in the option, it sets the 'max_connections' value accordingly. +- name: Set max_connections from vars or use default + ansible.builtin.set_fact: + max_connections: "{{ (item.value | default(100)) | int }}" + loop: "{{ postgresql_parameters | default([]) }}" + when: + - inventory_hostname == groups['master'][0] + - item.option == "max_connections" + +# This task calculates the pgbouncer_pool_size for each defined pgbouncer pool. +# The pool size for each pool is extracted from its 'pool_parameters' string using a regular expression. +# If 'pool_size' is defined in 'pool_parameters', it takes that value. +# If 'pool_size' is not defined, it uses pgbouncer_default_pool_size if available, otherwise 0. +# The calculated pool size is then added to the total 'pgbouncer_pool_size'. +- name: PgBouncer | Calculate pool_size + ansible.builtin.set_fact: + pgbouncer_pool_size: "{{ + (pgbouncer_pool_size | default(0) | int) + + + (pool_item.pool_parameters + | regex_search('pool_size=(\\d+)', multiline=False) + | regex_replace('[^0-9]', '') + | default(pgbouncer_default_pool_size | default(0), true) + | int) + }}" + loop: "{{ pgbouncer_pools | default([]) }}" + loop_control: + loop_var: pool_item + when: inventory_hostname == groups['master'][0] + +# This task computes the total pool size across all databases. +# If 'postgresql_databases' isn't defined or is empty, 'pgbouncer_pool_size' is the total pool size. +# If 'postgresql_databases' is defined, the task does the following: +# 1. It checks each database against 'pgbouncer_pools'. +# 2. For databases without a corresponding pool, it adds 'pgbouncer_default_pool_size' (or 0 if not defined) to 'pgbouncer_pool_size'. 
+- name: PgBouncer | Calculate total pool_size + ansible.builtin.set_fact: + pgbouncer_total_pool_size: >- + {{ + ((pgbouncer_pool_size | int) + + + (postgresql_databases + | default([]) + | rejectattr('db', 'in', pgbouncer_pools | map(attribute='dbname') | list) + | length + ) * (pgbouncer_default_pool_size | default(0) | int)) + * (pgbouncer_processes | default(1) | int) + }} + when: + - inventory_hostname == groups['master'][0] + - pgbouncer_pool_size is defined + +- name: PgBouncer | Show total pool_size + ansible.builtin.debug: + var: pgbouncer_total_pool_size + when: + - inventory_hostname == groups['master'][0] + - pgbouncer_total_pool_size is defined + +# This task fails the playbook execution if the total pool size is greater than max_connections. +# It checks whether 'pgbouncer_pools' is defined and non-empty, and whether the total pool size exceeds max_connections. +# If both conditions are met, the execution is stopped with a message indicating that the settings need to be changed. +- name: PgBouncer | Fail when pgbouncer_total_pool_size > max_connections + ansible.builtin.fail: + msg: "pgbouncer_total_pool_size: {{ pgbouncer_total_pool_size }} > max_connections: {{ max_connections }}. Please change these settings." + when: + - inventory_hostname == groups['master'][0] + - pgbouncer_pools | default([]) | length > 0 + - pgbouncer_total_pool_size | int > max_connections | default(100) | int diff --git a/automation/roles/pre_checks/tasks/wal_g.yml b/automation/roles/pre_checks/tasks/wal_g.yml new file mode 100644 index 000000000..015d25c15 --- /dev/null +++ b/automation/roles/pre_checks/tasks/wal_g.yml @@ -0,0 +1,16 @@ +--- +- name: "WAL-G | Ensure 'archive_command' is set to '{{ wal_g_archive_command }}'" + ansible.builtin.set_fact: + # Create a new list from postgresql_parameters, excluding 'archive_command' option if it exists + # Then, append a new 'archive_command' item with the value of 'wal_g_archive_command' + postgresql_parameters: >- + {{ postgresql_parameters | rejectattr('option', 'equalto', 'archive_command') | list + + [{'option': 'archive_command', 'value': wal_g_archive_command}] }} + vars: + # Find the last item in postgresql_parameters where the option is 'archive_command' + archive_command_item: "{{ postgresql_parameters | selectattr('option', 'equalto', 'archive_command') | list | last }}" + when: + - wal_g_install is defined + - wal_g_install | bool + # Execute the task only when 'archive_command' is undefined or its value is not equal to 'wal_g_archive_command' + - archive_command_item is undefined or archive_command_item.value != wal_g_archive_command diff --git a/automation/roles/resolv_conf/README.md b/automation/roles/resolv_conf/README.md new file mode 100644 index 000000000..0f85c99e1 --- /dev/null +++ b/automation/roles/resolv_conf/README.md @@ -0,0 +1 @@ +# Ansible Role: resolv_conf diff --git a/roles/resolv_conf/tasks/main.yml b/automation/roles/resolv_conf/tasks/main.yml similarity index 77% rename from roles/resolv_conf/tasks/main.yml rename to automation/roles/resolv_conf/tasks/main.yml index 6f27af54d..b60a155ff 100644 --- a/roles/resolv_conf/tasks/main.yml +++ b/automation/roles/resolv_conf/tasks/main.yml @@ -1,13 +1,12 @@ --- - - block: - name: Make sure /etc/resolv.conf exists - stat: + ansible.builtin.stat: path: /etc/resolv.conf register: resolv_conf - name: Create /etc/resolv.conf - file: + ansible.builtin.file: path: /etc/resolv.conf state: touch owner: root @@ -16,16 +15,14 @@ when: not resolv_conf.stat.exists - name: Add DNS server(s) into
/etc/resolv.conf - lineinfile: + ansible.builtin.lineinfile: path: /etc/resolv.conf regexp: "^nameserver {{ item }}" - insertbefore: '^options' + insertbefore: "^options" line: "nameserver {{ item }}" - unsafe_writes: true # to prevent failures in CI + unsafe_writes: true # to prevent failures in CI loop: "{{ nameservers }}" when: - nameservers is defined - nameservers | length > 0 tags: dns, nameservers - -... diff --git a/automation/roles/ssh_keys/README.md b/automation/roles/ssh_keys/README.md new file mode 100644 index 000000000..e3c2a49ce --- /dev/null +++ b/automation/roles/ssh_keys/README.md @@ -0,0 +1 @@ +# Ansible Role: ssh_keys diff --git a/roles/ssh-keys/tasks/main.yml b/automation/roles/ssh_keys/tasks/main.yml similarity index 70% rename from roles/ssh-keys/tasks/main.yml rename to automation/roles/ssh_keys/tasks/main.yml index c9ada7e7e..b82c086ec 100644 --- a/roles/ssh-keys/tasks/main.yml +++ b/automation/roles/ssh_keys/tasks/main.yml @@ -3,51 +3,51 @@ - block: - name: Check user "{{ ssh_key_user }}" exists - user: + ansible.builtin.user: name: "{{ ssh_key_user }}" shell: /bin/bash state: present - - name: Create a 2048-bit SSH key for user "{{ ssh_key_user }}" in ~/.ssh/id_rsa (if not already exist) # yamllint disable rule:line-length - user: + - name: Create a 2048-bit SSH key for user "{{ ssh_key_user }}" in ~/.ssh/id_rsa (if not already exist) + ansible.builtin.user: name: "{{ ssh_key_user }}" generate_ssh_key: true ssh_key_bits: 2048 ssh_key_file: .ssh/id_rsa - name: Fetch key files from remote servers to ansible server - fetch: + ansible.builtin.fetch: src: "~{{ ssh_key_user }}/.ssh/id_rsa.pub" dest: "files/{{ ansible_hostname }}-id_rsa.pub" flat: true changed_when: false - name: Copy key files and add to authorized_keys - authorized_key: + ansible.posix.authorized_key: user: "{{ ssh_key_user }}" state: "{{ ssh_key_state }}" - key: "{{ lookup('pipe','cat files/*id_rsa.pub') }}" + key: "{{ lookup('pipe', 'cat files/*id_rsa.pub') }}" exclusive: false # known_hosts - name: known_hosts | for each host, scan for its ssh public key - command: "ssh-keyscan -trsa -p {{ ansible_ssh_port | default(22) }} {{ item }}" + ansible.builtin.command: "ssh-keyscan -trsa -p {{ ansible_ssh_port | default(22) }} {{ item }}" loop: "{{ ssh_known_hosts }}" register: ssh_known_host_results changed_when: false - - name: known_hosts | for each host, add/update the public key in the "~{{ ssh_key_user }}/.ssh/known_hosts" # yamllint disable rule:line-length + - name: known_hosts | for each host, add/update the public key in the "~{{ ssh_key_user }}/.ssh/known_hosts" become: true become_user: "{{ ssh_key_user }}" - known_hosts: + ansible.builtin.known_hosts: name: "{{ item.item }}" key: "{{ item.stdout }}" path: "~{{ ssh_key_user }}/.ssh/known_hosts" no_log: true loop: "{{ ssh_known_host_results.results }}" ignore_errors: true - when: enable_ssh_key_based_authentication is defined and - enable_ssh_key_based_authentication|bool + when: + - enable_ssh_key_based_authentication is defined + - enable_ssh_key_based_authentication | bool + - not ansible_check_mode tags: ssh_keys - -... 
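For reference, the variables consumed by the ssh_keys role above might be set along these lines. This is a sketch: the variable names are taken from the tasks in the diff, while the values shown are illustrative assumptions, not project defaults.

```yaml
# Illustrative values only - the ssh_keys role reads these variables.
enable_ssh_key_based_authentication: true # gates the whole block above
ssh_key_user: postgres # user whose 2048-bit RSA key is generated and distributed
ssh_key_state: present # passed to ansible.posix.authorized_key as 'state'
ssh_known_hosts: "{{ groups['postgres_cluster'] }}" # hosts to ssh-keyscan into known_hosts
```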
diff --git a/automation/roles/sudo/README.md b/automation/roles/sudo/README.md new file mode 100644 index 000000000..28ca3f45d --- /dev/null +++ b/automation/roles/sudo/README.md @@ -0,0 +1 @@ +# Ansible Role: sudo diff --git a/roles/sudo/tasks/main.yml b/automation/roles/sudo/tasks/main.yml similarity index 76% rename from roles/sudo/tasks/main.yml rename to automation/roles/sudo/tasks/main.yml index d3361f786..c107c6bc8 100644 --- a/roles/sudo/tasks/main.yml +++ b/automation/roles/sudo/tasks/main.yml @@ -1,51 +1,48 @@ --- - # if nopasswd = "yes" - name: Add user to /etc/sudoers.d/ - copy: + ansible.builtin.copy: dest: "/etc/sudoers.d/{{ item.name }}" content: | {{ item.name }} ALL=(ALL) NOPASSWD: ALL force: true when: item.nopasswd == "yes" and - ( item.commands is not defined or item.commands == "ALL") + ( item.commands is not defined or item.commands == "ALL") loop: "{{ sudo_users | list }}" tags: sudo # if nopasswd = "yes" and commands is defined - name: Add user to /etc/sudoers.d/ - copy: + ansible.builtin.copy: dest: "/etc/sudoers.d/{{ item.name }}" content: | {{ item.name }} ALL=(ALL) NOPASSWD: {{ item.commands }} force: true when: item.nopasswd == "yes" and - ( item.commands is defined and item.commands != "ALL" ) + ( item.commands is defined and item.commands != "ALL" ) loop: "{{ sudo_users | list }}" tags: sudo # if nopasswd = "no" - name: Add user to /etc/sudoers.d/ - copy: + ansible.builtin.copy: dest: "/etc/sudoers.d/{{ item.name }}" content: | {{ item.name }} ALL=(ALL) ALL force: true when: item.nopasswd != "yes" and - ( item.commands is not defined or item.commands == "ALL" ) + ( item.commands is not defined or item.commands == "ALL" ) loop: "{{ sudo_users | list }}" tags: sudo # if nopasswd = "no" and commands is defined - name: Add user to /etc/sudoers.d/ - copy: + ansible.builtin.copy: dest: "/etc/sudoers.d/{{ item.name }}" content: | {{ item.name }} ALL=(ALL) {{ item.commands }} force: true when: item.nopasswd != "yes" and - ( item.commands is defined and item.commands != "ALL" ) + ( item.commands is defined and item.commands != "ALL" ) loop: "{{ sudo_users | list }}" tags: sudo - -... diff --git a/automation/roles/swap/README.md b/automation/roles/swap/README.md new file mode 100644 index 000000000..8b58a5a77 --- /dev/null +++ b/automation/roles/swap/README.md @@ -0,0 +1 @@ +# Ansible Role: swap diff --git a/automation/roles/swap/tasks/main.yml b/automation/roles/swap/tasks/main.yml new file mode 100644 index 000000000..f1de8ff50 --- /dev/null +++ b/automation/roles/swap/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- name: Ensure swap exists + ansible.builtin.command: swapon --show=SIZE --bytes --noheadings + register: swap_exists + changed_when: false + when: + - swap_file_create | bool + - ansible_virtualization_type not in ['container', 'docker', 'lxc', 'podman'] # exclude for containers to prevent test failures in CI. 
+ tags: swap, swap_create, swap_remove + +- name: Swap exists + ansible.builtin.debug: + msg: "swap_size_mb: {{ (swap_exists.stdout_lines | map('trim') | map('int') | sum / 1024 / 1024) | round | int }}" + when: swap_exists.stdout is defined and swap_exists.stdout | length > 1 + tags: swap, swap_create, swap_remove + +# if the swap exists and the size is not equal to swap_file_size_mb +- block: + - name: Disable all existing swaps + ansible.builtin.command: swapoff --all + + - name: Remove swap from /etc/fstab + ansible.builtin.lineinfile: + path: /etc/fstab + state: absent + regexp: " swap " + + - name: Remove swap file (if exists) + ansible.builtin.file: + path: "{{ swap_file_path }}" + state: absent + when: (swap_exists.stdout is defined and swap_exists.stdout | length > 1) and + ((swap_exists.stdout_lines|map('trim')|map('int')|sum / 1024 / 1024)|round|int != swap_file_size_mb|int) + tags: swap, swap_remove + +# if the swap does not exist +- block: + - name: Create swap file + ansible.builtin.command: + cmd: dd if=/dev/zero of={{ swap_file_path }} bs=1M count={{ swap_file_size_mb }} + creates: "{{ swap_file_path }}" + + - name: Set permissions on swap file + ansible.builtin.file: + path: "{{ swap_file_path }}" + owner: root + group: root + mode: "0600" + + - name: Make swap file if necessary + ansible.builtin.command: mkswap {{ swap_file_path }} + register: mkswap_result + + - name: Run swapon on the swap file + ansible.builtin.command: swapon {{ swap_file_path }} + + - name: Manage swap file entry in fstab + ansible.posix.mount: + name: none + src: "{{ swap_file_path }}" + fstype: swap + opts: sw + state: present + when: > + (swap_exists.stdout is defined and swap_exists.stdout | length < 1) or + (swap_exists.stdout_lines is defined and + (swap_exists.stdout_lines | map('trim') | map('int') | sum / 1024 / 1024) | round | int != swap_file_size_mb|int) + tags: swap, swap_create diff --git a/automation/roles/sysctl/README.md b/automation/roles/sysctl/README.md new file mode 100644 index 000000000..5b3b12925 --- /dev/null +++ b/automation/roles/sysctl/README.md @@ -0,0 +1 @@ +# Ansible Role: sysctl diff --git a/roles/sysctl/tasks/main.yml b/automation/roles/sysctl/tasks/main.yml similarity index 53% rename from roles/sysctl/tasks/main.yml rename to automation/roles/sysctl/tasks/main.yml index 10e3fe41f..a30fbf709 100644 --- a/roles/sysctl/tasks/main.yml +++ b/automation/roles/sysctl/tasks/main.yml @@ -1,25 +1,22 @@ --- - - name: Make sure handlers are flushed immediately - meta: flush_handlers + ansible.builtin.meta: flush_handlers - block: - name: Build a sysctl_conf dynamic variable - set_fact: - sysctl_conf_dynamic_var: "{{ sysctl_conf_dynamic_var |default([]) }} + {{ sysctl_conf[item] | flatten(1) }}" # yamllint disable rule:line-length + ansible.builtin.set_fact: + sysctl_conf_dynamic_var: "{{ sysctl_conf_dynamic_var | default([]) + (sysctl_conf[item] | default([]) | flatten(1)) }}" loop: "{{ hostvars[inventory_hostname].group_names }}" - name: Setting kernel parameters - sysctl: + ansible.posix.sysctl: name: "{{ item.name }}" value: "{{ item.value }}" sysctl_set: true state: present reload: true - loop: "{{ sysctl_conf_dynamic_var|list | unique }}" - when: sysctl_conf_dynamic_var | length > 0 + loop: "{{ sysctl_conf_dynamic_var | default([]) | unique }}" + when: sysctl_conf_dynamic_var | default([]) | length > 0 ignore_errors: true when: sysctl_set|bool tags: sysctl, kernel - -... 
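The sysctl role above builds its parameter list by concatenating the `sysctl_conf` entries for every inventory group the host belongs to. A minimal sketch of what such a variable might look like (the group names and kernel parameter values are illustrative assumptions):

```yaml
# Hypothetical group_vars entry: a dict keyed by inventory group name,
# each key holding a list of {name, value} pairs passed to ansible.posix.sysctl.
sysctl_conf:
  postgres_cluster:
    - { name: "vm.swappiness", value: "1" }
    - { name: "vm.overcommit_memory", value: "2" }
  etcd_cluster:
    - { name: "net.ipv4.tcp_keepalive_time", value: "60" }
```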
diff --git a/automation/roles/timezone/README.md b/automation/roles/timezone/README.md new file mode 100644 index 000000000..01a27727d --- /dev/null +++ b/automation/roles/timezone/README.md @@ -0,0 +1 @@ +# Ansible Role: timezone diff --git a/automation/roles/timezone/tasks/main.yml b/automation/roles/timezone/tasks/main.yml new file mode 100644 index 000000000..448dceb4d --- /dev/null +++ b/automation/roles/timezone/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- block: + - name: Make sure that the tzdata package is installed + become: true + become_method: sudo + ansible.builtin.package: + name: tzdata + state: present + environment: "{{ proxy_env | default({}) }}" + when: installation_method == "repo" + + - name: Set timezone to "{{ timezone }}" + become: true + become_method: sudo + community.general.timezone: + name: "{{ timezone }}" + when: timezone is defined and timezone | length > 0 + tags: timezone diff --git a/automation/roles/tls_certificate/README.md b/automation/roles/tls_certificate/README.md new file mode 100644 index 000000000..eaac38d17 --- /dev/null +++ b/automation/roles/tls_certificate/README.md @@ -0,0 +1 @@ +# Ansible Role: tls_certificate diff --git a/automation/roles/tls_certificate/copy/README.md b/automation/roles/tls_certificate/copy/README.md new file mode 100644 index 000000000..3b5eeb327 --- /dev/null +++ b/automation/roles/tls_certificate/copy/README.md @@ -0,0 +1 @@ +# Ansible Role: tls_certificate/copy diff --git a/automation/roles/tls_certificate/copy/tasks/main.yml b/automation/roles/tls_certificate/copy/tasks/main.yml new file mode 100644 index 000000000..099cc53c9 --- /dev/null +++ b/automation/roles/tls_certificate/copy/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: "Fetch TLS certificate, key and CA from {{ selected_group[0] }}" + run_once: true + ansible.builtin.slurp: + src: "{{ fetch_tls_dir | default(tls_dir | default('/etc/tls')) }}/{{ item }}" + delegate_to: "{{ selected_group[0] }}" + register: tls_files + loop: + - "{{ fetch_tls_privatekey | default(tls_privatekey | default('server.key')) }}" + - "{{ fetch_tls_cert | default(tls_cert | default('server.crt')) }}" + - "{{ fetch_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}" + vars: + selected_group: >- + {{ + groups[tls_group_name] + if (tls_group_name | default('') | length > 0 and tls_group_name in groups) + else groups['master'] + }} + tags: tls, tls_cert_copy + +- name: Create directory {{ copy_tls_dir | default(tls_dir | default('/etc/tls')) }} + ansible.builtin.file: + path: "{{ copy_tls_dir | default(tls_dir | default('/etc/tls')) }}" + state: directory + owner: "{{ copy_tls_owner | default(tls_owner | default('postgres')) }}" + group: "{{ copy_tls_owner | default(tls_owner | default('postgres')) }}" + mode: "0755" + tags: tls, tls_cert_copy + +- name: Copy TLS certificate, key and CA to all nodes + ansible.builtin.copy: + content: "{{ tls_files.results[item.index].content | b64decode }}" + dest: "{{ copy_tls_dir | default(tls_dir | default('/etc/tls')) }}/{{ item.filename }}" + owner: "{{ copy_tls_owner | default(tls_owner | default('postgres')) }}" + group: "{{ copy_tls_owner | default(tls_owner | default('postgres')) }}" + mode: "{{ item.mode }}" + loop: + - { index: 1, filename: "{{ copy_tls_cert | default(tls_cert | default('server.crt')) }}", mode: "0644" } + - { index: 2, filename: "{{ copy_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}", mode: "0644" } + - { index: 0, filename: "{{ copy_tls_privatekey | default(tls_privatekey | default('server.key')) }}", mode: 
"0400" } + loop_control: + label: "{{ copy_tls_dir | default(tls_dir | default('/etc/tls')) }}/{{ item.filename }}" + tags: tls, tls_cert_copy diff --git a/automation/roles/tls_certificate/generate/README.md b/automation/roles/tls_certificate/generate/README.md new file mode 100644 index 000000000..e288bdfca --- /dev/null +++ b/automation/roles/tls_certificate/generate/README.md @@ -0,0 +1 @@ +# Ansible Role: tls_certificate/generate diff --git a/automation/roles/tls_certificate/generate/tasks/main.yml b/automation/roles/tls_certificate/generate/tasks/main.yml new file mode 100644 index 000000000..fe53c53ad --- /dev/null +++ b/automation/roles/tls_certificate/generate/tasks/main.yml @@ -0,0 +1,193 @@ +--- +- block: + - name: Make sure that the python3-cryptography package is present + ansible.builtin.package: + name: python3-cryptography + state: present + register: pack_status + until: pack_status is success + delay: 5 + retries: 3 + + - block: + # Check if certificates already exist + - name: Check if TLS CA cert already exists + ansible.builtin.stat: + path: "{{ tls_directory }}/{{ generate_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}" + register: ca_cert + + - name: Check if TLS server cert already exists + ansible.builtin.stat: + path: "{{ tls_directory }}/{{ generate_tls_cert | default(tls_cert | default('server.crt')) }}" + register: server_cert + + - name: Check if TLS server key already exists + ansible.builtin.stat: + path: "{{ tls_directory }}/{{ generate_tls_privatekey | default(tls_privatekey | default('server.key')) }}" + register: server_key + + # Skip generation if certificates already exist and tls_cert_regenerate is false + - name: Skip certificate generation + ansible.builtin.debug: + msg: + - "TLS certificates already exist. Skipping certificate generation." 
+ - "To force regeneration, set '{{ variable_name }}: true'" + vars: + variable_name: >- + {%- if tls_group_name == 'etcd_cluster' and etcd_on_dedicated_nodes | default(false) | bool -%} + etcd_tls_cert_regenerate + {%- elif tls_group_name == 'postgres_cluster' and etcd_on_dedicated_nodes | default(false) | bool -%} + patroni_tls_cert_regenerate + {%- elif tls_group_name == 'consul_instances' and consul_on_dedicated_nodes | default(false) | bool -%} + consul_tls_cert_regenerate + {%- else -%} + tls_cert_regenerate + {%- endif -%} + when: + - ca_cert.stat.exists | default(false) + - server_key.stat.exists | default(false) + - server_cert.stat.exists | default(false) + when: + - not tls_cert_regenerate | default(false) | bool + - inventory_hostname == selected_group[0] + + # Clean up existing certificates if tls_cert_regenerate is true + - name: Clean up existing certificates + ansible.builtin.file: + path: "{{ tls_directory }}/{{ generate_tls_dir | default(tls_dir | default('/etc/tls')) }}/{{ item }}" + state: absent + loop: + - "{{ generate_tls_privatekey | default(tls_privatekey | default('server.key')) }}" + - "{{ generate_tls_cert | default(tls_cert | default('server.crt')) }}" + - "{{ generate_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}" + - "{{ generate_tls_ca_key | default(tls_ca_key | default('ca.key')) }}" + when: tls_cert_regenerate | default(false)| bool + vars: + tls_directory: "{{ generate_tls_dir | default(tls_dir | default('/etc/tls')) }}" + selected_group: >- + {{ + groups[tls_group_name] + if (tls_group_name | default('') | length > 0 and tls_group_name in groups) + else groups['postgres_cluster'] + }} + when: inventory_hostname in selected_group + tags: tls, tls_cert_generate + +# Generate new certificates +# if 'tls_cert_regenerate' is 'true' or if at least one certificate or key is not found. +- block: + # Generates a list of Subject Alternative Names (SAN) for TLS certificates if 'tls_subject_alt_name' is not set. + # Uses `tls_group_name` if it is defined, non-empty, and exists in `groups`; otherwise, applies to all playbook hosts. 
+ - name: "Generate subjectAltName entries for all hosts" + ansible.builtin.set_fact: + generated_subject_alt_name: >- + {{ + ( + tls_hosts | map('extract', hostvars, 'ansible_hostname') | map('regex_replace', '^', 'DNS:') | list + + tls_hosts | map('extract', hostvars, 'ansible_fqdn') | map('regex_replace', '^', 'DNS:') | list + + tls_hosts | map('extract', hostvars, 'inventory_hostname') | map('regex_replace', '^', 'IP:') | list + + ['DNS:localhost', 'IP:127.0.0.1'] + ) | unique | join(',') + }} + vars: + tls_hosts: >- + {{ + groups[tls_group_name] + if (tls_group_name | default('') | length > 0 and tls_group_name in groups) + else ansible_play_hosts + }} + when: tls_subject_alt_name | default('') | length < 1 + + - name: "Display Certificate subjectAltName future value" + ansible.builtin.debug: + msg: "SubjectAltName = {{ tls_subject_alt_name | default(generated_subject_alt_name) }}" + + ######## Generate CA ######## + - name: "Ensure TLS directory exist" + ansible.builtin.file: + path: "{{ tls_directory }}" + state: directory + owner: "root" + group: "root" + mode: "0755" + + - name: "Generate CA private key" + community.crypto.openssl_privatekey: + path: "{{ tls_directory }}/{{ generate_tls_ca_key | default(tls_ca_key | default('ca.key')) }}" + size: "{{ generate_tls_privatekey_size | default(tls_privatekey_size | default(4096)) }}" + type: "{{ generate_tls_privatekey_type | default(tls_privatekey_type | default('RSA')) }}" + + - name: "Create CSR for CA certificate" + community.crypto.openssl_csr_pipe: + privatekey_path: "{{ tls_directory }}/{{ generate_tls_ca_key | default(tls_ca_key | default('ca.key')) }}" + common_name: "{{ generate_tls_ca_common_name | default(tls_ca_common_name | default('Autobase CA')) }}" + use_common_name_for_san: false + basic_constraints: + - "CA:TRUE" + basic_constraints_critical: true + key_usage: + - keyCertSign + key_usage_critical: true + register: ca_csr + + - name: "Create self-signed CA certificate from CSR" + community.crypto.x509_certificate: + path: "{{ tls_directory }}/{{ generate_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}" + csr_content: "{{ ca_csr.csr }}" + privatekey_path: "{{ tls_directory }}/{{ generate_tls_ca_key | default(tls_ca_key | default('ca.key')) }}" + provider: "{{ generate_tls_cert_provider | default(tls_cert_provider | default('selfsigned')) }}" + selfsigned_not_after: "+{{ generate_tls_cert_valid_days | default(tls_cert_valid_days | default(3650)) }}d" + selfsigned_not_before: "-1d" + + ######## Generate Server cert/key ######## + - name: "Create server private key" + community.crypto.openssl_privatekey: + path: "{{ tls_directory }}/{{ generate_tls_privatekey | default(tls_privatekey | default('server.key')) }}" + size: "{{ generate_tls_privatekey_size | default(tls_privatekey_size | default(4096)) }}" + type: "{{ generate_tls_privatekey_type | default(tls_privatekey_type | default('RSA')) }}" + + - name: "Create server CSR" + community.crypto.openssl_csr_pipe: + privatekey_path: "{{ tls_directory }}/{{ generate_tls_privatekey | default(tls_privatekey | default('server.key')) }}" + common_name: "{{ generate_tls_common_name | default(tls_common_name | default(patroni_cluster_name)) }}" + key_usage: + - digitalSignature + - keyEncipherment + - dataEncipherment + extended_key_usage: + - clientAuth + - serverAuth + subject: + O: "Autobase" + subject_alt_name: "{{ tls_subject_alt_name | default(generated_subject_alt_name) }}" + register: csr + + - name: "Sign server certificate with the CA" + 
community.crypto.x509_certificate_pipe: + csr_content: "{{ csr.csr }}" + provider: ownca + ownca_path: "{{ tls_directory }}/{{ generate_tls_ca_cert | default(tls_ca_cert | default('ca.crt')) }}" + ownca_privatekey_path: "{{ tls_directory }}/{{ generate_tls_ca_key | default(tls_ca_key | default('ca.key')) }}" + ownca_not_after: +{{ generate_tls_cert_valid_days | default(tls_cert_valid_days | default(3650)) }}d + ownca_not_before: "-1d" + register: certificate + + - name: "Write server certificate" + ansible.builtin.copy: + dest: "{{ tls_directory }}/{{ generate_tls_cert | default(tls_cert | default('server.crt')) }}" + content: "{{ certificate.certificate }}" + vars: + tls_directory: "{{ generate_tls_dir | default(tls_dir | default('/etc/tls')) }}" + selected_group: >- + {{ + [groups[tls_group_name][0]] + if (tls_group_name | default('') | length > 0 and tls_group_name in groups) + else [groups['master'][0]] + }} + when: + - inventory_hostname in selected_group + - tls_cert_regenerate | default(false) | bool or + (not ca_cert.stat.exists | default(false) or + not server_key.stat.exists | default(false) or + not server_cert.stat.exists | default(false)) + tags: tls, tls_cert_generate diff --git a/automation/roles/transparent_huge_pages/README.md b/automation/roles/transparent_huge_pages/README.md new file mode 100644 index 000000000..92cf68fd3 --- /dev/null +++ b/automation/roles/transparent_huge_pages/README.md @@ -0,0 +1 @@ +# Ansible Role: transparent_huge_pages diff --git a/roles/transparent_huge_pages/handlers/main.yml b/automation/roles/transparent_huge_pages/handlers/main.yml similarity index 76% rename from roles/transparent_huge_pages/handlers/main.yml rename to automation/roles/transparent_huge_pages/handlers/main.yml index ebed16d5d..94d78a46c 100644 --- a/roles/transparent_huge_pages/handlers/main.yml +++ b/automation/roles/transparent_huge_pages/handlers/main.yml @@ -1,11 +1,9 @@ --- - - name: Start disable-transparent-huge-pages service - systemd: + ansible.builtin.systemd: daemon_reload: true name: disable-transparent-huge-pages state: restarted enabled: true listen: "restart disable-thp" - -... + when: not ansible_check_mode diff --git a/roles/transparent_huge_pages/tasks/main.yml b/automation/roles/transparent_huge_pages/tasks/main.yml similarity index 72% rename from roles/transparent_huge_pages/tasks/main.yml rename to automation/roles/transparent_huge_pages/tasks/main.yml index c6781730b..f506543e8 100644 --- a/roles/transparent_huge_pages/tasks/main.yml +++ b/automation/roles/transparent_huge_pages/tasks/main.yml @@ -1,8 +1,6 @@ --- -# yamllint disable rule:line-length - - name: Create systemd service "disable-transparent-huge-pages.service" - blockinfile: + ansible.builtin.blockinfile: path: /etc/systemd/system/disable-transparent-huge-pages.service create: true block: | @@ -20,10 +18,10 @@ [Install] WantedBy=basic.target notify: "restart disable-thp" - when: disable_thp is defined and disable_thp|bool + when: + - (disable_thp is defined and disable_thp|bool) + - ansible_virtualization_type not in ['container', 'docker', 'lxc', 'podman'] # exclude for containers to prevent test failures in CI. tags: disable_thp, transparent_huge_pages - name: Make sure handlers are flushed immediately - meta: flush_handlers - -... 
+ ansible.builtin.meta: flush_handlers diff --git a/automation/roles/update/README.md b/automation/roles/update/README.md new file mode 100644 index 000000000..28b3e1314 --- /dev/null +++ b/automation/roles/update/README.md @@ -0,0 +1,150 @@ +## Update the PostgreSQL HA Cluster + +This role is designed to update the PostgreSQL HA cluster to a new minor version (for example, 17.1 -> 17.2). + +By default, only PostgreSQL packages defined in the `postgresql_packages` variable are updated. In addition, you can update Patroni or the entire system. + +#### Usage + +Update PostgreSQL: + +`ansible-playbook update_pgcluster.yml` + +Update Patroni: + +`ansible-playbook update_pgcluster.yml -e target=patroni` + +Update all system packages: + +`ansible-playbook update_pgcluster.yml -e target=system` + +See also the combined example at the end of this README. + +#### Variables + +- `target` + - Defines the target for the update. + - Available values: 'postgres', 'patroni', 'system' + - Default value: `postgres` +- `max_replication_lag_bytes` + - Determines the size of the replication lag above which the update will not be performed. + - Note: If the lag is high, you will be prompted to try again later. + - Default value: `10485760` (10 MiB) +- `max_transaction_sec` + - Determines the maximum age of active transactions above which the update will not be performed. + - Note: If long-running transactions are present, you will be prompted to try again later. + - Default value: `15` (seconds) +- `update_extensions` + - Attempt to automatically update all PostgreSQL extensions in all databases. + - Note: Specify 'false' to avoid updating extensions. + - Default value: `true` +- `reboot_host_after_update` + - Restart the server if it is required after the update. + - Default value: `true` +- `reboot_host_timeout` + - Maximum number of seconds to wait for the machine to reboot and respond to a test command. + - Default value: `1800` (30 minutes) +- `reboot_host_post_delay` + - The waiting time (in minutes) for the caches to warm up after restarting the server, before updating the next server. + - Note: Applicable when there are multiple replicas. + - Default value: `5` (minutes). + +--- + +## Plan: + +Note on the expected database downtime during the update: + +When using load balancing for read-only traffic (the "Type A" and "Type C" schemes), zero downtime is expected for read traffic, provided there is more than one replica in the cluster. For write traffic (to the Primary), the expected downtime is ~5-10 seconds. + +#### 1. PRE-UPDATE: Perform pre-update tasks + +- Test PostgreSQL DB Access +- Make sure that physical replication is active + - Stop, if there are no active replicas +- Make sure there is no high replication lag + - Note: no more than `max_replication_lag_bytes` + - Stop, if replication lag is high +- Make sure there are no long-running transactions + - no more than `max_transaction_sec` + - Stop, if long-running transactions detected +- Update the pgBackRest package on the backup server (Dedicated Repository Host). + - Note: This task runs only if the backup host is specified in the 'pgbackrest' group in the inventory file, and the variable `target` is set to '`system`'. + +#### 2.
UPDATE: Secondary (one by one) + +- Stop read-only traffic + - Enable `noloadbalance`, `nosync`, `nofailover` parameters in the patroni.yml + - Reload patroni service + - Make sure replica endpoint is unavailable + - Wait for active transactions to complete +- Stop Services + - Execute CHECKPOINT before stopping PostgreSQL + - Stop Patroni service on the Cluster Replica +- Update PostgreSQL + - if `target` variable is not defined or `target=postgres` + - Install the latest version of PostgreSQL packages +- Update Patroni + - if `target=patroni` (or `system`) + - Install the latest version of Patroni package +- Update all system packages (includes PostgreSQL and Patroni) + - if `target=system` + - Update all system packages +- Start Services + - Start Patroni service + - Wait for Patroni port to become open on the host + - Check that the Patroni is healthy + - Check PostgreSQL is started and accepting connections +- Start read-only traffic + - Disable `noloadbalance`, `nosync`, `nofailover` parameters in the patroni.yml + - Reload patroni service + - Make sure replica endpoint is available + - Wait N minutes for caches to warm up after reboot + - Note: variable `reboot_host_post_delay` +- Perform the same steps for the next replica server. + +#### 3. UPDATE: Primary + +- Switchover Patroni leader role + - Perform switchover of the leader for the Patroni cluster + - Make sure that the Patroni is healthy and is a replica + - Notes: + - At this stage, the leader becomes a replica + - the database downtime is ~5 seconds (write traffic) +- Stop read-only traffic + - Enable `noloadbalance`, `nosync`, `nofailover` parameters in the patroni.yml + - Reload patroni service + - Make sure replica endpoint is unavailable + - Wait for active transactions to complete +- Stop Services + - Execute CHECKPOINT before stopping PostgreSQL + - Stop Patroni service on the old Cluster Leader +- Update PostgreSQL + - if `target` variable is not defined or `target=postgres` + - Install the latest version of PostgreSQL packages +- Update Patroni + - if `target=patroni` (or `system`) + - Install the latest version of Patroni package +- Update all system packages (includes PostgreSQL and Patroni) + - if `target=system` + - Update all system packages +- Start Services + - Start Patroni service + - Wait for Patroni port to become open on the host + - Check that the Patroni is healthy + - Check PostgreSQL is started and accepting connections +- Start read-only traffic + - Disable `noloadbalance`, `nosync`, `nofailover` parameters in the patroni.yml + - Reload patroni service + - Make sure replica endpoint is available + +#### 4. POST-UPDATE: Update extensions + +- Update extensions + - Get the current Patroni Cluster Leader Node + - Get a list of databases + - Update extensions in each database + - Get a list of old PostgreSQL extensions + - Update old PostgreSQL extensions (if an update is required) +- Check the Patroni cluster state +- Check the current PostgreSQL version +- List the Patroni cluster members +- Update completed. 
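+#### Example
+
+The `target` variable and the safety thresholds described above can be combined in a single run (the values here are illustrative):
+
+`ansible-playbook update_pgcluster.yml -e "target=system max_transaction_sec=30 reboot_host_post_delay=10"`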
diff --git a/automation/roles/update/tasks/extensions.yml b/automation/roles/update/tasks/extensions.yml new file mode 100644 index 000000000..594447f19 --- /dev/null +++ b/automation/roles/update/tasks/extensions.yml @@ -0,0 +1,26 @@ +--- +- name: "Get the current Patroni Cluster Leader Node" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + changed_when: false + failed_when: false + environment: + no_proxy: "{{ inventory_hostname }}" + +- name: Get a list of databases + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select datname from pg_catalog.pg_database where datname <> 'template0'" + register: databases_list + changed_when: false + when: + - patroni_leader_result.status == 200 + +- name: Update extensions in each database + ansible.builtin.include_tasks: update_extensions.yml + loop: "{{ databases_list.stdout_lines }}" + loop_control: + loop_var: pg_target_dbname + when: databases_list.stdout_lines is defined diff --git a/automation/roles/update/tasks/patroni.yml b/automation/roles/update/tasks/patroni.yml new file mode 100644 index 000000000..16a8c6419 --- /dev/null +++ b/automation/roles/update/tasks/patroni.yml @@ -0,0 +1,101 @@ +--- +# patroni_installation_method: "pip" +- block: + - name: Install the latest version of Patroni + ansible.builtin.pip: + name: patroni + state: latest + executable: pip3 + extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" + umask: "0022" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + register: update_patroni_package_pip + ignore_errors: true + environment: "{{ proxy_env | default({}) }}" + when: installation_method == "repo" and patroni_installation_method == "pip" + +# patroni_installation_method: "rpm/deb" +- block: + # Debian + - name: Install the latest version of Patroni packages + ansible.builtin.package: + name: "{{ patroni_packages | default('patroni') }}" + state: latest + register: update_patroni_package_debian + until: update_patroni_package_debian is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and patroni_deb_package_repo | length < 1 + + # RedHat + - name: Install the latest version of Patroni packages + ansible.builtin.package: + name: "{{ patroni_packages | default('patroni') }}" + state: latest + register: update_patroni_package_rhel + until: update_patroni_package_rhel is success + delay: 5 + retries: 3 + when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length < 1 + + # when patroni_deb_package_repo or patroni_rpm_package_repo URL is defined + # Debian + - name: Download Patroni deb package + ansible.builtin.get_url: + url: "{{ item }}" + dest: /tmp/ + timeout: 60 + validate_certs: false + loop: "{{ patroni_deb_package_repo | list }}" + when: ansible_os_family == "Debian" and patroni_deb_package_repo | length > 0 + + - name: Install Patroni from deb package + ansible.builtin.apt: + force_apt_get: true + deb: "/tmp/{{ item }}" + state: present + loop: "{{ patroni_deb_package_repo | map('basename') | list }}" + register: update_patroni_package_deb + until: update_patroni_package_deb is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" and patroni_deb_package_repo | length > 0 + + # RedHat + - name: Download Patroni rpm package + ansible.builtin.get_url: + url: "{{ 
item }}" + dest: /tmp/ + timeout: 60 + validate_certs: false + loop: "{{ patroni_rpm_package_repo | list }}" + when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length > 0 + + - name: Install Patroni from rpm package + ansible.builtin.package: + name: "/tmp/{{ item }}" + state: present + loop: "{{ patroni_rpm_package_repo | map('basename') | list }}" + register: update_patroni_package_rpm + until: update_patroni_package_rpm is success + delay: 5 + retries: 3 + when: ansible_os_family == "RedHat" and patroni_rpm_package_repo | length > 0 + ignore_errors: true + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - (patroni_installation_method == "rpm" or patroni_installation_method == "deb") + +# Set flag if any update failed +- name: "Set variable: update_patroni_failed" + ansible.builtin.set_fact: + update_patroni_failed: true + when: > + (update_patroni_package_pip is defined and update_patroni_package_pip is failed) or + (update_patroni_package_debian is defined and update_patroni_package_debian is failed) or + (update_patroni_package_rhel is defined and update_patroni_package_rhel is failed) or + (update_patroni_package_deb is defined and update_patroni_package_deb is failed) or + (update_patroni_package_rpm is defined and update_patroni_package_rpm is failed) diff --git a/automation/roles/update/tasks/pgbackrest_host.yml b/automation/roles/update/tasks/pgbackrest_host.yml new file mode 100644 index 000000000..1af7163a8 --- /dev/null +++ b/automation/roles/update/tasks/pgbackrest_host.yml @@ -0,0 +1,43 @@ +--- +# Update pgbackrest package on the Dedicated Repository Host +# if 'pgbackrest_repo_host' is defined and the host is specified in the 'pgbackrest' group in the inventory file. +- block: + - name: Gather facts from pgbackrest server + ansible.builtin.setup: + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + + - name: Update dnf cache + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.shell: dnf clean all && dnf -y makecache + args: + executable: /bin/bash + when: ansible_os_family == "RedHat" + + - name: Update apt cache + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" + + - name: Install the latest version of pgbackrest package + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.package: + name: pgbackrest + state: latest + register: update_pgbackrest_package + until: update_pgbackrest_package is success + delay: 5 + retries: 3 + when: + - pgbackrest_install | bool + - pgbackrest_repo_host | default('') | length > 0 + - groups['pgbackrest'] | default([]) | length > 0 diff --git a/automation/roles/update/tasks/postgres.yml b/automation/roles/update/tasks/postgres.yml new file mode 100644 index 000000000..78a88d992 --- /dev/null +++ b/automation/roles/update/tasks/postgres.yml @@ -0,0 +1,35 @@ +--- +- name: Update dnf cache + ansible.builtin.shell: dnf clean all && dnf -y makecache + args: + executable: /bin/bash + ignore_errors: true + when: ansible_os_family == "RedHat" + +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + ignore_errors: true + when: ansible_os_family == "Debian" + +- name: Install the latest version of PostgreSQL 
packages + ansible.builtin.package: + name: "{{ item }}" + state: latest + loop: "{{ postgresql_packages }}" + register: update_postgres_package + until: update_postgres_package is success + delay: 5 + retries: 3 + ignore_errors: true + +# Set flag if any update failed +- name: "Set variable: update_postgres_failed" + ansible.builtin.set_fact: + update_postgres_failed: true + when: update_postgres_package is failed diff --git a/automation/roles/update/tasks/pre_checks.yml b/automation/roles/update/tasks/pre_checks.yml new file mode 100644 index 000000000..fdbf48940 --- /dev/null +++ b/automation/roles/update/tasks/pre_checks.yml @@ -0,0 +1,82 @@ +--- +- name: "[Pre-Check] (ALL) Test PostgreSQL DB Access" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc 'select 1' + changed_when: false + +- name: "[Pre-Check] Make sure that physical replication is active" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select count(*) from pg_stat_replication + where application_name != 'pg_basebackup'" + register: pg_replication_state + changed_when: false + when: + - inventory_hostname in groups['primary'] + +# Stop, if there are no active replicas +- name: "Pre-Check error. Print physical replication state" + ansible.builtin.fail: + msg: "There are no active replica servers (pg_stat_replication returned 0 entries)." + when: + - inventory_hostname in groups['primary'] + - pg_replication_state.stdout | int == 0 + +- name: "[Pre-Check] Make sure there is no high replication lag (more than {{ max_replication_lag_bytes | human_readable }})" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pg_wal_lsn_diff(pg_current_wal_lsn(),replay_lsn) pg_lag_bytes + from pg_stat_replication + order by pg_lag_bytes desc limit 1" + register: pg_lag_bytes + changed_when: false + failed_when: false + until: pg_lag_bytes.stdout|int < max_replication_lag_bytes|int + retries: 30 + delay: 5 + when: + - inventory_hostname in groups['primary'] + +# Stop, if replication lag is high +- block: + - name: "Print replication lag" + ansible.builtin.debug: + msg: "Current replication lag: + {{ pg_lag_bytes.stdout | int | human_readable }}" + + - name: "Pre-Check error. Please try again later" + ansible.builtin.fail: + msg: High replication lag on the Patroni Cluster, please try again later. 
+ when: + - pg_lag_bytes.stdout is defined + - pg_lag_bytes.stdout|int >= max_replication_lag_bytes|int + +- name: "[Pre-Check] Make sure there are no long-running transactions (more than {{ max_transaction_sec }} seconds)" + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pid, usename, client_addr, clock_timestamp() - xact_start as xact_age, + state, wait_event_type ||':'|| wait_event as wait_events, + left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),100) as query + from pg_stat_activity + where clock_timestamp() - xact_start > '{{ max_transaction_sec }} seconds'::interval + and backend_type = 'client backend' and pid <> pg_backend_pid() + order by xact_age desc limit 10" + register: pg_long_transactions + changed_when: false + failed_when: false + until: pg_long_transactions.stdout | length < 1 + retries: 30 + delay: 2 + +# Stop, if long-running transactions detected +- block: + - name: "Print long-running (>{{ max_transaction_sec }}s) transactions" + ansible.builtin.debug: + msg: "{{ pg_long_transactions.stdout_lines }}" + + - name: "Pre-Check error. Please try again later" + ansible.builtin.fail: + msg: long-running transactions detected (more than {{ max_transaction_sec }} seconds), please try again later. + when: + - pg_long_transactions.stdout is defined + - pg_long_transactions.stdout | length > 0 diff --git a/automation/roles/update/tasks/start_services.yml b/automation/roles/update/tasks/start_services.yml new file mode 100644 index 000000000..958c40a02 --- /dev/null +++ b/automation/roles/update/tasks/start_services.yml @@ -0,0 +1,36 @@ +--- +- name: Start Patroni service + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: started + +- name: "Wait for port {{ patroni_restapi_port }} to become open on the host" + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" + state: started + timeout: 60 + delay: 10 + +- name: Check that the Patroni is healthy + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/health + status_code: 200 + register: patroni_replica_result + until: patroni_replica_result.status == 200 + retries: 300 + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + +- name: Check PostgreSQL is started and accepting connections + become: true + become_user: postgres + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" + register: pg_isready_result + until: pg_isready_result.rc == 0 + retries: 30 + delay: 2 + changed_when: false diff --git a/automation/roles/update/tasks/start_traffic.yml b/automation/roles/update/tasks/start_traffic.yml new file mode 100644 index 000000000..31a91968f --- /dev/null +++ b/automation/roles/update/tasks/start_traffic.yml @@ -0,0 +1,39 @@ +--- +- name: "Edit patroni.yml | disable noloadbalance, nosync, nofailover" + ansible.builtin.replace: + path: /etc/patroni/patroni.yml + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + loop: + - { regexp: "noloadbalance: true", replace: "noloadbalance: false" } + - { regexp: "nosync: true", replace: "nosync: false" } + - { regexp: "nofailover: true", replace: "nofailover: false" } + loop_control: + label: "{{ item.replace }}" + +- name: Reload patroni service + ansible.builtin.systemd: + daemon_reload: true + name: patroni + enabled: true + state: reloaded + +- name: Make sure replica 
endpoint is available + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/replica + status_code: 200 + register: patroni_replica_result + until: patroni_replica_result.status == 200 + retries: 30 + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + +# Warming up caches after reboot (if 'reboot_host_post_delay' is defined) +- name: "Wait {{ reboot_host_post_delay }} minutes for caches to warm up after reboot" + ansible.builtin.pause: + minutes: "{{ reboot_host_post_delay }}" + when: + - (reboot_result.rebooted is defined and reboot_result.rebooted) + - (reboot_host_post_delay is defined and reboot_host_post_delay | int > 0) + - (inventory_hostname in groups['secondary'] and groups['secondary'] | length > 1) diff --git a/automation/roles/update/tasks/stop_services.yml b/automation/roles/update/tasks/stop_services.yml new file mode 100644 index 000000000..8e2592133 --- /dev/null +++ b/automation/roles/update/tasks/stop_services.yml @@ -0,0 +1,43 @@ +--- +# pre-check +- name: Check PostgreSQL is started and accepting connections + become: true + become_user: postgres + ansible.builtin.command: "{{ postgresql_bin_dir }}/pg_isready -p {{ postgresql_port }}" + register: pg_isready_result + until: pg_isready_result.rc == 0 + retries: 30 + delay: 2 + changed_when: false + +# Stop the secondary +- block: + - name: Execute CHECKPOINT before stopping PostgreSQL + become: true + become_user: postgres + ansible.builtin.command: > + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "CHECKPOINT" + + - name: "Stop Patroni service on the Cluster Replica ({{ ansible_hostname }})" + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: stopped + when: inventory_hostname in groups['secondary'] + +# Stop the old primary (now secondary, after switchover) +- block: + - name: Execute CHECKPOINT before stopping PostgreSQL + become: true + become_user: postgres + ansible.builtin.command: > + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "CHECKPOINT" + + - name: "Stop Patroni service on the old Cluster Leader ({{ ansible_hostname }})" + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: stopped + when: inventory_hostname in groups['primary'] diff --git a/automation/roles/update/tasks/stop_traffic.yml b/automation/roles/update/tasks/stop_traffic.yml new file mode 100644 index 000000000..de23e4c15 --- /dev/null +++ b/automation/roles/update/tasks/stop_traffic.yml @@ -0,0 +1,46 @@ +--- +- name: "Edit patroni.yml | enable noloadbalance, nosync, nofailover" + ansible.builtin.replace: + path: /etc/patroni/patroni.yml + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + loop: + - { regexp: "noloadbalance: false", replace: "noloadbalance: true" } + - { regexp: "nosync: false", replace: "nosync: true" } + - { regexp: "nofailover: false", replace: "nofailover: true" } + loop_control: + label: "{{ item.replace }}" + +- name: Reload patroni service + ansible.builtin.systemd: + daemon_reload: true + name: patroni + enabled: true + state: reloaded + +- name: Make sure replica endpoint is unavailable + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/replica + status_code: 503 + register: patroni_replica_result + until: patroni_replica_result.status == 503 + retries: 30 + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + +- name: Wait
for active transactions to complete + become: true + become_user: postgres + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select count(*) + from pg_stat_activity + where pid <> pg_backend_pid() + and backend_type = 'client backend' + and state = 'active'" + register: pg_active_count + until: pg_active_count.stdout|int == 0 + retries: 300 + delay: 2 + changed_when: false diff --git a/automation/roles/update/tasks/switchover.yml b/automation/roles/update/tasks/switchover.yml new file mode 100644 index 000000000..51e089954 --- /dev/null +++ b/automation/roles/update/tasks/switchover.yml @@ -0,0 +1,26 @@ +--- +- name: Perform switchover of the leader for the Patroni cluster "{{ patroni_cluster_name }}" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/switchover + method: POST + user: "{{ patroni_restapi_username | default(omit) }}" + password: "{{ patroni_restapi_password | default(omit) }}" + body: '{"leader":"{{ ansible_hostname }}"}' + body_format: json + register: patroni_switchover_result + until: patroni_switchover_result.status == 200 + retries: 300 + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + +- name: Make sure that the Patroni is healthy and is a replica + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/replica + status_code: 200 + register: patroni_replica_result + until: patroni_replica_result.status == 200 + retries: 300 + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" diff --git a/automation/roles/update/tasks/system.yml b/automation/roles/update/tasks/system.yml new file mode 100644 index 000000000..f50e6eddb --- /dev/null +++ b/automation/roles/update/tasks/system.yml @@ -0,0 +1,81 @@ +--- +- name: Update dnf cache + ansible.builtin.shell: dnf clean all && dnf -y makecache + args: + executable: /bin/bash + ignore_errors: true + when: ansible_os_family == "RedHat" + +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + ignore_errors: true + when: ansible_os_family == "Debian" + +- name: Update all system packages + ansible.builtin.package: + name: "*" + state: latest + register: update_system_packages_debian + until: update_system_packages_debian is success + delay: 5 + retries: 3 + ignore_errors: true + when: ansible_os_family == "Debian" + +- name: Update all system packages + ansible.builtin.dnf: + name: "*" + state: latest + disablerepo: "pgdg*" + register: update_system_packages_rhel + until: update_system_packages_rhel is success + delay: 5 + retries: 3 + ignore_errors: true + when: ansible_os_family == "RedHat" + +# Reboot (if 'reboot_host_after_update' is 'true') +- name: Check if a reboot is required + ansible.builtin.stat: + path: /var/run/reboot-required + register: reboot_required_debian + failed_when: false + changed_when: false + when: + - ansible_os_family == "Debian" + - ansible_virtualization_type not in ['container', 'docker', 'lxc', 'podman'] # exclude for containers to prevent test failures in CI. 
+ - reboot_host_after_update | bool + +- name: Check if a reboot is required + ansible.builtin.command: needs-restarting -r + register: reboot_required_rhel + failed_when: false + changed_when: false + when: + - ansible_os_family == "RedHat" + - ansible_virtualization_type not in ['container', 'docker', 'lxc', 'podman'] # exclude for containers to prevent test failures in CI. + - reboot_host_after_update | bool + +- name: Rebooting host + ansible.builtin.reboot: + msg: "Reboot initiated by Ansible due to required system updates" + reboot_timeout: "{{ reboot_host_timeout | int }}" + test_command: uptime + register: reboot_result + when: (reboot_required_debian.stat.exists is defined and reboot_required_debian.stat.exists) or + (reboot_required_rhel.rc is defined and reboot_required_rhel.rc != 0) + +# Set flag if any update failed +- name: "Set variable: update_system_failed" + ansible.builtin.set_fact: + update_system_failed: true + when: > + (update_system_packages_debian is defined and update_system_packages_debian is failed) or + (update_system_packages_rhel is defined and update_system_packages_rhel is failed) or + (update_system_packages_rhel7 is defined and update_system_packages_rhel7 is failed) diff --git a/automation/roles/update/tasks/update_extensions.yml b/automation/roles/update/tasks/update_extensions.yml new file mode 100644 index 000000000..a8b5cf920 --- /dev/null +++ b/automation/roles/update/tasks/update_extensions.yml @@ -0,0 +1,67 @@ +--- +- name: Get a list of old PostgreSQL extensions + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "select extname from pg_extension e + join pg_available_extensions ae on extname = ae.name + where installed_version <> default_version" + register: pg_old_extensions + changed_when: false + when: + - patroni_leader_result.status == 200 + +# if there are no old extensions +- name: The extensions are up-to-date + ansible.builtin.debug: + msg: + - "The extension versions are up-to-date for the database {{ pg_target_dbname }}" + - "No update is required." 
+  when: pg_old_extensions.stdout_lines | length < 1
+
+# excluding 'pg_repack' (if it exists), as it requires re-creation to update
+- name: Update old PostgreSQL extensions
+  ansible.builtin.command: >-
+    {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc
+    "ALTER EXTENSION {{ item }} UPDATE"
+  ignore_errors: true
+  loop: "{{ pg_old_extensions.stdout_lines | reject('match', '^pg_repack$') | list }}"
+  register: pg_old_extensions_update_result
+  when:
+    - patroni_leader_result.status == 200
+    - (pg_old_extensions.stdout_lines | length > 0
+      and not 'pg_stat_kcache' in pg_old_extensions.stdout_lines)
+
+# if the pg_stat_kcache extension exists
+- block:
+    # excluding 'pg_stat_statements', because the pg_stat_kcache extension depends on it (will be re-created)
+    - name: Update old PostgreSQL extensions
+      ansible.builtin.command: >-
+        {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc
+        "ALTER EXTENSION {{ item }} UPDATE"
+      ignore_errors: true
+      loop: "{{ pg_old_extensions.stdout_lines | reject('match', '^(pg_repack|pg_stat_statements|pg_stat_kcache)$') | list }}"
+      register: pg_old_extensions_update_result
+
+    # re-create 'pg_stat_statements' and 'pg_stat_kcache' if an update is required
+    - name: Recreate old pg_stat_statements and pg_stat_kcache extensions to update
+      ansible.builtin.command: >-
+        {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc
+        "DROP EXTENSION pg_stat_statements CASCADE;
+        CREATE EXTENSION pg_stat_statements;
+        CREATE EXTENSION pg_stat_kcache"
+  when:
+    - patroni_leader_result.status == 200
+    - pg_old_extensions.stdout_lines | length > 0
+    - ('pg_stat_statements' in pg_old_extensions.stdout_lines or
+      'pg_stat_kcache' in pg_old_extensions.stdout_lines)
+
+# re-create the 'pg_repack' extension if it exists and an update is required
+- name: Recreate old pg_repack extension to update
+  ansible.builtin.command: >-
+    {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc
+    "DROP EXTENSION pg_repack;
+    CREATE EXTENSION pg_repack;"
+  when:
+    - patroni_leader_result.status == 200
+    - (pg_old_extensions.stdout_lines | length > 0
+      and 'pg_repack' in pg_old_extensions.stdout_lines)
diff --git a/automation/roles/update/vars/main.yml b/automation/roles/update/vars/main.yml
new file mode 100644
index 000000000..fed562144
--- /dev/null
+++ b/automation/roles/update/vars/main.yml
@@ -0,0 +1,13 @@
+---
+target: postgres # Defines the target for the update. Available values: 'postgres', 'patroni', 'system'
+
+update_extensions: true # An attempt will be made to automatically update all PostgreSQL extensions in all databases.
+
+# if target=system
+reboot_host_after_update: true # Restart the server if it is required after the update.
+reboot_host_timeout: 1800 # Maximum seconds to wait for the machine to reboot and respond to a test command.
+reboot_host_post_delay: 5 # The waiting time (in minutes) for the caches to warm up after restarting the server, before updating the next server.
+
+# pre-checks vars
+max_replication_lag_bytes: 10485760 # (10 MiB) The replication lag threshold above which the update will not be performed.
+max_transaction_sec: 15 # (seconds) The maximum transaction duration; if longer-running transactions are present, the update will not be performed.
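+
+# A minimal usage sketch (illustrative only): these variables can be overridden
+# at runtime when running the update playbook, e.g.:
+#   ansible-playbook update_pgcluster.yml -e "target=system reboot_host_after_update=false"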
diff --git a/automation/roles/upgrade/README.md b/automation/roles/upgrade/README.md
new file mode 100644
index 000000000..1e4e3e32c
--- /dev/null
+++ b/automation/roles/upgrade/README.md
@@ -0,0 +1,337 @@
+## PostgreSQL in-place major upgrade
+
+This role is designed for in-place major upgrades of PostgreSQL (e.g., from version 15 to 16).
+
+#### Compatibility
+
+The upgrade is supported starting from PostgreSQL 9.3 and up to the latest PostgreSQL version.
+
+#### Requirements
+
+There is no need to plan for additional disk space, because the upgrade uses hard links instead of copying files. However, it is required that the `pg_old_datadir` and `pg_new_datadir` are located within the same top-level directory (`pg_upper_datadir` variable).
+
+Specify the current (old) version of PostgreSQL in the `pg_old_version` variable and the target version of PostgreSQL for the upgrade in the `pg_new_version` variable.
+
+#### Recommendations
+
+1. Before upgrading to a new major version, it's recommended to update PostgreSQL and its extensions. Additionally, consider updating Patroni and the entire system.
+
+   To achieve this, use the `update_pgcluster.yml` playbook. More details can be found [here](../update/README.md).
+
+2. Before moving forward, execute preliminary checks to ensure that your database schema is compatible with the upcoming PostgreSQL version and that the cluster is ready for the upgrade.
+
+   To do this, run the `pg_upgrade.yml` playbook using the tags '`pre-checks,upgrade-check`'.
+
+   If any errors arise, such as schema object incompatibilities, resolve these issues and repeat the checks.
+
+   Once the playbook completes the pre-checks without any errors, you should see the following messages in the Ansible log:
+
+   - "`The database schema is compatible with PostgreSQL `"
+   - "`Clusters are compatible`"
+
+   Upon seeing these messages, proceed to run the playbook without any tags to initiate the upgrade.
+
+### Upgrade
+
+```bash
+ansible-playbook pg_upgrade.yml -e "pg_old_version=14 pg_new_version=15"
+```
+
+#### Database Downtime Considerations
+
+To minimize or even eliminate errors during database upgrades (depending on the workload and timeouts), we pause the PgBouncer pools. From an application's perspective, this does not result in terminated database connections. Instead, applications might experience a temporary increase in query latency while the PgBouncer pools are paused.
+
+On average, the PgBouncer pause duration is approximately 30 seconds. However, for larger databases, this pause might be extended due to longer `pg_upgrade` and `rsync` procedures. The default maximum wait time for a request during a pause is set to 2 minutes (controlled by the `query_wait_timeout` pgbouncer parameter). If the pause exceeds this duration, connections will be terminated with a timeout error.
+
+### Rollback
+
+This playbook performs a rollback of a PostgreSQL upgrade.
+
+Note: In some scenarios, if errors occur, the pg_upgrade.yml playbook may automatically initiate a rollback. Alternatively, if the automatic rollback does not occur, you can manually execute the pg_upgrade_rollback.yml playbook to revert the changes.
+
+```bash
+ansible-playbook pg_upgrade_rollback.yml
+```
+
+It's designed to be used when a PostgreSQL upgrade hasn't been fully completed and the new version hasn't been started.
+The rollback operation is performed by starting the Patroni cluster with the old version of PostgreSQL using the same PGDATA.
+The playbook first checks the health of the current cluster, verifies the version of PostgreSQL, and ensures the new PostgreSQL is not running. +If these checks pass, the playbook switches back to the old PostgreSQL paths and restarts the Patroni service. + +### Variables + +| Variable Name | Description | Default Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------: | +| `pg_old_version` | Current (old) version of PostgreSQL. | `""` | +| `pg_new_version` | Target version of PostgreSQL for the upgrade. | `""` | +| `pg_old_bindir` | Directory containing binaries for the old PostgreSQL version. | Derived value | +| `pg_old_datadir` | Data directory path for the old PostgreSQL version. | Derived value | +| `pg_old_confdir` | Configuration directory path for the old PostgreSQL version. | Derived value | +| `pg_new_bindir` | Directory containing binaries for the new PostgreSQL version. | Derived value | +| `pg_new_datadir` | Data directory path for the new PostgreSQL version. | Derived value | +| `pg_new_confdir` | Configuration directory path for the new PostgreSQL version. | Derived value | +| `pg_new_wal_dir` | Custom WAL directory for the new PostgreSQL version. | Derived value | +| `pg_upper_datadir` | Top-level directory containing both old and new PostgreSQL data directories. | Derived value | +| `pg_new_packages` | List of package names for the new PostgreSQL version to be installed. | Derived value | +| `pg_old_packages_remove` | Whether to remove old PostgreSQL packages after the upgrade. | `true` | +| `pg_start_stop_timeout` | Timeout when starting/stopping PostgreSQL during the upgrade (in seconds). | `1800` | +| `schema_compatibility_check` | Check database schema compatibility with the new PostgreSQL version before upgrading. | `true` | +| `schema_compatibility_check_port` | Port for temporary PostgreSQL instance for schema compatibility checking. | Derived value | +| `schema_compatibility_check_timeout` | Max duration for compatibility check (pg_dumpall --schema-only) in seconds. | `3600` | +| `vacuumdb_parallel_jobs` | Execute the analyze command in parallel by running `njobs` commands simultaneously. This option may reduce the processing time but it also increases the load on the database server. | all CPU cores | +| `vacuumdb_analyze_timeout` | Max duration of analyze command in seconds. | `3600` | +| `vacuumdb_analyze_terminate_treshold` | Terminate active queries that are longer than the specified time (in seconds) during the collection of statistics (0 = do not terminate active backends). | `0` | +| `update_extensions` | Automatically update all PostgreSQL extensions. | `true` | +| `max_replication_lag_bytes` | Maximum allowed replication lag in bytes. | `10485760` | +| `max_transaction_sec` | Maximum allowed duration for a transaction in seconds. | `15` | +| `copy_files_to_all_server` | Copy files located in the "files" directory to all servers. (optional) | `[]` | +| `pgbouncer_pool_pause` | Pause pgbouncer pools during upgrade. | `true` | +| `pgbouncer_pool_pause_timeout` | The maximum waiting time (in seconds) for the pool to be paused. For each iteration of the loop when trying to pause all pools. | `2` | +| `pgbouncer_pool_pause_terminate_after` | Time in seconds after which script terminates slow active queries. 
| `30` |
+| `pgbouncer_pool_pause_stop_after` | Time in seconds after which the script exits with an error if unable to pause all pgbouncer pools. | `60` |
+| `pg_slow_active_query_treshold` | Time in milliseconds to wait for active queries before trying to pause the pool. | `1000` |
+| `pg_slow_active_query_treshold_to_terminate` | Time in milliseconds after reaching "pgbouncer_pool_pause_terminate_after" before the script terminates active queries. | `100` |
+| `pgbackrest_stanza_upgrade` | Perform the "stanza-upgrade" command after the upgrade (if 'pgbackrest_install' is 'true'). | `true` |
+
+Note: For variables marked as "Derived value", the default value is determined based on other variables. \
+Please see the [upgrade.yml](../common/defaults/upgrade.yml) variable file.
+
+---
+
+### Upgrade Plan:
+
+#### 1. PRE-UPGRADE: Perform Pre-Checks
+
+- **Make sure that the required variables are specified**
+  - Notes: `pg_old_version` and `pg_new_version` variables
+  - Stop, if one or more required variables have empty values.
+- **Make sure that the old and new data and config directories do not match**
+  - Stop, if `pg_old_datadir` and `pg_new_datadir`, or `pg_old_confdir` and `pg_new_confdir` match.
+- **Make sure the required Ansible Python library is installed**
+  - Notes: Install 'pexpect' package if missing
+- **Test PostgreSQL database access using a unix socket**
+  - if there is an error (no pg_hba.conf entry):
+    - Add temporary local access rule (during the upgrade)
+    - Update the PostgreSQL configuration
+- **Check the current version of PostgreSQL**
+  - Stop, if the current version does not match `pg_old_version`
+  - Stop, if the current version is greater than or equal to `pg_new_version`. No upgrade is needed.
+- **Ensure new data directory is different from the current one**
+  - Note: This check is necessary to avoid the risk of deleting the current data directory
+  - Stop, if the current data directory is the same as `pg_new_datadir`.
+  - Stop, if the current WAL directory is the same as `pg_new_wal_dir` (if a custom wal dir is used).
+- **Make sure that physical replication is active**
+  - Stop, if there are no active replicas
+- **Make sure there is no high replication lag**
+  - Stop, if replication lag is high (more than `max_replication_lag_bytes`)
+- **Make sure there are no long-running transactions**
+  - Stop, if long-running transactions detected (more than `max_transaction_sec`)
+- **Make sure that SSH key-based authentication is configured between cluster nodes**
+  - Create and copy ssh keys between database servers (if not configured)
+- **Perform Rsync Checks**
+  - Make sure that the rsync package is installed
+  - Create 'testrsync' file on Primary
+  - Test rsync and ssh key access
+  - Cleanup 'testrsync' file
+- **Check if PostgreSQL tablespaces exist**
+  - Print tablespace location (if exists)
+  - Note: If tablespaces are present they will be upgraded (step 5) on replicas using rsync
+- **Make sure that the 'recovery.signal' file is absent** in the data directory
+- **Test PgBouncer access via unix socket**
+  - Ensure correct permissions for PgBouncer unix socket directory
+  - Test access via unix socket to be able to perform 'PAUSE' command
+- **Make sure that the cluster ip address (VIP) is running**
+  - Notes: if 'cluster_vip' is defined
+
+#### 2. PRE-UPGRADE: Install new PostgreSQL packages
+
+- Clean yum/dnf cache (for RedHat based) or update apt cache (for Debian based)
+- Install new PostgreSQL packages
+- Install TimescaleDB package for new PostgreSQL
+  - Note: if 'enable_timescale' is 'true'
+
+#### 3. PRE-UPGRADE: Initialize new db, schema compatibility check, and pg_upgrade --check
+
+- **Initialize new PostgreSQL**
+  - Make sure new PostgreSQL data directory exists
+  - Make sure new PostgreSQL data directory is not initialized
+    - If already initialized:
+      - Perform pg_dropcluster (for Debian based)
+      - Clear the new PostgreSQL data directory
+  - Get the current install user (rolname with oid = 10)
+  - Get the current encoding and data_checksums settings
+  - Initialize new PostgreSQL data directory
+    - for Debian based: on all database servers to create default config files
+    - for RedHat based: on the Primary only
+- **Copy files specified in the `copy_files_to_all_server` variable**, [optional]
+  - Notes: for example, it may be necessary for Postgres Full-Text Search (FTS) files
+- **Schema compatibility check**
+  - Get the current `shared_preload_libraries` settings
+  - Get the current `cron.database_name` settings
+    - Notes: if 'pg_cron' is defined in 'pg_shared_preload_libraries'
+  - Start new PostgreSQL to check the schema compatibility
+    - Note: on the port specified in the `schema_compatibility_check_port` variable
+    - Wait for PostgreSQL to start
+  - Check the compatibility of the database schema with the new PostgreSQL
+    - Notes: used `pg_dumpall` with `--schema-only` options
+    - Wait for the schema compatibility check to complete
+  - Check the result of the schema compatibility check
+    - Note: Checking for errors in `/tmp/pg_schema_compatibility_check.log`
+    - Stop, if the schema is not compatible (there are errors)
+  - Print the result of the schema compatibility check
+  - Stop new PostgreSQL to re-initdb
+  - Drop new PostgreSQL to re-initdb (perform pg_dropcluster for Debian based)
+  - Reinitialize the database after checking schema compatibility
+- **Perform pg_upgrade check**
+  - Get the current `shared_preload_libraries` settings
+  - Verify the two clusters are compatible (`pg_upgrade --check`)
+  - Print the result of the pg_upgrade check
+
+#### 4. PRE-UPGRADE: Prepare the Patroni configuration
+
+- Backup the patroni.yml configuration file
+- Edit the patroni.yml configuration file
+  - **Update parameters**: `data_dir`, `bin_dir`, `config_dir`
+  - **Prepare the PostgreSQL parameters**
+    - Notes: Removed or renamed parameters
+  - **Remove 'standby_cluster' parameter (if exists)**
+    - Notes: To support upgrades in the Patroni Standby Cluster
+- **Copy pg_hba.conf to `pg_new_confdir`**
+  - Notes: to save pg_hba rules
+
+#### 5. UPGRADE: Upgrade PostgreSQL
+
+- **Enable maintenance mode for Patroni cluster** (pause)
+- **Enable maintenance mode for HAProxy** (for 'Type A' scheme)
+  - Notes: if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+  - Stop confd service
+  - Update haproxy conf file
+    - Notes: Temporarily disable http-checks in order to keep database connections after stopping the Patroni service
+  - Reload haproxy service
+- **Enable maintenance mode for vip-manager** (for 'Type B' scheme)
+  - Notes: if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+  - Update vip-manager service file (comment out 'ExecStopPost')
+    - Notes: Temporarily disable vip-manager service to keep database connections after stopping the Patroni service
+  - Stop vip-manager service
+    - Notes: This prevents the VIP from being removed when the Patroni leader is unavailable during maintenance
+  - Make sure that the cluster ip address (VIP) is running
+- **Stop Patroni service**
+  - Wait until the Patroni cluster is stopped
+- **Execute CHECKPOINT before stopping PostgreSQL**
+  - Wait for the CHECKPOINT to complete
+- **Wait until replication lag is less than `max_replication_lag_bytes`**
+  - Notes: max wait time: 2 minutes
+  - Stop, if replication lag is high
+    - Perform rollback
+    - Print error message: "There's a replication lag in the PostgreSQL Cluster. Please try again later"
+- **Perform PAUSE on all pgbouncer servers**
+  - Notes: if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+  - Notes: the pgbouncer pause script (details in [pgbouncer_pause.yml](tasks/pgbouncer_pause.yml)) performs the following actions:
+    - Waits for active queries on the database servers to complete (with a runtime longer than `pg_slow_active_query_treshold`).
+    - If there are no active queries, sends a `PAUSE` command to each pgbouncer server in parallel (using `xargs` and ssh connections).
+    - If all pgbouncers are successfully paused, the script exits with code 0 (successful).
+    - If active queries do not complete within 30 seconds (`pgbouncer_pool_pause_terminate_after` variable), the script terminates slow active queries (longer than `pg_slow_active_query_treshold_to_terminate`).
+    - If after that it is still not possible to pause the pgbouncer servers within 60 seconds (`pgbouncer_pool_pause_stop_after` variable) from the start of the script, the script exits with an error.
+      - Perform rollback
+      - Print error message: "PgBouncer pools could not be paused, please try again later."
+- **Stop PostgreSQL** on the Leader and Replicas
+  - Check if old PostgreSQL is stopped
+  - Check if new PostgreSQL is stopped
+- **Get 'Latest checkpoint location'** on the Leader and Replicas
+  - Print 'Latest checkpoint location' for the Leader and Replicas
+- **Check if all 'Latest checkpoint location' values match**
+  - if 'Latest checkpoint location' values match
+    - Print info message:
+      - "'Latest checkpoint location' is the same on the leader and its standbys"
+  - if 'Latest checkpoint location' values don't match
+    - Perform rollback
+    - Print error message: "'Latest checkpoint location' doesn't match on leader and its standbys. Please try again later"
+- **Upgrade the PostgreSQL on the Primary** (using pg_upgrade --link)
+  - Perform rollback, if the upgrade failed
+  - Print the result of the pg_upgrade
+- **Make sure that the new data directory is empty on the Replica**
+- **Upgrade the PostgreSQL on the Replica** (using rsync --hard-links)
+  - Wait for the rsync to complete
+- **Upgrade the PostgreSQL tablespaces on the Replica** (using rsync --hard-links)
+  - Notes: if tablespaces exist
+  - Wait for the tablespaces rsync to complete
+- **Synchronize WAL directory** (if `pg_new_wal_dir` is defined) [optional]
+  - Make sure new pg_wal directory is not a symlink
+  - Make sure the custom WAL directory exists and is empty
+  - Synchronize new pg_wal to 'pg_new_wal_dir' path
+  - Rename pg_wal to pg_wal_old
+  - Create symlink
+  - Remove 'pg_wal_old' directory
+- **Remove existing cluster from DCS**
+- **Start Patroni service on the Cluster Leader**
+  - Wait for Patroni port to become open on the host
+  - Check Patroni is healthy on the Leader
+- **Perform RESUME PgBouncer pools on the Leader**
+  - Notes: if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+- **Start Patroni service on the Cluster Replica**
+  - Wait for Patroni port to become open on the host
+  - Check Patroni is healthy on the Replica
+- **Perform RESUME PgBouncer pools on the Replica**
+  - Notes: if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+- **Check PostgreSQL is started and accepting connections**
+- **Disable maintenance mode for HAProxy** (for 'Type A' scheme)
+  - Update haproxy conf file
+    - Notes: Enable http-checks
+  - Reload haproxy service
+  - Start confd service
+- **Disable maintenance mode for vip-manager** (for 'Type B' scheme)
+  - Update vip-manager service file (uncomment 'ExecStopPost')
+  - Start vip-manager service
+  - Make sure that the cluster ip address (VIP) is running
+
+#### 6. POST-UPGRADE: Analyze a PostgreSQL database (update optimizer statistics) and Post-Upgrade tasks
+
+- **Run vacuumdb to analyze the PostgreSQL databases**
+  - Note: Uses parallel processes equal to 50% of CPU cores ('`vacuumdb_parallel_jobs`' variable)
+  - Note: Before collecting statistics, the 'pg_terminator' script is launched to monitor and terminate any 'ANALYZE' blockers. Once statistics collection is complete, the script is stopped.
+- **Update extensions in each database**
+  - Get list of installed PostgreSQL extensions
+  - Get list of old PostgreSQL extensions
+  - Update old PostgreSQL extensions
+    - Notes: excluding 'pg_repack' and 'pg_stat_kcache' (if they exist), as they require re-creation to update
+  - Recreate old pg_stat_statements and pg_stat_kcache extensions to update
+    - Notes: if pg_stat_kcache is installed
+  - Recreate old pg_repack extension to update
+    - Notes: if pg_repack is installed
+  - Notes: if there are no old extensions, print message:
+    - "The extension versions are up-to-date for the database. No update is required."
+- **Perform Post-Checks**
+  - Make sure that physical replication is active
+    - Note: if no active replication connections found, print error message: "No active replication connections found. Please check the replication status and PostgreSQL logs."
+  - Create a table "test_replication" with 10000 rows on the Primary
+  - Wait until the PostgreSQL replica is synchronized (max wait time: 2 minutes)
+  - Drop a table "test_replication"
+  - Print the result of checking the number of records
+    - if the number of rows matches, print info message: "The PostgreSQL Replication is OK. The number of records in the 'test_replication' table is the same as on the Primary."
+    - if the number of rows does not match, print error message: "The number of records in the 'test_replication' table does not match the Primary. Please check the replication status and PostgreSQL logs."
+- **Perform Post-Upgrade tasks**
+  - **Ensure the current data directory is the new data directory**
+    - Notes: to prevent deletion of the old directory if it is in use
+  - **Delete the old PostgreSQL data directory**
+    - Notes: perform pg_dropcluster for Debian based
+  - **Delete the old PostgreSQL WAL directory**
+    - Notes: if 'pg_new_wal_dir' is defined
+  - **Remove old PostgreSQL packages**
+    - Notes: if 'pg_old_packages_remove' is 'true'
+  - **Remove temporary local access rule from pg_hba.conf**
+    - Notes: if it has been changed
+    - Update the PostgreSQL configuration
+  - **pgBackRest** (if 'pgbackrest_install' is 'true')
+    - Check pg-path option
+    - Update pg-path in pgbackrest.conf
+    - Upgrade stanza
+  - **WAL-G** (if 'wal_g_install' is 'true')
+    - Update PostgreSQL data directory path in .walg.json
+    - Update PostgreSQL data directory path in cron jobs
+  - **Wait for the analyze to complete.**
+    - Notes: max wait time: 1 hour ('`vacuumdb_analyze_timeout`' variable)
+  - **Check the Patroni cluster state**
+  - **Check the current PostgreSQL version**
+  - **Print info messages**
+    - List the Patroni cluster members
+    - Upgrade completed
diff --git a/automation/roles/upgrade/tasks/checkpoint_location.yml b/automation/roles/upgrade/tasks/checkpoint_location.yml
new file mode 100644
index 000000000..516ac2b6a
--- /dev/null
+++ b/automation/roles/upgrade/tasks/checkpoint_location.yml
@@ -0,0 +1,80 @@
+---
+# This playbook performs several tasks related to PostgreSQL's "Latest checkpoint location":
+# 1. Retrieves the value from the cluster leader and its replicas.
+# 2. Debugs this value for both leader and replicas.
+# 3. Determines if the values match across the leader and replicas, setting 'pg_checkpoint_location_match' accordingly.
+# 4. If the values match across all nodes, a success message is displayed and the update procedure continues.
+# 5. If there's a mismatch, the previously stopped cluster starts (rollback), and the playbook stops with an error message.
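+#
+# Illustrative example (output format assumed from pg_controldata): the tasks below parse a line such as
+#   Latest checkpoint location:           0/3000060
+# where `awk '{print $4}'` extracts the LSN "0/3000060", which is then compared across nodes.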
+ +- name: Get 'Latest checkpoint location' on the Leader + ansible.builtin.shell: | + set -o pipefail; + {{ pg_old_bindir }}/pg_controldata {{ pg_old_datadir }} | grep 'Latest checkpoint location' | awk '{print $4}' + args: + executable: /bin/bash + changed_when: false + register: pg_checkpoint_location_leader + when: + - inventory_hostname in groups['primary'] + +- name: Get 'Latest checkpoint location' on the Replicas + ansible.builtin.shell: | + set -o pipefail; + {{ pg_old_bindir }}/pg_controldata {{ pg_old_datadir }} | grep 'Latest checkpoint location' | awk '{print $4}' + args: + executable: /bin/bash + changed_when: false + register: pg_checkpoint_location_replica + when: + - inventory_hostname in groups['secondary'] + +- name: Print 'Latest checkpoint location' for the Leader + ansible.builtin.debug: + msg: "Leader's latest checkpoint location: {{ pg_checkpoint_location_leader.stdout }}" + when: + - inventory_hostname in groups['primary'] + +- name: Print 'Latest checkpoint location' for the Replica + ansible.builtin.debug: + msg: "Replica: {{ inventory_hostname }}, latest checkpoint location: {{ pg_checkpoint_location_replica.stdout }}" + when: + - inventory_hostname in groups['secondary'] + +- name: Check if all 'Latest checkpoint location' values match + ansible.builtin.set_fact: + pg_checkpoint_location_match: "{{ pg_checkpoint_location_replica.stdout == hostvars[groups['primary'][0]]['pg_checkpoint_location_leader']['stdout'] }}" + when: + - inventory_hostname in groups['secondary'] + +- name: "SUCCESS: 'Latest checkpoint location' values match on all cluster nodes" + ansible.builtin.debug: + msg: "'Latest checkpoint location' is the same on the leader and its standbys" + run_once: true + when: + # This condition retrieves the 'pg_checkpoint_location_match' value for each node in the 'secondary' group. + # The 'select' filter selects all nodes whose 'pg_checkpoint_location_match' is 'False'. + # If no such nodes exist (i.e., the length of the resulting list is less than 1), it means that the 'pg_checkpoint_location_match' is 'True' for all nodes. + - groups['secondary'] | map('extract', hostvars, 'pg_checkpoint_location_match') | select('equalto', False) | list | length < 1 + +# Stop, if 'Latest checkpoint location' doesn't match +- block: + - name: "'Latest checkpoint location' doesn't match" + ansible.builtin.debug: + msg: "'Latest checkpoint location' doesn't match on leader and its standbys" + run_once: true + + # rollback + - name: Perform rollback + ansible.builtin.include_tasks: rollback.yml + + - name: "ERROR: 'Latest checkpoint location' doesn't match" + ansible.builtin.fail: + msg: "'Latest checkpoint location' doesn't match on leader and its standbys. Please try again later" + run_once: true + when: + # This condition retrieves the 'pg_checkpoint_location_match' value for each node in the 'secondary' group. + # The 'select' filter selects all nodes whose 'pg_checkpoint_location_match' is 'False'. + # If there is at least one such node (i.e., the length of the resulting list is greater than 0), + # it means that the 'pg_checkpoint_location_match' is not 'True' for all nodes, + # and the block of tasks is executed, including cleanup and throwing an error. 
+    - groups['secondary'] | map('extract', hostvars, 'pg_checkpoint_location_match') | select('equalto', False) | list | length > 0
diff --git a/automation/roles/upgrade/tasks/custom_wal_dir.yml b/automation/roles/upgrade/tasks/custom_wal_dir.yml
new file mode 100644
index 000000000..7399a6d5d
--- /dev/null
+++ b/automation/roles/upgrade/tasks/custom_wal_dir.yml
@@ -0,0 +1,45 @@
+---
+- name: "Make sure {{ pg_new_datadir }}/pg_wal is not a symlink"
+  ansible.builtin.stat:
+    path: "{{ pg_new_datadir }}/pg_wal"
+  register: sym
+
+# Synchronize WALs (if the wal dir is not a symlink)
+- block:
+    - name: Make sure the custom WAL directory "{{ pg_new_wal_dir }}" exists and is empty
+      become: true
+      become_user: root
+      ansible.builtin.file:
+        path: "{{ pg_new_wal_dir }}"
+        state: "{{ item }}"
+        owner: postgres
+        group: postgres
+        mode: "0700"
+      loop:
+        - absent
+        - directory
+
+    - name: "Synchronize {{ pg_new_datadir }}/pg_wal to {{ pg_new_wal_dir }}"
+      become: true
+      become_user: postgres
+      ansible.posix.synchronize:
+        src: "{{ pg_new_datadir }}/pg_wal/"
+        dest: "{{ pg_new_wal_dir }}/"
+      delegate_to: "{{ inventory_hostname }}"
+
+    - name: "Rename pg_wal to pg_wal_old"
+      ansible.builtin.command: mv {{ pg_new_datadir }}/pg_wal {{ pg_new_datadir }}/pg_wal_old
+
+    - name: "Create symlink {{ pg_new_datadir }}/pg_wal -> {{ pg_new_wal_dir }}"
+      become: true
+      become_user: postgres
+      ansible.builtin.file:
+        src: "{{ pg_new_wal_dir }}"
+        dest: "{{ pg_new_datadir }}/pg_wal"
+        state: link
+
+    - name: "Remove pg_wal_old directory"
+      ansible.builtin.file:
+        path: "{{ pg_new_datadir }}/pg_wal_old"
+        state: absent
+  when: sym.stat.exists and not sym.stat.islnk | bool
diff --git a/automation/roles/upgrade/tasks/dcs_remove_cluster.yml b/automation/roles/upgrade/tasks/dcs_remove_cluster.yml
new file mode 100644
index 000000000..9a848dbf1
--- /dev/null
+++ b/automation/roles/upgrade/tasks/dcs_remove_cluster.yml
@@ -0,0 +1,11 @@
+---
+- name: Remove existing cluster "{{ patroni_cluster_name }}" from DCS
+  ansible.builtin.expect:
+    command: "patronictl -c {{ patroni_config_file }} remove {{ patroni_cluster_name }}"
+    responses:
+      (.*)Please confirm the cluster name to remove: "{{ patroni_cluster_name }}"
+      (.*)"Yes I am aware": "Yes I am aware"
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin"
+  when:
+    - inventory_hostname in groups['primary']
diff --git a/automation/roles/upgrade/tasks/extensions.yml b/automation/roles/upgrade/tasks/extensions.yml
new file mode 100644
index 000000000..79a9ecc38
--- /dev/null
+++ b/automation/roles/upgrade/tasks/extensions.yml
@@ -0,0 +1,23 @@
+---
+- name: Get a list of databases
+  ansible.builtin.command: >-
+    {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select datname from pg_catalog.pg_database where datname <> 'template0'"
+  register: databases_list
+  until: databases_list is success
+  delay: 5
+  retries: 3
+  changed_when: false
+  ignore_errors: true # show the error and continue the playbook execution
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: Update extensions in each database
+  ansible.builtin.include_tasks: update_extensions.yml
+  loop: "{{ databases_list.stdout_lines }}"
+  loop_control:
+    loop_var: pg_target_dbname
+  when:
+    - databases_list is success
+    - databases_list.stdout_lines is defined
+    - databases_list.stdout_lines | length > 0
diff --git a/automation/roles/upgrade/tasks/initdb.yml b/automation/roles/upgrade/tasks/initdb.yml
new file mode 100644
index 000000000..1dc6cb76b
--- /dev/null
+++ b/automation/roles/upgrade/tasks/initdb.yml
@@ -0,0 +1,109 @@
+---
+- name: Make sure new PostgreSQL data directory "{{ pg_new_datadir }}" exists
+  become: true
+  become_user: root
+  ansible.builtin.file:
+    path: "{{ pg_new_datadir }}"
+    state: directory
+    mode: "0700"
+    group: postgres
+    owner: postgres
+
+- name: Make sure new PostgreSQL data directory "{{ pg_new_datadir }}" is not initialized
+  ansible.builtin.stat:
+    path: "{{ pg_new_datadir }}/PG_VERSION"
+  register: pgdata_initialized
+
+- block: # if already initialized
+    - name: Perform pg_dropcluster
+      ansible.builtin.command: >
+        /usr/bin/pg_dropcluster --stop {{ pg_new_version }} {{ postgresql_cluster_name }}
+      failed_when: false
+      when:
+        - ansible_os_family == "Debian"
+        - pg_new_confdir != pg_new_datadir
+
+    - name: Clear the new PostgreSQL data directory "{{ pg_new_datadir }}"
+      ansible.builtin.file:
+        path: "{{ pg_new_datadir }}"
+        state: "{{ item }}"
+        mode: "0700"
+        group: postgres
+        owner: postgres
+      loop:
+        - absent
+        - directory
+  when:
+    - pgdata_initialized.stat.exists is defined
+    - pgdata_initialized.stat.exists
+
+- name: Get the current install user
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select rolname from pg_roles where oid = 10"
+  changed_when: false
+  register: pg_install_user
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: Get the current encoding and data_checksums settings
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "
+    select row_to_json(pg_settings)
+    from (
+    select
+    (select datcollate from pg_database where datname = current_database()) as lc_collate,
+    (select datctype from pg_database where datname = current_database()) as lc_ctype,
+    current_setting('lc_messages') as lc_messages,
+    current_setting('lc_monetary') as lc_monetary,
+    current_setting('lc_numeric') as lc_numeric,
+    current_setting('lc_time') as lc_time,
+    current_setting('server_encoding') as server_encoding,
+    current_setting('data_checksums') as data_checksums
+    ) pg_settings"
+  changed_when: false
+  register: pg_settings
+  when:
+    - inventory_hostname in groups['primary']
+
+# for Debian based use pg_createcluster, if the PostgreSQL configuration is not located in the data directory.
+# Note: Patroni failure is possible if the default postgresql config files are missing in the /etc/postgresql/...
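+# Illustrative example (hypothetical values): for pg_new_version=16 and postgresql_cluster_name=main,
+# the task below runs roughly:
+#   pg_createcluster 16 main --datadir=/var/lib/postgresql/16/main --encoding=UTF8 --start-conf=manual -- --data-checksums
+# with the user and locale options derived from the old cluster's settings collected above.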
+- name: Initialize new PostgreSQL data directory with default config files + ansible.builtin.command: > + /usr/bin/pg_createcluster {{ pg_new_version }} {{ postgresql_cluster_name }} + --user={{ hostvars[groups['primary'][0]].pg_install_user.stdout }} + --datadir={{ pg_new_datadir }} + --encoding={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).server_encoding }} + --lc-collate={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_collate }} + --lc-ctype={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_ctype }} + --lc-messages={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_messages }} + --lc-monetary={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_monetary }} + --lc-numeric={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_numeric }} + --lc-time={{ (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).lc_time }} + --start-conf=manual + {% if (hostvars[groups['primary'][0]].pg_settings.stdout | from_json).data_checksums == 'on' %} + -- --data-checksums + {% endif %} + when: + - ansible_os_family == "Debian" + - pg_new_confdir != pg_new_datadir + +# Use initdb, if the PostgreSQL configuration is located in the data directory. +- name: Initialize new PostgreSQL data directory on the Primary + ansible.builtin.command: > + {{ pg_new_bindir }}/initdb + --username={{ pg_install_user.stdout }} + --pgdata={{ pg_new_datadir }} + --encoding={{ (pg_settings.stdout | from_json).server_encoding }} + --lc-collate={{ (pg_settings.stdout | from_json).lc_collate }} + --lc-ctype={{ (pg_settings.stdout | from_json).lc_ctype }} + --lc-messages={{ (pg_settings.stdout | from_json).lc_messages }} + --lc-monetary={{ (pg_settings.stdout | from_json).lc_monetary }} + --lc-numeric={{ (pg_settings.stdout | from_json).lc_numeric }} + --lc-time={{ (pg_settings.stdout | from_json).lc_time }} + {% if (pg_settings.stdout | from_json).data_checksums == 'on' %} + --data-checksums + {% endif %} + when: + - inventory_hostname in groups['primary'] + - pg_new_confdir == pg_new_datadir diff --git a/automation/roles/upgrade/tasks/maintenance_disable.yml b/automation/roles/upgrade/tasks/maintenance_disable.yml new file mode 100644 index 000000000..1c95ed88a --- /dev/null +++ b/automation/roles/upgrade/tasks/maintenance_disable.yml @@ -0,0 +1,69 @@ +--- +# Disable maintenance mode for HAProxy (Type A scheme) +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true' +- block: + - name: Update haproxy conf file (enable http-checks) + ansible.builtin.template: + src: ../haproxy/templates/haproxy.cfg.j2 # use the haproxy role template + dest: /etc/haproxy/haproxy.cfg + owner: haproxy + group: haproxy + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + + - name: Reload haproxy service + ansible.builtin.systemd: + name: haproxy + state: reloaded + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + + - name: Start confd service + ansible.builtin.service: + name: confd + state: started + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + when: dcs_type == "etcd" + + become: true + become_user: root + ignore_errors: true # show the error and continue the playbook execution + when: + - groups.balancers | default([]) | length > 0 + - with_haproxy_load_balancing | bool + - pgbouncer_install | bool + - pgbouncer_pool_pause | bool + +# Disable maintenance mode 
for vip-manager (Type B scheme) +- block: + - name: Update vip-manager service file (uncomment 'ExecStopPost') + ansible.builtin.replace: + path: /etc/systemd/system/vip-manager.service + regexp: "#ExecStopPost=/sbin/ip addr del {{ vip_manager_ip }}/{{ vip_manager_mask }} dev {{ vip_manager_iface }}" + replace: "ExecStopPost=/sbin/ip addr del {{ vip_manager_ip }}/{{ vip_manager_mask }} dev {{ vip_manager_iface }}" + + - name: Start vip-manager service + ansible.builtin.service: + name: vip-manager + daemon_reload: true + state: started + + - name: Make sure that the cluster ip address (VIP) "{{ cluster_vip }}" is running + ansible.builtin.wait_for: + host: "{{ cluster_vip }}" + port: "{{ pgbouncer_listen_port }}" + state: started + timeout: 30 + delay: 2 + become: true + become_user: root + ignore_errors: true # show the error and continue the playbook execution + when: + - not with_haproxy_load_balancing | bool + - cluster_vip | length > 0 + - pgbouncer_install | bool + - pgbouncer_pool_pause | bool diff --git a/automation/roles/upgrade/tasks/maintenance_enable.yml b/automation/roles/upgrade/tasks/maintenance_enable.yml new file mode 100644 index 000000000..7f9968d78 --- /dev/null +++ b/automation/roles/upgrade/tasks/maintenance_enable.yml @@ -0,0 +1,116 @@ +--- +# Disable auto failover – we need this to be able to stop leader before its standbys +- name: Pause Patroni cluster (enable maintenance mode) + become: true + become_user: postgres + ansible.builtin.command: "patronictl -c {{ patroni_config_file }} pause --wait {{ patroni_cluster_name }}" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + register: pause_result + failed_when: "'Cluster is already paused' not in pause_result.stderr and pause_result.rc != 0" + when: + - inventory_hostname in groups['primary'] + +# Enable maintenance mode for HAProxy (Type A scheme) +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true' +# Temporarily disable http-checks in order to keep database connections after stopping the Patroni service +# and then pause the pgbouncer pools. +- block: + - name: Gather facts from balancers + ansible.builtin.setup: + delegate_to: "{{ item }}" + delegate_facts: true + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + + - name: Stop confd service + ansible.builtin.service: + name: confd + state: stopped + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + when: dcs_type == "etcd" + + - name: Update haproxy conf file (disable http-checks) + ansible.builtin.template: + src: templates/haproxy-no-http-checks.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + owner: haproxy + group: haproxy + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + + - name: Reload haproxy service + ansible.builtin.systemd: + name: haproxy + state: reloaded + delegate_to: "{{ item }}" + loop: "{{ groups.balancers | default([]) | list }}" + run_once: true + become: true + become_user: root + when: + - groups.balancers | default([]) | length > 0 + - with_haproxy_load_balancing | bool + - pgbouncer_install | bool + - pgbouncer_pool_pause | bool + +# Enable maintenance mode for vip-manager (Type B scheme) +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true' +# Temporarily disable vip-manager service to keep database connections after stopping the Patroni service +# and then pause the pgbouncer pools. 
+# This prevents the VIP from being removed when the Patroni leader is unavailable during maintenance.
+- block:
+    - name: Update vip-manager service file (comment out 'ExecStopPost')
+      ansible.builtin.replace:
+        path: /etc/systemd/system/vip-manager.service
+        regexp: "ExecStopPost=/sbin/ip addr del {{ vip_manager_ip }}/{{ vip_manager_mask }} dev {{ vip_manager_iface }}"
+        replace: "#ExecStopPost=/sbin/ip addr del {{ vip_manager_ip }}/{{ vip_manager_mask }} dev {{ vip_manager_iface }}"
+
+    - name: Stop vip-manager service
+      ansible.builtin.service:
+        name: vip-manager
+        daemon_reload: true
+        state: stopped
+
+    - name: Make sure that the cluster ip address (VIP) "{{ cluster_vip }}" is running
+      ansible.builtin.wait_for:
+        host: "{{ cluster_vip }}"
+        port: "{{ pgbouncer_listen_port }}"
+        state: started
+        timeout: 30
+        delay: 2
+  become: true
+  become_user: root
+  when:
+    - not with_haproxy_load_balancing | bool
+    - cluster_vip | length > 0
+    - pgbouncer_install | bool
+    - pgbouncer_pool_pause | bool
+
+# Stop Patroni
+- name: Stop Patroni service
+  become: true
+  become_user: root
+  ansible.builtin.service:
+    name: patroni
+    state: stopped
+
+- name: Wait until the Patroni cluster is stopped
+  ansible.builtin.shell: |
+    set -o pipefail;
+    patronictl -c {{ patroni_config_file }} list -f json | grep -cv '^\[\]$'
+  args:
+    executable: /bin/bash
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin"
+  register: patronictl_result
+  until: patronictl_result.stdout|int == 0
+  retries: 30 # max duration 5 minutes
+  delay: 10
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
diff --git a/automation/roles/upgrade/tasks/packages.yml b/automation/roles/upgrade/tasks/packages.yml
new file mode 100644
index 000000000..f74001797
--- /dev/null
+++ b/automation/roles/upgrade/tasks/packages.yml
@@ -0,0 +1,39 @@
+---
+# Update dnf cache
+- name: Update dnf cache
+  ansible.builtin.shell: dnf clean all && dnf -y makecache
+  args:
+    executable: /bin/bash
+  register: dnf_status
+  until: dnf_status is success
+  delay: 5
+  retries: 3
+  when: ansible_os_family == "RedHat"
+
+# Update apt cache
+- name: Update apt cache
+  ansible.builtin.apt:
+    update_cache: true
+    cache_valid_time: 3600
+  register: apt_status
+  until: apt_status is success
+  delay: 5
+  retries: 3
+  when: ansible_os_family == "Debian"
+
+# Install PostgreSQL packages
+- name: "Install PostgreSQL {{ pg_new_version }} packages"
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: latest
+  loop: "{{ pg_new_packages }}"
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+
+# Install Extensions packages
+- name: "Install Extensions packages for PostgreSQL {{ pg_new_version }}"
+  ansible.builtin.import_tasks: "../../packages/tasks/extensions.yml"
+  vars:
+    pg_version: "{{ pg_new_version }}"
diff --git a/automation/roles/upgrade/tasks/pgbouncer_pause.yml b/automation/roles/upgrade/tasks/pgbouncer_pause.yml
new file mode 100644
index 000000000..7d7b8b793
--- /dev/null
+++ b/automation/roles/upgrade/tasks/pgbouncer_pause.yml
@@ -0,0 +1,139 @@
+# yamllint disable rule:line-length
+---
+# Perform PAUSE in parallel on all pgbouncer servers
+#
+# This script performs the following actions:
+# 1. Waits for active queries on the database servers to complete (with a runtime longer than 'pg_slow_active_query_treshold').
+# 2. If there are no active queries, sends a PAUSE command to each pgbouncer server in the pgb_servers list (in parallel to all servers).
+# 3. If all pgbouncers are successfully paused, the script exits.
+# 4. If active queries do not complete within 30 seconds, the script forcibly terminates slow active queries using pg_slow_active_terminate_query.
+# 5. If after that it is still not possible to pause the pgbouncer servers within 60 seconds from the start of the script, the script exits with an error.
+#
+# The script uses the 'pause_results' array to track the results of executing the PAUSE command on each pgbouncer server.
+# The 'timeout 2' command is used to set a timeout for the execution of the 'pgb_pause_command'.
+# If the execution of the 'pgb_pause_command' does not finish within 2 seconds,
+# the timeout command will interrupt it, and the script then executes 'pgb_resume_command' to remove the pause and ensure atomicity.
+#
+# Finally, the script checks whether all servers have been successfully paused by comparing the number of successful PAUSE executions to the total number of pgbouncer servers.
+
+- name: Ensure correct permissions for PgBouncer unix socket directory
+  become: true
+  become_user: root
+  ansible.builtin.file:
+    path: "/var/run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}"
+    state: directory
+    owner: postgres
+    group: postgres
+    mode: "0755"
+  loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}"
+  loop_control:
+    index_var: idx
+    label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}"
+
+- name: PAUSE PgBouncer pools
+  become: true
+  become_user: postgres
+  vars:
+    pg_slow_active_count_query: >-
+      select count(*) from pg_stat_activity
+      where pid <> pg_backend_pid()
+      and state <> 'idle'
+      and query_start < clock_timestamp() - interval '{{ pg_slow_active_query_treshold }} ms'
+      {{ "and backend_type = 'client backend'" if pg_old_version | string is version('10', '>=') else '' }}
+    pg_slow_active_terminate_query: >-
+      select
+      clock_timestamp(),
+      pg_terminate_backend(pid),
+      clock_timestamp() - query_start as query_age,
+      left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),150) as query
+      from pg_stat_activity
+      where pid <> pg_backend_pid()
+      and state <> 'idle'
+      and query_start < clock_timestamp() - interval '{{ pg_slow_active_query_treshold_to_terminate }} ms'
+      {{ "and backend_type = 'client backend'" if pg_old_version | string is version('10', '>=') else '' }}
+    pgb_unix_socket_dirs: >-
+      {% set unix_socket_dir = ['/var/run/pgbouncer'] %}
+      {%- for idx in range(1, pgbouncer_processes | default(1) | int) -%}
+      {% set _ = unix_socket_dir.append('/var/run/pgbouncer-' ~ (idx + 1) | string) %}
+      {%- endfor -%}
+      {{ unix_socket_dir | join(' ') }}
+  ansible.builtin.shell: |
+    set -o pipefail;
+
+    pg_servers="{{ (groups['primary'] + groups['secondary']) | join('\n') }}"
+    pg_servers_count="{{ groups['primary'] | default([]) | length + groups['secondary'] | default([]) | length }}"
+    pg_slow_active_count_query="{{ pg_slow_active_count_query }}"
+    pg_slow_active_terminate_query="{{ pg_slow_active_terminate_query }}"
+    # it is assumed that pgbouncer is installed on database servers
+    pgb_servers="$pg_servers"
+    pgb_servers_count="$pg_servers_count"
+    pgb_count="{{ (groups['primary'] | default([]) | length + groups['secondary'] | default([]) | length) * (pgbouncer_processes | default(1) | int) }}"
+    pgb_pause_command="printf '%s\n' {{ pgb_unix_socket_dirs }} | xargs -I {} -P {{ pgbouncer_processes | default(1) | int }} -n 1 timeout {{ pgbouncer_pool_pause_timeout }} psql -h {} -p {{ pgbouncer_listen_port }} -U {{ patroni_superuser_username }} -d pgbouncer -tAXc 'PAUSE'"
+    pgb_resume_command='kill -SIGUSR2 $(pidof pgbouncer)'
+
+    start_time=$(date +%s)
+    while true; do
+      current_time=$(date +%s)
+      # initialize pgb_paused_count to 0 (we assume that all pgbouncers are not paused)
+      pgb_paused_count=0
+
+      # wait for the active queries to complete on pg_servers
+      IFS=$'\n' pg_slow_active_counts=($(echo -e "$pg_servers" | xargs -I {} -P "$pg_servers_count" -n 1 ssh -o StrictHostKeyChecking=no {} "psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc \"$pg_slow_active_count_query\""))
+
+      # sum up all the values in the array
+      total_pg_slow_active_count=0
+      for count in "${pg_slow_active_counts[@]}"; do
+        total_pg_slow_active_count=$((total_pg_slow_active_count + count))
+      done
+
+      echo "$(date): total pg_slow_active_count: $total_pg_slow_active_count"
+
+      if [[ "$total_pg_slow_active_count" == 0 ]]; then
+        # pause pgbouncer on all pgb_servers. We send via ssh to all pgbouncers in parallel and collect results from all (maximum wait time 2 seconds)
+        IFS=$'\n' pause_results=($(echo -e "$pgb_servers" | xargs -I {} -P "$pgb_servers_count" -n 1 ssh -o StrictHostKeyChecking=no {} "$pgb_pause_command 2>&1 || true"))
+        echo "${pause_results[*]}"
+        # analyze the pause_results array to count the number of paused pgbouncers
+        pgb_paused_count=$(echo "${pause_results[*]}" | grep -o -e "PAUSE" -e "already suspended/paused" | wc -l)
+        echo "$(date): pgb_count: $pgb_count, pgb_paused: $pgb_paused_count"
+      fi
+
+      # make sure that the pause is performed on all pgbouncer servers, to ensure atomicity
+      if [[ "$pgb_paused_count" -eq "$pgb_count" ]]; then
+        break # pause is performed on all pgb_servers, exit from the loop
+      elif [[ "$pgb_paused_count" -gt 0 && "$pgb_paused_count" -ne "$pgb_count" ]]; then
+        # pause is not performed on all pgb_servers, perform resume (we do not use timeout because we must resume all pgbouncers)
+        IFS=$'\n' resume_results=($(echo -e "$pgb_servers" | xargs -I {} -P "$pgb_servers_count" -n 1 ssh -o StrictHostKeyChecking=no {} "$pgb_resume_command 2>&1 || true"))
+        echo "${resume_results[*]}"
+      fi
+
+      # after 30 seconds of waiting, terminate active sessions on pg_servers and try pausing again
+      if (( current_time - start_time >= {{ pgbouncer_pool_pause_terminate_after }} )); then
+        echo "$(date): terminate active queries"
+        echo -e "$pg_servers" | xargs -I {} -P "$pg_servers_count" -n 1 ssh -o StrictHostKeyChecking=no {} "psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc \"$pg_slow_active_terminate_query\""
+      fi
+
+      # if it was not possible to pause for 60 seconds, exit with an error
+      if (( current_time - start_time >= {{ pgbouncer_pool_pause_stop_after }} )); then
+        echo "$(date): it was not possible to pause (exit by timeout)"
+        exit 1
+      fi
+    done > /tmp/pgbouncer_pool_pause_{{ ansible_date_time.date }}.log
+  args:
+    executable: /bin/bash
+  register: pgbouncer_pool_pause_result
+  ignore_errors: true
+  when: inventory_hostname in groups['primary']
+
+# Stop, if it was not possible to put the pools on pause
+- block:
+    - name: Perform rollback
+      ansible.builtin.include_tasks: rollback.yml
+
+    - name: "ERROR: PgBouncer pools cannot be paused"
+      ansible.builtin.fail:
+        msg:
+          - "PgBouncer pools could not be paused, please try again later."
+          - "The log is available on the path: /tmp/pgbouncer_pool_pause_{{ ansible_date_time.date }}.log"
+          - "on the {{ hostvars[groups['primary'][0]]['ansible_hostname'] }} server."
+      run_once: true
+  when: hostvars[groups['primary'][0]].pgbouncer_pool_pause_result is failed
diff --git a/automation/roles/upgrade/tasks/pgbouncer_resume.yml b/automation/roles/upgrade/tasks/pgbouncer_resume.yml
new file mode 100644
index 000000000..0de7bd407
--- /dev/null
+++ b/automation/roles/upgrade/tasks/pgbouncer_resume.yml
@@ -0,0 +1,9 @@
+---
+# Perform RESUME on the pgbouncer servers
+- name: RESUME PgBouncer pools
+  become: true
+  become_user: postgres
+  ansible.builtin.shell: kill -SIGUSR2 $(pidof pgbouncer)
+  args:
+    executable: /bin/bash
+  ignore_errors: true # if there is an error, show the message and continue
diff --git a/automation/roles/upgrade/tasks/post_checks.yml b/automation/roles/upgrade/tasks/post_checks.yml
new file mode 100644
index 000000000..f1396d737
--- /dev/null
+++ b/automation/roles/upgrade/tasks/post_checks.yml
@@ -0,0 +1,85 @@
+---
+- name: Make sure that physical replication is active
+  ansible.builtin.command: >-
+    {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select count(*) from pg_stat_replication
+    where application_name != 'pg_basebackup'"
+  register: pg_replication_state
+  until: pg_replication_state.stdout | int > 0
+  retries: 30 # max wait time: 1 minute
+  delay: 2
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+
+# Error, if no active replication connections found.
+- name: "Post-Check error. No active replication connections found."
+  ansible.builtin.debug:
+    msg:
+      - "No active replication connections found."
+      - "Please check the replication status and PostgreSQL logs."
+  failed_when: pg_replication_state.stdout | int == 0
+  ignore_errors: true # show the error and continue the playbook execution
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_replication_state.stdout | int == 0
+
+- name: Create a table "test_replication" with 10000 rows on the Primary
+  ansible.builtin.command: >-
+    {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "drop table IF EXISTS test_replication;
+    create table test_replication as select generate_series(1, 10000)"
+  register: create_table_result
+  until: create_table_result is success
+  delay: 5
+  retries: 3
+  ignore_errors: true # show the error and continue the playbook execution
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: Wait until the PostgreSQL replica is synchronized
+  ansible.builtin.command: >-
+    {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select count(*) from test_replication"
+  register: count_test
+  until: count_test.stdout | int == 10000
+  retries: 60 # max wait time: 2 minutes
+  delay: 2
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['secondary']
+    - create_table_result is success
+
+- name: Drop a table "test_replication"
+  ansible.builtin.command: >-
+    {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "drop table IF EXISTS test_replication"
+  when:
+    - inventory_hostname in groups['primary']
+    - create_table_result is success
+
+- name: Print the result of checking the number of records
+  ansible.builtin.debug:
+    msg:
+      - "The PostgreSQL Replication is OK for replica {{ ansible_hostname }}"
+      - "The number of records in the test_replication table is the same as on the Primary ({{ count_test.stdout }} rows)"
+  when:
+    - inventory_hostname in groups['secondary']
+    -
count_test.stdout is defined + - count_test.stdout | int == 10000 + +# Error, if the number of records in the "test_replication" table does not match the Primary. +- name: "Post-Check error. The number of records does not match" + ansible.builtin.debug: + msg: + - "The PostgreSQL Replication is NOT OK for replica {{ ansible_hostname }}" + - "The number of records in the test_replication table does not match the Primary ({{ count_test.stdout }} rows)." + - "Please check the replication status and PostgreSQL logs." + failed_when: count_test.stdout | int != 10000 + ignore_errors: true # show the error and continue the playbook execution + when: + - inventory_hostname in groups['secondary'] + - count_test.stdout is defined + - count_test.stdout | int != 10000 diff --git a/automation/roles/upgrade/tasks/post_upgrade.yml b/automation/roles/upgrade/tasks/post_upgrade.yml new file mode 100644 index 000000000..9be08909f --- /dev/null +++ b/automation/roles/upgrade/tasks/post_upgrade.yml @@ -0,0 +1,239 @@ +--- +- name: Ensure the current data directory is the new data directory + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "show data_directory" + register: pg_current_datadir + until: pg_current_datadir is success + delay: 5 + retries: 3 + changed_when: false + ignore_errors: true # show the error and continue the playbook execution + +# RedHat based +- name: Delete the old PostgreSQL data directory + ansible.builtin.file: + path: "{{ pg_old_datadir }}" + state: absent + when: + - pg_current_datadir is success + - pg_new_datadir == pg_current_datadir.stdout | trim + - ansible_os_family == "RedHat" + +# Debian based (use pg_dropcluster) +- name: Delete the old PostgreSQL data directory (perform pg_dropcluster) + ansible.builtin.command: > + /usr/bin/pg_dropcluster {{ pg_old_version }} {{ postgresql_cluster_name }} + failed_when: false + when: + - pg_current_datadir is success + - pg_new_datadir == pg_current_datadir.stdout | trim + - ansible_os_family == "Debian" + +# if pg_new_wal_dir is defined +- name: Delete the old PostgreSQL WAL directory + ansible.builtin.file: + path: "{{ postgresql_wal_dir | regex_replace('(/$)', '') | replace(postgresql_version | string, pg_old_version | string) }}" + state: absent + when: + - pg_current_datadir is success + - postgresql_wal_dir | length > 0 + - pg_new_wal_dir | length > 0 + +# RedHat based +- name: Remove old PostgreSQL packages + become: true + become_user: root + ansible.builtin.package: + name: "{{ item }}" + state: absent + loop: "{{ postgresql_packages | replace(postgresql_version | string, pg_old_version | string) }}" + register: package_remove + until: package_remove is success + delay: 5 + retries: 3 + ignore_errors: true # show the error and continue the playbook execution + when: + - item | string is search(pg_old_version | string) + - pg_old_packages_remove | bool + - ansible_os_family == "RedHat" + +# Debian based (use purge option) +- name: Remove old PostgreSQL packages + become: true + become_user: root + ansible.builtin.apt: + name: "{{ item }}" + state: absent + purge: true + loop: "{{ postgresql_packages | replace(postgresql_version | string, pg_old_version | string) }}" + register: apt_remove + until: apt_remove is success + delay: 5 + retries: 3 + ignore_errors: true # show the error and continue the playbook execution + when: + - item | string is search(pg_old_version | string) + - pg_old_packages_remove | bool + - ansible_os_family == "Debian" + 
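+# Note: the 'replace' filter in the two removal tasks above rewrites the templated
+# package list to the old major version's package names; e.g. with a hypothetical
+# postgresql_packages of ["postgresql-16", "postgresql-client-16"] and pg_old_version=15,
+# the loop would remove "postgresql-15" and "postgresql-client-15".
+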
+# Return the pg_hba.conf file to its original state (if it has been changed) +- block: + - name: Remove temporary local access rule from pg_hba.conf + ansible.builtin.blockinfile: + path: "{{ pg_new_confdir }}/pg_hba.conf" + marker: "# {mark} ANSIBLE TEMPORARY pg_upgrade RULE" + state: absent + + - name: Update the PostgreSQL configuration + ansible.builtin.command: "{{ pg_new_bindir }}/pg_ctl reload -D {{ pg_new_datadir }}" + ignore_errors: true # show the error and continue the playbook execution + when: + - socket_access_result.stderr is defined + - "'no pg_hba.conf entry' in socket_access_result.stderr" + +# pgBackRest (local) +- block: + - name: pgbackrest | Check pg-path option + ansible.builtin.command: "grep -c '^pg[0-9]*-path=' {{ pgbackrest_conf_file }}" + register: pg_path_count + changed_when: false + + - name: pgbackrest | Update pg-path in pgbackrest.conf + ansible.builtin.replace: + path: "{{ pgbackrest_conf_file }}" + regexp: "^pg{{ idx + 1 }}-path=.*$" + replace: "pg{{ idx + 1 }}-path={{ pg_new_datadir }}" + loop: "{{ range(0, pg_path_count.stdout | int) | list }}" + loop_control: + index_var: idx + label: "pg{{ idx + 1 }}-path={{ pg_new_datadir }}" + when: pg_path_count.stdout | int > 0 + + - name: pgbackrest | Upgrade stanza "{{ pgbackrest_stanza }}" + ansible.builtin.command: "pgbackrest --stanza={{ pgbackrest_stanza }} --no-online stanza-upgrade" + when: pg_path_count.stdout | int > 0 and pgbackrest_stanza_upgrade | bool and pgbackrest_repo_host | length < 1 + become: true + become_user: postgres + ignore_errors: true # show the error and continue the playbook execution + when: + - pgbackrest_install | bool + +# pgBackRest (dedicated) +- block: + - name: pgbackrest | Check pg-path option + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.command: "grep -c '^pg[0-9]*-path=' {{ pgbackrest_conf_file | dirname }}/conf.d/{{ pgbackrest_stanza }}.conf" + register: pg_path_count + changed_when: false + + - name: pgbackrest | Update pg-path in pgbackrest.conf + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.replace: + path: "{{ pgbackrest_conf_file | dirname }}/conf.d/{{ pgbackrest_stanza }}.conf" + regexp: "^pg{{ idx + 1 }}-path=.*$" + replace: "pg{{ idx + 1 }}-path={{ pg_new_datadir }}" + loop: "{{ range(0, pg_path_count.stdout | int) | list }}" + loop_control: + index_var: idx + label: "pg{{ idx + 1 }}-path={{ pg_new_datadir }}" + when: pg_path_count.stdout | int > 0 + + - name: pgbackrest | Upgrade stanza "{{ pgbackrest_stanza }}" + delegate_to: "{{ groups['pgbackrest'][0] }}" + run_once: true + ansible.builtin.command: "pgbackrest --stanza={{ pgbackrest_stanza }} --no-online stanza-upgrade" + when: pg_path_count.stdout | int > 0 and pgbackrest_stanza_upgrade | bool + become: true + become_user: "{{ pgbackrest_repo_user }}" + ignore_errors: true # show the error and continue the playbook execution + when: + - pgbackrest_install | bool + - pgbackrest_repo_host | length > 0 + +# WAL-G +- block: + - name: "WAL-G | Update PostgreSQL data directory path in .walg.json" + ansible.builtin.replace: + path: "{{ postgresql_home_dir }}/.walg.json" + regexp: "{{ postgresql_data_dir | replace(postgresql_version | string, pg_old_version | string) }}" + replace: "{{ postgresql_data_dir | replace(postgresql_version | string, pg_new_version | string) }}" + + - name: "WAL-G | Update PostgreSQL data directory path in cron jobs" + ansible.builtin.replace: + path: "{{ wal_g_cron_jobs[0].file | default('/etc/cron.d/walg') }}" + 
regexp: "{{ postgresql_data_dir | replace(postgresql_version | string, pg_old_version | string) }}" + replace: "{{ postgresql_data_dir | replace(postgresql_version | string, pg_new_version | string) }}" + become: true + become_user: root + ignore_errors: true # show the error and continue the playbook execution + when: wal_g_install | bool + +# Wait for the analyze to complete +- name: "Collecting statistics in progress. Wait for the analyze to complete." + ansible.builtin.async_status: + jid: "{{ vacuumdb_analyze.ansible_job_id }}" + register: vacuumdb_analyze_job_result + until: vacuumdb_analyze_job_result.finished + retries: "{{ (vacuumdb_analyze_timeout | int) // 10 }}" # max wait time + delay: 10 + ignore_errors: true # ignore errors if the task runs for over an vacuumdb_analyze_timeout + when: + - vacuumdb_analyze is defined + - vacuumdb_analyze.ansible_job_id is defined + +- name: "Stop pg_terminator script" + ansible.builtin.shell: | + while read pid; do + if ps -p $pid > /dev/null 2>&1; then + echo "Stopping pg_terminator with pid: $pid" >> /tmp/pg_terminator.log + kill -9 $pid + else + echo "No process found for pid: $pid" >> /tmp/pg_terminator.log + fi + done < /tmp/pg_terminator.pid + args: + executable: /bin/bash + ignore_errors: true # show the error and continue the playbook execution + when: (pg_terminator_analyze is defined and pg_terminator_analyze is changed) or + (pg_terminator_long_transactions is defined and pg_terminator_long_transactions is changed) + +# finish (info) +- name: Check the Patroni cluster state + run_once: true + become: true + become_user: postgres + ansible.builtin.command: "patronictl -c {{ patroni_config_file }} list" + register: patronictl_result + changed_when: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + when: inventory_hostname in groups['primary'] + +- name: Check the current PostgreSQL version + run_once: true + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select current_setting('server_version')" + register: postgres_version + until: postgres_version is success + delay: 5 + retries: 3 + changed_when: false + when: inventory_hostname in groups['primary'] + +- name: List the Patroni cluster members + run_once: true + ansible.builtin.debug: + msg: "{{ patronictl_result.stdout_lines }}" + when: patronictl_result.stdout_lines is defined + +- name: Upgrade completed + run_once: true + ansible.builtin.debug: + msg: + - "PostgreSQL upgrade completed." + - "Current version: {{ postgres_version.stdout }}" + when: postgres_version.stdout is defined diff --git a/automation/roles/upgrade/tasks/pre_checks.yml b/automation/roles/upgrade/tasks/pre_checks.yml new file mode 100644 index 000000000..dd4939584 --- /dev/null +++ b/automation/roles/upgrade/tasks/pre_checks.yml @@ -0,0 +1,399 @@ +--- +# Stop, if pg_old_version, pg_new_version are not defined +- name: Make sure that the required variables are specified + run_once: true + ansible.builtin.debug: + msg: + - "One or more required variables have empty values." 
+ - "Please specify a value for the variables: pg_old_version, pg_new_version" + failed_when: pg_old_version | string | length < 1 or pg_new_version | string | length < 1 + when: pg_old_version | string | length < 1 or pg_new_version | string | length < 1 + +# Stop, if the directories of the old and new versions are the same +- name: "Make sure that the old and new data and config directories do not match" + run_once: true + ansible.builtin.debug: + msg: + - "pg_old_datadir and pg_new_datadir, pg_old_confdir and pg_new_confdir must not be the same." + - "Please check your configuration (upgrade.yml)" + failed_when: (pg_old_datadir == pg_new_datadir) or (pg_old_confdir == pg_new_confdir) + when: (pg_old_datadir == pg_new_datadir) or (pg_old_confdir == pg_new_confdir) + +# required to perform the dcs_remove_cluster.yml +- name: "[Pre-Check] Make sure the ansible required Python library is installed" + ansible.builtin.pip: + name: "{{ item }}" + state: present + executable: pip3 + extra_args: "--trusted-host=pypi.python.org --trusted-host=pypi.org --trusted-host=files.pythonhosted.org" + umask: "0022" + loop: + - pexpect + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin:/usr/bin" + PIP_BREAK_SYSTEM_PACKAGES: "1" + +- name: "[Pre-Check] Test PostgreSQL database access using a unix socket" + ansible.builtin.command: > + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc 'select 1' + register: socket_access_result + changed_when: false + failed_when: "socket_access_result.rc != 0 and 'no pg_hba.conf entry' not in socket_access_result.stderr" + +# if 'no pg_hba.conf entry' +- block: + # Add a temporary local access rule for pg_upgrade to allow the upgrade process to proceed without authentication issues. + # This is necessary to ensure a smooth upgrade process and will be removed after the upgrade is complete. 
+    - name: Add temporary local access rule (during the upgrade)
+      ansible.builtin.blockinfile:
+        path: "{{ pg_old_confdir }}/pg_hba.conf"
+        marker: "# {mark} ANSIBLE TEMPORARY pg_upgrade RULE"
+        insertbefore: BOF
+        content: "local all all trust"
+
+    - name: Update the PostgreSQL configuration
+      ansible.builtin.command: "{{ pg_old_bindir }}/pg_ctl reload -D {{ pg_old_datadir }}"
+  when:
+    - socket_access_result.stderr is defined
+    - "'no pg_hba.conf entry' in socket_access_result.stderr"
+
+- name: "[Pre-Check] Check the current version of PostgreSQL"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select setting::integer/10000 from pg_settings where name = 'server_version_num'"
+  register: pg_current_version
+  changed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '>=')
+
+# for compatibility with Postgres 9.x
+- name: "[Pre-Check] Check the current version of PostgreSQL"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select substring(setting from '^[0-9]+\.[0-9]+') from pg_settings where name = 'server_version'"
+  register: pg_current_version_9x
+  changed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '<')
+
+- name: "Set variable 'current_pg_version'"
+  ansible.builtin.set_fact:
+    current_pg_version: "{{ pg_current_version.stdout if pg_old_version | string is version('10', '>=') else pg_current_version_9x.stdout }}"
+  when:
+    - inventory_hostname in groups['primary']
+
+# Stop, if the current version does not match pg_old_version
+- name: "Pre-Check error. An incorrect version of PostgreSQL may have been specified"
+  ansible.builtin.fail:
+    msg:
+      - "The current version of PostgreSQL is {{ current_pg_version }}"
+      - "Make sure that you have specified the correct version in the pg_old_version variable."
+  when:
+    - inventory_hostname in groups['primary']
+    - current_pg_version is not version (pg_old_version, '==')
+
+# Stop, if the current version is greater than or equal to pg_new_version
+- name: "Pre-Check error. An incorrect target version of PostgreSQL may have been specified"
+  ansible.builtin.fail:
+    msg:
+      - "The current version of PostgreSQL is {{ current_pg_version }}, no upgrade is needed."
+      - "Or, make sure that you have specified the correct version in the pg_new_version variable."
+  when:
+    - inventory_hostname in groups['primary']
+    - current_pg_version is version (pg_new_version, '>=')
+
+# This check is necessary to avoid the risk of deleting the current data directory:
+# the current directory must not be equal to the path specified in the pg_new_datadir variable,
+# which will later be cleaned up before initdb is executed for the new version of PostgreSQL
+- name: "[Pre-Check] Ensure new data directory is different from the current one"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "show data_directory"
+  changed_when: false
+  register: pg_current_datadir
+  when:
+    - inventory_hostname in groups['primary']
+
+# Stop, if the current data directory is the same as pg_new_datadir
+- name: "Pre-Check error. The current data directory is the same as the new data directory"
+  ansible.builtin.fail:
+    msg:
+      - "The new data directory ({{ pg_new_datadir }}) must be different from the current one ({{ pg_current_datadir.stdout | trim }})"
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_new_datadir == pg_current_datadir.stdout | trim
+
+# Stop, if the current WAL directory is the same as pg_new_wal_dir
+- name: "Pre-Check error. The current WAL directory is the same as the new WAL directory"
+  ansible.builtin.fail:
+    msg:
+      - "The new WAL directory ({{ pg_new_wal_dir }}) must be different from the current one ({{ postgresql_wal_dir }})"
+      - "Please specify a different path for the 'pg_new_wal_dir' variable."
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_new_wal_dir | length > 0 and pg_new_wal_dir == postgresql_wal_dir
+
+- name: "[Pre-Check] Make sure that physical replication is active"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select count(*) from pg_stat_replication where application_name != 'pg_basebackup'"
+  register: pg_replication_state
+  changed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+
+# Stop, if there are no active replicas
+- name: "Pre-Check error. Print physical replication state"
+  ansible.builtin.fail:
+    msg: "There are no active replica servers (pg_stat_replication returned 0 entries)."
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_replication_state.stdout | int == 0
+
+- name: "[Pre-Check] Make sure there is no high replication lag (more than {{ max_replication_lag_bytes | human_readable }})"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select pg_wal_lsn_diff(pg_current_wal_lsn(),replay_lsn) as pg_lag_bytes
+    from pg_stat_replication
+    order by pg_lag_bytes desc limit 1"
+  register: pg_lag_bytes
+  changed_when: false
+  failed_when: false
+  until: pg_lag_bytes.stdout|int < max_replication_lag_bytes|int
+  retries: 30 # max wait time: 2.5 minutes
+  delay: 5
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '>=')
+
+# Stop, if replication lag is high
+- name: "Pre-Check error. High replication lag"
+  ansible.builtin.fail:
+    msg:
+      - "High replication lag ({{ pg_lag_bytes.stdout | int | human_readable }}) on the Patroni Cluster"
+      - "Please try again later."
+  when:
+    - pg_lag_bytes.stdout is defined
+    - pg_lag_bytes.stdout|int >= max_replication_lag_bytes|int
+
+# for compatibility with Postgres 9.x
+- name: "[Pre-Check] Make sure there is no high replication lag (more than {{ max_replication_lag_bytes | human_readable }})"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select pg_xlog_location_diff(pg_current_xlog_location(),replay_location) as pg_lag_bytes
+    from pg_stat_replication
+    order by pg_lag_bytes desc limit 1"
+  register: pg_lag_bytes_9x
+  changed_when: false
+  failed_when: false
+  until: pg_lag_bytes_9x.stdout|int < max_replication_lag_bytes|int
+  retries: 30 # max wait time: 2.5 minutes
+  delay: 5
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '<')
+
+# Stop, if replication lag is high (for 9x)
+- name: "Pre-Check error. 
High replication lag" + ansible.builtin.fail: + msg: + - "High replication lag ({{ pg_lag_bytes_9x.stdout | int | human_readable }}) on the Patroni Cluster" + - "Please try again later." + when: + - pg_lag_bytes_9x.stdout is defined + - pg_lag_bytes_9x.stdout|int >= max_replication_lag_bytes|int + +- name: "[Pre-Check] Make sure there are no long-running transactions (more than {{ max_transaction_sec }} seconds)" + ansible.builtin.command: >- + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pid, usename, client_addr, clock_timestamp() - xact_start as xact_age, + state, wait_event_type ||':'|| wait_event as wait_events, + left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),100) as query + from pg_stat_activity + where clock_timestamp() - xact_start > '{{ max_transaction_sec }} seconds'::interval + and backend_type = 'client backend' and pid <> pg_backend_pid() + order by xact_age desc limit 10" + register: pg_long_transactions + changed_when: false + failed_when: false + until: pg_long_transactions.stdout | length < 1 + retries: 30 # 1 minute + delay: 2 + when: pg_old_version | string is version('10', '>=') + +# Stop, if long-running transactions detected +- block: + - name: "Print long-running (>{{ max_transaction_sec }}s) transactions" + ansible.builtin.debug: + msg: "{{ pg_long_transactions.stdout_lines }}" + + - name: "Pre-Check error. Long-running transactions detected" + ansible.builtin.fail: + msg: long-running transactions detected (more than {{ max_transaction_sec }} seconds), please try again later. + run_once: true + when: + - pg_long_transactions.stdout is defined + - pg_long_transactions.stdout | length > 0 + +# for compatibility with Postgres 9.x +- name: "[Pre-Check] Make sure there are no long-running transactions (more than {{ max_transaction_sec }} seconds)" + ansible.builtin.command: >- + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select pid, usename, client_addr, clock_timestamp() - xact_start as xact_age, + state, left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),100) as query + from pg_stat_activity + where clock_timestamp() - xact_start > '{{ max_transaction_sec }} seconds'::interval + order by xact_age desc limit 10" + register: pg_long_transactions_9x + changed_when: false + failed_when: false + until: pg_long_transactions_9x.stdout | length < 1 + retries: 30 # 1 minute + delay: 2 + when: pg_old_version | string is version('10', '<') + +# Stop, if long-running transactions detected (for 9x) +- block: + - name: "Print long-running (>{{ max_transaction_sec }}s) transactions" + ansible.builtin.debug: + msg: "{{ pg_long_transactions_9x.stdout_lines }}" + + - name: "Pre-Check error. Long-running transactions detected" + ansible.builtin.fail: + msg: long-running transactions detected (more than {{ max_transaction_sec }} seconds), please try again later. 
+  run_once: true
+  when:
+    - pg_long_transactions_9x.stdout is defined
+    - pg_long_transactions_9x.stdout | length > 0
+
+# SSH Keys (required for upgrade replicas with rsync)
+- name: "[Pre-Check] Make sure that SSH key-based authentication is configured between cluster nodes"
+  ansible.builtin.include_tasks: ssh-keys.yml
+  vars:
+    ssh_key_user: postgres
+
+# if pg_new_wal_dir is defined (to synchronize the WAL directory)
+- name: "[Pre-Check] Make sure that the sshpass package is installed"
+  become: true
+  become_user: root
+  ansible.builtin.package:
+    name: sshpass
+    state: present
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+  when: pg_new_wal_dir | length > 0
+
+# Rsync Checks
+- name: "[Pre-Check] Make sure that the rsync package is installed"
+  become: true
+  become_user: root
+  ansible.builtin.package:
+    name: rsync
+    state: present
+  register: package_status
+  until: package_status is success
+  delay: 5
+  retries: 3
+
+- name: "[Pre-Check] Rsync Checks: create testrsync file on Primary"
+  become: true
+  become_user: postgres
+  ansible.builtin.file:
+    path: /tmp/testrsync
+    state: touch
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: "[Pre-Check] Rsync Checks: test rsync and ssh key access"
+  become: true
+  become_user: postgres
+  ansible.builtin.shell: >
+    rsync -e "ssh -o StrictHostKeyChecking=no" --archive --delete --hard-links --size-only --no-inc-recursive --omit-dir-times
+    /tmp/testrsync {{ item }}:/tmp/
+  args:
+    executable: /bin/bash
+  loop: "{{ groups.secondary | list }}"
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: "[Pre-Check] Cleanup testrsync file"
+  become: true
+  become_user: postgres
+  ansible.builtin.file:
+    path: /tmp/testrsync
+    state: absent
+
+# Tablespaces
+- name: "[Pre-Check] Check if PostgreSQL tablespaces exist"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select pg_tablespace_location(oid) as tablespace_location
+    from pg_tablespace
+    where spcname not in ('pg_default','pg_global')"
+  register: tablespace_location
+  changed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: "Print tablespace location"
+  ansible.builtin.debug:
+    var: tablespace_location.stdout_lines
+  when:
+    - inventory_hostname in groups['primary']
+    - tablespace_location.stdout_lines | length > 0
+
+- name: Make sure that the 'recovery.signal' file is absent in the data directory
+  ansible.builtin.file:
+    path: "{{ pg_old_datadir }}/recovery.signal"
+    state: absent
+
+# PgBouncer (if 'pgbouncer_pool_pause' is 'true')
+- name: Ensure correct permissions for PgBouncer unix socket directory
+  become: true
+  become_user: root
+  ansible.builtin.file:
+    path: "/var/run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}"
+    state: directory
+    owner: postgres
+    group: postgres
+    mode: "0755"
+  loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}"
+  loop_control:
+    index_var: idx
+    label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}"
+  when:
+    - pgbouncer_install | bool
+    - pgbouncer_pool_pause | bool
+
+# Test access via unix socket to be able to perform 'PAUSE' command
+- name: "[Pre-Check] Test PgBouncer access via unix socket"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -h /var/run/pgbouncer{{ '-%d' % (idx + 1) if idx > 0 else '' }}
+    -p {{ pgbouncer_listen_port }}
+    -U {{ patroni_superuser_username }}
+    -d pgbouncer
+    -tAXc "SHOW POOLS"
+  loop: "{{ range(0, (pgbouncer_processes | default(1) | int)) | list }}"
+  loop_control:
+    index_var: idx
+    label: "{{ 'pgbouncer' if idx == 0 else 'pgbouncer-%d' % (idx + 1) }}"
+  changed_when: false
+  when:
+    - pgbouncer_install | bool
+    - pgbouncer_pool_pause | bool
+
+# Check the VIP address
+- name: Make sure that the cluster IP address (VIP) "{{ cluster_vip }}" is available
+  ansible.builtin.wait_for:
+    host: "{{ cluster_vip }}"
+    port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}"
+    state: started
+    timeout: 15 # max wait time: 15 seconds
+    delay: 2
+  ignore_errors: true # show the error and continue the playbook execution
+  when:
+    - cluster_vip | length > 0
diff --git a/automation/roles/upgrade/tasks/rollback.yml b/automation/roles/upgrade/tasks/rollback.yml
new file mode 100644
index 000000000..537b95760
--- /dev/null
+++ b/automation/roles/upgrade/tasks/rollback.yml
@@ -0,0 +1,228 @@
+---
+# This playbook performs a rollback of a PostgreSQL database cluster upgrade.
+# It's designed to be used when a PostgreSQL upgrade hasn't been fully completed and the new version hasn't been started.
+# The rollback operation is performed by starting the Patroni cluster with the old version of PostgreSQL using the same PGDATA.
+# The playbook first checks the health of the current cluster, verifies the version of PostgreSQL, and ensures the new PostgreSQL is not running.
+# If these checks pass, the playbook switches back to the old PostgreSQL paths and restarts the Patroni service.
+# Notes:
+# If pg_upgrade aborted before linking started, the old cluster was unmodified; it can be restarted.
+# If you did not start the new cluster, the old cluster was unmodified except that,
+# when linking started, a .old suffix was appended to $PGDATA/global/pg_control.
+# To reuse the old cluster, remove the .old suffix from $PGDATA/global/pg_control; you can then restart the old cluster.
+# If you did start the new cluster, it has written to shared files and it is unsafe to use the old cluster.
+# The old cluster will need to be restored from backup in this case.
+
+# If the cluster is already healthy, the process stops to avoid unnecessary actions.
+- name: "[Rollback] Check Patroni cluster state"
+  ansible.builtin.uri:
+    url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/cluster
+    return_content: true
+  register: patroni_cluster_result
+  failed_when: false
+  changed_when: false
+  environment:
+    no_proxy: "{{ inventory_hostname }}"
+  when:
+    - inventory_hostname in groups['primary']
+
+# Stop, if the Patroni cluster is already healthy
+- name: "[Rollback] Abort if the Patroni cluster is already running"
+  ansible.builtin.fail:
+    msg: "The Patroni cluster is already running. Stop rollback."
+  vars:
+    cluster_members: "{{ patroni_cluster_result.json.members | default([]) | rejectattr('state', 'equalto', 'stopped') | list | length }}"
+    total_nodes: "{{ groups['primary'] | length + groups['secondary'] | length }}"
+  when:
+    - inventory_hostname in groups['primary']
+    # Check if the cluster members (excluding 'stopped') equals the total number of nodes
+    - cluster_members == total_nodes
+
+- name: "[Rollback] Make sure the new PostgreSQL is not running"
+  ansible.builtin.command: "{{ pg_new_bindir }}/pg_ctl status -D {{ pg_new_datadir }}"
+  register: pg_ctl_status_new_result
+  failed_when: false
+  changed_when: false
+
+# Stop, if new PostgreSQL is running
+# "If you did start the new cluster, it has written to shared files and it is unsafe to use the old cluster."
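+# Note: per the PostgreSQL docs, 'pg_ctl status' exits with 0 when the server is running,
+# 3 when it is not running, and 4 when the data directory is not accessible, so the
+# 'rc == 0' condition below reliably detects a started new cluster.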
+- name: "[Rollback] Abort if the new PostgreSQL cluster is already running" + ansible.builtin.fail: + msg: + - "The PostgreSQL {{ pg_new_version }} is running on host {{ ansible_hostname }}. We can't rollback." + when: + - pg_ctl_status_new_result is defined + - pg_ctl_status_new_result.rc == 0 + +- name: Check if pg_control.old exists + ansible.builtin.stat: + path: "{{ pg_old_datadir }}/global/pg_control.old" + register: pg_control_old + +# if 'pg_control.old' exists +# "To reuse the old cluster, remove the .old suffix from $PGDATA/global/pg_control" +- name: "[Rollback] Rename pg_control.old to pg_control" + ansible.builtin.command: mv "{{ pg_old_datadir }}/global/pg_control.old" "{{ pg_old_datadir }}/global/pg_control" + when: pg_control_old.stat.exists + +- name: "[Rollback] Check PostgreSQL version in pg_control" + ansible.builtin.shell: | + set -o pipefail; + {{ pg_old_bindir }}/pg_controldata {{ pg_old_datadir }} | grep 'pg_control version number' | awk '{print substr($4, 1, 2)}' + args: + executable: /bin/bash + changed_when: false + register: pg_control_version + when: + - inventory_hostname in groups['primary'] + +# Stop, if 'pg_control version number' is equal to the new PostgreSQL version +- name: "[Rollback] Abort if the PostgreSQL version does not match expected version" + ansible.builtin.fail: + msg: + - "The version in pg_control ({{ pg_control_version.stdout }}) is not equal to the PostgreSQL {{ pg_old_version }}. We can't rollback." + - "The old cluster will need to be restored from backup." + when: + - inventory_hostname in groups['primary'] + - pg_control_version.stdout == pg_new_version | string | replace('.', '') + +# Restore the old Patroni configuration +- name: "[Rollback] Restore the old patroni.yml configuration file" + ansible.builtin.copy: + src: "{{ patroni_config_file }}.bkp" + dest: "{{ patroni_config_file }}" + owner: postgres + group: postgres + mode: "0640" + remote_src: true + +- name: "[Rollback] Ensure old PostgreSQL paths are set in patroni.yml" + ansible.builtin.replace: + path: "{{ patroni_config_file }}" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + loop: + - { regexp: "data_dir: {{ pg_new_datadir }}", replace: "data_dir: {{ pg_old_datadir }}" } + - { regexp: "bin_dir: {{ pg_new_bindir }}", replace: "bin_dir: {{ pg_old_bindir }}" } + - { regexp: "config_dir: {{ pg_new_confdir }}", replace: "config_dir: {{ pg_old_confdir }}" } + loop_control: + label: "{{ item.replace }}" + +# Start Patroni cluster +- name: "[Rollback] Start Patroni service on the Cluster Leader" + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: started + when: + - inventory_hostname in groups['primary'] + +- name: '[Rollback] Wait for port "{{ patroni_restapi_port }}" to become open on the host' + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" + state: started + timeout: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + when: + - inventory_hostname in groups['primary'] + +- name: "[Rollback] Resume Patroni (disable maintenance mode)" + run_once: true + ansible.builtin.command: "patronictl -c {{ patroni_config_file }} resume --wait {{ patroni_cluster_name }}" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + failed_when: false + +- name: "[Rollback] Check Patroni is healthy on the Leader" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + until: 
patroni_leader_result.status == 200 + retries: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - inventory_hostname in groups['primary'] + +- name: "[Rollback] Start Patroni service on the Cluster Replica" + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: started + when: + - inventory_hostname in groups['secondary'] + +- name: '[Rollback] Wait for port "{{ patroni_restapi_port }}" to become open on the host' + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" + state: started + timeout: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + when: + - inventory_hostname in groups['secondary'] + +- name: "[Rollback] Check Patroni is healthy on the Replica" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/health + status_code: 200 + register: patroni_replica_result + until: patroni_replica_result.status == 200 + retries: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - inventory_hostname in groups['secondary'] + +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true' +- block: + - name: "[Rollback] Disable maintenance mode" + ansible.builtin.include_tasks: maintenance_disable.yml + + - name: "[Rollback] Perform RESUME PgBouncer pools (if paused)" + ansible.builtin.include_tasks: pgbouncer_resume.yml + when: + - pgbouncer_install | bool + - pgbouncer_pool_pause | bool + +- name: "[Rollback] Check PostgreSQL is started and accepting connections" + ansible.builtin.command: "{{ pg_old_bindir }}/pg_isready -p {{ postgresql_port }}" + register: pg_isready_result + until: pg_isready_result.rc == 0 + retries: 300 # max duration 10 minutes + delay: 2 + changed_when: false + +# Info +- block: + - name: Check the PostgreSQL version + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select current_setting('server_version')" + register: postgres_version + changed_when: false + + - name: Get the Patroni cluster members + become: true + become_user: postgres + ansible.builtin.command: "patronictl -c {{ patroni_config_file }} list" + register: patronictl_result + changed_when: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + + - name: Print the Patroni cluster state + ansible.builtin.debug: + msg: "{{ patronictl_result.stdout_lines }}" + + - name: Rollback completed + ansible.builtin.debug: + msg: + - "Rollback to the old PostgreSQL is completed." 
+ - "Current version: {{ postgres_version.stdout }}" + when: inventory_hostname in groups['primary'] diff --git a/automation/roles/upgrade/tasks/schema_compatibility.yml b/automation/roles/upgrade/tasks/schema_compatibility.yml new file mode 100644 index 000000000..70d1ad110 --- /dev/null +++ b/automation/roles/upgrade/tasks/schema_compatibility.yml @@ -0,0 +1,140 @@ +--- +- name: Get the current shared_preload_libraries settings + ansible.builtin.command: >- + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "show shared_preload_libraries" + changed_when: false + register: pg_shared_preload_libraries + when: + - inventory_hostname in groups['primary'] + +- name: Get the current cron.database_name settings + ansible.builtin.command: >- + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select current_setting('cron.database_name', true)" + changed_when: false + register: pg_cron_database_name + when: + - inventory_hostname in groups['primary'] + - "'pg_cron' in pg_shared_preload_libraries.stdout" + +- name: Check if PostgreSQL is running + ansible.builtin.command: "{{ pg_new_bindir }}/pg_ctl status -D {{ pg_new_datadir }}" + register: pg_ctl_status_result + failed_when: false + changed_when: false + when: + - inventory_hostname in groups['primary'] + +- name: "Start new PostgreSQL on port {{ schema_compatibility_check_port }} to check the schema compatibility" + ansible.builtin.command: > + {{ pg_new_bindir }}/pg_ctl -D {{ pg_new_datadir }} + -o "-p {{ schema_compatibility_check_port }} + -c unix_socket_directories='/tmp' + -c shared_preload_libraries='{{ pg_shared_preload_libraries.stdout }}' + {% if pg_cron_database_name.stdout | default('') | length > 0 %} + -c cron.database_name='{{ pg_cron_database_name.stdout }}' + {% endif %} + -c config_file='{{ pg_new_confdir }}/postgresql.conf'" + start -w -t {{ pg_start_stop_timeout }} -l /tmp/pg_tmp_start.log + async: "{{ pg_start_stop_timeout }}" # run the command asynchronously + poll: 0 + register: pg_ctl_start_result + when: + - inventory_hostname in groups['primary'] + - pg_ctl_status_result.rc != 0 + +- name: Wait for PostgreSQL to start + ansible.builtin.async_status: + jid: "{{ pg_ctl_start_result.ansible_job_id }}" + register: pg_ctl_start_job_result + until: pg_ctl_start_job_result.finished + retries: "{{ (pg_start_stop_timeout | int) // 10 }}" + delay: 10 + when: + - pg_ctl_start_result.ansible_job_id is defined + - inventory_hostname in groups['primary'] + +- name: "Check the compatibility of the database schema with the PostgreSQL {{ pg_new_version }}" + ansible.builtin.shell: | + set -o pipefail; + {{ pg_new_bindir }}/pg_dumpall \ + -h {{ postgresql_unix_socket_dir }} \ + -p {{ postgresql_port }} \ + -U {{ pg_install_user.stdout }} \ + --schema-only | {{ pg_new_bindir }}/psql \ + -U {{ pg_install_user.stdout }} \ + -d postgres \ + -h /tmp \ + -p {{ schema_compatibility_check_port }} \ + > /tmp/pg_schema_compatibility_check.log 2>&1 + args: + executable: /bin/bash + async: "{{ schema_compatibility_check_timeout }}" # run the command asynchronously + poll: 0 + register: pg_dumpall_result + when: + - inventory_hostname in groups['primary'] + +- name: Wait for the schema compatibility check to complete. 
+  ansible.builtin.async_status:
+    jid: "{{ pg_dumpall_result.ansible_job_id }}"
+  register: pg_dumpall_job_result
+  until: pg_dumpall_job_result.finished
+  retries: "{{ (schema_compatibility_check_timeout | int) // 10 }}"
+  delay: 10
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: Check the schema compatibility result
+  ansible.builtin.shell: >
+    set -o pipefail;
+    grep ERROR /tmp/pg_schema_compatibility_check.log | grep -v "already exists"
+  args:
+    executable: /bin/bash
+  register: pg_schema_compatibility_check_result
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: "Result of checking the compatibility of the schema - success"
+  ansible.builtin.debug:
+    msg: "The database schema is compatible with PostgreSQL {{ pg_new_version }}"
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_schema_compatibility_check_result.stdout | length < 1
+
+# Stop, if the schema is not compatible (there are errors)
+- name: "Result of checking the compatibility of the schema - error"
+  ansible.builtin.debug:
+    msg:
+      - "{{ pg_schema_compatibility_check_result.stdout_lines }}"
+      - "The database schema is not compatible with PostgreSQL {{ pg_new_version }}"
+      - "Please check the /tmp/pg_schema_compatibility_check.log on the Primary"
+  failed_when: true
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_schema_compatibility_check_result.stdout | length > 0
+
+- name: Stop new PostgreSQL to re-initdb
+  ansible.builtin.command: >
+    {{ pg_new_bindir }}/pg_ctl -D {{ pg_new_datadir }} stop -w -t {{ pg_start_stop_timeout }}
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_new_confdir == pg_new_datadir
+
+# for Debian based, drop the cluster to perform re-init
+- name: Drop new PostgreSQL to re-initdb (perform pg_dropcluster)
+  ansible.builtin.command: >
+    /usr/bin/pg_dropcluster --stop {{ pg_new_version }} {{ postgresql_cluster_name }}
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_new_confdir != pg_new_datadir
+    - ansible_os_family == "Debian"
+
+- name: Reinitialize the database after checking schema compatibility
+  ansible.builtin.include_tasks: "{{ role_path }}/tasks/initdb.yml"
+  when:
+    - inventory_hostname in groups['primary']
diff --git a/automation/roles/upgrade/tasks/ssh-keys.yml b/automation/roles/upgrade/tasks/ssh-keys.yml
new file mode 100644
index 000000000..ffb5948b9
--- /dev/null
+++ b/automation/roles/upgrade/tasks/ssh-keys.yml
@@ -0,0 +1,65 @@
+---
+# Configure SSH Key-Based Authentication between cluster nodes
+
+- name: Make sure that the openssh-client package is installed
+  become: true
+  become_user: root
+  ansible.builtin.package:
+    name: openssh-client
+    state: present
+  when: ansible_os_family == "Debian"
+
+- name: Make sure that the openssh-clients package is installed
+  become: true
+  become_user: root
+  ansible.builtin.package:
+    name: openssh-clients
+    state: present
+  when: ansible_os_family == "RedHat"
+
+- name: Make sure the SSH key for user "{{ ssh_key_user }}" exists
+  ansible.builtin.user:
+    name: "{{ ssh_key_user }}"
+    generate_ssh_key: true
+    ssh_key_file: .ssh/id_rsa
+
+- name: Fetch public SSH keys from database servers
+  ansible.builtin.fetch:
+    src: "~{{ ssh_key_user }}/.ssh/id_rsa.pub"
+    dest: "files/{{ inventory_hostname }}-id_rsa.pub"
+    flat: true
+  changed_when: false
+
+- name: Add public SSH keys to authorized_keys
+  ansible.posix.authorized_key:
+    user: "{{ ssh_key_user }}"
+    state: present
+    key: "{{ lookup('pipe', 'cat 
files/*id_rsa.pub') }}" + exclusive: false + +- name: Remove public SSH keys from localhost + run_once: true + become: false + ansible.builtin.file: + path: files/{{ item }}-id_rsa.pub + state: absent + loop: "{{ groups['postgres_cluster'] }}" + delegate_to: localhost + changed_when: false + +# known_hosts +- name: known_hosts | for each host, scan for its ssh public key + ansible.builtin.command: "ssh-keyscan -trsa -p {{ ansible_ssh_port | default(22) }} {{ item }}" + loop: "{{ groups['postgres_cluster'] }}" + register: ssh_known_host_results + changed_when: false + +- name: known_hosts | for each host, add/update the public key in the "~{{ ssh_key_user }}/.ssh/known_hosts" + become: true + become_user: "{{ ssh_key_user }}" + ansible.builtin.known_hosts: + name: "{{ item.item }}" + key: "{{ item.stdout }}" + path: "~{{ ssh_key_user }}/.ssh/known_hosts" + loop: "{{ ssh_known_host_results.results }}" + no_log: true # don't show public keys diff --git a/automation/roles/upgrade/tasks/start_services.yml b/automation/roles/upgrade/tasks/start_services.yml new file mode 100644 index 000000000..a1c494223 --- /dev/null +++ b/automation/roles/upgrade/tasks/start_services.yml @@ -0,0 +1,88 @@ +--- +- name: Start Patroni service on the Cluster Leader + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: started + when: + - inventory_hostname in groups['primary'] + +- name: Wait for Patroni port "{{ patroni_restapi_port }}" to become open on the host + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" + state: started + timeout: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + when: + - inventory_hostname in groups['primary'] + +- name: Check Patroni is healthy on the Leader + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + until: patroni_leader_result.status == 200 + retries: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - inventory_hostname in groups['primary'] + +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true' +- name: Perform RESUME PgBouncer pools on the Leader + ansible.builtin.include_tasks: pgbouncer_resume.yml + when: + - inventory_hostname in groups['primary'] + - hostvars[groups['primary'][0]].pgbouncer_pool_pause_result is defined + - hostvars[groups['primary'][0]].pgbouncer_pool_pause_result is succeeded + +- name: Start Patroni service on the Cluster Replica + become: true + become_user: root + ansible.builtin.service: + name: patroni + state: started + when: + - inventory_hostname in groups['secondary'] + +- name: Wait for Patroni port "{{ patroni_restapi_port }}" to become open on the host + ansible.builtin.wait_for: + port: "{{ patroni_restapi_port }}" + host: "{{ inventory_hostname }}" + state: started + timeout: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + when: + - inventory_hostname in groups['secondary'] + +- name: Check Patroni is healthy on the Replica + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/health + status_code: 200 + register: patroni_replica_result + until: patroni_replica_result.status == 200 + retries: "{{ (pg_start_stop_timeout | int) // 2 }}" + delay: 2 + environment: + no_proxy: "{{ inventory_hostname }}" + when: + - inventory_hostname in groups['secondary'] + +# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 
'true'
+- name: Perform RESUME PgBouncer pools on the Replica
+  ansible.builtin.include_tasks: pgbouncer_resume.yml
+  when:
+    - inventory_hostname in groups['secondary']
+    - hostvars[groups['primary'][0]].pgbouncer_pool_pause_result is defined
+    - hostvars[groups['primary'][0]].pgbouncer_pool_pause_result is succeeded
+
+- name: Check PostgreSQL is started and accepting connections
+  ansible.builtin.command: "{{ pg_new_bindir }}/pg_isready -p {{ postgresql_port }}"
+  register: pg_isready_result
+  until: pg_isready_result.rc == 0
+  retries: 300 # max duration 10 minutes
+  delay: 2
+  changed_when: false
diff --git a/automation/roles/upgrade/tasks/statistics.yml b/automation/roles/upgrade/tasks/statistics.yml
new file mode 100644
index 000000000..8554a0b00
--- /dev/null
+++ b/automation/roles/upgrade/tasks/statistics.yml
@@ -0,0 +1,82 @@
+---
+# ANALYZE: Update optimizer statistics
+#
+# If an autovacuum (e.g. one started to prevent transaction ID wraparound) is running while statistics are collected,
+# there's a risk that the 'ANALYZE' command gets blocked, waiting for the lock to be released until the autovacuum process completes.
+# For large tables, this waiting period can span from several minutes to hours.
+#
+# To prevent 'ANALYZE' from getting blocked, we execute the 'pg_terminator' script during statistics collection.
+
+- block:
+    # Monitor the locks and terminate the backend blocking the 'ANALYZE' query (for more than 15 seconds)
+    - name: "pg_terminator: Monitor locks and terminate the 'ANALYZE' blockers"
+      ansible.builtin.shell: |
+        echo $$ > /tmp/pg_terminator.pid
+        for i in {1..{{ vacuumdb_analyze_timeout // 10 }}}; do
+          {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "
+          with blocker_pids(pid) as (
+            select unnest(pg_blocking_pids(pid))
+            from pg_stat_activity
+            where
+              query ilike 'ANALYZE %'
+              and wait_event_type = 'Lock'
+          )
+          select
+            clock_timestamp(),
+            pg_terminate_backend(pid),
+            pid,
+            clock_timestamp() - xact_start as xact_age,
+            left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),150) as query
+          from pg_stat_activity
+          where
+            pid in (select pid from blocker_pids)
+            and xact_start < clock_timestamp() - interval '15s';" >> /tmp/pg_terminator.log
+          sleep 10
+        done
+      args:
+        executable: /bin/bash
+      async: "{{ vacuumdb_analyze_timeout }}" # run the command asynchronously with a maximum duration
+      poll: 0
+      register: pg_terminator_analyze
+      ignore_errors: true # ignore errors if the task runs longer than 'vacuumdb_analyze_timeout'.
+      when: pg_new_version | string is version('9.6', '>=')
+
+    # Monitor long-running transactions and terminate them (for more than 'vacuumdb_analyze_terminate_treshold')
+    - name: "pg_terminator: Monitor and terminate the long-running transactions (more than {{ max_tx_sec }} seconds) while collecting statistics"
+      ansible.builtin.shell: |
+        echo $$ >> /tmp/pg_terminator.pid
+        for i in {1..{{ vacuumdb_analyze_timeout // 10 }}}; do
+          {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "
+          select
+            clock_timestamp(),
+            pg_terminate_backend(pid),
+            pid,
+            clock_timestamp() - xact_start as xact_age,
+            left(regexp_replace(query, E'[ \\t\\n\\r]+', ' ', 'g'),150) as query
+          from pg_stat_activity
+          where
+            backend_type = 'client backend' and pid <> pg_backend_pid()
+            and query not ilike 'ANALYZE %'
+            and xact_start < clock_timestamp() - interval '{{ max_tx_sec }}s';" >> /tmp/pg_terminator.log
+          sleep 10
+        done
+      args:
+        executable: /bin/bash
+      async: "{{ vacuumdb_analyze_timeout }}" # run the command asynchronously with a maximum duration
+      poll: 0
+      register: pg_terminator_long_transactions
+      ignore_errors: true # ignore errors if the task runs longer than 'vacuumdb_analyze_timeout'.
+      vars:
+        max_tx_sec: "{{ vacuumdb_analyze_terminate_treshold }}"
+      when: pg_new_version | string is version('10', '>=') and vacuumdb_analyze_terminate_treshold | int > 0
+
+    # ANALYZE
+    - name: "Run vacuumdb to analyze the PostgreSQL databases"
+      ansible.builtin.command: >
+        {{ pg_new_bindir }}/vacuumdb -p {{ postgresql_port }}
+        --all --analyze-in-stages --jobs={{ vacuumdb_parallel_jobs }}
+      async: "{{ vacuumdb_analyze_timeout }}" # run the command asynchronously with a maximum duration
+      poll: 0
+      register: vacuumdb_analyze
+      ignore_errors: true # ignore errors if the task runs longer than 'vacuumdb_analyze_timeout'.
+      when: inventory_hostname in groups['primary']
diff --git a/automation/roles/upgrade/tasks/stop_services.yml b/automation/roles/upgrade/tasks/stop_services.yml
new file mode 100644
index 000000000..72c7f7df7
--- /dev/null
+++ b/automation/roles/upgrade/tasks/stop_services.yml
@@ -0,0 +1,128 @@
+---
+- name: "Execute CHECKPOINT before stopping PostgreSQL"
+  ansible.builtin.command: >
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc "CHECKPOINT"
+  async: "{{ pg_start_stop_timeout | int }}" # run the command asynchronously
+  poll: 0
+  register: checkpoint_result
+
+- name: Wait for the CHECKPOINT to complete
+  ansible.builtin.async_status:
+    jid: "{{ checkpoint_result.ansible_job_id }}"
+  register: checkpoint_job_result
+  until: checkpoint_job_result.finished
+  retries: "{{ (pg_start_stop_timeout | int) // 10 }}"
+  delay: 10
+
+# Wait for a window without high replication lag before stopping PostgreSQL
+- name: "Wait until replication lag is less than {{ max_replication_lag_bytes | human_readable }}"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select coalesce(max(pg_wal_lsn_diff(pg_current_wal_lsn(),replay_lsn)),1) as pg_lag_bytes
+    from pg_stat_replication"
+  register: pg_lag_bytes
+  until: pg_lag_bytes.stdout|int < max_replication_lag_bytes|int
+  retries: 60 # max wait time: 2 minutes
+  delay: 2
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '>=')
+
+# Stop, if replication lag is high
+- block:
+    - name: "Print replication lag"
+      ansible.builtin.debug:
+        msg: "Current replication lag: {{ pg_lag_bytes.stdout | int | human_readable }}"
+
+    # rollback
+    - name: Perform rollback
+      ansible.builtin.include_tasks: rollback.yml
+
+    - name: "Replication lag detected"
+      ansible.builtin.fail:
+        msg: "There's a replication lag in the PostgreSQL Cluster. Please try again later."
+  when:
+    - pg_lag_bytes.stdout is defined
+    - pg_lag_bytes.stdout|int >= max_replication_lag_bytes|int
+
+# for compatibility with Postgres 9.x
+- name: "Wait until replication lag is less than {{ max_replication_lag_bytes | human_readable }}"
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc
+    "select coalesce(max(pg_xlog_location_diff(pg_current_xlog_location(),replay_location)),1) as pg_lag_bytes
+    from pg_stat_replication"
+  register: pg_lag_bytes_9x
+  until: pg_lag_bytes_9x.stdout|int < max_replication_lag_bytes|int
+  retries: 60 # max wait time: 2 minutes
+  delay: 2
+  changed_when: false
+  failed_when: false
+  when:
+    - inventory_hostname in groups['primary']
+    - pg_old_version | string is version('10', '<')
+
+# Stop, if replication lag is high (for 9x)
+- block:
+    - name: "Print replication lag"
+      ansible.builtin.debug:
+        msg: "Current replication lag: {{ pg_lag_bytes_9x.stdout | int | human_readable }}"
+
+    # rollback
+    - name: Perform rollback
+      ansible.builtin.include_tasks: rollback.yml
+
+    - name: "Replication lag detected"
+      ansible.builtin.fail:
+        msg: "There's a replication lag in the PostgreSQL Cluster. Please try again later." 
+  when:
+    - pg_lag_bytes_9x.stdout is defined
+    - pg_lag_bytes_9x.stdout|int >= max_replication_lag_bytes|int
+
+# if 'pgbouncer_install' is 'true' and 'pgbouncer_pool_pause' is 'true'
+- name: Perform a PAUSE on all PgBouncer servers
+  ansible.builtin.include_tasks: pgbouncer_pause.yml
+  when:
+    - pgbouncer_install | bool
+    - pgbouncer_pool_pause | bool
+
+# Stop PostgreSQL (if replication lag is 0 bytes)
+- name: Stop PostgreSQL on the Leader
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/pg_ctl -D {{ pg_old_datadir }} stop -m fast -w -t {{ pg_start_stop_timeout }}
+  when:
+    - inventory_hostname in groups['primary']
+
+- name: Stop PostgreSQL on the Replica
+  ansible.builtin.command: >-
+    {{ pg_old_bindir }}/pg_ctl -D {{ pg_old_datadir }} stop -m fast -w -t {{ pg_start_stop_timeout }}
+  when:
+    - inventory_hostname in groups['secondary']
+
+# additional checks using pg_ctl
+- name: "Check if PostgreSQL {{ pg_old_version }} is stopped"
+  ansible.builtin.command: "{{ pg_old_bindir }}/pg_ctl status -D {{ pg_old_datadir }}"
+  register: pg_ctl_status_old_result
+  failed_when: false
+  changed_when: false
+
+- name: "Check if PostgreSQL {{ pg_new_version }} is stopped"
+  ansible.builtin.command: "{{ pg_new_bindir }}/pg_ctl status -D {{ pg_new_datadir }}"
+  register: pg_ctl_status_new_result
+  failed_when: false
+  changed_when: false
+
+- name: "Stop PostgreSQL {{ pg_old_version }}"
+  ansible.builtin.command: >
+    {{ pg_old_bindir }}/pg_ctl -D {{ pg_old_datadir }} stop -w -t {{ pg_start_stop_timeout }}
+  when:
+    - pg_ctl_status_old_result is defined
+    - pg_ctl_status_old_result.rc == 0
+
+- name: "Stop PostgreSQL {{ pg_new_version }}"
+  ansible.builtin.command: >
+    {{ pg_new_bindir }}/pg_ctl -D {{ pg_new_datadir }} stop -w -t {{ pg_start_stop_timeout }}
+  when:
+    - pg_ctl_status_new_result is defined
+    - pg_ctl_status_new_result.rc == 0
diff --git a/automation/roles/upgrade/tasks/update_config.yml b/automation/roles/upgrade/tasks/update_config.yml
new file mode 100644
index 000000000..c151564a2
--- /dev/null
+++ b/automation/roles/upgrade/tasks/update_config.yml
@@ -0,0 +1,273 @@
+---
+# Prepare the parameters for Patroni.
+
+- name: "Backup the patroni.yml configuration file"
+  ansible.builtin.copy:
+    src: "{{ patroni_config_file }}"
+    dest: "{{ patroni_config_file }}.bkp"
+    remote_src: true
+
+- name: Remove patroni.dynamic.json file
+  ansible.builtin.file:
+    path: "{{ pg_old_datadir }}/patroni.dynamic.json"
+    state: absent
+
+# Update the directory paths to the new version of PostgreSQL
+- name: "Edit patroni.yml | update parameters: data_dir, bin_dir, config_dir"
+  ansible.builtin.replace:
+    path: "{{ patroni_config_file }}"
+    regexp: "{{ item.regexp }}"
+    replace: "{{ item.replace }}"
+  loop:
+    - { regexp: "data_dir: {{ pg_old_datadir }}", replace: "data_dir: {{ pg_new_datadir }}" }
+    - { regexp: "bin_dir: {{ pg_old_bindir }}", replace: "bin_dir: {{ pg_new_bindir }}" }
+    - { regexp: "config_dir: {{ pg_old_confdir }}", replace: "config_dir: {{ pg_new_confdir }}" }
+  loop_control:
+    label: "{{ item.replace }}"
+
+# Prepare the parameters for PostgreSQL (removed or renamed parameters).
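+# The blocks below share one pattern: grep patroni.yml for a setting that the target major
+# version no longer accepts, then drop it (or, for wal_keep_segments, convert it).
+# Parameters handled: replacement_sort_tuples (PG 11), default_with_oids (PG 12),
+# wal_keep_segments -> wal_keep_size (PG 13), operator_precedence_warning and
+# vacuum_cleanup_index_scale_factor (PG 14), stats_temp_directory (PG 15),
+# force_parallel_mode, promote_trigger_file and vacuum_defer_cleanup_age (PG 16),
+# db_user_namespace, old_snapshot_threshold and trace_recovery_messages (PG 17).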
+
+- block: # replacement_sort_tuples (removed in the PG 11)
+    # check if the replacement_sort_tuples parameter is specified in the patroni.yml
+    - name: "Edit patroni.yml | check if the 'replacement_sort_tuples' parameter is specified"
+      ansible.builtin.command: grep replacement_sort_tuples {{ patroni_config_file }}
+      register: replacement_sort_tuples_output
+      changed_when: false
+      failed_when: false
+
+    # if defined, remove the replacement_sort_tuples parameter from the patroni.yml
+    - name: "Edit patroni.yml | remove parameter: 'replacement_sort_tuples'"
+      ansible.builtin.lineinfile:
+        path: "{{ patroni_config_file }}"
+        regexp: '^(\s*)replacement_sort_tuples:.*'
+        state: absent
+      when: replacement_sort_tuples_output.stdout | length > 0
+  when:
+    - pg_old_version|int <= 10 and pg_new_version|int >= 11
+
+- block: # default_with_oids (removed in the PG 12)
+    # check if the default_with_oids parameter is specified in the patroni.yml
+    - name: "Edit patroni.yml | check if the 'default_with_oids' parameter is specified"
+      ansible.builtin.command: grep default_with_oids {{ patroni_config_file }}
+      register: default_with_oids_output
+      changed_when: false
+      failed_when: false
+
+    # if defined, remove the default_with_oids parameter from the patroni.yml
+    - name: "Edit patroni.yml | remove parameter: 'default_with_oids'"
+      ansible.builtin.lineinfile:
+        path: "{{ patroni_config_file }}"
+        regexp: '^(\s*)default_with_oids:.*'
+        state: absent
+      when: default_with_oids_output.stdout | length > 0
+  when:
+    - pg_old_version|int <= 11 and pg_new_version|int >= 12
+
+- block: # wal_keep_segments (removed in the PG 13)
+    # check if the wal_keep_segments parameter is specified in the patroni.yml
+    - name: "Edit patroni.yml | check if the 'wal_keep_segments' parameter is specified"
+      ansible.builtin.shell: >
+        set -o pipefail;
+        grep wal_keep_segments {{ patroni_config_file }} | awk '{print $2}' | tail -n 1
+      args:
+        executable: /bin/bash
+      register: wal_keep_segments_output
+      changed_when: false
+      failed_when: false
+
+    # if defined, replace it with 'wal_keep_size' and a value in MB.
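+    # Worked example (assuming the default 16MB WAL segment size): 'wal_keep_segments: 64'
+    # is rewritten to 'wal_keep_size: 1024MB' (64 * 16).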
+ - name: "Edit patroni.yml | replace parameter: 'wal_keep_segments' to 'wal_keep_size'" + ansible.builtin.replace: + path: "{{ patroni_config_file }}" + regexp: "wal_keep_segments: ([0-9]+)" + replace: "wal_keep_size: {{ (wal_keep_segments_output.stdout | int * 16) | string + 'MB' }}" + when: wal_keep_segments_output.stdout|int > 0 + when: + - pg_old_version|int <= 12 and pg_new_version|int >= 13 + +- block: # operator_precedence_warning (removed in the PG 14) + # check if the operator_precedence_warning parameter is specified in the patroni.yml + - name: "Edit patroni.yml | check if the 'operator_precedence_warning' parameter is specified" + ansible.builtin.command: grep operator_precedence_warning {{ patroni_config_file }} + register: operator_precedence_warning_output + changed_when: false + failed_when: false + + # if defined, remove the operator_precedence_warning parameter from the patroni.yml + - name: "Edit patroni.yml | remove parameter: 'operator_precedence_warning'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)operator_precedence_warning:.*' + state: absent + when: operator_precedence_warning_output.stdout | length > 0 + when: + - pg_old_version|int <= 13 and pg_new_version|int >= 14 + +- block: # vacuum_cleanup_index_scale_factor (removed in the PG 14) + # check if the vacuum_cleanup_index_scale_factor parameter is specified in the patroni.yml + - name: "Edit patroni.yml | check if the 'vacuum_cleanup_index_scale_factor' parameter is specified" + ansible.builtin.command: grep vacuum_cleanup_index_scale_factor {{ patroni_config_file }} + register: vacuum_cleanup_index_scale_factor_output + changed_when: false + failed_when: false + + # if defined, remove the vacuum_cleanup_index_scale_factor parameter from the patroni.yml + - name: "Edit patroni.yml | remove parameter: 'vacuum_cleanup_index_scale_factor'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)vacuum_cleanup_index_scale_factor:.*' + state: absent + when: vacuum_cleanup_index_scale_factor_output.stdout | length > 0 + when: + - pg_old_version|int <= 13 and pg_new_version|int >= 14 + +- block: # stats_temp_directory (removed in the PG 15) + # check if the stats_temp_directory parameter is specified in the patroni.yml + - name: "Edit patroni.yml | check if the 'stats_temp_directory' parameter is specified" + ansible.builtin.command: grep stats_temp_directory {{ patroni_config_file }} + register: stats_temp_directory_output + changed_when: false + failed_when: false + + # if defined, remove the stats_temp_directory parameter from the patroni.yml + - name: "Edit patroni.yml | remove parameter: 'stats_temp_directory'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)stats_temp_directory:.*' + state: absent + when: stats_temp_directory_output.stdout | length > 0 + when: + - pg_old_version|int <= 14 and pg_new_version|int >= 15 + +- block: # force_parallel_mode (removed in the PG 16) + # check if the force_parallel_mode parameter is specified in the patroni.yml + - name: "Edit patroni.yml | check if the 'force_parallel_mode' parameter is specified" + ansible.builtin.command: grep force_parallel_mode {{ patroni_config_file }} + register: force_parallel_mode_output + changed_when: false + failed_when: false + + # if defined, remove the force_parallel_mode parameter from the patroni.yml + - name: "Edit patroni.yml | remove parameter: 'force_parallel_mode'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + 
regexp: '^(\s*)force_parallel_mode:.*' + state: absent + when: force_parallel_mode_output.stdout | length > 0 + when: + - pg_old_version|int <= 15 and pg_new_version|int >= 16 + +- block: # promote_trigger_file (removed in PG 16) + # check if the promote_trigger_file parameter is specified in patroni.yml + - name: "Edit patroni.yml | check if the 'promote_trigger_file' parameter is specified" + ansible.builtin.command: grep promote_trigger_file {{ patroni_config_file }} + register: promote_trigger_file_output + changed_when: false + failed_when: false + + # if defined, remove the promote_trigger_file parameter from patroni.yml + - name: "Edit patroni.yml | remove parameter: 'promote_trigger_file'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)promote_trigger_file:.*' + state: absent + when: promote_trigger_file_output.stdout | length > 0 + when: + - pg_old_version|int <= 15 and pg_new_version|int >= 16 + +- block: # vacuum_defer_cleanup_age (removed in PG 16) + # check if the vacuum_defer_cleanup_age parameter is specified in patroni.yml + - name: "Edit patroni.yml | check if the 'vacuum_defer_cleanup_age' parameter is specified" + ansible.builtin.command: grep vacuum_defer_cleanup_age {{ patroni_config_file }} + register: vacuum_defer_cleanup_age_output + changed_when: false + failed_when: false + + # if defined, remove the vacuum_defer_cleanup_age parameter from patroni.yml + - name: "Edit patroni.yml | remove parameter: 'vacuum_defer_cleanup_age'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)vacuum_defer_cleanup_age:.*' + state: absent + when: vacuum_defer_cleanup_age_output.stdout | length > 0 + when: + - pg_old_version|int <= 15 and pg_new_version|int >= 16 + +- block: # db_user_namespace (removed in PG 17) + # check if the db_user_namespace parameter is specified in patroni.yml + - name: "Edit patroni.yml | check if the 'db_user_namespace' parameter is specified" + ansible.builtin.command: grep db_user_namespace {{ patroni_config_file }} + register: db_user_namespace_output + changed_when: false + failed_when: false + + # if defined, remove the db_user_namespace parameter from patroni.yml + - name: "Edit patroni.yml | remove parameter: 'db_user_namespace'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)db_user_namespace:.*' + state: absent + when: db_user_namespace_output.stdout | length > 0 + when: + - pg_old_version|int <= 16 and pg_new_version|int >= 17 + +- block: # old_snapshot_threshold (removed in PG 17) + # check if the old_snapshot_threshold parameter is specified in patroni.yml + - name: "Edit patroni.yml | check if the 'old_snapshot_threshold' parameter is specified" + ansible.builtin.command: grep old_snapshot_threshold {{ patroni_config_file }} + register: old_snapshot_threshold_output + changed_when: false + failed_when: false + + # if defined, remove the old_snapshot_threshold parameter from patroni.yml + - name: "Edit patroni.yml | remove parameter: 'old_snapshot_threshold'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)old_snapshot_threshold:.*' + state: absent + when: old_snapshot_threshold_output.stdout | length > 0 + when: + - pg_old_version|int <= 16 and pg_new_version|int >= 17 + +- block: # trace_recovery_messages (removed in PG 17) + # check if the trace_recovery_messages parameter is specified in patroni.yml + - name: "Edit patroni.yml | check if the
'trace_recovery_messages' parameter is specified" + ansible.builtin.command: grep trace_recovery_messages {{ patroni_config_file }} + register: trace_recovery_messages_output + changed_when: false + failed_when: false + + # if defined, remove the trace_recovery_messages parameter from patroni.yml + - name: "Edit patroni.yml | remove parameter: 'trace_recovery_messages'" + ansible.builtin.lineinfile: + path: "{{ patroni_config_file }}" + regexp: '^(\s*)trace_recovery_messages:.*' + state: absent + when: trace_recovery_messages_output.stdout | length > 0 + when: + - pg_old_version|int <= 16 and pg_new_version|int >= 17 + +# TODO: Prepare the parameters for PostgreSQL 18 and later. + +# To support upgrades in the Patroni Standby Cluster. +- name: "Edit patroni.yml | remove parameter: standby_cluster (if exists)" + ansible.builtin.replace: + path: "{{ patroni_config_file }}" + regexp: '^\s*standby_cluster:[^\n]*\n(.|\n)*?initdb:' + replace: " initdb:" + +- name: "Remove 'standby_cluster' parameter from DCS (if exists)" + ansible.builtin.command: patronictl -c {{ patroni_config_file }} edit-config -s standby_cluster=null --force + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + when: inventory_hostname in groups['primary'] + +# Copy the pg_hba.conf file to the new PostgreSQL config directory to preserve the pg_hba rules. +- name: "Copy pg_hba.conf to {{ pg_new_confdir }}" + ansible.builtin.copy: + src: "{{ pg_old_confdir }}/pg_hba.conf" + dest: "{{ pg_new_confdir }}" + owner: postgres + mode: "0600" + force: true + remote_src: true diff --git a/automation/roles/upgrade/tasks/update_extensions.yml b/automation/roles/upgrade/tasks/update_extensions.yml new file mode 100644 index 000000000..aeb816671 --- /dev/null +++ b/automation/roles/upgrade/tasks/update_extensions.yml @@ -0,0 +1,92 @@ +--- +- name: "Get list of installed PostgreSQL extensions (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "select extname from pg_catalog.pg_extension" + register: pg_installed_extensions + until: pg_installed_extensions is success + delay: 5 + retries: 3 + changed_when: false + ignore_errors: true # show the error and continue the playbook execution + when: + - inventory_hostname in groups['primary'] + +- name: "Get list of old PostgreSQL extensions (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "select extname from pg_catalog.pg_extension e + join pg_catalog.pg_available_extensions ae on extname = ae.name + where installed_version <> default_version" + register: pg_old_extensions + until: pg_old_extensions is success + delay: 5 + retries: 3 + changed_when: false + ignore_errors: true # show the error and continue the playbook execution + when: + - inventory_hostname in groups['primary'] + +# if there are no old extensions +- name: "The extensions are up-to-date (database: {{ pg_target_dbname }})" + ansible.builtin.debug: + msg: + - "The extension versions are up-to-date for the database {{ pg_target_dbname }}" + - "No update is required."
+ when: + - inventory_hostname in groups['primary'] + - pg_old_extensions is success + - pg_old_extensions.stdout_lines | length < 1 + +# if pg_stat_kcache is not installed +# excluding: 'pg_repack' (if it exists), as it requires re-creation to update +- name: "Update old PostgreSQL extensions (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "ALTER EXTENSION {{ item }} UPDATE" + ignore_errors: true # show the error and continue the playbook execution + loop: "{{ pg_old_extensions.stdout_lines | reject('match', '^pg_repack$') | list }}" + when: + - inventory_hostname in groups['primary'] + - pg_old_extensions is success + - pg_installed_extensions is success + - pg_old_extensions.stdout_lines | length > 0 + - (not 'pg_stat_kcache' in pg_installed_extensions.stdout_lines) + +# if pg_stat_kcache is installed +- block: + # excluding 'pg_stat_statements' and 'pg_stat_kcache', because pg_stat_kcache depends on pg_stat_statements (both will be re-created) + - name: "Update old PostgreSQL extensions (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "ALTER EXTENSION {{ item }} UPDATE" + ignore_errors: true # show the error and continue the playbook execution + loop: "{{ pg_old_extensions.stdout_lines | reject('match', '^(pg_repack|pg_stat_statements|pg_stat_kcache)$') | list }}" + + # re-create 'pg_stat_statements' and 'pg_stat_kcache' if an update is required + - name: "Recreate old pg_stat_statements and pg_stat_kcache extensions to update (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "DROP EXTENSION pg_stat_statements CASCADE; + CREATE EXTENSION pg_stat_statements; + CREATE EXTENSION pg_stat_kcache" + ignore_errors: true # show the error and continue the playbook execution + when: + - inventory_hostname in groups['primary'] + - pg_old_extensions is success + - pg_installed_extensions is success + - pg_old_extensions.stdout_lines | length > 0 + - ('pg_stat_statements' in pg_old_extensions.stdout_lines or 'pg_stat_kcache' in pg_old_extensions.stdout_lines) + - ('pg_stat_kcache' in pg_installed_extensions.stdout_lines) + +# re-create 'pg_repack' if it exists and an update is required +- name: "Recreate old pg_repack extension to update (database: {{ pg_target_dbname }})" + ansible.builtin.command: >- + {{ pg_new_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d {{ pg_target_dbname }} -tAXc + "DROP EXTENSION pg_repack; + CREATE EXTENSION pg_repack;" + ignore_errors: true # show the error and continue the playbook execution + when: + - inventory_hostname in groups['primary'] + - pg_old_extensions is success + - (pg_old_extensions.stdout_lines | length > 0 and 'pg_repack' in pg_old_extensions.stdout_lines) diff --git a/automation/roles/upgrade/tasks/upgrade_check.yml b/automation/roles/upgrade/tasks/upgrade_check.yml new file mode 100644 index 000000000..1874c7fe3 --- /dev/null +++ b/automation/roles/upgrade/tasks/upgrade_check.yml @@ -0,0 +1,47 @@ +--- +- name: Get the current shared_preload_libraries settings + ansible.builtin.command: >- + {{ pg_old_bindir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "show
shared_preload_libraries" + changed_when: false + register: pg_shared_preload_libraries_result + when: + - inventory_hostname in groups['primary'] + +- name: "Set the variable: pg_shared_preload_libraries_value" + ansible.builtin.set_fact: + pg_shared_preload_libraries_value: "{{ pg_shared_preload_libraries_result.stdout }}" + when: + - inventory_hostname in groups['primary'] + +# In the --new-options argument, an inline if condition checks if 'timescaledb' is present in the pg_shared_preload_libraries_value. +# If it is, it appends '-c timescaledb.restoring='on'' to the --new-options argument. +- name: Verify the two clusters are compatible (pg_upgrade --check) + ansible.builtin.command: > + {{ pg_new_bindir }}/pg_upgrade + --username={{ pg_install_user.stdout }} + --old-bindir {{ pg_old_bindir }} + --new-bindir {{ pg_new_bindir }} + --old-datadir {{ pg_old_datadir }} + --new-datadir {{ pg_new_datadir }} + --old-options "-c config_file={{ pg_old_confdir }}/postgresql.conf" + --new-options "-c config_file={{ pg_new_confdir }}/postgresql.conf {{ shared_preload_libraries }} {{ timescaledb_restoring }}" + --jobs={{ ansible_processor_vcpus }} + --link + --check + args: + chdir: "/tmp" + vars: + shared_preload_libraries: "-c shared_preload_libraries='{{ pg_shared_preload_libraries_value }}'" + timescaledb_restoring: '{{ "-c timescaledb.restoring=''on''" if ''timescaledb'' in pg_shared_preload_libraries_value else '''' }}' + failed_when: false + register: pg_upgrade_check_result + when: + - inventory_hostname in groups['primary'] + +- name: Print the result of the pg_upgrade check + ansible.builtin.debug: + var: pg_upgrade_check_result.stdout_lines + failed_when: "'Clusters are compatible' not in pg_upgrade_check_result.stdout" + when: + - inventory_hostname in groups['primary'] diff --git a/automation/roles/upgrade/tasks/upgrade_primary.yml b/automation/roles/upgrade/tasks/upgrade_primary.yml new file mode 100644 index 000000000..0bf0221c6 --- /dev/null +++ b/automation/roles/upgrade/tasks/upgrade_primary.yml @@ -0,0 +1,61 @@ +--- +# Upgrade with pg_upgrade (hard-links) + +# In the --new-options argument, an inline if condition checks if 'timescaledb' is present in the pg_shared_preload_libraries_value. +# If it is, it appends '-c timescaledb.restoring='on'' to the --new-options argument. 
+- name: "Upgrade the PostgreSQL to version {{ pg_new_version }} on the Primary (using pg_upgrade --link)" + ansible.builtin.command: > + {{ pg_new_bindir }}/pg_upgrade + --username={{ pg_install_user.stdout }} + --old-bindir {{ pg_old_bindir }} + --new-bindir {{ pg_new_bindir }} + --old-datadir {{ pg_old_datadir }} + --new-datadir {{ pg_new_datadir }} + --old-options "-c config_file={{ pg_old_confdir }}/postgresql.conf" + --new-options "-c config_file={{ pg_new_confdir }}/postgresql.conf {{ shared_preload_libraries }} {{ timescaledb_restoring }}" + --jobs={{ ansible_processor_vcpus }} + --link + args: + chdir: "/tmp" + vars: + shared_preload_libraries: "-c shared_preload_libraries='{{ pg_shared_preload_libraries_value }}'" + timescaledb_restoring: '{{ "-c timescaledb.restoring=''on''" if ''timescaledb'' in pg_shared_preload_libraries_value else '''' }}' + register: pg_upgrade_result + ignore_errors: true # show the error and perform rollback + when: + - inventory_hostname in groups['primary'] + +# Stop, if the upgrade failed +- block: + - name: Perform rollback + ansible.builtin.include_tasks: rollback.yml + + - name: "ERROR: PostgreSQL upgrade failed" + ansible.builtin.fail: + msg: + - "The PostgreSQL upgrade has encountered an error and a rollback has been initiated." + - "For detailed information, please consult the pg_upgrade log located at '{{ pg_new_datadir }}/pg_upgrade_output.d'" + run_once: true + when: hostvars[groups['primary'][0]].pg_upgrade_result is failed + +# If the length of the pg_upgrade_result.stdout_lines is greater than 100 lines, +# the upgrade_output variable will include the first 70 lines, an ellipsis (...), +# and the last 30 lines of the pg_upgrade_result.stdout_lines. +- name: Print the result of the pg_upgrade + ansible.builtin.debug: + msg: + - "{{ pg_upgrade_result.stdout_lines[:70] }}" + - " ... " + - "{{ pg_upgrade_result.stdout_lines[-30:] }}" + when: + - inventory_hostname in groups['primary'] + - pg_upgrade_result.stdout_lines | length > 100 + +# Otherwise, it will include all lines of the pg_upgrade_result.stdout_lines. +- name: Print the result of the pg_upgrade + ansible.builtin.debug: + msg: + - "{{ pg_upgrade_result.stdout_lines }}" + when: + - inventory_hostname in groups['primary'] + - pg_upgrade_result.stdout_lines | length <= 100 diff --git a/automation/roles/upgrade/tasks/upgrade_secondary.yml b/automation/roles/upgrade/tasks/upgrade_secondary.yml new file mode 100644 index 000000000..628d5a08a --- /dev/null +++ b/automation/roles/upgrade/tasks/upgrade_secondary.yml @@ -0,0 +1,132 @@ +--- +# Upgrade with rsync (hard-links) + +# This task performs the upgrade of PostgreSQL on the replica servers using the RSync utility. +# It follows these steps: +# 1. Retrieve the list of target secondary servers from the inventory, which are the servers where the upgrade will be performed. +# 2. Count the number of target secondary servers to determine the parallel execution limit. +# 3. Use xargs to execute the RSync command in parallel for each target secondary server. 
+ +- name: Make sure that the new data directory "{{ pg_new_datadir }}" is empty on the Replica + ansible.builtin.file: + path: "{{ pg_new_datadir }}" + state: "{{ item }}" + mode: "0700" + group: postgres + owner: postgres + loop: + - absent + - directory + when: + - inventory_hostname in groups['secondary'] + +# If the source and target directories are inside versioned directories +# (example: /pgdata/<old_version>/main -> /pgdata/<new_version>/main) +- block: + - name: "Upgrade PostgreSQL on the Replica (using rsync --hard-links)" + vars: + secondary_servers: "{{ groups['secondary'] | join('\n') }}" + secondary_count: "{{ groups['secondary'] | length }}" + ansible.builtin.shell: | + set -o pipefail; + echo -e "{{ secondary_servers }}" | xargs -I {} -P "{{ secondary_count }}" -n 1 \ + rsync -e 'ssh -o StrictHostKeyChecking=no' --archive --delete --hard-links --size-only --no-inc-recursive \ + {{ pg_upper_datadir }}/{{ pg_old_version }} {{ pg_upper_datadir }}/{{ pg_new_version }} {}:{{ pg_upper_datadir }} + args: + executable: /bin/bash + async: 3600 # run the command asynchronously with a maximum duration of 1 hour + poll: 0 + register: rsync_result_1 + + - name: Wait for the rsync to complete. + ansible.builtin.async_status: + jid: "{{ rsync_result_1.ansible_job_id }}" + register: rsync_1_job_result + until: rsync_1_job_result.finished + retries: 1800 + delay: 2 + become: true + become_user: postgres + when: + - inventory_hostname in groups['primary'] + - pg_old_datadir|dirname == pg_upper_datadir + '/' + (pg_old_version | string) + - pg_new_datadir|dirname == pg_upper_datadir + '/' + (pg_new_version | string) + +# If the source and target directories are non-versioned directories +# (example: /pgdata/main -> /pgdata/main) +- block: + - name: "Upgrade PostgreSQL on the Replica (using rsync --hard-links)" + vars: + secondary_servers: "{{ groups['secondary'] | join('\n') }}" + secondary_count: "{{ groups['secondary'] | length }}" + ansible.builtin.shell: | + set -o pipefail; + echo -e "{{ secondary_servers }}" | xargs -I {} -P "{{ secondary_count }}" -n 1 \ + rsync -e 'ssh -o StrictHostKeyChecking=no' --archive --delete --hard-links --size-only --no-inc-recursive \ + {{ pg_old_datadir }} {{ pg_new_datadir }} {}:{{ pg_upper_datadir }} + args: + executable: /bin/bash + async: 3600 # run the command asynchronously with a maximum duration of 1 hour + poll: 0 + register: rsync_result_2 + + - name: Wait for the rsync to complete.
+ ansible.builtin.async_status: + jid: "{{ rsync_result_2.ansible_job_id }}" + register: rsync_2_job_result + until: rsync_2_job_result.finished + retries: 1800 + delay: 2 + become: true + become_user: postgres + when: + - inventory_hostname in groups['primary'] + - pg_old_datadir|dirname != pg_upper_datadir + '/' + (pg_old_version | string) + - pg_new_datadir|dirname != pg_upper_datadir + '/' + (pg_new_version | string) + +# Tablespaces (if exists) +- block: + - name: "Upgrade the PostgreSQL tablespaces on the Replica (using rsync --hard-links)" + vars: + secondary_servers: "{{ groups['secondary'] | join('\n') }}" + secondary_count: "{{ groups['secondary'] | length }}" + ansible.builtin.shell: | + set -o pipefail; + for tablespace_location in {{ tablespace_location.stdout_lines | join(' ') }}; + do + old_tablespace_dir_count=$(ls -d ${tablespace_location}/PG_{{ pg_old_version }}_* | wc -l) + new_tablespace_dir_count=$(ls -d ${tablespace_location}/PG_{{ pg_new_version }}_* | wc -l) + + if [ $old_tablespace_dir_count -ne 1 ] || [ $new_tablespace_dir_count -ne 1 ]; then + echo "Expected exactly one matching directory for each version, \ + but found $old_tablespace_dir_count for old version and $new_tablespace_dir_count for new version. \ + Skipping rsync." + exit 1 + fi + + old_tablespace_dir=$(ls -d ${tablespace_location}/PG_{{ pg_old_version }}_*) + new_tablespace_dir=$(ls -d ${tablespace_location}/PG_{{ pg_new_version }}_*) + + echo -e "{{ secondary_servers }}" | xargs -I {} -P "{{ secondary_count }}" -n 1 \ + rsync -e 'ssh -o StrictHostKeyChecking=no' --archive --delete --hard-links --size-only --no-inc-recursive \ + "${old_tablespace_dir}" "${new_tablespace_dir}" {}:"${tablespace_location}" + done + args: + executable: /bin/bash + async: 3600 # run the command asynchronously with a maximum duration of 1 hour + poll: 0 + register: rsync_tablespace_result + + - name: Wait for the tablespaces rsync to complete. 
+ ansible.builtin.async_status: + jid: "{{ rsync_tablespace_result.ansible_job_id }}" + register: rsync_tablespace_job_result + until: rsync_tablespace_job_result.finished + retries: 1800 + delay: 2 + become: true + become_user: postgres + when: + - inventory_hostname in groups['primary'] + - tablespace_location.stdout_lines is defined + - tablespace_location.stdout_lines | length > 0 diff --git a/automation/roles/upgrade/templates/haproxy-no-http-checks.cfg.j2 b/automation/roles/upgrade/templates/haproxy-no-http-checks.cfg.j2 new file mode 100644 index 000000000..7c6be97f7 --- /dev/null +++ b/automation/roles/upgrade/templates/haproxy-no-http-checks.cfg.j2 @@ -0,0 +1,137 @@ +global + maxconn {{ haproxy_maxconn.global }} + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + +defaults + mode tcp + log global + retries 2 + timeout queue 5s + timeout connect 5s + timeout client {{ haproxy_timeout.client }} + timeout server {{ haproxy_timeout.server }} + timeout check 15s + +listen stats + mode http + bind {{ inventory_hostname }}:{{ haproxy_listen_port.stats }} + stats enable + stats uri / + +listen master +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option tcplog +{% for host in groups['primary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} +{% endfor %} + +{% if haproxy_listen_port.master_direct is defined %} +listen master_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.master_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.master_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.master }} + option tcplog + {% for host in groups['primary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} + {% endfor %} +{% endif %} + +listen replicas +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin +{% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} +{% endfor %} + +{% if haproxy_listen_port.replicas_direct is defined %} +listen replicas_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin + {% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} + {% endfor %} +{% endif %} + +listen replicas_sync +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }} +{% 
endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin +{% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} +{% endfor %} + +{% if haproxy_listen_port.replicas_sync_direct is defined %} +listen replicas_sync_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_sync_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin + {% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} + {% endfor %} +{% endif %} + +listen replicas_async +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin +{% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} +{% endfor %} + +{% if haproxy_listen_port.replicas_async_direct is defined %} +listen replicas_async_direct +{% if cluster_vip is defined and cluster_vip | length > 0 %} + bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async_direct }} +{% else %} + bind {{ inventory_hostname }}:{{ haproxy_listen_port.replicas_async_direct }} +{% endif %} + maxconn {{ haproxy_maxconn.replica }} + option tcplog + balance roundrobin + {% for host in groups['secondary'] %} +server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} + {% endfor %} +{% endif %} + diff --git a/automation/roles/vip_manager/README.md b/automation/roles/vip_manager/README.md new file mode 100644 index 000000000..da153fd91 --- /dev/null +++ b/automation/roles/vip_manager/README.md @@ -0,0 +1 @@ +# Ansible Role: vip_manager diff --git a/automation/roles/vip_manager/defaults/main.yml b/automation/roles/vip_manager/defaults/main.yml new file mode 100644 index 000000000..7a64b627c --- /dev/null +++ b/automation/roles/vip_manager/defaults/main.yml @@ -0,0 +1,8 @@ +--- +vip_manager_architecture_map: + amd64: x86_64 + x86_64: x86_64 + aarch64: arm64 + arm64: arm64 + 32-bit: "i386" + 64-bit: x86_64 diff --git a/roles/vip-manager/disable/tasks/main.yml b/automation/roles/vip_manager/disable/tasks/main.yml similarity index 82% rename from roles/vip-manager/disable/tasks/main.yml rename to automation/roles/vip_manager/disable/tasks/main.yml index ef1985c0b..a6c00f150 100644 --- a/roles/vip-manager/disable/tasks/main.yml +++ b/automation/roles/vip_manager/disable/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Disabe vip-manager service - systemd: + ansible.builtin.systemd: daemon_reload: true name: vip-manager state: stopped diff --git a/roles/vip-manager/handlers/main.yml b/automation/roles/vip_manager/handlers/main.yml similarity index 55% rename from roles/vip-manager/handlers/main.yml rename to automation/roles/vip_manager/handlers/main.yml index ed8424c97..86903d36b 100644 --- a/roles/vip-manager/handlers/main.yml +++ b/automation/roles/vip_manager/handlers/main.yml @@ -1,7 +1,6 @@ --- - - name: Restart vip-manager service - systemd: + ansible.builtin.systemd: 
daemon_reload: true name: vip-manager state: restarted @@ -9,12 +8,11 @@ listen: "restart vip-manager" - name: Wait for the cluster ip address (VIP) "{{ cluster_vip }}" is running - wait_for: + ansible.builtin.wait_for: host: "{{ cluster_vip }}" - port: "{{ ansible_ssh_port | default(22) }}" + port: "{{ pgbouncer_listen_port if pgbouncer_install | bool else postgresql_port }}" state: started - timeout: 60 + timeout: 15 # max wait time: 30 seconds delay: 2 + ignore_errors: true # show the error and continue the playbook execution listen: "restart vip-manager" - -... diff --git a/roles/vip-manager/tasks/main.yml b/automation/roles/vip_manager/tasks/main.yml similarity index 66% rename from roles/vip-manager/tasks/main.yml rename to automation/roles/vip_manager/tasks/main.yml index 3fe3890f1..b8f7c1f23 100644 --- a/roles/vip-manager/tasks/main.yml +++ b/automation/roles/vip_manager/tasks/main.yml @@ -1,9 +1,7 @@ --- -# yamllint disable rule:line-length - -- block: # install vip-manager package from repo +- block: # install vip-manager package from repo - name: Get vip-manager package - get_url: + ansible.builtin.get_url: url: "{{ item }}" dest: /tmp/ timeout: 60 @@ -13,45 +11,69 @@ environment: "{{ proxy_env | default({}) }}" - name: Install vip-manager - apt: + ansible.builtin.apt: force_apt_get: true deb: "/tmp/{{ vip_manager_package_repo | basename }}" state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 when: ansible_os_family == "Debian" - name: Install vip-manager - package: + ansible.builtin.package: name: "/tmp/{{ vip_manager_package_repo | basename }}" state: present disable_gpg_check: true + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "RedHat" - when: installation_method == "repo" and vip_manager_package_repo | length > 0 + when: + - installation_method == "repo" + - vip_manager_package_repo | length > 0 + - not ansible_check_mode + - not postgresql_cluster_maintenance | default(false) | bool tags: vip, vip_manager, vip_manager_install -- block: # install vip-manager package from file +- block: # install vip-manager package from file - name: Copy vip-manager package into /tmp - copy: + ansible.builtin.copy: src: "{{ vip_manager_package_file }}" dest: /tmp/ - name: Install vip-manager - apt: + ansible.builtin.apt: force_apt_get: true deb: "/tmp/{{ vip_manager_package_file | basename }}" state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 when: ansible_os_family == "Debian" - name: Install vip-manager - package: + ansible.builtin.package: name: "/tmp/{{ vip_manager_package_file | basename }}" state: present disable_gpg_check: true + register: package_status + until: package_status is success + delay: 5 + retries: 3 when: ansible_os_family == "RedHat" - when: installation_method == "file" and vip_manager_package_file | length > 0 + when: + - installation_method == "file" + - vip_manager_package_file | length > 0 + - not ansible_check_mode + - not postgresql_cluster_maintenance | default(false) | bool tags: vip, vip_manager, vip_manager_install - name: Make sure the conf directory "{{ vip_manager_conf | dirname }}" exist - file: + ansible.builtin.file: dest: "{{ vip_manager_conf | dirname }}" state: directory owner: postgres @@ -59,20 +81,20 @@ tags: vip, vip_manager, vip_manager_conf - name: Generate conf file "{{ vip_manager_conf }}" - template: + ansible.builtin.template: src: templates/vip-manager.yml.j2 dest: "{{ vip_manager_conf }}" 
owner: postgres group: postgres - mode: 0644 + mode: "0644" notify: "restart vip-manager" when: existing_pgcluster is not defined or not existing_pgcluster|bool tags: vip, vip_manager, vip_manager_conf -- block: # for add_pgnode.yml +- block: # for add_pgnode.yml - name: Fetch "{{ vip_manager_conf | basename }}" conf file from master run_once: true - fetch: + ansible.builtin.fetch: src: "{{ vip_manager_conf }}" dest: "files/{{ vip_manager_conf | basename }}" validate_checksum: true @@ -80,20 +102,20 @@ delegate_to: "{{ groups.master[0] }}" - name: Copy "{{ vip_manager_conf | basename }}" conf file to replica - copy: + ansible.builtin.copy: src: "files/{{ vip_manager_conf | basename }}" dest: "{{ vip_manager_conf | dirname }}" notify: "restart vip-manager" - name: Prepare "{{ vip_manager_conf | basename }}" conf file (replace "nodename","iface") - lineinfile: + ansible.builtin.lineinfile: path: "{{ vip_manager_conf }}" regexp: "{{ item.regexp }}" line: "{{ item.line }}" backrefs: true loop: - - {regexp: '^nodename:', line: 'nodename: "{{ ansible_hostname }}"'} - - {regexp: '^iface:', line: 'iface: {{ vip_manager_iface }} # interface to which the virtual ip will be added'} + - { regexp: "^nodename:", line: 'nodename: "{{ ansible_hostname }}"' } + - { regexp: "^iface:", line: "iface: {{ vip_manager_iface }} # interface to which the virtual ip will be added" } loop_control: label: "{{ item.line }}" notify: "restart vip-manager" @@ -101,16 +123,14 @@ tags: vip, vip_manager, vip_manager_conf - name: Generate systemd service file - template: + ansible.builtin.template: src: templates/vip-manager.service.j2 dest: /etc/systemd/system/vip-manager.service owner: postgres group: postgres - mode: 0644 + mode: "0644" notify: "restart vip-manager" tags: vip, vip_manager, vip_manager_service - name: Make sure handlers are flushed immediately - meta: flush_handlers - -... + ansible.builtin.meta: flush_handlers diff --git a/roles/vip-manager/templates/vip-manager.service.j2 b/automation/roles/vip_manager/templates/vip-manager.service.j2 similarity index 100% rename from roles/vip-manager/templates/vip-manager.service.j2 rename to automation/roles/vip_manager/templates/vip-manager.service.j2 diff --git a/automation/roles/vip_manager/templates/vip-manager.yml.j2 b/automation/roles/vip_manager/templates/vip-manager.yml.j2 new file mode 100644 index 000000000..99dfb9f7a --- /dev/null +++ b/automation/roles/vip_manager/templates/vip-manager.yml.j2 @@ -0,0 +1,81 @@ +# config for vip-manager by Cybertec Schönig & Schönig GmbH + +# time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. +interval: {{ vip_manager_interval }} + +{% if vip_manager_dcs_type | default(dcs_type) == 'patroni' %} +trigger-key: "/leader" +trigger-value: 200 +{% else %} +# the etcd or consul key which vip-manager will regularly poll. +trigger-key: "/{{ patroni_etcd_namespace | default('service') }}/{{ patroni_cluster_name }}/leader" +# if the value of the above key matches the trigger-value (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface +trigger-value: "{{ ansible_hostname }}" +{% endif %} + +ip: {{ vip_manager_ip }} # the virtual ip address to manage +netmask: {{ vip_manager_mask }} # netmask for the virtual ip +interface: {{ vip_manager_iface }} # interface to which the virtual ip will be added + +# how the virtual ip should be managed. 
we currently support "ip addr add/remove" through shell commands or the Hetzner api +hosting-type: basic # possible values: basic, or hetzner. + +dcs-type: {{ vip_manager_dcs_type | default(dcs_type) }} # etcd, consul or patroni + +{% if vip_manager_dcs_type | default(dcs_type) == 'etcd' %} +# a list that contains all DCS endpoints to which vip-manager could talk. +{% if not dcs_exists | bool %} +dcs-endpoints: +{% for host in groups['etcd_cluster'] %} + - {{ patroni_etcd_protocol | default('http', true) }}://{{ hostvars[host]['inventory_hostname'] }}:2379 +{% endfor %} +{% else %} +dcs-endpoints: +{% for etcd_hosts in patroni_etcd_hosts %} + - {{ patroni_etcd_protocol | default('http', true) }}://{{ etcd_hosts.host }}:{{ etcd_hosts.port }} +{% endfor %} +{% endif %} +{% endif %} +{% if vip_manager_dcs_type | default(dcs_type) == 'consul' %} +# A list that contains all DCS endpoints to which vip-manager could talk. +dcs-endpoints: +{% if not dcs_exists | bool %} + {% if consul_client_address == '127.0.0.1' %} + - http://127.0.0.1:8500 + {% else %} + {% for host in groups['consul_instances'] %} + - http://{{ hostvars[host]['inventory_hostname'] }}:8500 + {% endfor %} + {% endif %} +{% else %} + {% for consul_host in consul_join %} + - http://{{ consul_host }}:8500 + {% endfor %} +{% endif %} +{% if consul_token | default("") | length > 0 %} +consul-token: "{{ consul_token | default('') }}" +{% endif %} +{% endif %} + +{% if vip_manager_dcs_type | default(dcs_type) == 'etcd' %} +{% if patroni_etcd_username | default("") | length > 0 %} +etcd-user: {{ patroni_etcd_username | default("") }} +{% endif %} +{% if patroni_etcd_password | default("") | length > 0 %} +etcd-password: {{ patroni_etcd_password | default("") }} +{% endif %} +{% if etcd_tls_enable | default(false) | bool %} +# when etcd-ca-file is specified, TLS connections to the etcd endpoints will be used. +etcd-ca-file: {{ patroni_etcd_cacert | default('/etc/patroni/tls/etcd/ca.crt') }} +# when etcd-cert-file and etcd-key-file are specified, we will authenticate at the etcd endpoints using this certificate and key. +etcd-cert-file: {{ patroni_etcd_cert | default('/etc/patroni/tls/etcd/server.crt') }} +etcd-key-file: {{ patroni_etcd_key | default('/etc/patroni/tls/etcd/server.key') }} +{% endif %} +{% endif %} + +# how often things should be retried and how long to wait between retries. 
(currently only affects arpClient) +retry-num: 2 +retry-after: 250 # in milliseconds + +# verbose logs (currently only supported for hetzner) +verbose: false diff --git a/automation/roles/wal_g/README.md b/automation/roles/wal_g/README.md new file mode 100644 index 000000000..b758b494e --- /dev/null +++ b/automation/roles/wal_g/README.md @@ -0,0 +1 @@ +# Ansible Role: wal_g diff --git a/automation/roles/wal_g/defaults/main.yml b/automation/roles/wal_g/defaults/main.yml new file mode 100644 index 000000000..0e3a60580 --- /dev/null +++ b/automation/roles/wal_g/defaults/main.yml @@ -0,0 +1,13 @@ +--- +wal_g_architecture_map: + amd64: amd64 + x86_64: amd64 + aarch64: aarch64 + arm64: aarch64 + +# if 'wal_g_installation_method' is 'src' +go_architecture_map: + amd64: amd64 + x86_64: amd64 + aarch64: arm64 + arm64: arm64 diff --git a/automation/roles/wal_g/tasks/auto_conf.yml b/automation/roles/wal_g/tasks/auto_conf.yml new file mode 100644 index 000000000..08f7ddbdb --- /dev/null +++ b/automation/roles/wal_g/tasks/auto_conf.yml @@ -0,0 +1,161 @@ +# yamllint disable rule:line-length +--- +# AWS S3 bucket (if 'cloud_provider=aws') +- name: "Set variable 'wal_g_json' for backup in AWS S3 bucket" + ansible.builtin.set_fact: + wal_g_json: + - { option: "AWS_ACCESS_KEY_ID", value: "{{ WALG_AWS_ACCESS_KEY_ID | default(lookup('ansible.builtin.env', 'AWS_ACCESS_KEY_ID')) }}" } + - { option: "AWS_SECRET_ACCESS_KEY", value: "{{ WALG_AWS_SECRET_ACCESS_KEY | default(lookup('ansible.builtin.env', 'AWS_SECRET_ACCESS_KEY')) }}" } + - { option: "WALG_S3_PREFIX", value: "{{ WALG_S3_PREFIX | default('s3://' + (aws_s3_bucket_name | default(patroni_cluster_name + '-backup'))) }}" } + - { option: "AWS_REGION", value: "{{ WALG_AWS_REGION | default(aws_s3_bucket_region | default(server_location)) }}" } + - { option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" } + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } + - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path | default(postgresql_home_dir + '/wal-g-prefetch') }}" } + - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" } + - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" } + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'aws' + +# GCS Bucket (if 'cloud_provider=gcp') +- block: + - name: "Set variable 'wal_g_json' for backup in GCS Bucket" + ansible.builtin.set_fact: + wal_g_json: + - { option: "GOOGLE_APPLICATION_CREDENTIALS", value: "{{ WALG_GS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" } + - { option: "WALG_GS_PREFIX", value: "{{ WALG_GS_PREFIX | default('gs://' + (gcp_bucket_name | default(patroni_cluster_name + '-backup'))) }}" } + - { option: "WALG_COMPRESSION_METHOD", value: "{{ 
WALG_COMPRESSION_METHOD | default('brotli') }}" } + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } + - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path | default(postgresql_home_dir + '/wal-g-prefetch') }}" } + - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" } + - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" } + no_log: true # do not output contents to the ansible log + + # if 'gcs_key_file' is not defined, copy GCS key file from GCP_SERVICE_ACCOUNT_CONTENTS environment variable. + - block: + - name: "Get GCP service account contents from localhost" + ansible.builtin.set_fact: + gcp_service_account_contents: "{{ lookup('ansible.builtin.env', 'GCP_SERVICE_ACCOUNT_CONTENTS') }}" + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output GCP service account contents to the ansible log + + - name: "Copy GCP service account contents to {{ WALG_GS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + ansible.builtin.copy: + content: "{{ gcp_service_account_contents }}" + dest: "{{ WALG_GS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + mode: "0600" + owner: "postgres" + group: "postgres" + no_log: true # do not output GCP service account contents to the ansible log + when: gcs_key_file is not defined + + # if 'gcs_key_file' is defined, copy this GCS key file. 
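+ # Example (hypothetical): set 'gcs_key_file: files/gcs-key.json' in your inventory variables; the file is read from the Ansible controller and placed at the GOOGLE_APPLICATION_CREDENTIALS path defined above.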
+ - name: "Copy GCS key file to {{ WALG_GS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + ansible.builtin.copy: + src: "{{ gcs_key_file }}" + dest: "{{ WALG_GS_KEY | default(postgresql_home_dir + '/gcs-key.json') }}" + mode: "0600" + owner: "postgres" + group: "postgres" + no_log: true # do not output GCP service account contents to the ansible log + when: gcs_key_file is defined and gcs_key_file | length > 0 + when: cloud_provider | default('') | lower == 'gcp' + +# Azure Blob Storage (if 'cloud_provider=azure') +- name: "Set variable 'wal_g_json' for backup in Azure Blob Storage" + ansible.builtin.set_fact: + wal_g_json: + - { + option: "AZURE_STORAGE_ACCOUNT", + value: "{{ WALG_AZURE_STORAGE_ACCOUNT | default(azure_blob_storage_account_name | default(patroni_cluster_name | lower | replace('-', '') | truncate(24, true, ''))) }}", + } + - { + option: "AZURE_STORAGE_ACCESS_KEY", + value: "{{ WALG_AZURE_STORAGE_ACCESS_KEY | default(hostvars['localhost']['azure_storage_account_key'] | default('')) }}", + } + - { + option: "WALG_AZ_PREFIX", + value: "{{ WALG_AZ_PREFIX | default('azure://' + (azure_blob_storage_name | default(patroni_cluster_name + '-backup'))) }}", + } + - { option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" } + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } + - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path | default(postgresql_home_dir + '/wal-g-prefetch') }}" } + - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" } + - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" } + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'azure' + +# DigitalOcean Spaces Object Storage (if 'cloud_provider=digitalocean') +# Note: requires the Spaces access keys "AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY" (https://cloud.digitalocean.com/account/api/spaces) +- name: "Set variable 'wal_g_json' for backup in DigitalOcean Spaces Object Storage" + ansible.builtin.set_fact: + wal_g_json: + - { option: "AWS_ACCESS_KEY_ID", value: "{{ WALG_AWS_ACCESS_KEY_ID | default(AWS_ACCESS_KEY_ID | default('')) }}" } + - { option: "AWS_SECRET_ACCESS_KEY", value: "{{ WALG_AWS_SECRET_ACCESS_KEY | default(AWS_SECRET_ACCESS_KEY | default('')) }}" } + - { + option: "AWS_ENDPOINT", + value: "{{ WALG_S3_ENDPOINT | default('https://' + (digital_ocean_spaces_region | default(server_location)) + '.digitaloceanspaces.com') }}", + } + - { option: "AWS_REGION", value: "{{ WALG_S3_REGION | default(digital_ocean_spaces_region | default(server_location)) }}" } + - { option: "AWS_S3_FORCE_PATH_STYLE", value: "{{ AWS_S3_FORCE_PATH_STYLE | default(true) }}" } + - { option: "WALG_S3_PREFIX", value: "{{ WALG_S3_PREFIX | default('s3://' + (digital_ocean_spaces_name | default(patroni_cluster_name + '-backup'))) }}" } + - { 
option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" } + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } + - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path | default(postgresql_home_dir + '/wal-g-prefetch') }}" } + - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" } + - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" } + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'digitalocean' + +# Hetzner Object Storage (if 'cloud_provider=hetzner') +- name: "Set variable 'wal_g_json' for backup in AWS S3 bucket" + ansible.builtin.set_fact: + wal_g_json: + - { option: "AWS_ACCESS_KEY_ID", value: "{{ WALG_AWS_ACCESS_KEY_ID | default(hetzner_object_storage_access_key | default('')) }}" } + - { option: "AWS_SECRET_ACCESS_KEY", value: "{{ WALG_AWS_SECRET_ACCESS_KEY | default(hetzner_object_storage_secret_key | default('')) }}" } + - { + option: "AWS_ENDPOINT", + value: "{{ WALG_S3_ENDPOINT | default(hetzner_object_storage_endpoint | default('https://' + (hetzner_object_storage_region | default(server_location)) + '.your-objectstorage.com')) }}", + } + - { option: "AWS_S3_FORCE_PATH_STYLE", value: "{{ AWS_S3_FORCE_PATH_STYLE | default(true) }}" } + - { option: "AWS_REGION", value: "{{ WALG_S3_REGION | default(hetzner_object_storage_region | default(server_location)) }}" } + - { + option: "WALG_S3_PREFIX", + value: "{{ WALG_S3_PREFIX | default('s3://' + (hetzner_object_storage_name | default(patroni_cluster_name + '-backup'))) }}", + } + - { option: "WALG_COMPRESSION_METHOD", value: "{{ WALG_COMPRESSION_METHOD | default('brotli') }}" } + - { option: "WALG_DELTA_MAX_STEPS", value: "{{ WALG_DELTA_MAX_STEPS | default('6') }}" } + - { option: "WALG_DOWNLOAD_CONCURRENCY", value: "{{ WALG_DOWNLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_PREFETCH_DIR", value: "{{ wal_g_prefetch_dir_path | default(postgresql_home_dir + '/wal-g-prefetch') }}" } + - { option: "WALG_UPLOAD_CONCURRENCY", value: "{{ WALG_UPLOAD_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "WALG_UPLOAD_DISK_CONCURRENCY", value: "{{ WALG_UPLOAD_DISK_CONCURRENCY | default([ansible_processor_vcpus | int // 2, 1] | max) }}" } + - { option: "PGDATA", value: "{{ postgresql_data_dir }}" } + - { option: "PGHOST", value: "{{ postgresql_unix_socket_dir | default('/var/run/postgresql') }}" } + - { option: "PGPORT", value: "{{ postgresql_port | default('5432') }}" } + - { option: "PGUSER", value: "{{ patroni_superuser_username | default('postgres') }}" } + delegate_to: localhost + run_once: true # noqa run-once + no_log: true # do not output contents to the ansible log + when: cloud_provider | default('') | lower == 'hetzner' diff --git 
a/automation/roles/wal_g/tasks/cron.yml b/automation/roles/wal_g/tasks/cron.yml new file mode 100644 index 000000000..0632d877e --- /dev/null +++ b/automation/roles/wal_g/tasks/cron.yml @@ -0,0 +1,40 @@ +--- +- name: Make sure that the cronie package is installed + ansible.builtin.package: + name: cronie + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "RedHat" + tags: wal_g_cron + +- name: Make sure that the cron package is installed + ansible.builtin.apt: + name: cron + state: present + register: apt_status + until: apt_status is success + delay: 5 + retries: 3 + environment: "{{ proxy_env | default({}) }}" + when: ansible_os_family == "Debian" + tags: wal_g_cron + +- name: Add WAL-G cron jobs + ansible.builtin.cron: + cron_file: "{{ item.file | default('') }}" + user: "{{ item.user | default('postgres') }}" + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + day: "{{ item.day | default('*') }}" + month: "{{ item.month | default('*') }}" + weekday: "{{ item.weekday | default('*') }}" + name: "{{ item.name }}" + disabled: "{{ item.disabled | default(False) }}" + state: "{{ item.state | default('present') }}" + job: "{{ item.job }}" + loop: "{{ wal_g_cron_jobs }}" + tags: wal_g_cron diff --git a/automation/roles/wal_g/tasks/main.yml b/automation/roles/wal_g/tasks/main.yml new file mode 100644 index 000000000..af0f00b81 --- /dev/null +++ b/automation/roles/wal_g/tasks/main.yml @@ -0,0 +1,306 @@ +--- +# Automatic setup of the backup configuration based on the selected cloud provider. +# if 'cloud_provider' is 'aws', 'gcp', 'azure', 'digitalocean', or 'hetzner'. +- ansible.builtin.import_tasks: auto_conf.yml + when: + - cloud_provider | default('') | length > 0 + - wal_g_auto_conf | default(true) | bool # to be able to disable auto backup settings + tags: wal-g, wal_g, wal_g_conf + +# Pre-check +- name: Check if WAL-G is already installed + ansible.builtin.shell: | + set -o pipefail; + "{{ wal_g_path.split(' ')[0] }}" --version | awk {'print $3'} | tr -d 'v' + args: + executable: /bin/bash + changed_when: false + failed_when: false + register: wal_g_installed_version + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/bin" + tags: wal-g, wal_g, wal_g_install + +- name: WAL-G check result + ansible.builtin.debug: + msg: "WAL-G is already installed, version {{ wal_g_installed_version.stdout }}. Skip the installation." + when: + - wal_g_installed_version.rc == 0 + - wal_g_installed_version.stdout == wal_g_version + tags: wal-g, wal_g, wal_g_install + +# Install WAL-G from a precompiled binary +# (if 'wal_g_installation_method' is 'binary') +# Note: excluding RHEL 8 as GLIBC version 2.29 or higher is required. +- block: + - name: "Download WAL-G v{{ wal_g_version | string | replace('v', '') }} binary" + ansible.builtin.get_url: + url: "{{ wal_g_repo }}/{{ wal_g_archive }}" + dest: /tmp/ + timeout: 60 + validate_certs: false + check_mode: false + vars: + wal_g_repo: "https://github.com/wal-g/wal-g/releases/download/v%7B%7B%20wal_g_version%20|%20string%20|%20replace('v', '') }}" + wal_g_archive: "wal-g-pg-ubuntu-20.04-{{ wal_g_architecture_map[ansible_architecture] }}.tar.gz" + environment: "{{ proxy_env | default({}) }}" + + # Note: We are using a precompiled binary on Ubuntu 20.04, + # but since Go binaries are cross-platform, it works well on other distributions as well.
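+ # Example of the resolved download URL (hypothetical wal_g_version '3.0.5' on an x86_64 host, which wal_g_architecture_map resolves to 'amd64'): + # https://github.com/wal-g/wal-g/releases/download/v3.0.5/wal-g-pg-ubuntu-20.04-amd64.tar.gz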
+ + - name: Extract WAL-G into /tmp + ansible.builtin.unarchive: + src: "/tmp/wal-g-pg-ubuntu-20.04-{{ wal_g_architecture_map[ansible_architecture] }}.tar.gz" + dest: /tmp/ + extra_opts: + - --no-same-owner + remote_src: true + check_mode: false + + - name: Copy WAL-G binary file to "{{ wal_g_path.split(' ')[0] }}" + ansible.builtin.copy: + src: "/tmp/wal-g-pg-ubuntu-20.04-{{ wal_g_architecture_map[ansible_architecture] }}" + dest: "{{ wal_g_path.split(' ')[0] }}" + mode: u+x,g+x,o+x + remote_src: true + when: + - installation_method == "repo" + - wal_g_installation_method == "binary" + - wal_g_version is version('1.0', '>=') + - (wal_g_installed_version.stderr is search("command not found") or wal_g_installed_version.stdout != wal_g_version) + - not (ansible_os_family == "RedHat" and ansible_distribution_major_version == '8') + tags: wal-g, wal_g, wal_g_install + +# Install WAL-G from the source code +# (if 'wal_g_installation_method' is 'src') +- block: + - name: Install lib dependencies to build WAL-G + ansible.builtin.package: + name: + - libbrotli-dev + # - liblzo2-dev # https://github.com/wal-g/wal-g/issues/1412 + - libsodium-dev + - make + - cmake + - git + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "Debian" + + - name: Install lib dependencies to build WAL-G + ansible.builtin.package: + name: + - brotli-devel + # - lzo-devel # https://github.com/wal-g/wal-g/issues/1412 + - libsodium-devel + - make + - cmake + - gcc + - git + state: present + register: package_status + until: package_status is success + delay: 5 + retries: 3 + when: ansible_os_family == "RedHat" + + - name: Check the installed Go version + ansible.builtin.shell: | + set -o pipefail; + go version | awk {'print $3'} | tr -d 'go' + args: + executable: /bin/bash + changed_when: false + failed_when: false + register: go_installed_version + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + + - name: Check the latest available Go version + ansible.builtin.shell: | + set -o pipefail; + curl -s https://go.dev/VERSION?m=text | grep 'go' | tr -d 'go' + args: + executable: /bin/bash + changed_when: false + register: go_latest_version + + - block: # Install latest Go compiler + - name: "Download Go v{{ go_latest_version.stdout }}" + ansible.builtin.get_url: + url: "https://go.dev/dl/go%7B%7B%20go_latest_version.stdout%20%7D%7D.linux-%7B%7B%20go_architecture_map[ansible_architecture]%20%7D%7D.tar.gz" + dest: /tmp/ + timeout: 60 + validate_certs: false + + - name: Install Go + ansible.builtin.unarchive: + src: "/tmp/go{{ go_latest_version.stdout }}.linux-{{ go_architecture_map[ansible_architecture] }}.tar.gz" + dest: /usr/local/ + extra_opts: + - --no-same-owner + remote_src: true + when: go_installed_version.stderr is search("command not found") or + go_installed_version.stdout is version(go_latest_version.stdout, '<') + + - name: "Download WAL-G v{{ wal_g_version | string | replace('v', '') }} source code" + ansible.builtin.git: + repo: https://github.com/wal-g/wal-g.git + version: v{{ wal_g_version | string | replace('v', '') }} + dest: /tmp/wal-g + force: true + + - name: Run go mod tidy to ensure dependencies are correct + ansible.builtin.command: go mod tidy + args: + chdir: /tmp/wal-g + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + + - name: Build WAL-G deps + become: true + become_user: root + community.general.make: + chdir: /tmp/wal-g/ + target: deps + params: + USE_BROTLI: 1 + 
USE_LIBSODIUM: 1 + # USE_LZO: 1 # https://github.com/wal-g/wal-g/issues/1412 + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + + - name: Build and install WAL-G + become: true + become_user: root + community.general.make: + chdir: /tmp/wal-g/ + target: pg_install + params: + USE_BROTLI: 1 + USE_LIBSODIUM: 1 + # USE_LZO: 1 # https://github.com/wal-g/wal-g/issues/1412 + GOBIN: /usr/local/bin + environment: + PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin" + environment: "{{ proxy_env | default({}) }}" + when: + - installation_method == "repo" + - (wal_g_installation_method == "src" or (ansible_os_family == "RedHat" and ansible_distribution_major_version == '8')) + - wal_g_version is version('1.0', '>=') + - (wal_g_installed_version.stderr is search("command not found") or wal_g_installed_version.stdout != wal_g_version) + - not ansible_check_mode + tags: wal-g, wal_g, wal_g_install + +# older versions of WAL-G (for compatibility) +- block: + - name: "Download WAL-G v{{ wal_g_version | string | replace('v', '') }} binary" + ansible.builtin.get_url: + url: "https://github.com/wal-g/wal-g/releases/download/v%7B%7B%20wal_g_version%20|%20string%20|%20replace('v', '') }}/wal-g.linux-amd64.tar.gz" + dest: /tmp/ + timeout: 60 + validate_certs: false + environment: "{{ proxy_env | default({}) }}" + check_mode: false + + - name: Extract WAL-G into /tmp + ansible.builtin.unarchive: + src: "/tmp/wal-g.linux-amd64.tar.gz" + dest: /tmp/ + extra_opts: + - --no-same-owner + remote_src: true + check_mode: false + + - name: Copy WAL-G binary file to "{{ wal_g_path.split(' ')[0] }}" + ansible.builtin.copy: + src: "/tmp/wal-g" + dest: "{{ wal_g_path.split(' ')[0] }}" + mode: u+x,g+x,o+x + remote_src: true + when: + - installation_method == "repo" + - wal_g_installation_method == "binary" + - wal_g_version is version('0.2.19', '<=') + - (wal_g_installed_version.stderr is search("command not found") or wal_g_installed_version.stdout != wal_g_version) + tags: wal-g, wal_g, wal_g_install + +# installation_method == "file" + +# A precompiled binary (package in tar.gz) +- block: + - name: "Extract WAL-G archive {{ wal_g_package_file }} into /tmp" + ansible.builtin.unarchive: + src: "{{ wal_g_package_file }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + + - name: Copy WAL-G binary file to "{{ wal_g_path.split(' ')[0] }}" + ansible.builtin.copy: + src: "/tmp/{{ wal_g_package_file.split('.tar.gz')[0] | basename }}" + dest: "{{ wal_g_path.split(' ')[0] }}" + mode: u+x,g+x,o+x + remote_src: true + check_mode: false + when: + - installation_method == "file" + - wal_g_version is version('1.0', '>=') + - (wal_g_installed_version.stderr is search("command not found") or + wal_g_installed_version.stdout != wal_g_version) + - (wal_g_package_file is defined and wal_g_package_file | length > 0) + tags: wal-g, wal_g, wal_g_install + +# older versions of WAL-G (for compatibility) +- block: + - name: "Extract WAL-G archive {{ wal_g_package_file }} into /tmp" + ansible.builtin.unarchive: + src: "{{ wal_g_package_file }}" + dest: /tmp/ + extra_opts: + - --no-same-owner + + - name: Copy WAL-G binary file to "{{ wal_g_path.split(' ')[0] }}" + ansible.builtin.copy: + src: "/tmp/wal-g" + dest: "{{ wal_g_path.split(' ')[0] }}" + mode: u+x,g+x,o+x + remote_src: true + check_mode: false + when: + - installation_method == "file" + - wal_g_version is version('0.2.19', '<=') + - (wal_g_installed_version.stderr is search("command not found") or + wal_g_installed_version.stdout != wal_g_version) + - 
wal_g_package_file == "wal-g.linux-amd64.tar.gz" + tags: wal-g, wal_g, wal_g_install + +# Ensure the WAL_G_PREFETCH directory is created if configured +- name: "Create WAL-G prefetch directory {{ wal_g_prefetch_dir_path }}" + ansible.builtin.file: + path: "{{ wal_g_prefetch_dir_path }}" + state: directory + owner: postgres + group: postgres + mode: "0740" + when: wal_g_prefetch_dir_create | default(true) | bool + tags: wal-g, wal_g, wal_g_install + +# Configure walg.json +- name: "Generate conf file {{ postgresql_home_dir }}/.walg.json" + ansible.builtin.template: + src: templates/walg.json.j2 + dest: "{{ postgresql_home_dir }}/.walg.json" + owner: postgres + group: postgres + mode: "0644" + tags: wal-g, wal_g, wal_g_conf + +- ansible.builtin.import_tasks: cron.yml + when: + - wal_g_cron_jobs is defined + - wal_g_cron_jobs | length > 0 + tags: wal-g, wal_g, wal_g_cron diff --git a/roles/wal-g/templates/walg.json.j2 b/automation/roles/wal_g/templates/walg.json.j2 similarity index 100% rename from roles/wal-g/templates/walg.json.j2 rename to automation/roles/wal_g/templates/walg.json.j2 diff --git a/tags.md b/automation/tags.md similarity index 79% rename from tags.md rename to automation/tags.md index 5e8ae811b..8ebaecf0f 100644 --- a/tags.md +++ b/automation/tags.md @@ -1,4 +1,3 @@ - ## Available tags - add_repo @@ -13,6 +12,9 @@ - hostname - dns, nameservers - etc_hosts +- swap +- - swap_create +- - swap_remove - sysctl, kernel - disable_thp - limits @@ -23,11 +25,15 @@ - - ntp_install - - ntp_conf - ssh_keys +- fetch_files +- copy_files - etcd - - etcd_install - - etcd_conf - - etcd_start - - etcd_status +- consul +- pgpass - patroni - - pip - - patroni_install @@ -39,6 +45,7 @@ - - patroni_start_replica - - postgresql_disable - - custom_wal_dir +- - point_in_time_recovery - pgbouncer - - pgbouncer_install - - pgbouncer_conf @@ -46,6 +53,7 @@ - - pgbouncer_logrotate - - pgbouncer_restart - - pgbouncer_generate_userlist +- - pgbouncer_auth_query - load_balancing - - haproxy - - - haproxy_requirements @@ -71,6 +79,8 @@ - - vip_manager_restart - postgresql_users - postgresql_databases +- postgresql_schemas +- postgresql_privs - postgresql_extensions - cluster_info - - patroni_status @@ -81,10 +91,22 @@ - wal_g - - wal_g_install - - wal_g_conf +- - wal_g_cron - pgbackrest - - pgbackrest_repo - - pgbackrest_install - - pgbackrest_conf - - pgbackrest_ssh_keys +- - pgbackrest_stanza_create +- - pgbackrest_cron +- pg_probackup +- - pg_probackup_repo +- - pg_probackup_install +- cron - netdata - +- ssh_public_keys +- mount, zpool +- perf, flamegraph +- tls + - tls_cert_generate + - tls_cert_copy diff --git a/automation/update_pgcluster.yml b/automation/update_pgcluster.yml new file mode 100644 index 000000000..0a3584075 --- /dev/null +++ b/automation/update_pgcluster.yml @@ -0,0 +1,271 @@ +--- +- name: update_pgcluster.yml | Update PostgreSQL HA Cluster (based on "Patroni") + hosts: postgres_cluster + gather_facts: true + become: true + become_method: sudo + any_errors_fatal: true + pre_tasks: + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + tags: always + tasks: + - name: "[Prepare] Get Patroni Cluster Leader Node" + ansible.builtin.uri: + url: http://{{ inventory_hostname }}:{{ patroni_restapi_port }}/leader + status_code: 200 + register: patroni_leader_result + changed_when: false + failed_when: false + environment: + no_proxy: "{{ inventory_hostname }}" + tags: always + + - name: '[Prepare] Add host to group "primary" (in-memory inventory)' + 
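+ # Patroni's REST API answers GET /leader with HTTP 200 only on the current leader (replicas return 503), so the status check below sorts each host into the correct group.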
ansible.builtin.add_host: + name: "{{ item }}" + groups: primary + when: hostvars[item]['patroni_leader_result']['status'] == 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + tags: always + + - name: '[Prepare] Add hosts to group "secondary" (in-memory inventory)' + ansible.builtin.add_host: + name: "{{ item }}" + groups: secondary + when: hostvars[item]['patroni_leader_result']['status'] != 200 + loop: "{{ groups['postgres_cluster'] }}" + changed_when: false + tags: always + + - name: "Print Patroni Cluster info" + ansible.builtin.debug: + msg: + - "Cluster Name: {{ patroni_cluster_name }}" + - "Cluster Leader: {{ ansible_hostname }}" + when: inventory_hostname in groups['primary'] + tags: always + +- name: "(1/4) PRE-UPDATE: Perform pre-update tasks" + hosts: "primary:secondary" + gather_facts: true + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include role variables + ansible.builtin.include_vars: "roles/update/vars/main.yml" + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tasks: + - name: Running Pre-Checks + ansible.builtin.include_role: + name: update + tasks_from: pre_checks + + # This task updates the pgBackRest package on the backup server (Dedicated Repository Host). + # It runs only if the 'pgbackrest' group is defined in the inventory and the update target is set to 'system'. + - name: Update pgBackRest package (Dedicated Repository Host) + ansible.builtin.include_role: + name: update + tasks_from: pgbackrest_host + when: groups['pgbackrest'] | default([]) | length > 0 and target | lower == 'system' + tags: + - update + - pre-checks + +- name: "(2/4) UPDATE: Secondary" + hosts: secondary + serial: 1 # update replicas one by one + gather_facts: true + become: true + become_method: sudo + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + pre_tasks: + - name: Include role variables + ansible.builtin.include_vars: "roles/update/vars/main.yml" + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tasks: + - name: Stop read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: stop_traffic + + - name: Stop Services + ansible.builtin.include_role: + name: update + tasks_from: stop_services + + - name: Update PostgreSQL + ansible.builtin.include_role: + name: update + tasks_from: postgres + when: target | lower == 'postgres' or target | lower == 'system' + + - name: Update Patroni + ansible.builtin.include_role: + name: update + tasks_from: patroni + when: target | lower == 'patroni' or target | lower == 'system' + + - name: Update all system packages + ansible.builtin.include_role: + name: update + tasks_from: system + when: target | lower == 'system' + + - name: Start Services + ansible.builtin.include_role: + name: update + tasks_from: start_services + + - name: Start read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: start_traffic + tags: + - update + - update-secondary + +- name: "(3/4) UPDATE: Primary" + hosts: primary + gather_facts: true + become: true + become_method: sudo + any_errors_fatal: true + environment: "{{ proxy_env | default({}) }}" + pre_tasks: + - name: Include role variables + 
ansible.builtin.include_vars: "roles/update/vars/main.yml" + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tasks: + - name: "Switchover Patroni leader role" + ansible.builtin.include_role: + name: update + tasks_from: switchover + + - name: Stop read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: stop_traffic + + - name: Stop Services + ansible.builtin.include_role: + name: update + tasks_from: stop_services + + - name: Update PostgreSQL + ansible.builtin.include_role: + name: update + tasks_from: postgres + when: target | lower == 'postgres' or target | lower == 'system' + + - name: Update Patroni + ansible.builtin.include_role: + name: update + tasks_from: patroni + when: target | lower == 'patroni' or target | lower == 'system' + + - name: Update all system packages + ansible.builtin.include_role: + name: update + tasks_from: system + when: target | lower == 'system' + + - name: Start Services + ansible.builtin.include_role: + name: update + tasks_from: start_services + + - name: Start read-only traffic + ansible.builtin.include_role: + name: update + tasks_from: start_traffic + tags: + - update + - update-primary + +- name: "(4/4) POST-UPDATE: Update extensions" + hosts: postgres_cluster + gather_facts: true + become: true + become_user: postgres + any_errors_fatal: true + pre_tasks: + - name: Include role variables + ansible.builtin.include_vars: "roles/update/vars/main.yml" + - name: Include main variables + ansible.builtin.include_vars: "roles/common/defaults/main.yml" + - name: Include OS-specific variables + ansible.builtin.include_vars: "roles/common/defaults/{{ ansible_os_family }}.yml" + tasks: + - name: Update extensions + ansible.builtin.include_role: + name: update + tasks_from: extensions + when: update_extensions | bool + + # finish (info) + - name: Check for any update failure + run_once: true # noqa run-once + ansible.builtin.set_fact: + any_update_failed: "{{ any_update_failed | default(false) or + hostvars[item].update_postgres_failed | default(false) or + hostvars[item].update_patroni_failed | default(false) or + hostvars[item].update_system_failed | default(false) }}" + loop: "{{ groups['postgres_cluster'] }}" + + - name: Check the Patroni cluster state + run_once: true # noqa run-once + become: true + become_user: postgres + ansible.builtin.command: patronictl -c /etc/patroni/patroni.yml list + register: patronictl_result + changed_when: false + environment: + PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" + + - name: Check the current PostgreSQL version + run_once: true # noqa run-once + ansible.builtin.command: >- + {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U {{ patroni_superuser_username }} -d postgres -tAXc + "select current_setting('server_version')" + register: postgres_version + changed_when: false + + - name: List the Patroni cluster members + run_once: true # noqa run-once + ansible.builtin.debug: + msg: "{{ patronictl_result.stdout_lines }}" + when: patronictl_result.stdout_lines is defined + + # if there are no update errors + - name: Update completed + run_once: true # noqa run-once + ansible.builtin.debug: + msg: + - "PostgreSQL HA cluster update completed." 
+ - "Current version: {{ postgres_version.stdout }}" + when: not any_update_failed + + # if there were errors during the update + - name: Update completed with error + run_once: true # noqa run-once + ansible.builtin.debug: + msg: + - "Update of PostgreSQL HA cluster completed with errors. Check the Ansible log." + - "Current version: {{ postgres_version.stdout }}" + when: any_update_failed + tags: + - update + - update-extensions diff --git a/console/.env.example b/console/.env.example new file mode 100644 index 000000000..fb7f4c962 --- /dev/null +++ b/console/.env.example @@ -0,0 +1,3 @@ +DOMAIN= +EMAIL= +AUTH_TOKEN= diff --git a/console/Dockerfile b/console/Dockerfile new file mode 100644 index 000000000..ec59a8271 --- /dev/null +++ b/console/Dockerfile @@ -0,0 +1,99 @@ +# build-env +FROM golang:1.24-bookworm AS api-builder +WORKDIR /go/src/pg-console + +COPY console/service/ . + +RUN make build_in_docker + +FROM node:20-bookworm AS ui-builder +WORKDIR /usr/src/pg-console + +COPY console/ui/ . + +RUN yarn install --frozen-lockfile --network-timeout 1000000 && yarn vite build + +# Build the console image +FROM nginx:1.26-bookworm +LABEL maintainer="Vitaliy Kukharik vitabaks@gmail.com" + +COPY --from=api-builder /go/src/pg-console/pg-console /usr/local/bin/ +COPY console/db/migrations /etc/db/migrations +COPY --from=ui-builder /usr/src/pg-console/dist /usr/share/nginx/html/ +COPY console/ui/nginx/nginx.conf /etc/nginx/ +COPY console/ui/env.sh console/ui/.env console/ui/.env.production /usr/share/nginx/html/ +RUN chmod +x /usr/share/nginx/html/env.sh + +ARG POSTGRES_VERSION +ENV POSTGRES_VERSION=${POSTGRES_VERSION:-16} + +ARG POSTGRES_PORT +ENV POSTGRES_PORT=${POSTGRES_PORT:-5432} + +ARG POSTGRES_PASSWORD +ENV POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-"postgres-pass"} + +ARG PGDATA +ENV PGDATA=${PGDATA:-"/var/lib/postgresql/${POSTGRES_VERSION}/main"} + +ARG PG_UNIX_SOCKET_DIR +ENV PG_UNIX_SOCKET_DIR=${PG_UNIX_SOCKET_DIR:-"/var/run/postgresql"} + +ARG PG_CONSOLE_API_PORT +ENV PG_CONSOLE_API_PORT=${PG_CONSOLE_API_PORT:-8080} + +ARG PG_CONSOLE_UI_PORT +ENV PG_CONSOLE_UI_PORT=${PG_CONSOLE_UI_PORT:-80} + +# Set SHELL to /bin/bash +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN apt-get clean && rm -rf /var/lib/apt/lists/partial \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + gnupg postgresql-common apt-transport-https lsb-release openssh-client ca-certificates wget curl vim \ + # PostgreSQL + && install -d /usr/share/postgresql-common/pgdg \ + && curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail https://www.postgresql.org/media/keys/ACCC4CF8.asc \ + && echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-${POSTGRES_VERSION} \ + && sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen \ + && pg_dropcluster ${POSTGRES_VERSION} main \ + # TimescaleDB + && wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | gpg --dearmor -o /etc/apt/trusted.gpg.d/timescaledb.gpg \ + && echo "deb https://packagecloud.io/timescale/timescaledb/debian/ $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/timescaledb.list \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + 
&& DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y timescaledb-2-postgresql-${POSTGRES_VERSION} \ + # supervisor + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y supervisor + +# Clean up +RUN apt-get autoremove -y --purge gnupg wget \ + && apt-get clean -y autoclean \ + && rm -rf /var/lib/apt/lists/* + +# Copy configuration files +COPY console/db/postgresql.conf /var/tmp/postgresql.conf +COPY console/db/pg_hba.conf /var/tmp/pg_hba.conf + +# Copy pg_start.sh +COPY console/db/pg_start.sh /pg_start.sh +RUN chmod +x /pg_start.sh + +# supervisord [https://docs.docker.com/engine/admin/using_supervisord/] +COPY console/supervisord.conf /etc/supervisor/supervisord.conf + +VOLUME /var/lib/postgresql + +# Console DB +EXPOSE ${POSTGRES_PORT} +# Console API +EXPOSE ${PG_CONSOLE_API_PORT} +# Console UI +EXPOSE ${PG_CONSOLE_UI_PORT} + +# Override the ENTRYPOINT set by nginx image +ENTRYPOINT [] + +CMD ["/usr/bin/supervisord", "--configuration=/etc/supervisor/supervisord.conf", "--nodaemon"] diff --git a/console/README.md b/console/README.md new file mode 100644 index 000000000..62fea3b7c --- /dev/null +++ b/console/README.md @@ -0,0 +1,39 @@ +# Console Stack + +The Autobase console stack runs with Caddy as a reverse proxy. The stack includes: + +- Caddy reverse proxy with automatic HTTPS +- Console API +- Console UI +- PostgreSQL database + +## Quick Start + +1. Set up the environment: + +```bash +cp .env.example .env +``` + +2. Configure your `.env`: + +```bash +DOMAIN=your-domain.com # Set your domain +EMAIL=your@email.com # Required for Caddy SSL +AUTH_TOKEN=your-token # Your authorization token +``` + +3. Run the stack: + +```bash +docker compose up -d +``` + +## Notes + +- Caddy will automatically handle SSL certificates for your domain +- Data is persisted in Docker volumes: `console_postgres` and `caddy_data` +- The Caddy network is created automatically by Docker Compose +- All services are configured to restart automatically unless stopped manually +- Additional [environment variables](https://github.com/vitabaks/autobase/tree/master/console/service#configuration) can be configured based on your project needs +- Using the `latest` versions is great for testing. For production installations, specify release versions in the [docker-compose.yml](docker-compose.yml) file.
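+ +To sanity-check a running stack, the standard Docker Compose commands are enough (this assumes the Caddy service is named `caddy` in `docker-compose.yml`): + +```bash +docker compose ps # all services should be in the "running" state +docker compose logs -f caddy # watch automatic certificate issuance for your domain +```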
diff --git a/console/db/Dockerfile b/console/db/Dockerfile new file mode 100644 index 000000000..f9b5a3350 --- /dev/null +++ b/console/db/Dockerfile @@ -0,0 +1,56 @@ +FROM debian:12-slim + +ARG POSTGRES_VERSION +ENV POSTGRES_VERSION=${POSTGRES_VERSION:-16} + +ARG POSTGRES_PORT +ENV POSTGRES_PORT=${POSTGRES_PORT:-5432} + +ARG POSTGRES_PASSWORD +ENV POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-"postgres-pass"} + +ARG PGDATA +ENV PGDATA=${PGDATA:-"/var/lib/postgresql/${POSTGRES_VERSION}/main"} + +ARG PG_UNIX_SOCKET_DIR +ENV PG_UNIX_SOCKET_DIR=${PG_UNIX_SOCKET_DIR:-"/var/run/postgresql"} + +# Set SHELL to /bin/bash +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN apt-get clean && rm -rf /var/lib/apt/lists/partial \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + gnupg postgresql-common apt-transport-https lsb-release openssh-client ca-certificates wget curl vim-tiny sudo \ + # PostgreSQL + && install -d /usr/share/postgresql-common/pgdg \ + && curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail https://www.postgresql.org/media/keys/ACCC4CF8.asc \ + && echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-${POSTGRES_VERSION} \ + && sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen \ + && pg_dropcluster ${POSTGRES_VERSION} main \ + # TimescaleDB + && wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | gpg --dearmor -o /etc/apt/trusted.gpg.d/timescaledb.gpg \ + && echo "deb https://packagecloud.io/timescale/timescaledb/debian/ $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/timescaledb.list \ + && apt-get update -o Acquire::CompressionTypes::Order::=gz \ + && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y timescaledb-2-postgresql-${POSTGRES_VERSION} + +# Clean up +RUN apt-get autoremove -y --purge gnupg wget \ + && apt-get clean -y autoclean \ + && rm -rf /var/lib/apt/lists/* + +# Copy configuration files +COPY console/db/postgresql.conf /var/tmp/postgresql.conf +COPY console/db/pg_hba.conf /var/tmp/pg_hba.conf + +# Copy pg_start.sh +COPY console/db/pg_start.sh /pg_start.sh +RUN chmod +x /pg_start.sh + +VOLUME /var/lib/postgresql + +EXPOSE ${POSTGRES_PORT} + +CMD ["/pg_start.sh"] diff --git a/console/db/README.md b/console/db/README.md new file mode 100644 index 000000000..12f2ba9ef --- /dev/null +++ b/console/db/README.md @@ -0,0 +1,123 @@ +## Database Schema for Autobase Console + +### Introduction + +This project uses [Goose](https://github.com/pressly/goose) for versioning and managing database schema changes. Goose is a database migration tool that enables database version control, much like Git does for source code. It allows defining and tracking changes in the database schema over time, ensuring consistency and reproducibility. The backend service is responsible for applying migrations. + +For more information on using Goose, see the [Goose documentation](https://github.com/pressly/goose). + +### Database Migrations + +Database migrations are SQL scripts that modify the schema of the database. 
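+ +If you need to run migrations manually, one way to install the `goose` CLI is via the Go toolchain (see the Goose documentation for other installation options): + +```bash +go install github.com/pressly/goose/v3/cmd/goose@latest +```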
Each migration script should be placed in the `console/db/migrations` directory and follow Goose's naming convention so that migrations are applied in the correct order. + +**Naming Convention** +Goose uses a specific naming convention to order and apply migrations: + +- Versioned Migrations: These migrations have a version number and are applied in sequence. The naming format is `<version>_<name>.sql` + - Example: `20240520144338_initial_scheme_setup` + - Note: You can use the command `goose create <migration_file_name> sql` to create a new migration file. + +Example of applying migrations: + +```shell +goose -dir ./console/db/migrations postgres \ +"host=<host> port=5432 user=postgres password=<password> dbname=<dbname>" \ +up +``` + +### Validating Migrations + +To check the status of migrations, run: + +```shell +goose -dir ./console/db/migrations postgres \ +"host=<host> port=5432 user=postgres password=<password> dbname=<dbname>" \ +status +``` + +Output example: + +``` +status + +2024/05/20 17:50:33 Applied At Migration +2024/05/20 17:50:33 ======================================= +2024/05/20 17:50:33 Mon May 20 14:49:26 2024 -- 20240520144338_2.0.0_initial_scheme_setup.sql +``` + +### Database Schema + +#### Tables: + +- `cloud_providers` + - Table containing cloud providers information +- `cloud_regions` + - Table containing cloud regions information for various cloud providers +- `cloud_instances` + - Table containing cloud instances information (including the approximate price) for various cloud providers +- `cloud_volumes` + - Table containing cloud volume information (including the approximate price) for various cloud providers +- `cloud_images` + - Table containing cloud images information for various cloud providers + - Note: For all cloud providers except AWS, the image is the same for all regions. For AWS, the image must be specified for each specific region. +- `secrets` + - Table containing secrets for accessing cloud providers and servers + - Note: The data is encrypted using the pgcrypto extension and a symmetric key. This symmetric key is generated at the application level and is unique for each installation. +- `projects` + - Table containing information about projects + - Default: 'default' +- `environments` + - Table containing information about environments + - Default: 'production', 'staging', 'test', 'dev', 'benchmarking' +- `clusters` + - Table containing information about Postgres clusters +- `servers` + - Table containing information about servers within a Postgres cluster +- `extensions` + - The table stores information about Postgres extensions, including name, description, supported Postgres version range, and whether the extension is a contrib module or third-party. + - 'postgres_min_version' and 'postgres_max_version' define the range of Postgres versions supported by extensions. If the postgres_max_version is NULL, it is assumed that the extension is still supported by new versions of Postgres. +- `operations` + - Table containing logs of operations performed on a cluster. + - Note: The migration includes a DO block that checks for the presence of the timescaledb extension. If the extension is installed, the operations table is converted into a hypertable with monthly partitioning. Additionally, the block checks the timescaledb license. If the license is a Community license (timescale), a hypertable compression policy is created for partitions older than one month.
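+ - A quick way to verify this conversion (illustrative; assumes `psql` access to the console database): `psql -d <dbname> -tAc "select hypertable_name from timescaledb_information.hypertables"` should list `operations` when TimescaleDB is installed.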
+- `postgres_versions` + - Table containing the major PostgreSQL versions supported by autobase +- `settings` + - Table containing configuration parameters, including console and other component settings + +#### Views: + +- `v_secrets_list` + - Displays a list of secrets (without revealing secret values) along with additional metadata such as creation and update timestamps. It also includes information about whether each secret is in use and, if so, provides details on which clusters and servers are utilizing the secret. +- `v_operations` + - Displays a list of operations, with additional columns such as the name of the cluster and environment. + +#### Functions: + +- `update_server_count` + - Function to update the server_count column in the clusters table. + - Note: This function calculates the number of servers associated with a specific cluster and updates the server_count accordingly. The trigger `update_server_count_trigger` is automatically executed whenever there are INSERT, UPDATE, or DELETE operations on the servers table. This ensures that the server_count in the clusters table is always accurate and up-to-date. +- `add_secret` + - Function to add a secret. + - Usage examples (project_id, secret_type, secret_name, secret_value, encryption_key): + - `SELECT add_secret(1, 'ssh_key', '<secret_name>', '{"private_key": "<value>"}', 'my_encryption_key');` + - `SELECT add_secret(1, 'password', '<secret_name>', '{"username": "<value>", "password": "<value>"}', 'my_encryption_key');` + - `SELECT add_secret(1, 'cloud_secret', '<secret_name>', '{"AWS_ACCESS_KEY_ID": "<value>", "AWS_SECRET_ACCESS_KEY": "<value>"}', 'my_encryption_key');` +- `update_secret` + - Function to update a secret. + - Usage example: + - `SELECT update_secret(<secret_id>, '<secret_type>', '<secret_name>', '<secret_value>', '<encryption_key>');` +- `get_secret` + - Function to get a secret value in JSON format. + - Usage example (secret_id, encryption_key): + - `SELECT get_secret(1, 'my_encryption_key');` +- `get_extensions` + - Function to get a list of available extensions in JSON format, either all of them or only the 'contrib'/'third_party' ones (optional filter). + - Usage examples: + - `SELECT get_extensions(16);` + - `SELECT get_extensions(16, 'contrib');` + - `SELECT get_extensions(16, 'third_party');` +- `get_cluster_name` + - Function to generate a unique name for a new PostgreSQL cluster. + - Note: This function generates names in the format `postgres-cluster-XX`, where `XX` is a sequential number starting from 01. It checks the existing cluster names to ensure the generated name is unique.
+ - Usage example: + - `SELECT get_cluster_name();` diff --git a/console/db/migrations/20240520144338_2.0.0_initial_scheme_setup.sql b/console/db/migrations/20240520144338_2.0.0_initial_scheme_setup.sql new file mode 100644 index 000000000..3ace3f5c9 --- /dev/null +++ b/console/db/migrations/20240520144338_2.0.0_initial_scheme_setup.sql @@ -0,0 +1,1219 @@ +-- +goose Up +-- Create extensions +create schema if not exists extensions; + +create extension if not exists moddatetime schema extensions; + +create extension if not exists pgcrypto schema extensions; + +-- cloud_providers +create table public.cloud_providers ( + provider_name text not null, + provider_description text not null, + provider_image text +); + +comment on table public.cloud_providers is 'Table containing cloud providers information'; + +comment on column public.cloud_providers.provider_name is 'The name of the cloud provider'; + +comment on column public.cloud_providers.provider_description is 'A description of the cloud provider'; + +insert into public.cloud_providers (provider_name, provider_description, provider_image) + values ('aws', 'Amazon Web Services', 'aws.png'), + ('gcp', 'Google Cloud Platform', 'gcp.png'), + ('azure', 'Microsoft Azure', 'azure.png'), + ('digitalocean', 'DigitalOcean', 'digitalocean.png'), + ('hetzner', 'Hetzner Cloud', 'hetzner.png'); + +alter table only public.cloud_providers + add constraint cloud_providers_pkey primary key (provider_name); + +-- cloud_regions +create table public.cloud_regions ( + cloud_provider text not null, + region_group text not null, + region_name text not null, + region_description text not null +); + +comment on table public.cloud_regions is 'Table containing cloud regions information for various cloud providers'; + +comment on column public.cloud_regions.cloud_provider is 'The name of the cloud provider'; + +comment on column public.cloud_regions.region_group is 'The geographical group of the cloud region'; + +comment on column public.cloud_regions.region_name is 'The specific name of the cloud region'; + +comment on column public.cloud_regions.region_description is 'A description of the cloud region'; + +insert into public.cloud_regions (cloud_provider, region_group, region_name, region_description) + values ('aws', 'Africa', 'af-south-1', 'Africa (Cape Town)'), + ('aws', 'Asia Pacific', 'ap-east-1', 'Asia Pacific (Hong Kong)'), + ('aws', 'Asia Pacific', 'ap-south-1', 'Asia Pacific (Mumbai)'), + ('aws', 'Asia Pacific', 'ap-south-2', 'Asia Pacific (Hyderabad)'), + ('aws', 'Asia Pacific', 'ap-southeast-3', 'Asia Pacific (Jakarta)'), + ('aws', 'Asia Pacific', 'ap-southeast-4', 'Asia Pacific (Melbourne)'), + ('aws', 'Asia Pacific', 'ap-northeast-1', 'Asia Pacific (Tokyo)'), + ('aws', 'Asia Pacific', 'ap-northeast-2', 'Asia Pacific (Seoul)'), + ('aws', 'Asia Pacific', 'ap-northeast-3', 'Asia Pacific (Osaka)'), + ('aws', 'Asia Pacific', 'ap-southeast-1', 'Asia Pacific (Singapore)'), + ('aws', 'Asia Pacific', 'ap-southeast-2', 'Asia Pacific (Sydney)'), + ('aws', 'Europe', 'eu-central-1', 'Europe (Frankfurt)'), + ('aws', 'Europe', 'eu-west-1', 'Europe (Ireland)'), + ('aws', 'Europe', 'eu-west-2', 'Europe (London)'), + ('aws', 'Europe', 'eu-west-3', 'Europe (Paris)'), + ('aws', 'Europe', 'eu-north-1', 'Europe (Stockholm)'), + ('aws', 'Europe', 'eu-south-1', 'Europe (Milan)'), + ('aws', 'Europe', 'eu-south-2', 'Europe (Spain)'), + ('aws', 'Europe', 'eu-central-2', 'Europe (Zurich)'), + ('aws', 'Middle East', 'me-south-1', 'Middle East (Bahrain)'), + ('aws', 'Middle East', 
'me-central-1', 'Middle East (UAE)'), + ('aws', 'North America', 'us-east-1', 'US East (N. Virginia)'), + ('aws', 'North America', 'us-east-2', 'US East (Ohio)'), + ('aws', 'North America', 'us-west-1', 'US West (N. California)'), + ('aws', 'North America', 'us-west-2', 'US West (Oregon)'), + ('aws', 'North America', 'ca-central-1', 'Canada (Central)'), + ('aws', 'North America', 'ca-west-1', 'Canada (Calgary)'), + ('aws', 'South America', 'sa-east-1', 'South America (São Paulo)'), + ('gcp', 'Africa', 'africa-south1', 'Johannesburg'), + ('gcp', 'Asia Pacific', 'asia-east1', 'Taiwan'), + ('gcp', 'Asia Pacific', 'asia-east2', 'Hong Kong'), + ('gcp', 'Asia Pacific', 'asia-northeast1', 'Tokyo'), + ('gcp', 'Asia Pacific', 'asia-northeast2', 'Osaka'), + ('gcp', 'Asia Pacific', 'asia-northeast3', 'Seoul'), + ('gcp', 'Asia Pacific', 'asia-south1', 'Mumbai'), + ('gcp', 'Asia Pacific', 'asia-south2', 'Delhi'), + ('gcp', 'Asia Pacific', 'asia-southeast1', 'Singapore'), + ('gcp', 'Asia Pacific', 'asia-southeast2', 'Jakarta'), + ('gcp', 'Australia', 'australia-southeast1', 'Sydney'), + ('gcp', 'Australia', 'australia-southeast2', 'Melbourne'), + ('gcp', 'Europe', 'europe-central2', 'Warsaw'), + ('gcp', 'Europe', 'europe-north1', 'Finland'), + ('gcp', 'Europe', 'europe-southwest1', 'Madrid'), + ('gcp', 'Europe', 'europe-west1', 'Belgium'), + ('gcp', 'Europe', 'europe-west10', 'Berlin'), + ('gcp', 'Europe', 'europe-west12', 'Turin'), + ('gcp', 'Europe', 'europe-west2', 'London'), + ('gcp', 'Europe', 'europe-west3', 'Frankfurt'), + ('gcp', 'Europe', 'europe-west4', 'Netherlands'), + ('gcp', 'Europe', 'europe-west6', 'Zurich'), + ('gcp', 'Europe', 'europe-west8', 'Milan'), + ('gcp', 'Europe', 'europe-west9', 'Paris'), + ('gcp', 'Middle East', 'me-central1', 'Doha'), + ('gcp', 'Middle East', 'me-central2', 'Dammam'), + ('gcp', 'Middle East', 'me-west1', 'Tel Aviv'), + ('gcp', 'North America', 'northamerica-northeast1', 'Montréal'), + ('gcp', 'North America', 'northamerica-northeast2', 'Toronto'), + ('gcp', 'North America', 'us-central1', 'Iowa'), + ('gcp', 'North America', 'us-east1', 'South Carolina'), + ('gcp', 'North America', 'us-east4', 'Northern Virginia'), + ('gcp', 'North America', 'us-east5', 'Columbus'), + ('gcp', 'North America', 'us-south1', 'Dallas'), + ('gcp', 'North America', 'us-west1', 'Oregon'), + ('gcp', 'North America', 'us-west2', 'Los Angeles'), + ('gcp', 'North America', 'us-west3', 'Salt Lake City'), + ('gcp', 'North America', 'us-west4', 'Las Vegas'), + ('gcp', 'South America', 'southamerica-east1', 'São Paulo'), + ('gcp', 'South America', 'southamerica-west1', 'Santiago'), + ('azure', 'Africa', 'southafricanorth', 'South Africa North (Johannesburg)'), + ('azure', 'Africa', 'southafricawest', 'South Africa West (Cape Town)'), + ('azure', 'Asia Pacific', 'australiacentral', 'Australia Central (Canberra)'), + ('azure', 'Asia Pacific', 'australiacentral2', 'Australia Central 2 (Canberra)'), + ('azure', 'Asia Pacific', 'australiaeast', 'Australia East (New South Wales)'), + ('azure', 'Asia Pacific', 'australiasoutheast', 'Australia Southeast (Victoria)'), + ('azure', 'Asia Pacific', 'centralindia', 'Central India (Pune)'), + ('azure', 'Asia Pacific', 'eastasia', 'East Asia (Hong Kong)'), + ('azure', 'Asia Pacific', 'japaneast', 'Japan East (Tokyo, Saitama)'), + ('azure', 'Asia Pacific', 'japanwest', 'Japan West (Osaka)'), + ('azure', 'Asia Pacific', 'jioindiacentral', 'Jio India Central (Nagpur)'), + ('azure', 'Asia Pacific', 'jioindiawest', 'Jio India West (Jamnagar)'), + ('azure', 
'Asia Pacific', 'koreacentral', 'Korea Central (Seoul)'), + ('azure', 'Asia Pacific', 'koreasouth', 'Korea South (Busan)'), + ('azure', 'Asia Pacific', 'southeastasia', 'Southeast Asia (Singapore)'), + ('azure', 'Asia Pacific', 'southindia', 'South India (Chennai)'), + ('azure', 'Asia Pacific', 'westindia', 'West India (Mumbai)'), + ('azure', 'Europe', 'francecentral', 'France Central (Paris)'), + ('azure', 'Europe', 'francesouth', 'France South (Marseille)'), + ('azure', 'Europe', 'germanynorth', 'Germany North (Berlin)'), + ('azure', 'Europe', 'germanywestcentral', 'Germany West Central (Frankfurt)'), + ('azure', 'Europe', 'italynorth', 'Italy North (Milan)'), + ('azure', 'Europe', 'northeurope', 'North Europe (Ireland)'), + ('azure', 'Europe', 'norwayeast', 'Norway East (Norway)'), + ('azure', 'Europe', 'norwaywest', 'Norway West (Norway)'), + ('azure', 'Europe', 'polandcentral', 'Poland Central (Warsaw)'), + ('azure', 'Europe', 'swedencentral', 'Sweden Central (Gävle)'), + ('azure', 'Europe', 'switzerlandnorth', 'Switzerland North (Zurich)'), + ('azure', 'Europe', 'switzerlandwest', 'Switzerland West (Geneva)'), + ('azure', 'Europe', 'uksouth', 'UK South (London)'), + ('azure', 'Europe', 'ukwest', 'UK West (Cardiff)'), + ('azure', 'Europe', 'westeurope', 'West Europe (Netherlands)'), + ('azure', 'Mexico', 'mexicocentral', 'Mexico Central (Querétaro State)'), + ('azure', 'Middle East', 'qatarcentral', 'Qatar Central (Doha)'), + ('azure', 'Middle East', 'uaecentral', 'UAE Central (Abu Dhabi)'), + ('azure', 'Middle East', 'uaenorth', 'UAE North (Dubai)'), + ('azure', 'South America', 'brazilsouth', 'Brazil South (Sao Paulo State)'), + ('azure', 'South America', 'brazilsoutheast', 'Brazil Southeast (Rio)'), + ('azure', 'North America', 'centralus', 'Central US (Iowa)'), + ('azure', 'North America', 'eastus', 'East US (Virginia)'), + ('azure', 'North America', 'eastus2', 'East US 2 (Virginia)'), + ('azure', 'North America', 'eastusstg', 'East US STG (Virginia)'), + ('azure', 'North America', 'northcentralus', 'North Central US (Illinois)'), + ('azure', 'North America', 'southcentralus', 'South Central US (Texas)'), + ('azure', 'North America', 'westcentralus', 'West Central US (Wyoming)'), + ('azure', 'North America', 'westus', 'West US (California)'), + ('azure', 'North America', 'westus2', 'West US 2 (Washington)'), + ('azure', 'North America', 'westus3', 'West US 3 (Phoenix)'), + ('azure', 'North America', 'canadaeast', 'Canada East (Quebec)'), + ('azure', 'North America', 'canadacentral', 'Canada Central (Toronto)'), + ('azure', 'South America', 'brazilus', 'Brazil US (South America)'), + ('digitalocean', 'Asia Pacific', 'sgp1', 'Singapore (Datacenter 1)'), + ('digitalocean', 'Asia Pacific', 'blr1', 'Bangalore (Datacenter 1)'), + ('digitalocean', 'Australia', 'syd1', 'Sydney (Datacenter 1)'), + ('digitalocean', 'Europe', 'ams3', 'Amsterdam (Datacenter 3)'), + ('digitalocean', 'Europe', 'lon1', 'London (Datacenter 1)'), + ('digitalocean', 'Europe', 'fra1', 'Frankfurt (Datacenter 1)'), + ('digitalocean', 'North America', 'nyc1', 'New York (Datacenter 1)'), + ('digitalocean', 'North America', 'nyc3', 'New York (Datacenter 3)'), + ('digitalocean', 'North America', 'sfo2', 'San Francisco (Datacenter 2)'), + ('digitalocean', 'North America', 'sfo3', 'San Francisco (Datacenter 3)'), + ('digitalocean', 'North America', 'tor1', 'Toronto (Datacenter 1)'), + ('hetzner', 'Europe', 'nbg1', 'Nuremberg'), + ('hetzner', 'Europe', 'fsn1', 'Falkenstein'), + ('hetzner', 'Europe', 'hel1', 'Helsinki'), + 
('hetzner', 'North America', 'hil', 'Hillsboro, OR'), + ('hetzner', 'North America', 'ash', 'Ashburn, VA'), + ('hetzner', 'Asia Pacific', 'sin', 'Singapore'); + +alter table only public.cloud_regions + add constraint cloud_regions_pkey primary key (cloud_provider, region_group, region_name); + +alter table only public.cloud_regions + add constraint cloud_regions_cloud_provider_fkey foreign key (cloud_provider) references public.cloud_providers (provider_name); + +-- cloud_instances +create table public.cloud_instances ( + cloud_provider text not null, + instance_group text not null, + instance_name text not null, + arch text default 'amd64' not null, + cpu integer not null, + ram integer not null, + price_hourly numeric not null, + price_monthly numeric not null, + currency char(1) default '$' not null, + updated_at timestamp default current_timestamp +); + +comment on table public.cloud_instances is 'Table containing cloud instances information for various cloud providers'; + +comment on column public.cloud_instances.cloud_provider is 'The name of the cloud provider'; + +comment on column public.cloud_instances.instance_group is 'The group of the instance size'; + +comment on column public.cloud_instances.instance_name is 'The specific name of the cloud instance'; + +comment on column public.cloud_instances.arch is 'The architecture of the instance'; + +comment on column public.cloud_instances.cpu is 'The number of CPUs of the instance'; + +comment on column public.cloud_instances.ram is 'The amount of RAM (in GB) of the instance'; + +comment on column public.cloud_instances.price_hourly is 'The hourly price of the instance'; + +comment on column public.cloud_instances.price_monthly is 'The monthly price of the instance'; + +comment on column public.cloud_instances.currency is 'The currency of the price (default: $)'; + +comment on column public.cloud_instances.updated_at is 'The date when the instance information was last updated'; + +-- The price is approximate because it is specified for one region and may differ in other regions. 
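+-- Example (illustrative, not part of the migration): find the cheapest instance per provider: +-- select distinct on (cloud_provider) cloud_provider, instance_name, price_monthly +-- from public.cloud_instances order by cloud_provider, price_monthly;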
+-- aws, gcp, azure: the price is for the region 'US East' +insert into public.cloud_instances (cloud_provider, instance_group, instance_name, cpu, ram, price_hourly, price_monthly, currency, updated_at) + values ('aws', 'Small Size', 't3.small', 2, 2, 0.021, 14.976, '$', '2024-05-15'), + ('aws', 'Small Size', 't3.medium', 2, 4, 0.042, 29.952, '$', '2024-05-15'), + ('aws', 'Small Size', 'm6i.large', 2, 8, 0.096, 69.120, '$', '2024-05-15'), + ('aws', 'Small Size', 'r6i.large', 2, 16, 0.126, 90.720, '$', '2024-05-15'), + ('aws', 'Small Size', 'm6i.xlarge', 4, 16, 0.192, 138.240, '$', '2024-05-15'), + ('aws', 'Small Size', 'r6i.xlarge', 4, 32, 0.252, 181.440, '$', '2024-05-15'), + ('aws', 'Medium Size', 'm6i.2xlarge', 8, 32, 0.384, 276.480, '$', '2024-05-15'), + ('aws', 'Medium Size', 'r6i.2xlarge', 8, 64, 0.504, 362.880, '$', '2024-05-15'), + ('aws', 'Medium Size', 'm6i.4xlarge', 16, 64, 0.768, 552.960, '$', '2024-05-15'), + ('aws', 'Medium Size', 'r6i.4xlarge', 16, 128, 1.008, 725.760, '$', '2024-05-15'), + ('aws', 'Medium Size', 'm6i.8xlarge', 32, 128, 1.536, 1105.920, '$', '2024-05-15'), + ('aws', 'Medium Size', 'r6i.8xlarge', 32, 256, 2.016, 1451.520, '$', '2024-05-15'), + ('aws', 'Medium Size', 'm6i.12xlarge', 48, 192, 2.304, 1658.880, '$', '2024-05-15'), + ('aws', 'Medium Size', 'r6i.12xlarge', 48, 384, 3.024, 2177.280, '$', '2024-05-15'), + ('aws', 'Large Size', 'm6i.16xlarge', 64, 256, 3.072, 2211.840, '$', '2024-05-15'), + ('aws', 'Large Size', 'r6i.16xlarge', 64, 512, 4.032, 2903.040, '$', '2024-05-15'), + ('aws', 'Large Size', 'm6i.24xlarge', 96, 384, 4.608, 3317.760, '$', '2024-05-15'), + ('aws', 'Large Size', 'r6i.24xlarge', 96, 768, 6.048, 4354.560, '$', '2024-05-15'), + ('aws', 'Large Size', 'm6i.32xlarge', 128, 512, 6.144, 4423.680, '$', '2024-05-15'), + ('aws', 'Large Size', 'r6i.32xlarge', 128, 1024, 8.064, 5806.080, '$', '2024-05-15'), + ('aws', 'Large Size', 'm7i.48xlarge', 192, 768, 9.677, 6967.296, '$', '2024-05-15'), + ('aws', 'Large Size', 'r7i.48xlarge', 192, 1536, 12.701, 9144.576, '$', '2024-05-15'), + ('gcp', 'Small Size', 'e2-small', 2, 2, 0.017, 12.228, '$', '2024-05-15'), + ('gcp', 'Small Size', 'e2-medium', 2, 4, 0.034, 24.457, '$', '2024-05-15'), + ('gcp', 'Small Size', 'n2-standard-2', 2, 8, 0.097, 70.896, '$', '2024-05-15'), + ('gcp', 'Small Size', 'n2-highmem-2', 2, 16, 0.131, 95.640, '$', '2024-05-15'), + ('gcp', 'Small Size', 'n2-standard-4', 4, 16, 0.194, 141.792, '$', '2024-05-15'), + ('gcp', 'Small Size', 'n2-highmem-4', 4, 32, 0.262, 191.280, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-standard-8', 8, 32, 0.388, 283.585, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-highmem-8', 8, 64, 0.524, 382.561, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-standard-16', 16, 64, 0.777, 567.169, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-highmem-16', 16, 128, 1.048, 765.122, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-standard-32', 32, 128, 1.554, 1134.338, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-highmem-32', 32, 256, 2.096, 1530.244, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-standard-48', 48, 192, 2.331, 1701.507, '$', '2024-05-15'), + ('gcp', 'Medium Size', 'n2-highmem-48', 48, 384, 3.144, 2295.365, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-standard-64', 64, 256, 3.108, 2268.676, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-highmem-64', 64, 512, 4.192, 3060.487, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-standard-80', 80, 320, 3.885, 2835.846, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-highmem-80', 80, 
640, 5.241, 3825.609, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-standard-96', 96, 384, 4.662, 3403.015, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-highmem-96', 96, 768, 6.289, 4590.731, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-standard-128', 128, 512, 6.216, 4537.353, '$', '2024-05-15'), + ('gcp', 'Large Size', 'n2-highmem-128', 128, 864, 7.707, 5626.092, '$', '2024-05-15'), + ('gcp', 'Large Size', 'c3-standard-176', 176, 704, 9.188, 6706.913, '$', '2024-05-15'), + ('gcp', 'Large Size', 'c3-highmem-176', 176, 1408, 12.394, 9047.819, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_B1ms', 1, 2, 0.021, 15.111, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_B2s', 2, 4, 0.042, 30.368, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_D2s_v5', 2, 8, 0.096, 70.080, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_E2s_v5', 2, 16, 0.126, 91.980, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_D4s_v5', 4, 16, 0.192, 140.160, '$', '2024-05-15'), + ('azure', 'Small Size', 'Standard_E4s_v5', 4, 32, 0.252, 183.960, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_D8s_v5', 8, 32, 0.384, 280.320, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_E8s_v5', 8, 64, 0.504, 367.920, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_D16s_v5', 16, 64, 0.768, 560.640, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_E16s_v5', 16, 128, 1.008, 735.840, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_D32s_v5', 32, 128, 1.536, 1121.280, '$', '2024-05-15'), + ('azure', 'Medium Size', 'Standard_E32s_v5', 32, 256, 2.016, 1471.680, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_D48s_v5', 48, 192, 2.304, 1681.920, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_E48s_v5', 48, 384, 3.024, 2207.520, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_D64s_v5', 64, 256, 3.072, 2242.560, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_E64s_v5', 64, 512, 4.032, 2943.360, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_D96s_v5', 96, 384, 4.608, 3363.840, '$', '2024-05-15'), + ('azure', 'Large Size', 'Standard_E96s_v5', 96, 672, 6.048, 4415.040, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 's-2vcpu-2gb', 2, 2, 0.027, 18.000, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 's-2vcpu-4gb', 2, 4, 0.036, 24.000, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 'g-2vcpu-8gb', 2, 8, 0.094, 63.000, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 'm-2vcpu-16gb', 2, 16, 0.125, 84.000, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 'g-4vcpu-16gb', 4, 16, 0.188, 126.000, '$', '2024-05-15'), + ('digitalocean', 'Small Size', 'm-4vcpu-32gb', 4, 32, 0.250, 168.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'g-8vcpu-32gb', 8, 32, 0.375, 252.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'm-8vcpu-64gb', 8, 64, 0.500, 336.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'g-16vcpu-64gb', 16, 64, 0.750, 504.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'm-16vcpu-128gb', 16, 128, 1.000, 672.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'g-32vcpu-128gb', 32, 128, 1.500, 1008.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'm-32vcpu-256gb', 32, 256, 2.000, 1344.000, '$', '2024-05-15'), + ('digitalocean', 'Medium Size', 'g-48vcpu-192gb', 48, 192, 2.699, 1814.000, '$', '2024-05-15'), + ('hetzner', 'Small Size', 'CPX11', 2, 2, 0.007, 5.180, '€', '2024-07-21'), + ('hetzner', 'Small Size', 'CPX21', 3, 4, 0.010, 8.980, '€', 
'2024-07-21'), + ('hetzner', 'Small Size', 'CCX13', 2, 8, 0.024, 14.860, '€', '2024-05-15'), + ('hetzner', 'Small Size', 'CCX23', 4, 16, 0.047, 29.140, '€', '2024-05-15'), + ('hetzner', 'Medium Size', 'CCX33', 8, 32, 0.093, 57.700, '€', '2024-05-15'), + ('hetzner', 'Medium Size', 'CCX43', 16, 64, 0.184, 114.820, '€', '2024-05-15'), + ('hetzner', 'Medium Size', 'CCX53', 32, 128, 0.367, 229.060, '€', '2024-05-15'), + ('hetzner', 'Medium Size', 'CCX63', 48, 192, 0.550, 343.300, '€', '2024-05-15'); + +alter table only public.cloud_instances + add constraint cloud_instances_pkey primary key (cloud_provider, instance_group, instance_name); + +alter table only public.cloud_instances + add constraint cloud_instances_cloud_provider_fkey foreign key (cloud_provider) references public.cloud_providers (provider_name); + +-- this trigger will set the "updated_at" column to the current timestamp for every update +create trigger handle_updated_at + before update on public.cloud_instances for each row + execute function extensions.moddatetime (updated_at); + +-- cloud_volumes +create table public.cloud_volumes ( + cloud_provider text not null, + volume_type text not null, + volume_description text not null, + volume_min_size integer not null, + volume_max_size integer not null, + price_monthly numeric not null, + currency char(1) default '$' not null, + is_default boolean not null default false, + updated_at timestamp default current_timestamp +); + +comment on table public.cloud_volumes is 'Table containing cloud volume information for various cloud providers'; + +comment on column public.cloud_volumes.cloud_provider is 'The name of the cloud provider'; + +comment on column public.cloud_volumes.volume_type is 'The type of the volume (the name provided by the API)'; + +comment on column public.cloud_volumes.volume_description is 'Description of the volume'; + +comment on column public.cloud_volumes.volume_min_size is 'The minimum size of the volume (in GB)'; + +comment on column public.cloud_volumes.volume_max_size is 'The maximum size of the volume (in GB)'; + +comment on column public.cloud_volumes.price_monthly is 'The monthly price per GB of the volume'; + +comment on column public.cloud_volumes.currency is 'The currency of the price (default: $)'; + +comment on column public.cloud_volumes.is_default is 'Indicates if the volume type is the default'; + +comment on column public.cloud_volumes.updated_at is 'The date when the volume information was last updated'; + +-- The price is approximate because it is specified for one region and may differ in other regions. 
+-- aws, gcp, azure: the price is for the region 'US East' +insert into public.cloud_volumes (cloud_provider, volume_type, volume_description, volume_min_size, volume_max_size, price_monthly, currency, is_default, updated_at) + values ('aws', 'st1', 'Throughput Optimized HDD Disk (Max throughput: 500 MiB/s, Max IOPS: 500)', 125, 16000, 0.045, '$', false, '2024-05-15'), + ('aws', 'gp3', 'General Purpose SSD Disk (Max throughput: 1,000 MiB/s, Max IOPS: 16,000)', 10, 16000, 0.080, '$', true, '2024-05-15'), + ('aws', 'io2', 'Provisioned IOPS SSD Disk (Max throughput: 4,000 MiB/s, Max IOPS: 256,000)', 10, 64000, 0.125, '$', false, '2024-05-15'), + ('gcp', 'pd-standard', 'Standard Persistent HDD Disk (Max throughput: 180 MiB/s, Max IOPS: 3,000)', 10, 64000, 0.040, '$', false, '2024-05-15'), + ('gcp', 'pd-balanced', 'Balanced Persistent SSD Disk (Max throughput: 240 MiB/s, Max IOPS: 15,000)', 10, 64000, 0.100, '$', false, '2024-05-15'), + ('gcp', 'pd-ssd', 'SSD Persistent Disk (Max throughput: 1,200 MiB/s, Max IOPS: 100,000)', 10, 64000, 0.170, '$', true, '2024-05-15'), + ('gcp', 'pd-extreme', 'Extreme Persistent SSD Disk (Max throughput: 2,400 MiB/s, Max IOPS: 120,000)', 500, 64000, 0.125, '$', false, '2024-05-15'), + ('azure', 'Standard_LRS', 'Standard HDD (Max throughput: 500 MiB/s, Max IOPS: 2,000)', 10, 32000, 0.040, '$', false, '2024-05-15'), + ('azure', 'StandardSSD_LRS', 'Standard SSD (Max throughput: 750 MiB/s, Max IOPS: 6,000)', 10, 32000, 0.075, '$', true, '2024-05-15'), + ('azure', 'Premium_LRS', 'Premium SSD (Max throughput: 900 MiB/s, Max IOPS: 20,000)', 10, 32000, 0.132, '$', false, '2024-05-15'), + ('azure', 'UltraSSD_LRS', 'Ultra SSD (Max throughput: 10,000 MiB/s, Max IOPS: 400,000)', 10, 64000, 0.120, '$', false, '2024-05-15'), + ('digitalocean', 'ssd', 'SSD Block Storage (Max throughput: 300 MiB/s, Max IOPS: 7,500)', 10, 16000, 0.100, '$', true, '2024-05-15'), + ('hetzner', 'ssd', 'SSD Block Storage (Max throughput: N/A MiB/s, Max IOPS: N/A)', 10, 10000, 0.052, '€', true, '2024-05-15'); + +alter table only public.cloud_volumes + add constraint cloud_volumes_pkey primary key (cloud_provider, volume_type); + +alter table only public.cloud_volumes + add constraint cloud_volumes_cloud_provider_fkey foreign key (cloud_provider) references public.cloud_providers (provider_name); + +create trigger handle_updated_at + before update on public.cloud_volumes for each row + execute function extensions.moddatetime (updated_at); + +-- cloud_images +create table public.cloud_images ( + cloud_provider text not null, + region text not null, + image jsonb not null, + arch text default 'amd64' not null, + os_name text not null, + os_version text not null, + updated_at timestamp default current_timestamp +); + +comment on table public.cloud_images is 'Table containing cloud images information for various cloud providers'; + +comment on column public.cloud_images.cloud_provider is 'The name of the cloud provider'; + +comment on column public.cloud_images.region is 'The region where the image is available'; + +comment on column public.cloud_images.image is 'The image details in JSON format {"variable_name": "value"}'; + +comment on column public.cloud_images.arch is 'The architecture of the operating system (default: amd64)'; + +comment on column public.cloud_images.os_name is 'The name of the operating system'; + +comment on column public.cloud_images.os_version is 'The version of the operating system'; + +comment on column public.cloud_images.updated_at is 'The date when the image information was 
last updated'; + +-- For all cloud providers except AWS, the image is the same for all regions. +-- For AWS, the image must be specified for each specific region. +-- The value of the "image" column is set in the format: '{"variable_name": "value"}' +-- This format provides flexibility to specify different variables for different cloud providers. +-- For example, Azure requires four variables instead of a single "server_image": +-- azure_vm_image_offer, azure_vm_image_publisher, azure_vm_image_sku, azure_vm_image_version. +insert into public.cloud_images (cloud_provider, region, image, arch, os_name, os_version, updated_at) + values ('aws', 'ap-south-2', '{"server_image": "ami-07c29982fe3ae5d4a"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-south-1', '{"server_image": "ami-01c893e7f232d634f"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-south-1', '{"server_image": "ami-0ef03f8ff5bbf854c"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-south-2', '{"server_image": "ami-0e37953c2e92990cd"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'me-central-1', '{"server_image": "ami-028258249d6efbb44"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ca-central-1', '{"server_image": "ami-0019e788a5e62c6e4"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-central-1', '{"server_image": "ami-0ac67c1f8689447a6"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-central-2', '{"server_image": "ami-0ac79a44f0ec70fe1"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'us-west-1', '{"server_image": "ami-0947011e21ec8788d"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'us-west-2', '{"server_image": "ami-0ca5d4e146b3ba5bf"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'af-south-1', '{"server_image": "ami-0ff5d1627e39b443d"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-north-1', '{"server_image": "ami-035542f8c972d7edf"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-west-3', '{"server_image": "ami-0ba794d79cd225039"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-west-2', '{"server_image": "ami-0bc743bd935283b7f"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'eu-west-1', '{"server_image": "ami-09c7c04446217191d"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-northeast-3', '{"server_image": "ami-0bfe27a707728ee11"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-northeast-2', '{"server_image": "ami-02d2c9994ab378951"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'me-south-1', '{"server_image": "ami-009d9f02cfb388154"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-northeast-1', '{"server_image": "ami-0aa80c152f0b55a7e"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'sa-east-1', '{"server_image": "ami-0f381f7a86e649eb5"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-east-1', '{"server_image": "ami-0bb44258f22410dc4"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ca-west-1', '{"server_image": "ami-042df192e435e6fb3"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-southeast-1', '{"server_image": "ami-01568ec8f5b6dc989"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-southeast-2', '{"server_image": "ami-0c5b9ab59f97ceca7"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'ap-southeast-3', '{"server_image": "ami-01c86258ba749f015"}', 'amd64', 'Ubuntu', '24.04 LTS', 
'2024-08-09'), + ('aws', 'ap-southeast-4', '{"server_image": "ami-0afd313fa12d9bcf0"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'us-east-1', '{"server_image": "ami-063fb82b183efe67d"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('aws', 'us-east-2', '{"server_image": "ami-0dc168b827060282d"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-09'), + ('gcp', 'all', '{"server_image": "projects/ubuntu-os-cloud/global/images/family/ubuntu-2404-lts-amd64"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-12'), + ('azure', 'all', '{"azure_vm_image_offer": "ubuntu-24_04-lts", "azure_vm_image_publisher": "Canonical", "azure_vm_image_sku": "server", "azure_vm_image_version": "latest"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-12'), + ('digitalocean', 'all', '{"server_image": "ubuntu-24-04-x64"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-12'), + ('hetzner', 'all', '{"server_image": "ubuntu-24.04"}', 'amd64', 'Ubuntu', '24.04 LTS', '2024-08-12'); + +alter table only public.cloud_images + add constraint cloud_images_pkey primary key (cloud_provider, image); + +alter table only public.cloud_images + add constraint cloud_images_cloud_provider_fkey foreign key (cloud_provider) references public.cloud_providers (provider_name); + +create trigger handle_updated_at + before update on public.cloud_images for each row + execute function extensions.moddatetime (updated_at); + +-- Projects +create table public.projects ( + project_id bigserial primary key, + project_name varchar(50) not null unique, + project_description varchar(150), + created_at timestamp default current_timestamp, + updated_at timestamp +); + +comment on table public.projects is 'Table containing information about projects'; + +comment on column public.projects.project_name is 'The name of the project'; + +comment on column public.projects.project_description is 'A description of the project'; + +comment on column public.projects.created_at is 'The timestamp when the project was created'; + +comment on column public.projects.updated_at is 'The timestamp when the project was last updated'; + +create trigger handle_updated_at + before update on public.projects for each row + execute function extensions.moddatetime (updated_at); + +insert into public.projects (project_name) + values ('default'); + +-- Environments +create table public.environments ( + environment_id bigserial primary key, + environment_name varchar(20) not null, + environment_description text, + created_at timestamp default current_timestamp, + updated_at timestamp +); + +comment on table public.environments is 'Table containing information about environments'; + +comment on column public.environments.environment_name is 'The name of the environment'; + +comment on column public.environments.environment_description is 'A description of the environment'; + +comment on column public.environments.created_at is 'The timestamp when the environment was created'; + +comment on column public.environments.updated_at is 'The timestamp when the environment was last updated'; + +create trigger handle_updated_at + before update on public.environments for each row + execute function extensions.moddatetime (updated_at); + +create index environments_name_idx on public.environments (environment_name); + +insert into public.environments (environment_name) + values ('production'); + +insert into public.environments (environment_name) + values ('staging'); + +insert into public.environments (environment_name) + values ('test'); + +insert into public.environments (environment_name) + 
values ('dev'); + +insert into public.environments (environment_name) + values ('benchmarking'); + +-- Secrets +create table public.secrets ( + secret_id bigserial primary key, + project_id bigint references public.projects (project_id), + secret_type text not null, + secret_name text not null unique, + secret_value bytea not null, -- Encrypted data + created_at timestamp default current_timestamp, + updated_at timestamp +); + +comment on table public.secrets is 'Table containing secrets for accessing cloud providers and servers'; + +comment on column public.secrets.project_id is 'The ID of the project to which the secret belongs'; + +comment on column public.secrets.secret_type is 'The type of the secret (e.g., cloud_secret, ssh_key, password)'; + +comment on column public.secrets.secret_name is 'The name of the secret'; + +comment on column public.secrets.secret_value is 'The encrypted value of the secret'; + +comment on column public.secrets.created_at is 'The timestamp when the secret was created'; + +comment on column public.secrets.updated_at is 'The timestamp when the secret was last updated'; + +create trigger handle_updated_at + before update on public.secrets for each row + execute function extensions.moddatetime (updated_at); + +create index secrets_type_name_idx on public.secrets (secret_type, secret_name); + +create index secrets_id_project_idx on public.secrets (secret_id, project_id); + +create index secrets_project_idx on public.secrets (project_id); + +-- +goose StatementBegin +create or replace function add_secret (p_project_id bigint, p_secret_type text, p_secret_name text, p_secret_value json, p_encryption_key text) + returns bigint + as $$ +declare + v_inserted_secret_id bigint; +begin + insert into public.secrets (project_id, secret_type, secret_name, secret_value) + values (p_project_id, p_secret_type, p_secret_name, extensions.pgp_sym_encrypt(p_secret_value::text, p_encryption_key, 'cipher-algo=aes256')) + returning + secret_id into v_inserted_secret_id; + return v_inserted_secret_id; +end; +$$ +language plpgsql; +-- +goose StatementEnd + +-- +goose StatementBegin +create or replace function update_secret (p_secret_id bigint, p_secret_type text default null, p_secret_name text default null, p_secret_value json default + null, p_encryption_key text default null) + returns table ( + project_id bigint, + secret_id bigint, + secret_type text, + secret_name text, + created_at timestamp, + updated_at timestamp, + used boolean, + used_by_clusters text, + used_by_servers text + ) + as $$ +begin + if p_secret_value is not null and p_encryption_key is null then + raise exception 'Encryption key must be provided when updating secret value'; + end if; + update + public.secrets + set + secret_name = coalesce(p_secret_name, public.secrets.secret_name), + secret_type = coalesce(p_secret_type, public.secrets.secret_type), + secret_value = case when p_secret_value is not null then + extensions.pgp_sym_encrypt(p_secret_value::text, p_encryption_key, 'cipher-algo=aes256') + else + public.secrets.secret_value + end + where + public.secrets.secret_id = p_secret_id; + return QUERY + select + s.project_id, + s.secret_id, + s.secret_type, + s.secret_name, + s.created_at, + s.updated_at, + s.used, + s.used_by_clusters, + s.used_by_servers + from + public.v_secrets_list s + where + s.secret_id = p_secret_id; +end; +$$ +language plpgsql; +-- +goose StatementEnd + +-- +goose StatementBegin +create or replace function get_secret (p_secret_id bigint, p_encryption_key text) + returns json + as $$ 
+declare
+ decrypted_value json;
+begin
+ select
+ extensions.pgp_sym_decrypt(secret_value, p_encryption_key)::json into decrypted_value
+ from
+ public.secrets
+ where
+ secret_id = p_secret_id;
+ return decrypted_value;
+end;
+$$
+language plpgsql;
+-- +goose StatementEnd
+
+-- An example of using a function to insert a secret (value in JSON format)
+-- select add_secret(, 'ssh_key', '', '{"private_key": ""}', '');
+-- select add_secret(, 'password', '', '{"username": "", "password": ""}', '');
+-- select add_secret(, 'aws', '', '{"AWS_ACCESS_KEY_ID": "", "AWS_SECRET_ACCESS_KEY": ""}', '');
+-- An example of using the function to update a secret
+-- select update_secret(, '', '', '', '');
+-- An example of using a function to get a secret
+-- select get_secret(, '');
+
+-- Clusters
+create table public.clusters (
+ cluster_id bigserial primary key,
+ project_id bigint references public.projects (project_id),
+ environment_id bigint references public.environments (environment_id),
+ secret_id bigint references public.secrets (secret_id),
+ cluster_name text not null unique,
+ cluster_status text default 'deploying',
+ cluster_description text,
+ cluster_location text,
+ connection_info jsonb,
+ extra_vars jsonb,
+ inventory jsonb,
+ server_count integer default 0,
+ postgres_version integer,
+ created_at timestamp default current_timestamp,
+ updated_at timestamp,
+ deleted_at timestamp,
+ flags integer default 0
+);
+
+comment on table public.clusters is 'Table containing information about Postgres clusters';
+
+comment on column public.clusters.project_id is 'The ID of the project to which the cluster belongs';
+
+comment on column public.clusters.environment_id is 'The environment in which the cluster is deployed (e.g., production, development, etc.)';
+
+comment on column public.clusters.cluster_name is 'The name of the cluster (it must be unique)';
+
+comment on column public.clusters.cluster_status is 'The status of the cluster (e.g., deploying, failed, healthy, unhealthy, degraded)';
+
+comment on column public.clusters.cluster_description is 'A description of the cluster (optional)';
+
+comment on column public.clusters.connection_info is 'The cluster connection info';
+
+comment on column public.clusters.extra_vars is 'Extra variables for Ansible specific to this cluster';
+
+comment on column public.clusters.inventory is 'The Ansible inventory for this cluster';
+
+comment on column public.clusters.cluster_location is 'The region/datacenter where the cluster is located';
+
+comment on column public.clusters.server_count is 'The number of servers associated with the cluster';
+
+comment on column public.clusters.postgres_version is 'The Postgres major version';
+
+comment on column public.clusters.secret_id is 'The ID of the secret for accessing the cloud provider';
+
+comment on column public.clusters.created_at is 'The timestamp when the cluster was created';
+
+comment on column public.clusters.updated_at is 'The timestamp when the cluster was last updated';
+
+comment on column public.clusters.deleted_at is 'The timestamp when the cluster was (soft) deleted';
+
+comment on column public.clusters.flags is 'Bitmask field for storing various status flags related to the cluster';
+
+create trigger handle_updated_at
+ before update on public.clusters for each row
+ execute function extensions.moddatetime (updated_at);
+
+create index clusters_id_project_id_idx on public.clusters (cluster_id, project_id);
+
+create index clusters_project_idx on public.clusters (project_id);
+
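+-- A hypothetical example of the lookup these indexes are designed to serve
+-- (illustrative only; the project ID and status value are sample data):
+-- select cluster_id, cluster_name, cluster_status
+-- from public.clusters
+-- where project_id = 1 and cluster_status = 'deploying';
+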
+create index clusters_environment_idx on public.clusters (environment_id);
+
+create index clusters_name_idx on public.clusters (cluster_name);
+
+create index clusters_secret_id_idx on public.clusters (secret_id);
+
+-- +goose StatementBegin
+create or replace function get_cluster_name ()
+ returns text
+ as $$
+declare
+ new_name text;
+ counter int := 1;
+begin
+ loop
+ new_name := 'postgres-cluster-' || to_char(counter, 'FM00');
+ -- Check if such a cluster name already exists
+ if not exists (
+ select
+ 1
+ from
+ public.clusters
+ where
+ cluster_name = new_name) then
+ return new_name;
+ end if;
+ counter := counter + 1;
+end loop;
+end;
+$$
+language plpgsql;
+-- +goose StatementEnd
+
+-- Servers
+create table public.servers (
+ server_id bigserial primary key,
+ cluster_id bigint references public.clusters (cluster_id),
+ server_name text not null,
+ server_location text,
+ server_role text default 'N/A',
+ server_status text default 'N/A',
+ ip_address inet not null,
+ timeline bigint,
+ lag bigint,
+ tags jsonb,
+ pending_restart boolean default false,
+ created_at timestamp default current_timestamp,
+ updated_at timestamp
+);
+
+comment on table public.servers is 'Table containing information about servers within a Postgres cluster';
+
+comment on column public.servers.cluster_id is 'The ID of the cluster to which the server belongs';
+
+comment on column public.servers.server_name is 'The name of the server';
+
+comment on column public.servers.server_location is 'The region/datacenter where the server is located';
+
+comment on column public.servers.server_role is 'The role of the server (e.g., primary, replica)';
+
+comment on column public.servers.server_status is 'The current status of the server';
+
+comment on column public.servers.ip_address is 'The IP address of the server';
+
+comment on column public.servers.timeline is 'The Postgres timeline';
+
+comment on column public.servers.lag is 'The Postgres replication lag in MB';
+
+comment on column public.servers.tags is 'The tags associated with the server';
+
+comment on column public.servers.pending_restart is 'Indicates whether a Postgres restart is pending';
+
+comment on column public.servers.created_at is 'The timestamp when the server was created';
+
+comment on column public.servers.updated_at is 'The timestamp when the server was last updated';
+
+create trigger handle_updated_at
+ before update on public.servers for each row
+ execute function extensions.moddatetime (updated_at);
+
+create unique index servers_cluster_id_ip_address_idx on public.servers (cluster_id, ip_address);
+
+-- +goose StatementBegin
+create or replace function update_server_count ()
+ returns trigger
+ as $$
+declare
+ -- On DELETE, NEW is null, so fall back to OLD to determine the affected cluster
+ -- (otherwise the server_count would never be decremented on delete).
+ v_cluster_id bigint := coalesce(new.cluster_id, old.cluster_id);
+begin
+ update
+ public.clusters
+ set
+ server_count = (
+ select
+ count(*)
+ from
+ public.servers
+ where
+ public.servers.cluster_id = v_cluster_id)
+ where
+ cluster_id = v_cluster_id;
+ return coalesce(new, old);
+end;
+$$
+language plpgsql;
+-- +goose StatementEnd
+
+-- Trigger to update server_count on changes in servers
+create trigger update_server_count_trigger
+ after insert or update or delete on public.servers for each row
+ execute function update_server_count ();
+
+-- Secrets view
+create view public.v_secrets_list as
+select
+ s.project_id,
+ s.secret_id,
+ s.secret_name,
+ s.secret_type,
+ s.created_at,
+ s.updated_at,
+ case when count(c.secret_id) > 0 then
+ true
+ else
+ false
+ end as used,
+ coalesce(string_agg(distinct c.cluster_name, ', '), '') as used_by_clusters
+from
+ public.secrets s
+ left join
lateral ( + select + cluster_name, + secret_id + from + public.clusters + where + secret_id = s.secret_id + and project_id = s.project_id) c on true +group by + s.project_id, + s.secret_id, + s.secret_name, + s.secret_type, + s.created_at, + s.updated_at; + +-- Extensions +create table public.extensions ( + extension_name text primary key, + extension_description varchar(150) not null, + extension_url text, + extension_image text, + postgres_min_version text, + postgres_max_version text, + contrib boolean not null +); + +comment on table public.extensions is 'Table containing available extensions for different Postgres versions'; + +comment on column public.extensions.extension_name is 'The name of the extension'; + +comment on column public.extensions.extension_description is 'The description of the extension'; + +comment on column public.extensions.postgres_min_version is 'The minimum Postgres version where the extension is available'; + +comment on column public.extensions.postgres_max_version is 'The maximum Postgres version where the extension is available'; + +comment on column public.extensions.contrib is 'Indicates if the extension is a contrib module or third-party extension'; + +-- The table stores information about Postgres extensions, including name, description, supported Postgres version range, +-- and whether the extension is a contrib module or third-party. +-- postgres_min_version and postgres_max_version define the range of Postgres versions supported by extensions. +-- If the postgres_max_version is NULL, it is assumed that the extension is still supported by new versions of Postgres. +insert into public.extensions (extension_name, extension_description, postgres_min_version, postgres_max_version, extension_url, extension_image, contrib) + values ('adminpack', 'administrative functions for PostgreSQL', null, null, null, null, true), + ('amcheck', 'functions for verifying relation integrity', null, null, null, null, true), + ('autoinc', 'functions for autoincrementing fields', null, null, null, null, true), + ('bloom', 'bloom access method - signature file based index', null, null, null, null, true), + ('btree_gin', 'support for indexing common datatypes in GIN', null, null, null, null, true), + ('btree_gist', 'support for indexing common datatypes in GiST', null, null, null, null, true), + ('chkpass', 'data type for auto-encrypted passwords', null, '10', null, null, true), + ('citext', 'data type for case-insensitive character strings', null, null, null, null, true), + ('cube', 'data type for multidimensional cubes', null, null, null, null, true), + ('dblink', 'connect to other PostgreSQL databases from within a database', null, null, null, null, true), + ('dict_int', 'text search dictionary template for integers', null, null, null, null, true), + ('dict_xsyn', 'text search dictionary template for extended synonym processing', null, null, null, null, true), + ('earthdistance', 'calculate great-circle distances on the surface of the Earth', null, null, null, null, true), + ('file_fdw', 'foreign-data wrapper for flat file access', null, null, null, null, true), + ('fuzzystrmatch', 'determine similarities and distance between strings', null, null, null, null, true), + ('hstore', 'data type for storing sets of (key, value) pairs', null, null, null, null, true), + ('insert_username', 'functions for tracking who changed a table', null, null, null, null, true), + ('intagg', 'integer aggregator and enumerator (obsolete)', null, null, null, null, true), + ('intarray', 'functions, 
operators, and index support for 1-D arrays of integers', null, null, null, null, true), + ('isn', 'data types for international product numbering standards', null, null, null, null, true), + ('lo', 'Large Object maintenance', null, null, null, null, true), + ('ltree', 'data type for hierarchical tree-like structures', null, null, null, null, true), + ('moddatetime', 'functions for tracking last modification time', null, null, null, null, true), + ('old_snapshot', 'utilities in support of old_snapshot_threshold', '14', null, null, null, true), + ('pageinspect', 'inspect the contents of database pages at a low level', null, null, null, null, true), + ('pg_buffercache', 'examine the shared buffer cache', null, null, null, null, true), + ('pg_freespacemap', 'examine the free space map (FSM)', null, null, null, null, true), + ('pg_prewarm', 'prewarm relation data', null, null, null, null, true), + ('pg_stat_statements', 'track planning and execution statistics of all SQL statements executed', null, null, null, null, true), + ('pg_surgery', 'extension to perform surgery on a damaged relation', '14', null, null, null, true), + ('pg_trgm', 'text similarity measurement and index searching based on trigrams', null, null, null, null, true), + ('pg_visibility', 'examine the visibility map (VM) and page-level visibility info', null, null, null, null, true), + ('pg_walinspect', 'functions to inspect contents of PostgreSQL Write-Ahead Log', '15', null, null, null, true), + ('pgcrypto', 'cryptographic functions', null, null, null, null, true), + ('pgrowlocks', 'show row-level locking information', null, null, null, null, true), + ('pgstattuple', 'show tuple-level statistics', null, null, null, null, true), + ('plpgsql', 'PL/pgSQL procedural language', null, null, null, null, true), + ('postgres_fdw', 'foreign-data wrapper for remote PostgreSQL servers', null, null, null, null, true), + ('refint', 'functions for implementing referential integrity (obsolete)', null, null, null, null, true), + ('seg', 'data type for representing line segments or floating-point intervals', null, null, null, null, true), + ('sslinfo', 'information about SSL certificates', null, null, null, null, true), + ('tablefunc', 'functions that manipulate whole tables, including crosstab', null, null, null, null, true), + ('tcn', 'Triggered change notifications', null, null, null, null, true), + ('timetravel', 'functions for implementing time travel', null, '11', null, null, true), + ('tsm_system_rows', 'TABLESAMPLE method which accepts number of rows as a limit', null, null, null, null, true), + ('tsm_system_time', 'TABLESAMPLE method which accepts time in milliseconds as a limit', null, null, null, null, true), + ('unaccent', 'text search dictionary that removes accents', null, null, null, null, true), + ('uuid-ossp', 'generate universally unique identifiers (UUIDs)', null, null, null, null, true), + ('xml2', 'XPath querying and XSLT', null, null, null, null, true), + -- Third-Party Extensions + ('citus', 'Citus is a PostgreSQL extension that transforms Postgres into a distributed database—so you can achieve high performance at any scale', 11, 16, 'https://github.com/citusdata/citus', 'citus.png', false), + ('pgaudit', 'The PostgreSQL Audit Extension provides detailed session and/or object audit logging via the standard PostgreSQL logging facility', 10, 16, 'https://github.com/pgaudit/pgaudit', 'pgaudit.png', false), + ('pg_cron', 'Job scheduler for PostgreSQL', 10, 16, 
'https://github.com/citusdata/pg_cron', 'pg_cron.png', false),
+ ('pg_partman', 'pg_partman is an extension to create and manage both time-based and number-based table partition sets', 10, 16, 'https://github.com/pgpartman/pg_partman', 'pg_partman.png', false),
+ ('pg_repack', 'Reorganize tables in PostgreSQL databases with minimal locks', 10, 16, 'https://github.com/reorg/pg_repack', 'pg_repack.png', false),
+ ('pg_stat_kcache', 'Gather statistics about physical disk access and CPU consumption done by backends', 10, 16, 'https://github.com/powa-team/pg_stat_kcache', null, false),
+ ('pg_wait_sampling', 'Sampling based statistics of wait events', 10, 16, 'https://github.com/postgrespro/pg_wait_sampling', null, false),
+ ('pgvector', 'Open-source vector similarity search for Postgres (vector data type and ivfflat and hnsw access methods)', 11, 16, 'https://github.com/pgvector/pgvector', 'pgvector.png', false),
+ ('postgis', 'PostGIS extends the capabilities of the PostgreSQL relational database by adding support for storing, indexing, and querying geospatial data', 10, 16, 'https://postgis.net/', 'postgis.png', false),
+ ('pgrouting', 'pgRouting extends the PostGIS / PostgreSQL geospatial database to provide geospatial routing functionality', 10, 16, 'https://pgrouting.org/', 'pgrouting.png', false),
+ ('timescaledb', 'TimescaleDB is an open-source database designed to make SQL scalable for time-series data (Community Edition)', 12, 16, 'https://github.com/timescale/timescaledb', 'timescaledb.png', false);
+
+-- +goose StatementBegin
+create or replace function get_extensions (p_postgres_version float, p_extension_type text default 'all')
+ returns json
+ as $$
+declare
+ extensions json;
+begin
+ select
+ json_agg(row_to_json(e)) into extensions
+ from (
+ select
+ e.extension_name,
+ e.extension_description,
+ e.extension_url,
+ e.extension_image,
+ e.postgres_min_version,
+ e.postgres_max_version,
+ e.contrib
+ from
+ public.extensions e
+ where (e.postgres_min_version is null
+ or e.postgres_min_version::float <= p_postgres_version)
+ and (e.postgres_max_version is null
+ or e.postgres_max_version::float >= p_postgres_version)
+ and (p_extension_type = 'all'
+ or (p_extension_type = 'contrib'
+ and e.contrib = true)
+ or (p_extension_type = 'third_party'
+ and e.contrib = false))
+ order by
+ e.contrib,
+ e.extension_image is null,
+ e.extension_name) e;
+ return extensions;
+end;
+$$
+language plpgsql;
+-- +goose StatementEnd
+
+-- An example of using a function to get a list of available extensions (all or 'contrib'/'third_party' only)
+-- select get_extensions(16);
+-- select get_extensions(16, 'contrib');
+-- select get_extensions(16, 'third_party');
+
+-- Operations
+create table public.operations (
+ id bigserial,
+ project_id bigint references public.projects (project_id),
+ cluster_id bigint references public.clusters (cluster_id),
+ docker_code varchar(80) not null,
+ cid uuid,
+ operation_type text not null,
+ operation_status text not null check (operation_status in ('in_progress', 'success', 'failed')),
+ operation_log text,
+ created_at timestamp with time zone default current_timestamp,
+ updated_at timestamp with time zone
+);
+
+comment on table public.operations is 'Table containing logs of operations performed on clusters';
+
+comment on column public.operations.id is 'The ID of the operation from the backend';
+
+comment on column public.operations.project_id is 'The ID of the project to which the operation belongs';
+
+comment on column public.operations.cluster_id is 'The ID of the cluster related to the operation';
+
+comment on column public.operations.docker_code is 'The CODE of the operation related to the docker daemon';
+
+comment on column public.operations.cid is 'The correlation_id related to the operation';
+
+comment on column public.operations.operation_type is 'The type of operation performed (e.g., deploy, edit, update, restart, delete, etc.)';
+
+comment on column public.operations.operation_status is 'The status of the operation (in_progress, success, failed)';
+
+comment on column public.operations.operation_log is 'The log details of the operation';
+
+comment on column public.operations.created_at is 'The timestamp when the operation was created';
+
+comment on column public.operations.updated_at is 'The timestamp when the operation was last updated';
+
+create trigger handle_updated_at
+ before update on public.operations for each row
+ execute function extensions.moddatetime (updated_at);
+
+-- add created_at as part of the primary key to be able to create a hypertable
+alter table only public.operations
+ add constraint operations_pkey primary key (created_at, id);
+
+create index operations_project_id_idx on public.operations (project_id);
+
+create index operations_cluster_id_idx on public.operations (cluster_id);
+
+create index operations_project_cluster_id_idx on public.operations (project_id, cluster_id, created_at);
+
+create index operations_project_cluster_id_operation_type_idx on public.operations (project_id, cluster_id, operation_type, created_at);
+
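+-- A hypothetical example of the operation-history lookups these indexes target
+-- (the IDs and the time interval are illustrative only):
+-- select id, operation_type, operation_status, created_at
+-- from public.operations
+-- where project_id = 1 and cluster_id = 1
+-- and created_at > now() - interval '7 days'
+-- order by created_at desc;
+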
+-- Check if the timescaledb extension is available and create hypertable if it is
+-- +goose StatementBegin
+do $$
+begin
+ if exists (
+ select
+ 1
+ from
+ pg_extension
+ where
+ extname = 'timescaledb') then
+ -- Convert the operations table to a hypertable
+ perform
+ create_hypertable ('public.operations', 'created_at', chunk_time_interval => interval '1 month');
+ -- Check if the license allows compression policy
+ if current_setting('timescaledb.license', true) = 'timescale' then
+ -- Enable compression on the operations hypertable, segmenting by project_id and cluster_id
+ alter table public.operations set (timescaledb.compress, timescaledb.compress_orderby = 'created_at desc, id desc, operation_type, operation_status', timescaledb.compress_segmentby = 'project_id, cluster_id');
+ -- Compressing chunks older than one month
+ perform
+ add_compression_policy ('public.operations', interval '1 month');
+ else
+ raise notice 'Timescaledb license does not support compression policy. Skipping compression setup.';
+ end if;
+else
+ raise notice 'Timescaledb extension is not available. Skipping hypertable and compression setup.';
+end if;
+end
+$$;
+-- +goose StatementEnd
+
+create or replace view public.v_operations as
+select
+ op.project_id,
+ op.cluster_id,
+ op.id,
+ op.created_at as "started",
+ op.updated_at as "finished",
+ op.operation_type as "type",
+ op.operation_status as "status",
+ cl.cluster_name as "cluster",
+ env.environment_name as "environment"
+from
+ public.operations op
+ join public.clusters cl on op.cluster_id = cl.cluster_id
+ join public.projects pr on op.project_id = pr.project_id
+ join public.environments env on cl.environment_id = env.environment_id;
+
+-- Postgres versions
+create table public.postgres_versions (
+ major_version integer primary key,
+ release_date date,
+ end_of_life date
+);
+
+comment on table public.postgres_versions is 'Table containing the major PostgreSQL versions supported by autobase';
+
+comment on column public.postgres_versions.major_version is 'The major version of PostgreSQL';
+
+comment on column public.postgres_versions.release_date is 'The release date of the PostgreSQL version';
+
+comment on column public.postgres_versions.end_of_life is 'The end of life date for the PostgreSQL version';
+
+insert into public.postgres_versions (major_version, release_date, end_of_life)
+ values (10, '2017-10-05', '2022-11-10'),
+ (11, '2018-10-18', '2023-11-09'),
+ (12, '2019-10-03', '2024-11-14'),
+ (13, '2020-09-24', '2025-11-13'),
+ (14, '2021-09-30', '2026-11-12'),
+ (15, '2022-10-13', '2027-11-11'),
+ (16, '2023-09-14', '2028-11-09');
+
+-- Settings
+create table public.settings (
+ id bigserial primary key,
+ setting_name text not null unique,
+ setting_value jsonb not null,
+ created_at timestamp default current_timestamp,
+ updated_at timestamp
+);
+
+comment on table public.settings is 'Table containing configuration parameters, including console and other component settings';
+
+comment on column public.settings.setting_name is 'The key of the setting';
+
+comment on column public.settings.setting_value is 'The value of the setting';
+
+comment on column public.settings.created_at is 'The timestamp when the setting was created';
+
+comment on column public.settings.updated_at is 'The timestamp when the setting was last updated';
+
+create trigger handle_updated_at
+ before update on public.settings for each row
+ execute function extensions.moddatetime (updated_at);
+
+create index settings_name_idx on public.settings (setting_name);
+
+-- +goose Down
+-- Drop triggers
+drop trigger update_server_count_trigger on public.servers;
+
+drop trigger handle_updated_at on public.servers;
+
+drop trigger handle_updated_at on public.clusters;
+
+drop trigger handle_updated_at on public.environments;
+
+drop trigger handle_updated_at on public.projects;
+
+drop trigger handle_updated_at on public.secrets;
+
+drop trigger handle_updated_at on public.cloud_images;
+
+drop trigger handle_updated_at on public.cloud_volumes;
+
+drop trigger handle_updated_at on public.cloud_instances;
+
+drop trigger handle_updated_at on public.operations;
+
+-- Drop functions
+drop function update_server_count;
+
+drop function get_extensions;
+
+drop function update_secret;
+
+drop function get_secret;
+
+drop function add_secret;
+
+drop function get_cluster_name;
+
+-- Drop views
+drop view public.v_operations;
+
+drop view public.v_secrets_list;
+
+-- Drop tables
+drop table public.postgres_versions;
+
+drop table public.operations;
+
+drop table public.extensions;
+
+drop table public.servers;
+
+drop table public.clusters;
+
+drop table public.secrets;
+
+drop table public.environments;
+
+drop table public.projects;
+
+drop table public.cloud_images;
+
+drop table public.cloud_volumes;
+
+drop table public.cloud_instances;
+
+drop table public.cloud_regions;
+
+drop table public.cloud_providers;
+
+drop table public.settings;
diff --git a/console/db/migrations/20241205103951_2.1.0.sql b/console/db/migrations/20241205103951_2.1.0.sql
new file mode 100644
index 000000000..b00c7c38c
--- /dev/null
+++ b/console/db/migrations/20241205103951_2.1.0.sql
@@ -0,0 +1,178 @@
+-- +goose Up
+-- Postgres versions
+insert into public.postgres_versions (major_version, release_date, end_of_life)
+ values (17, '2024-09-26', '2029-11-08');
+
+-- Extensions
+update
+ public.extensions
+set
+ postgres_max_version = '17'
+where
+ extension_name in ('pgaudit', 'pg_cron', 'pg_partman', 'pg_repack', 'pg_stat_kcache', 'pg_wait_sampling', 'pgvector', 'postgis',
+ 'pgrouting', 'timescaledb');
+
+-- Adds shared_cpu BOOLEAN field to cloud_instances
+-- ref: https://github.com/vitabaks/autobase/issues/784
+alter table only public.cloud_instances
+ add column shared_cpu boolean default false;
+
+-- Update AWS shared vCPU instances
+update
+ public.cloud_instances
+set
+ shared_cpu = true
+where
+ cloud_provider = 'aws'
+ and instance_name in ('t3.small', 't3.medium');
+
+-- Update GCP shared vCPU instances
+update
+ public.cloud_instances
+set
+ shared_cpu = true
+where
+ cloud_provider = 'gcp'
+ and instance_name in ('e2-small', 'e2-medium');
+
+-- Update Azure shared vCPU instances
+update
+ public.cloud_instances
+set
+ shared_cpu = true
+where
+ cloud_provider = 'azure'
+ and instance_name in ('Standard_B1ms', 'Standard_B2s');
+
+-- Update DigitalOcean shared vCPU instances
+update
+ public.cloud_instances
+set
+ shared_cpu = true
+where
+ cloud_provider = 'digitalocean'
+ and instance_name in ('s-2vcpu-2gb', 's-2vcpu-4gb');
+
+-- Extends 20240520144338_2.0.0_initial_scheme_setup.sql#L217 with more cloud instance types
+-- Hetzner price is for the region 'Germany / Finland'; other regions may vary in price.
+insert into public.cloud_instances (cloud_provider, instance_group, instance_name, cpu, ram, price_hourly, price_monthly, currency, updated_at, shared_cpu)
+ values ('hetzner', 'Small Size', 'CX22', 2, 4, 0.0074, 4.59, '$', '2024-12-10', true),
+ ('hetzner', 'Small Size', 'CX32', 4, 8, 0.0127, 7.59, '$', '2024-12-10', true),
+ ('hetzner', 'Medium Size', 'CX42', 8, 16, 0.0304, 18.59, '$', '2024-12-10', true),
+ ('hetzner', 'Medium Size', 'CX52', 16, 32, 0.0611, 36.09, '$', '2024-12-10', true),
+ ('hetzner', 'Small Size', 'CPX31', 4, 8, 0.025, 15.59, '$', '2024-12-10', true),
+ ('hetzner', 'Medium Size', 'CPX41', 8, 16, 0.0464, 28.09, '$', '2024-12-10', true),
+ ('hetzner', 'Medium Size', 'CPX51', 16, 32, 0.0979, 61.09, '$', '2024-12-10', true);
+
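+-- A hypothetical example of how the new shared_cpu flag can be used
+-- (illustrative query, not part of the migration):
+-- select instance_name, cpu, ram, price_monthly
+-- from public.cloud_instances
+-- where cloud_provider = 'hetzner' and not shared_cpu
+-- order by price_monthly;
+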
+-- Update all existing Hetzner instances to use USD instead of EUR for easier comparison with other IaaS providers.
+-- Update prices and other relevant fields for Hetzner cloud instances; prices include an IPv4 address.
+update
+ public.cloud_instances
+set
+ price_hourly = 0.0082,
+ price_monthly = 5.09,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = true
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CPX11';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.0138,
+ price_monthly = 8.59,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = true
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CPX21';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.0226,
+ price_monthly = 14.09,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX13';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.0435,
+ price_monthly = 27.09,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX23';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.0867,
+ price_monthly = 54.09,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX33';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.1725,
+ price_monthly = 107.59,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX43';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.3431,
+ price_monthly = 214.09,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX53';
+
+update
+ public.cloud_instances
+set
+ price_hourly = 0.5138,
+ price_monthly = 320.59,
+ currency = '$',
+ updated_at = '2024-12-10',
+ shared_cpu = false
+where
+ cloud_provider = 'hetzner'
+ and instance_name = 'CCX63';
+
+-- cloud_volumes
+-- Update prices and other relevant fields for Hetzner cloud volume
+update
+ public.cloud_volumes
+set
+ price_monthly = 0.05,
+ currency = '$',
+ updated_at = '2024-12-10'
+where
+ cloud_provider = 'hetzner';
+
+-- +goose Down
+delete from public.postgres_versions
+where major_version = 17;
diff --git a/console/db/migrations/20250323121343_2.2.0.sql b/console/db/migrations/20250323121343_2.2.0.sql
new file mode 100644
index 000000000..e94fc1a81
--- /dev/null
+++ b/console/db/migrations/20250323121343_2.2.0.sql
@@ -0,0 +1,10 @@
+-- +goose Up
+-- Extensions
+update
+ public.extensions
+set
+ postgres_max_version = '17'
+where
+ extension_name = 'citus';
+
+-- +goose Down
diff --git a/console/db/pg_hba.conf b/console/db/pg_hba.conf
new file mode 100644
index 000000000..4a85b1714
--- /dev/null
+++ b/console/db/pg_hba.conf
@@ -0,0 +1,3 @@
+local all all trust
+host all all 127.0.0.1/32 trust
+host all all 0.0.0.0/0 scram-sha-256
diff --git a/console/db/pg_start.sh b/console/db/pg_start.sh
new file mode 100644
index 000000000..1f61e2a17
--- /dev/null
+++ b/console/db/pg_start.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -e
+
+log() {
+ echo "$(date +'%Y-%m-%d %H:%M:%S') - $1"
+}
+
+# Ensure the directory exists and has the correct permissions
+mkdir -p ${PGDATA} ${PG_UNIX_SOCKET_DIR} /etc/postgresql/${POSTGRES_VERSION}/main
+chown -R postgres:postgres ${PGDATA} ${PG_UNIX_SOCKET_DIR} /etc/postgresql/${POSTGRES_VERSION}/main
+
+# Create PGDATA if not exists
+if [[ ! -d "${PGDATA}/base" ]]; then
+ log "Creating PostgreSQL data directory..."
+ su - postgres -c "pg_createcluster --locale en_US.UTF-8 ${POSTGRES_VERSION} main -d ${PGDATA} -- --data-checksums" + mv /etc/postgresql/${POSTGRES_VERSION}/main/postgresql.conf /etc/postgresql/${POSTGRES_VERSION}/main/postgresql.base.conf +fi + +# Check if the config file exists, if not, copy it +if [[ ! -f "/etc/postgresql/${POSTGRES_VERSION}/main/postgresql.conf" ]]; then + cp /var/tmp/postgresql.conf /etc/postgresql/${POSTGRES_VERSION}/main/postgresql.conf + cp /var/tmp/pg_hba.conf /etc/postgresql/${POSTGRES_VERSION}/main/pg_hba.conf + # Update data_directory in postgresql.conf + sed -i "s|^data_directory = .*|data_directory = '${PGDATA}'|" /etc/postgresql/${POSTGRES_VERSION}/main/postgresql.conf +fi + +# Start postgres +log "Starting PostgreSQL..." +su - postgres -c "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/postgres -D ${PGDATA} -k ${PG_UNIX_SOCKET_DIR} -p ${POSTGRES_PORT} -c config_file=/etc/postgresql/${POSTGRES_VERSION}/main/postgresql.conf" & + +for i in {1..300}; do + if pg_isready -h ${PG_UNIX_SOCKET_DIR} -p ${POSTGRES_PORT}; then + log "Postgres is ready!" + break + else + log "Postgres is not ready yet. Waiting..." + sleep 2 + fi +done + +# Reset postgres password +log "Resetting postgres password..." +psql -h ${PG_UNIX_SOCKET_DIR} -p ${POSTGRES_PORT} -U postgres -d postgres -c "ALTER USER postgres WITH PASSWORD '${POSTGRES_PASSWORD}';" + +# Create timescaledb extension (if not exists) +log "Creating TimescaleDB extension..." +psql -h ${PG_UNIX_SOCKET_DIR} -p ${POSTGRES_PORT} -U postgres -d postgres -c "CREATE EXTENSION IF NOT EXISTS timescaledb;" + +# Infinite sleep to allow restarting Postgres +/bin/bash -c "trap : TERM INT; sleep infinity & wait" diff --git a/console/db/postgresql.conf b/console/db/postgresql.conf new file mode 100644 index 000000000..f8d08d315 --- /dev/null +++ b/console/db/postgresql.conf @@ -0,0 +1,60 @@ +listen_addresses = '*' +port = 5432 +max_connections = 100 +superuser_reserved_connections = 5 +password_encryption = scram-sha-256 +max_locks_per_transaction = 512 +shared_preload_libraries = 'pg_stat_statements,timescaledb' +pg_stat_statements.track = all +timescaledb.max_background_workers = 4 +timescaledb.telemetry_level = off +huge_pages = try +shared_buffers = 256MB +work_mem = 64MB +maintenance_work_mem = 128MB +effective_cache_size = 1024MB +effective_io_concurrency = 200 +seq_page_cost = 1.0 +random_page_cost = 1.1 +default_statistics_target = 100 +autovacuum_max_workers = 5 +autovacuum_naptime = 1min +autovacuum_vacuum_scale_factor = 0.01 +autovacuum_analyze_scale_factor = 0.01 +autovacuum_vacuum_cost_limit = 500 +autovacuum_vacuum_cost_delay = 2 +max_files_per_process = 4096 +max_worker_processes = 16 +max_parallel_workers = 4 +max_parallel_workers_per_gather = 2 +max_parallel_maintenance_workers = 2 +synchronous_commit = off +archive_mode = on +archive_command = '/bin/true' +archive_timeout = 30min +wal_level = replica +wal_buffers = 32MB +wal_compression = on +max_wal_size = 2GB +checkpoint_completion_target = 0.9 +checkpoint_timeout = 15min +logging_collector = on +log_truncate_on_rotation = on +log_rotation_age = 1d +log_rotation_size = 0 +log_filename = 'postgresql-%a.log' +log_line_prefix = '%t [%p-%l] %r %q%u@%d ' +log_lock_waits = on +log_temp_files = 0 +log_checkpoints = on +track_activity_query_size = 2048 +track_io_timing = on +track_functions = all +track_activities = on +track_counts = on +tcp_keepalives_count = 10 +tcp_keepalives_idle = 300 +tcp_keepalives_interval = 30 +idle_in_transaction_session_timeout=10min 
+data_directory = '/var/lib/postgresql/16/main'
+hba_file = '/etc/postgresql/16/main/pg_hba.conf'
diff --git a/console/docker-compose.yml b/console/docker-compose.yml
new file mode 100644
index 000000000..14964a710
--- /dev/null
+++ b/console/docker-compose.yml
@@ -0,0 +1,72 @@
+---
+services:
+  caddy:
+    image: lucaslorentz/caddy-docker-proxy:ci-alpine
+    container_name: caddy
+    ports:
+      - 80:80
+      - 443:443
+    environment:
+      - CADDY_INGRESS_NETWORKS=caddy
+    networks:
+      - caddy
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - caddy_data:/data
+    restart: unless-stopped
+    labels:
+      caddy.email: ${EMAIL}
+
+  autobase-console-api:
+    image: autobase/console_api:latest
+    container_name: autobase-console-api
+    restart: unless-stopped
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /tmp/ansible:/tmp/ansible
+    environment:
+      - PG_CONSOLE_AUTHORIZATION_TOKEN=${AUTH_TOKEN}
+      - PG_CONSOLE_DB_HOST=autobase-console-db
+      - PG_CONSOLE_LOGGER_LEVEL=${PG_CONSOLE_LOGGER_LEVEL:-INFO}
+      - PG_CONSOLE_DOCKER_IMAGE=autobase/automation:latest
+    networks:
+      - autobase
+      - caddy
+
+  autobase-console-ui:
+    image: autobase/console_ui:latest
+    container_name: autobase-console-ui
+    restart: unless-stopped
+    labels:
+      caddy: ${DOMAIN}
+      caddy.route_0: /api/v1/*
+      caddy.route_0.reverse_proxy: autobase-console-api:8080
+      caddy.route_1: /*
+      caddy.route_1.reverse_proxy: autobase-console-ui:80
+    environment:
+      - PG_CONSOLE_API_URL=/api/v1
+      - PG_CONSOLE_AUTHORIZATION_TOKEN=${AUTH_TOKEN}
+    networks:
+      - autobase
+      - caddy
+
+  autobase-console-db:
+    image: autobase/console_db:latest
+    container_name: autobase-console-db
+    restart: unless-stopped
+    volumes:
+      - console_postgres:/var/lib/postgresql
+    networks:
+      - autobase
+
+volumes:
+  console_postgres:
+    name: console_postgres
+  caddy_data:
+    name: caddy_data
+
+networks:
+  autobase:
+    name: autobase
+  caddy:
+    name: caddy
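A minimal sketch of the `.env` file this compose file expects (the variable names are taken from the references above; the values are placeholders):

```
DOMAIN=console.example.com
EMAIL=admin@example.com
AUTH_TOKEN=change-me
PG_CONSOLE_LOGGER_LEVEL=INFO
```

With such a file in place, `docker compose up -d` run from the `console/` directory should bring up the stack, assuming a standard Docker Compose setup.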
diff --git a/console/service/Dockerfile b/console/service/Dockerfile
new file mode 100644
index 000000000..2e7f3de90
--- /dev/null
+++ b/console/service/Dockerfile
@@ -0,0 +1,14 @@
+FROM golang:1.24-bookworm as builder
+WORKDIR /go/src/pg-console
+
+COPY console/service/ .
+
+RUN make build_in_docker
+
+FROM debian:bookworm-slim
+LABEL maintainer="Vitaliy Kukharik vitabaks@gmail.com"
+
+COPY --from=builder /go/src/pg-console/pg-console /usr/local/bin/
+COPY console/db/migrations /etc/db/migrations
+
+CMD ["/usr/local/bin/pg-console"]
diff --git a/console/service/Makefile b/console/service/Makefile
new file mode 100644
index 000000000..148ff4944
--- /dev/null
+++ b/console/service/Makefile
@@ -0,0 +1,31 @@
+ifndef GO_BIN
+override GO_BIN = "pg-console"
+endif
+
+APP = main.go
+
+swagger_install:
+	{ \
+	export my_dir=$$(pwd) ;\
+	export dir=$$(mktemp -d) ;\
+	retry_count=0 ;\
+	max_retries=5 ;\
+	until [ "$$retry_count" -ge "$$max_retries" ]; do \
+	git clone https://github.com/go-swagger/go-swagger "$$dir" && break ;\
+	retry_count=$$((retry_count+1)) ;\
+	echo "Retry $$retry_count/$$max_retries" ;\
+	sleep 1 ;\
+	done ;\
+	cd "$$dir" ;\
+	go install ./cmd/swagger ;\
+	cd "$$my_dir" ;\
+	swagger version ;\
+	}
+
+build: ## Build app
+	@go build -o $(GO_BIN) $(APP)
+
+swagger:
+	@swagger generate server --name PgConsole --spec api/swagger.yaml --principal interface{} --exclude-main
+
+build_in_docker: swagger_install swagger build
diff --git a/console/service/README.md b/console/service/README.md
new file mode 100644
index 000000000..49ad4f17b
--- /dev/null
+++ b/console/service/README.md
@@ -0,0 +1,105 @@
+# Autobase Console API service
+
+Server-side component for the autobase console. This REST service implements the API for UI integration.
+
+The project is written in `Go` and uses [Swagger](https://github.com/go-swagger/go-swagger) for server-side code generation. The server receives requests from the web to create and manage clusters. Under the hood, the server uses Docker to run the `autobase/automation` image with Ansible playbooks for the cluster deployment logic.
+
+## Build
+The Swagger specification is used for creating the server REST API. First, you need to install the Swagger tool to build the auto-generated Go files:
+```
+export dir=$(mktemp -d)
+git clone https://github.com/go-swagger/go-swagger "$dir"
+cd "$dir"
+go install ./cmd/swagger
+```
+Then, you need to generate the server-side files:
+```
+swagger generate server --name PgConsole --spec api/swagger.yaml --principal interface{} --exclude-main
+```
+
+After that, you can build the server with the following command:
+```
+go build -o pg-console main.go
+```
+
+The project also contains a Makefile with all of these commands, so you can simply run the following steps:
+```
+make swagger_install
+make swagger
+make build
+```
+
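+The service can also be built as a Docker image using `console/service/Dockerfile`. Since that Dockerfile copies `console/service/` and `console/db/migrations`, the build context must be the repository root (the image tag below is illustrative):
+```
+docker build -f console/service/Dockerfile -t autobase/console_api:latest .
+```
+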
+## Configuration
+The server is configured via environment variables. The following environment variables can be used:
+```
+KEY                                   TYPE              DEFAULT                          REQUIRED  DESCRIPTION
+PG_CONSOLE_LOGGER_LEVEL               String            DEBUG                                      Log level. Accepted values: [TRACE, DEBUG, INFO, WARN, ERROR, FATAL, PANIC]
+PG_CONSOLE_HTTP_HOST                  String            0.0.0.0                                    Accepted host for connection. '0.0.0.0' for all hosts
+PG_CONSOLE_HTTP_PORT                  Integer           8080                                       Listening port
+PG_CONSOLE_HTTP_WRITETIMEOUT          Duration          10s                                        Maximum duration before timing out write of the response
+PG_CONSOLE_HTTP_READTIMEOUT           Duration          10s                                        Maximum duration before timing out read of the request
+PG_CONSOLE_HTTPS_ISUSED               True or False     false                                      Flag to turn HTTPS on/off
+PG_CONSOLE_HTTPS_HOST                 String            0.0.0.0                                    Accepted host for connection. '0.0.0.0' for all hosts
+PG_CONSOLE_HTTPS_PORT                 Integer           8081                                       Listening port
+PG_CONSOLE_HTTPS_CACERT               String            /etc/pg_console/cacert.pem                 The certificate authority file to be used with mutual tls auth
+PG_CONSOLE_HTTPS_SERVERCERT           String            /etc/pg_console/server-cert.pem            The certificate to use for secure connections
+PG_CONSOLE_HTTPS_SERVERKEY            String            /etc/pg_console/server-key.pem             The private key to use for secure connections
+PG_CONSOLE_AUTHORIZATION_TOKEN        String            auth_token                                 Authorization token for REST API
+PG_CONSOLE_DB_HOST                    String            localhost                                  Database host
+PG_CONSOLE_DB_PORT                    Unsigned Integer  5432                                       Database port
+PG_CONSOLE_DB_DBNAME                  String            postgres                                   Database name
+PG_CONSOLE_DB_USER                    String            postgres                                   Database user name
+PG_CONSOLE_DB_PASSWORD                String            postgres-pass                              Database user password
+PG_CONSOLE_DB_MAXCONNS                Integer           10                                         MaxConns is the maximum size of the pool
+PG_CONSOLE_DB_MAXCONNLIFETIME         Duration          60s                                        MaxConnLifetime is the duration since creation after which a connection will be automatically closed
+PG_CONSOLE_DB_MAXCONNIDLETIME         Duration          60s                                        MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check
+PG_CONSOLE_DB_MIGRATIONDIR            String            /etc/db/migrations                         Path to directory with migration scripts
+PG_CONSOLE_ENCRYPTIONKEY              String            super_secret                               Encryption key for secret storage
+PG_CONSOLE_DOCKER_HOST                String            unix:///var/run/docker.sock                Docker host
+PG_CONSOLE_DOCKER_LOGDIR              String            /tmp/ansible                               Directory inside docker container for ansible json log
+PG_CONSOLE_DOCKER_IMAGE               String            autobase/automation:2.2.0                  Docker image for autobase automation
+PG_CONSOLE_LOGWATCHER_RUNEVERY        Duration          1m                                         LogWatcher run interval
+PG_CONSOLE_LOGWATCHER_ANALYZEPAST     Duration          48h                                        LogWatcher gets operations to analyze which created_at > now() - AnalyzePast
+PG_CONSOLE_CLUSTERWATCHER_RUNEVERY    Duration          1m                                         ClusterWatcher run interval
+PG_CONSOLE_CLUSTERWATCHER_POOLSIZE    Integer           4                                          Number of async requests from ClusterWatcher
+```
+
+Note: Be careful when using the `TRACE` logging level. With `TRACE` enabled, some secrets can be present in the logs.
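+
+A minimal sketch of running the service locally (the variable names are from the table above; the values are placeholders):
+```
+export PG_CONSOLE_DB_HOST=localhost
+export PG_CONSOLE_DB_PASSWORD=postgres-pass
+export PG_CONSOLE_AUTHORIZATION_TOKEN=auth_token
+export PG_CONSOLE_ENCRYPTIONKEY=super_secret
+./pg-console
+```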
+
+## Project structure
+```
+|-api - Swagger specification
+|-internal - Folder with all internal logic
+| |-configuration - Configuration
+| |-controllers - REST functions and basic logic for handlers
+| | |-cluster - REST API for cluster objects
+| | |-dictionary - REST API for dictionary objects
+| | |-environment - REST API for environment objects
+| | |-operation - REST API for operation objects
+| | |-project - REST API for project objects
+| | |-secret - REST API for secret objects
+| | |-setting - REST API for setting objects
+| |-convert - Functions for converting DB model to REST model
+| |-db - Basic DB functions
+| |-service - Common logic for aggregating all server logic
+| |-storage - DB logic
+| |-watcher - Async watchers
+| | |-log_collector.go - Collecting logs from running Docker containers
+| | |-log_watcher.go - JSON container log parser
+| | |-cluster_watcher.go - Collecting cluster statuses
+| |-xdocker - Basic logic for Docker
+|-middleware - Common REST middleware for the server
+|-migrations - DB migration logic
+|-pkg - Folder with common logic
+| |-patroni - Client for Patroni integration
+| |-tracer - Base structure for tracing
+|-*models - Auto-generated files with REST models
+|-*restapi - Auto-generated files with REST server
+|-main.go - Entry point
+```
+
+## Secrets
+The server handles different kinds of secrets, such as:
+
+* Cloud secrets used for cloud connections
+* SSH keys and passwords for connections to your own servers
diff --git a/console/service/VERSION b/console/service/VERSION
new file mode 100644
index 000000000..e3a4f1933
--- /dev/null
+++ b/console/service/VERSION
@@ -0,0 +1 @@
+2.2.0
\ No newline at end of file
diff --git a/console/service/api/swagger.yaml b/console/service/api/swagger.yaml
new file mode 100644
index 000000000..04b69e988
--- /dev/null
+++ b/console/service/api/swagger.yaml
@@ -0,0 +1,1664 @@
+---
+swagger: '2.0'
+info:
+  title: autobase console
+  description: API for autobase console
+  version: 2.2.0
+host: localhost:8080
+schemes:
+  - http
+produces:
+  - application/json
+consumes:
+  - application/json
+basePath: "/api/v1"
+
+paths:
+  /version:
+    get:
+      summary: Get version of API service
+      tags:
+        - system
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.Version"
+
+  /external/deployments:
+    get:
+      summary: Get full info about available external deployments
+      tags:
+        - dictionary
+      parameters:
+        - name: offset
+          in: query
+          required: false
+          type: integer
+        - name: limit
+          in: query
+          required: false
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.DeploymentsInfo"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /database/extensions:
+    get:
+      summary: "Info about available database extensions"
+      tags:
+        - dictionary
+      parameters:
+        - name: offset
+          in: query
+          required: false
+          type: integer
+        - name: limit
+          in: query
+          required: false
+          type: integer
+        - name: extension_type
+          in: query
+          required: false
+          type: string
+          default: "all"
+          enum:
+            - "all"
+            - "contrib"
+            - "third_party"
+        - name: postgres_version
+          in: query
+          required: false
+          type: string
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.DatabaseExtensions"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+  /environments:
+    get:
+      summary: "Get environments list"
+      tags:
+        - environment
+      parameters:
+        - name: limit
+          in: query
+          required: false
+          type: integer
+        - name: offset
+          in: query
+          required: false
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.EnvironmentsList"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+    post:
+      summary: "Create environment"
+      tags:
+        - environment
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.Environment'
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.Environment"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /environments/{id}:
+    delete:
+      summary: "Delete environment"
+      tags:
+        - environment
+      parameters:
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '204':
+          description: OK
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /postgres_versions:
+    get:
+      summary: "Get supported postgres versions"
+      tags:
+        - dictionary
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.PostgresVersions"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /settings:
+    post:
+      summary: "Create new setting"
+      tags:
+        - setting
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.CreateSetting'
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.Setting"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+    get:
+      summary: "Get settings"
+      tags:
+        - setting
+      parameters:
+        - name: name
+          in: query
+          required: false
+          type: string
+          description: "Filter by name"
+        - name: offset
+          in: query
+          required: false
+          type: integer
+        - name: limit
+          in: query
+          required: false
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.Settings"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /settings/{name}:
+    patch:
+      summary: "Change setting"
+      tags:
+        - setting
+      parameters:
+        - name: name
+          in: path
+          type: string
+          required: true
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.ChangeSetting'
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.Setting"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
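+  # A hypothetical example of calling the settings API with curl (the
+  # Authorization header scheme is an assumption; the token value must match
+  # the PG_CONSOLE_AUTHORIZATION_TOKEN configured for the service):
+  #   curl -H 'Authorization: Bearer auth_token' 'http://localhost:8080/api/v1/settings?limit=10'
+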
+  /clusters:
+    post:
+      summary: "Create new cluster"
+      tags:
+        - cluster
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.ClusterCreate'
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.ClusterCreate"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+    get:
+      summary: "Get info about clusters"
+      tags:
+        - cluster
+      parameters:
+        - name: offset
+          in: query
+          required: false
+          type: integer
+        - name: limit
+          in: query
+          required: false
+          type: integer
+        - name: project_id
+          in: query
+          required: true
+          type: integer
+        - name: name
+          in: query
+          required: false
+          type: string
+          description: "Filter by name"
+        - name: status
+          type: string
+          in: query
+          required: false
+          description: "Filter by status"
+        - name: location
+          type: string
+          in: query
+          required: false
+          description: "Filter by location"
+        - name: environment
+          type: string
+          in: query
+          required: false
+          description: "Filter by environment"
+        - name: server_count
+          type: integer
+          in: query
+          required: false
+          description: "Filter by server_count"
+        - name: postgres_version
+          type: integer
+          in: query
+          required: false
+          description: "Filter by postgres_version"
+        - name: created_at_from
+          required: false
+          type: string
+          format: date-time
+          in: query
+          description: "Created at after this date"
+        - name: created_at_to
+          required: false
+          type: string
+          format: date-time
+          in: query
+          description: "Created at until this date"
+        - name: sort_by
+          in: query
+          required: false
+          type: string
+          description: "Sort by fields. Example: sort_by=id,-name,created_at,updated_at\n
+            Supported values:\n
+            - id\n
+            - name\n
+            - created_at\n
+            - updated_at\n
+            - environment\n
+            - project\n
+            - status\n
+            - location\n
+            - server_count\n
+            - postgres_version\n"
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.ClustersInfo"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /clusters/default_name:
+    get:
+      summary: "Get cluster default name"
+      tags:
+        - cluster
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.ClusterDefaultName"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /clusters/{id}:
+    get:
+      summary: "Get cluster info"
+      tags:
+        - cluster
+      parameters:
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/ClusterInfo"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+    delete:
+      summary: "Delete cluster (from the console database)"
+      tags:
+        - cluster
+      parameters:
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '204':
+          description: OK
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /servers/{id}:
+    delete:
+      summary: "Delete server (from the console database)"
+      tags:
+        - cluster
+      parameters:
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '204':
+          description: OK
+          headers:
+            x-cluster-id:
+              type: integer
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  /clusters/{id}/refresh:
+    post:
+      summary: "Refresh cluster info (from Patroni API)"
+      tags:
+        - cluster
+      parameters:
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/ClusterInfo"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  # TODO: not implemented yet
+  /clusters/{id}/reinit:
+    post:
+      summary: "Reinit cluster"
+      deprecated: true
+      tags:
+        - cluster
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.ClusterReinit'
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.ClusterCreate"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  # TODO: not implemented yet
+  /clusters/{id}/reload:
+    post:
+      summary: "Reload cluster"
+      deprecated: true
+      tags:
+        - cluster
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
+            $ref: '#/definitions/Request.ClusterReload'
+        - name: id
+          in: path
+          required: true
+          type: integer
+      responses:
+        '200':
+          description: OK
+          schema:
+            $ref: "#/definitions/Response.ClusterCreate"
+        '400':
+          description: Error
+          schema:
+            $ref: "#/definitions/Response.Error"
+
+  # TODO: not implemented yet
+  /clusters/{id}/restart:
+    post:
+      summary: "Restart cluster"
+      deprecated: true
+      tags:
+        - cluster
+      parameters:
+        - name: body
+          in: body
+          required: true
+          schema:
$ref: '#/definitions/Request.ClusterRestart' + - name: id + in: path + required: true + type: integer + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.ClusterCreate" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + # TODO: not implemented yet + /clusters/{id}/stop: + post: + summary: "Stop cluster" + deprecated: true + tags: + - cluster + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.ClusterStop' + - name: id + in: path + required: true + type: integer + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.ClusterCreate" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + # TODO: not implemented yet + /clusters/{id}/start: + post: + summary: "Start cluster" + deprecated: true + tags: + - cluster + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.ClusterStart' + - name: id + in: path + required: true + type: integer + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.ClusterCreate" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + # TODO: not implemented yet + /clusters/{id}/remove: + post: + summary: "Remove cluster" + deprecated: true + tags: + - cluster + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.ClusterRemove' + - name: id + in: path + required: true + type: integer + responses: + '204': + description: OK + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /projects: + post: + summary: "Create new project" + tags: + - project + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.ProjectCreate' + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.Project" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + get: + summary: "Get projects list" + tags: + - project + parameters: + - name: limit + in: query + required: false + type: integer + - name: offset + in: query + required: false + type: integer + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.ProjectsList" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /projects/{id}: + patch: + summary: "Change project" + tags: + - project + parameters: + - name: id + in: path + required: true + type: integer + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.ProjectPatch' + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.Project" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + delete: + summary: "Delete project" + tags: + - project + parameters: + - name: id + in: path + required: true + type: integer + responses: + '204': + description: OK + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /secrets: + post: + summary: "Create new secret" + tags: + - secret + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.SecretCreate' + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.SecretInfo" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + get: + summary: "Get secrets list" + tags: + - secret + parameters: + - name: limit + in: query + required: false + type: integer + - name: offset + 
in: query + required: false + type: integer + - name: project_id + in: query + required: true + type: integer + - name: name + in: query + required: false + type: string + description: "Filter by name" + - name: type + in: query + required: false + type: string + description: "Filter by type" + - name: sort_by + in: query + required: false + type: string + description: "Sort by fields. Example: sort_by=id,name,-type,created_at,updated_at" + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.SecretInfoList" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /secrets/{id}: + patch: + summary: "Change secret" + tags: + - secret + parameters: + - name: id + in: path + required: true + type: integer + - name: body + in: body + required: true + schema: + $ref: '#/definitions/Request.SecretPatch' + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.SecretInfo" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + delete: + summary: "Delete secret" + tags: + - secret + parameters: + - name: id + in: path + required: true + type: integer + responses: + '204': + description: OK + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /operations: + get: + summary: "Get operations list for current project" + tags: + - operation + parameters: + - name: project_id + in: query + required: true + type: integer + description: "Required parameter for filtering" + - name: start_date + required: true + type: string + format: date-time + in: query + description: "Operations started after this date" + - name: end_date + required: true + type: string + format: date-time + in: query + description: "Operations started until this date" + - name: cluster_name + in: query + required: false + type: string + description: "Filter by cluster_name" + - name: type + in: query + required: false + type: string + description: "Filter by type" + - name: status + in: query + required: false + type: string + description: "Filter by status" + - name: environment + in: query + required: false + type: string + description: "Filter by environment" + - name: sort_by + in: query + required: false + type: string + description: "Sort by fields.
Example: sort_by=cluster_name,-type,status,id,created_at,updated_at\n + Supported values:\n + - id\n + - cluster_name\n + - type\n + - status\n + - started_at\n + - updated_at\n + - cluster\n + - environment\n" + - name: limit + in: query + required: false + type: integer + - name: offset + in: query + required: false + type: integer + responses: + '200': + description: OK + schema: + $ref: "#/definitions/Response.OperationsList" + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + + /operations/{id}/log: + get: + summary: "Get operation log by operation_id" + tags: + - operation + consumes: + - text/plain + parameters: + - name: id + in: path + required: true + type: integer + description: "Operation id" + responses: + '200': + description: OK + schema: + type: string + headers: + content-type: + type: string + x-log-completed: + type: boolean + '400': + description: Error + schema: + $ref: "#/definitions/Response.Error" + +definitions: + Response.Version: + title: Version response + type: object + properties: + version: + type: string + example: v1.0.0 + + Response.Error: + title: Error object + type: object + properties: + code: + type: integer + title: + type: string + description: + type: string + + Meta.Pagination: + title: Pagination info for list requests + type: object + properties: + offset: + type: integer + x-nullable: true + limit: + type: integer + x-nullable: true + count: + type: integer + x-nullable: true + + Response.DeploymentsInfo: + title: Deployments info + type: object + properties: + data: + type: array + items: + $ref: "#/definitions/Response.DeploymentInfo" + meta: + $ref: '#/definitions/Meta.Pagination' + + Response.DeploymentInfo: + description: Deployment info + type: object + properties: + code: + type: string + example: "aws" + description: + type: string + example: "Amazon Web Services" + avatar_url: + type: string + cloud_regions: + description: "List of available regions for current deployment" + type: array + items: + $ref: '#/definitions/DeploymentInfo.CloudRegion' + instance_types: + description: "Lists of available instance types" + type: object + properties: + small: + type: array + x-nullable: true + items: + $ref: '#/definitions/Deployment.InstanceType' + medium: + type: array + items: + $ref: '#/definitions/Deployment.InstanceType' + large: + type: array + items: + $ref: '#/definitions/Deployment.InstanceType' + volumes: + type: array + description: "Hardware disks info" + items: + type: object + properties: + volume_type: + type: string + description: "Volume type" + example: "gp3" + volume_description: + type: string + description: "Volume description" + example: "General purpose SSD disk" + min_size: + type: integer + description: "Size in GB" + example: 10 + max_size: + type: integer + description: "Size in GB" + example: 256 + price_monthly: + type: number + description: "Price for disk per month" + example: 0.1 + currency: + type: string + description: "Price currency" + example: "$" + is_default: + type: boolean + x-nullable: true + description: "Default volume" + example: false + + DeploymentInfo.CloudRegion: + type: object + properties: + code: + type: string + description: "unique parameter for DB" + example: "north_america" + name: + type: string + description: "Field for web" + example: "North America" + datacenters: + type: array + description: "List of datacenters for this region" + items: + type: object + properties: + code: + type: string + example: "ca-central-1" + location: + type: string + example:
"Canada (central)" + cloud_image: + $ref: '#/definitions/Deployment.CloudImage' + + Deployment.CloudImage: + type: object + properties: + image: + type: object + example: '{"server_image": "ami-078b3985bbc361448"}' + arch: + type: string + example: "amd64" + os_name: + type: string + example: "Ubuntu" + os_version: + type: string + example: "22.04 LTS" + updated_at: + type: string + format: datetime + + Deployment.InstanceType: + type: object + properties: + code: + type: string + example: "m5.2xlarge" + cpu: + type: integer + example: 8 + shared_cpu: + type: boolean + example: false + ram: + type: integer + example: 256 + price_hourly: + type: number + description: "Price for 1 instance by hour" + example: 0.01 + price_monthly: + type: number + description: "Price for 1 instance by month" + example: 1.2 + currency: + type: string + description: "Price currency" + example: "$" + + Response.DatabaseExtensions: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.DatabaseExtension' + meta: + $ref: '#/definitions/Meta.Pagination' + + Response.DatabaseExtension: + type: object + description: "Info about database extension" + properties: + name: + type: string + example: "Citus" + description: + type: string + x-nullable: true + example: "Citus is PostgreSQL extension that transforms..." + url: + type: string + x-nullable: true + example: "https://github.com/citusdata/citus" + image: + type: string + x-nullable: true + example: "citus.png" + postgres_min_version: + type: string + x-nullable: true + example: "11" + postgres_max_version: + type: string + x-nullable: true + example: "16" + contrib: + type: boolean + example: false + + Request.ClusterCreate: + type: object + description: "Request struct for cluster creation" + properties: + name: + type: string + example: "drm-prod-pgcluster" + description: + type: string + description: "Info about cluster" + auth_info: + type: object + description: "Info for deployment system authorization" + properties: + secret_id: + type: integer + example: 1 + project_id: + type: integer + description: "Project for new cluster" + environment_id: + type: integer + description: "Project environment" + envs: + type: array + items: + type: string + extra_vars: + type: array + items: + type: string + + Response.ClusterDefaultName: + type: object + description: "Response struct for cluster default name" + properties: + name: + type: string + example: "postgres-cluster-01" + + Response.ClusterCreate: + type: object + description: "Response struct for cluster creation" + properties: + cluster_id: + type: integer + description: "unique code for cluster" + operation_id: + type: integer + description: "operation id" + + Response.ClusterLogs: + type: object + description: "Logs for cluster" + properties: + logs: + type: string + description: "all available logs" + + Response.ClustersInfo: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/ClusterInfo' + meta: + $ref: '#/definitions/Meta.Pagination' + + ClusterInfo: + type: object + description: "Cluster info" + properties: + id: + type: integer + name: + type: string + example: "drm-prod-pgcluster" + description: + type: string + status: + type: string + example: "healthy" + creation_time: + type: string + format: date-time + example: "16.10.2023T11:20:00Z" + environment: + type: string + example: "production" + servers: + type: array + items: + $ref: "#/definitions/ClusterInfo.Instance" + postgres_version: + type: integer + format: int32 + 
example: 15 + cluster_location: + type: string + description: "Code of location" + example: "eu-north-1" + project_name: + type: string + description: "Project for cluster" + connection_info: + type: object + + ClusterInfo.AdditionalSettings: + type: object + description: "Additional settings for cluster" + properties: + connection_info: + type: object + + ClusterInfo.Instance: + type: object + description: "Instance info for current cluster" + properties: + id: + type: integer + name: + type: string + example: "pgnode1" + ip: + type: string + example: "10.128.64.141" + status: + type: string + role: + type: string + example: "leader" + timeline: + type: integer + format: int64 + example: 1 + x-nullable: true + lag: + type: integer + format: int64 + example: 0 + x-nullable: true + tags: + type: object + pending_restart: + type: boolean + example: false + x-nullable: true + + Request.ClusterReinit: + type: object + description: "Reinit cluster" + + Request.ClusterReload: + type: object + description: "Reload cluster" + + Request.ClusterRestart: + type: object + description: "Restart cluster" + + Request.ClusterStop: + type: object + description: "Stop cluster" + + Request.ClusterStart: + type: object + description: "Start cluster" + + Request.ClusterRemove: + type: object + description: "Remove cluster" + + Request.ProjectCreate: + type: object + properties: + name: + type: string + example: "default" + description: + type: string + example: "Default project" + + Request.ProjectPatch: + type: object + properties: + name: + type: string + x-nullable: true + description: + type: string + x-nullable: true + + Response.Project: + type: object + properties: + id: + type: integer + name: + type: string + description: + type: string + x-nullable: true + created_at: + type: string + format: date-time + example: "2023-10-16T11:20:00Z" + updated_at: + type: string + format: date-time + x-nullable: true + example: "2023-10-16T11:20:00Z" + + Response.ProjectsList: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.Project' + meta: + type: object + $ref: '#/definitions/Meta.Pagination' + + Request.SecretCreate: + type: object + properties: + project_id: + type: integer + example: 1 + name: + type: string + example: "aws key" + type: + $ref: '#/definitions/Secret.Type' + value: + type: object + $ref: '#/definitions/Request.SecretValue' + + Secret.Type: + type: string + enum: + - "aws" + - "gcp" + - "hetzner" + - "ssh_key" + - "digitalocean" + - "password" + - "azure" + + Request.SecretValue: + type: object + properties: + aws: + type: object + $ref: '#/definitions/Request.SecretValue.Aws' + x-nullable: true + gcp: + type: object + $ref: '#/definitions/Request.SecretValue.Gcp' + x-nullable: true + hetzner: + type: object + $ref: '#/definitions/Request.SecretValue.Hetzner' + x-nullable: true + ssh_key: + type: object + $ref: '#/definitions/Request.SecretValue.SshKey' + x-nullable: true + digitalocean: + type: object + $ref: '#/definitions/Request.SecretValue.DigitalOcean' + x-nullable: true + password: + type: object + $ref: '#/definitions/Request.SecretValue.Password' + x-nullable: true + azure: + type: object + $ref: '#/definitions/Request.SecretValue.Azure' + x-nullable: true + + Request.SecretValue.Aws: + type: object + properties: + AWS_ACCESS_KEY_ID: + type: string + AWS_SECRET_ACCESS_KEY: + type: string + + Request.SecretValue.Gcp: + type: object + properties: + GCP_SERVICE_ACCOUNT_CONTENTS: + type: string + + Request.SecretValue.Hetzner: + type: object +
properties: + HCLOUD_API_TOKEN: + type: string + + Request.SecretValue.SshKey: + type: object + properties: + SSH_PRIVATE_KEY: + type: string + + Request.SecretValue.DigitalOcean: + type: object + properties: + DO_API_TOKEN: + type: string + + Request.SecretValue.Password: + type: object + properties: + USERNAME: + type: string + PASSWORD: + type: string + + Request.SecretValue.Azure: + type: object + properties: + AZURE_SUBSCRIPTION_ID: + type: string + AZURE_CLIENT_ID: + type: string + AZURE_SECRET: + type: string + AZURE_TENANT: + type: string + + Request.SecretPatch: + type: object + properties: + name: + type: string + example: "aws key" + x-nullable: true + type: + type: string + example: "aws" + x-nullable: true + value: + type: string + example: "c2VjcmV0" + description: "Secret value in base64" + x-nullable: true + + Response.SecretInfo: + type: object + properties: + id: + type: integer + example: 1 + project_id: + type: integer + example: 1 + name: + type: string + example: "aws key" + type: + $ref: '#/definitions/Secret.Type' + created_at: + type: string + format: date-time + example: "2023-10-16T11:20:00Z" + updated_at: + type: string + format: date-time + x-nullable: true + example: "2023-10-16T11:20:00Z" + is_used: + type: boolean + example: true + used_by_clusters: + type: string + x-nullable: true + example: "mds-prod, drm-prod" + + Response.SecretInfoList: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.SecretInfo' + meta: + type: object + $ref: '#/definitions/Meta.Pagination' + + Response.Operation: + type: object + properties: + id: + type: integer + example: 1 + cluster_name: + type: string + example: "drm-prod-cluster" + started: + type: string + format: date-time + example: "2023-10-16T11:20:00Z" + finished: + type: string + format: date-time + example: "2023-10-16T11:20:00Z" + x-nullable: true + type: + type: string + example: "deploy" + status: + type: string + example: "success" + environment: + type: string + example: "production" + + Request.Environment: + type: object + properties: + name: + type: string + example: "production" + description: + type: string + example: "environment for production" + + Response.OperationsList: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.Operation' + meta: + type: object + $ref: '#/definitions/Meta.Pagination' + + Response.Environment: + type: object + properties: + id: + type: integer + example: 1 + name: + type: string + example: "production" + description: + type: string + x-nullable: true + example: "environment for production" + created_at: + type: string + format: date-time + example: "2023-10-16T11:20:00Z" + updated_at: + type: string + format: date-time + x-nullable: true + example: "2023-10-16T11:20:00Z" + + Response.EnvironmentsList: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.Environment' + meta: + type: object + $ref: '#/definitions/Meta.Pagination' + + Response.PostgresVersions: + type: object + properties: + data: + type: array + items: + $ref: '#/definitions/Response.PostgresVersion' + + Response.PostgresVersion: + type: object + properties: + major_version: + type: integer + example: 10 + release_date: + type: string + format: date + example: "2017-10-05" + end_of_life: + type: string + format: date + example: "2022-11-10" + + Request.CreateSetting: + type: object + description: "Create new setting" + properties: + name: + type: string + value: + type: object + +
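# --- Editor's illustration (not part of the original spec) ---
# A minimal sketch of calling the POST /secrets endpoint defined above,
# using the Request.SecretCreate and Request.SecretValue.Aws shapes.
# It assumes the service listens on the default 0.0.0.0:8080 from
# config.go and that no path prefix or extra auth header is required;
# verify the spec's basePath and security settings before relying on
# this. The key values are placeholders.
#
#   curl -s -X POST "http://localhost:8080/secrets" \
#     -H "Content-Type: application/json" \
#     -d '{
#           "project_id": 1,
#           "name": "aws key",
#           "type": "aws",
#           "value": {
#             "aws": {
#               "AWS_ACCESS_KEY_ID": "<placeholder>",
#               "AWS_SECRET_ACCESS_KEY": "<placeholder>"
#             }
#           }
#         }'
# --------------------------------------------------------------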
Request.ChangeSetting: + type: object + description: "Change setting" + properties: + value: + type: object + x-nullable: true + + Response.Setting: + type: object + description: "Setting" + properties: + id: + type: integer + name: + type: string + value: + type: object + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + x-nullable: true + + Response.Settings: + type: object + description: "List of settings" + properties: + data: + type: array + items: + $ref: '#/definitions/Response.Setting' + meta: + type: object + $ref: '#/definitions/Meta.Pagination' diff --git a/console/service/env.sh b/console/service/env.sh new file mode 100755 index 000000000..ff8041393 --- /dev/null +++ b/console/service/env.sh @@ -0,0 +1,7 @@ +export PG_CONSOLE_DB_MIGRATIONDIR='./db/migrations' +export PG_CONSOLE_LOGGER_LEVEL=TRACE +export PG_CONSOLE_DB_DBNAME=db_console +export PG_CONSOLE_DOCKER_LOGDIR='/home/nikolay.gurban/log_dir' +export PG_CONSOLE_DB_PASSWORD=postgres +export PG_CONSOLE_LOGWATCHER_RUNEVERY=10m +export PG_CONSOLE_CLUSTERWATCHER_RUNEVERY=10m \ No newline at end of file diff --git a/console/service/go.mod b/console/service/go.mod new file mode 100644 index 000000000..6f29f92f3 --- /dev/null +++ b/console/service/go.mod @@ -0,0 +1,82 @@ +module postgresql-cluster-console + +go 1.24 + +require ( + github.com/docker/docker v26.1.5+incompatible + github.com/docker/go-connections v0.5.0 + github.com/gdex-lab/go-render v1.0.1 + github.com/go-openapi/errors v0.22.1 + github.com/go-openapi/loads v0.22.0 + github.com/go-openapi/runtime v0.28.0 + github.com/go-openapi/strfmt v0.23.0 + github.com/google/uuid v1.6.0 + github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e + github.com/jackc/pgx/v5 v5.7.3 + github.com/jessevdk/go-flags v1.5.0 + github.com/kelseyhightower/envconfig v1.4.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/pressly/goose/v3 v3.24.1 + github.com/rs/zerolog v1.32.0 + github.com/segmentio/asm v1.2.0 + go.openly.dev/pointy v1.3.0 + golang.org/x/sync v0.12.0 + gotest.tools/v3 v3.5.2 +) + +require ( + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + go.mongodb.org/mongo-driver v1.14.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.37.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect + google.golang.org/grpc v1.67.3 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/gc/v3 v3.0.0 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.8.2 // indirect + modernc.org/strutil v1.2.1 // indirect +) diff --git a/console/service/go.sum b/console/service/go.sum new file mode 100644 index 000000000..3803bb1bf --- /dev/null +++ b/console/service/go.sum @@ -0,0 +1,232 @@ +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/gdex-lab/go-render v1.0.1 h1:xk5dn5b0vAUntzcLD57sVpw6crIjkBaVHJxDd/KN2Mc= +github.com/gdex-lab/go-render v1.0.1/go.mod h1:0Cgpq7v7yfmmvplBne9VgJl97YlpT8B9RlgcjdF+Uxc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e 
h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.3 h1:PO1wNKj/bTAwxSJnO1Z4Ai8j4magtqg2SLNjEDzcXQo= +github.com/jackc/pgx/v5 v5.7.3/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pressly/goose/v3 v3.24.1 h1:bZmxRco2uy5uu5Ng1MMVEfYsFlrMJI+e/VMXHQ3C4LY= +github.com/pressly/goose/v3 v3.24.1/go.mod h1:rEWreU9uVtt0DHCyLzF9gRcWiiTF/V+528DV+4DORug= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.14.1 h1:GoDgWVl+4rOn3Q3mYwBuWhaEWcX5A04A97ikIl0vhzQ= +go.mongodb.org/mongo-driver v1.14.1/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.openly.dev/pointy v1.3.0 h1:keht3ObkbDNdY8PWPwB7Kcqk+MAlNStk5kXZTxukE68= +go.openly.dev/pointy v1.3.0/go.mod h1:rccSKiQDQ2QkNfSVT2KG8Budnfhf3At8IWxy/3ElYes= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 h1:IFnXJq3UPB3oBREOodn1v1aGQeZYQclEmvWRMN0PSsY= +google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:c8q6Z6OCqnfVIqUFJkCzKcrj8eCvUrz+K4KRzSTuANg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= +google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +modernc.org/gc/v3 v3.0.0 h1:JNEAEd0e/lnR1nlJemLPwS44KfBLBp4SAvZEZFaxfYU= +modernc.org/gc/v3 v3.0.0/go.mod h1:LG5UO1Ran4OO0JRKz2oNiXhR5nNrgz0PzH7UKhz0aMU= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI= +modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU= +modernc.org/sqlite v1.34.1 h1:u3Yi6M0N8t9yKRDwhXcyp1eS5/ErhPTBggxWFuR6Hfk= +modernc.org/sqlite v1.34.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/console/service/internal/configuration/config.go b/console/service/internal/configuration/config.go new file mode 100644 index 000000000..2667f1502 --- /dev/null +++ b/console/service/internal/configuration/config.go @@ -0,0 +1,77 @@ +package configuration + +import ( + "fmt" + "time" + + "github.com/kelseyhightower/envconfig" +) + +type Config struct { + Logger struct { + Level string `default:"DEBUG" desc:"Log level. Accepted values: [TRACE, DEBUG, INFO, WARN, ERROR, FATAL, PANIC]"` + } + Http struct { + Host string `default:"0.0.0.0" desc:"Accepted host for connection. '0.0.0.0' for all hosts"` + Port int `default:"8080" desc:"Listening port"` + WriteTimeout time.Duration `default:"10s" desc:"Maximum duration before timing out write of the response"` + ReadTimeout time.Duration `default:"10s" desc:"Maximum duration before timing out read of the request"` + } + Https struct { + IsUsed bool `default:"false" desc:"Flag to turn https on/off"` + Host string `default:"0.0.0.0" desc:"Accepted host for connection.
'0.0.0.0' for all hosts"` + Port int `default:"8081" desc:"Listening port"` + CACert string `default:"/etc/pg_console/cacert.pem" desc:"The certificate authority file to be used with mutual tls auth"` + ServerCert string `default:"/etc/pg_console/server-cert.pem" desc:"The certificate to use for secure connections"` + ServerKey string `default:"/etc/pg_console/server-key.pem" desc:"The private key to use for secure connections"` + } + Authorization struct { + Token string `default:"auth_token" desc:"Authorization token for REST API"` + } + Db struct { + Host string `default:"localhost" desc:"Database host"` + Port uint16 `default:"5432" desc:"Database port"` + DbName string `default:"postgres" desc:"Database name"` + User string `default:"postgres" desc:"Database user name"` + Password string `default:"postgres-pass" desc:"Database user password"` + MaxConns int32 `default:"10" desc:"MaxConns is the maximum size of the pool"` + MaxConnLifeTime time.Duration `default:"60s" desc:"MaxConnLifetime is the duration since creation after which a connection will be automatically closed"` + MaxConnIdleTime time.Duration `default:"60s" desc:"MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check"` + MigrationDir string `default:"/etc/db/migrations" desc:"Path to directory with migration scripts"` + } + EncryptionKey string `default:"super_secret" desc:"Encryption key for secret storage"` + Docker struct { + Host string `default:"unix:///var/run/docker.sock" desc:"Docker host"` + LogDir string `default:"/tmp/ansible" desc:"Directory inside docker container for ansible json log"` + Image string `default:"autobase/automation:2.2.0" desc:"Docker image for autobase automation"` + } + LogWatcher struct { + RunEvery time.Duration `default:"1m" desc:"LogWatcher run interval"` + AnalyzePast time.Duration `default:"48h" desc:"LogWatcher analyzes operations with created_at > now() - AnalyzePast"` + } + ClusterWatcher struct { + RunEvery time.Duration `default:"1m" desc:"ClusterWatcher run interval"` + PoolSize int64 `default:"4" desc:"Number of concurrent async requests from ClusterWatcher"` + } +} + +const cfgPrefix = "PG_CONSOLE" + +func ReadConfig() (*Config, error) { + cfg := Config{} + + err := envconfig.Process(cfgPrefix, &cfg) + if err != nil { + return nil, fmt.Errorf("failed to parse config: %s", err.Error()) + } + + return &cfg, nil +} + +func PrintUsage() { + cfg := Config{} + err := envconfig.Usage(cfgPrefix, &cfg) + if err != nil { + fmt.Printf("failed to print envconfig usage: %s", err.Error()) + } +} diff --git a/console/service/internal/controllers/cluster/delete_cluster.go b/console/service/internal/controllers/cluster/delete_cluster.go new file mode 100644 index 000000000..57235d2c1 --- /dev/null +++ b/console/service/internal/controllers/cluster/delete_cluster.go @@ -0,0 +1,28 @@ +package cluster + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" +) + +type deleteClusterHandler struct { + db storage.IStorage +} + +func NewDeleteClusterHandler(db storage.IStorage) cluster.DeleteClustersIDHandler { + return &deleteClusterHandler{ + db: db, + } +} + +func (h *deleteClusterHandler) Handle(param cluster.DeleteClustersIDParams) middleware.Responder { + err := h.db.DeleteClusterSoft(param.HTTPRequest.Context(), param.ID) + if err != nil { + return
cluster.NewDeleteClustersIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return cluster.NewDeleteClustersIDNoContent() +} diff --git a/console/service/internal/controllers/cluster/delete_server.go b/console/service/internal/controllers/cluster/delete_server.go new file mode 100644 index 000000000..87467ab17 --- /dev/null +++ b/console/service/internal/controllers/cluster/delete_server.go @@ -0,0 +1,42 @@ +package cluster + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type deleteServerHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewDeleteServerHandler(db storage.IStorage, log zerolog.Logger) cluster.DeleteServersIDHandler { + return &deleteServerHandler{ + db: db, + log: log, + } +} + +func (h *deleteServerHandler) Handle(param cluster.DeleteServersIDParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + deletedServer, err := h.db.GetServer(param.HTTPRequest.Context(), param.ID) + if err != nil { + localLog.Warn().Err(err).Msg("failed to get server from db") + } + clusterID := int64(-1) + if deletedServer != nil { + clusterID = deletedServer.ClusterID + } + err = h.db.DeleteServer(param.HTTPRequest.Context(), param.ID) + if err != nil { + return cluster.NewDeleteServersIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return cluster.NewDeleteServersIDNoContent().WithXClusterID(clusterID) +} diff --git a/console/service/internal/controllers/cluster/get_cluster.go b/console/service/internal/controllers/cluster/get_cluster.go new file mode 100644 index 000000000..f6d842edd --- /dev/null +++ b/console/service/internal/controllers/cluster/get_cluster.go @@ -0,0 +1,58 @@ +package cluster + +import ( + "context" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type getClusterHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewGetClusterHandler(db storage.IStorage, log zerolog.Logger) cluster.GetClustersIDHandler { + return &getClusterHandler{ + db: db, + log: log, + } +} + +func (h *getClusterHandler) Handle(param cluster.GetClustersIDParams) middleware.Responder { + cl, err := h.db.GetCluster(param.HTTPRequest.Context(), param.ID) + if err != nil { + return cluster.NewGetClustersIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + resp, err := getClusterInfo(param.HTTPRequest.Context(), h.db, cl) + if err != nil { + return cluster.NewGetClustersIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return cluster.NewGetClustersIDOK().WithPayload(resp) +} + +func getClusterInfo(ctx context.Context, db storage.IStorage, cl *storage.Cluster) (*models.ClusterInfo, error) { + project, err := db.GetProject(ctx, cl.ProjectID) + if err != nil { + return nil, err + } + + environment, err := db.GetEnvironment(ctx, cl.EnvironmentID) + if err != nil { + return nil, err + } + + servers, err 
:= db.GetClusterServers(ctx, cl.ID) + if err != nil { + return nil, err + } + + return convert.ClusterToSwagger(cl, servers, environment.Name, project.Name), nil +} diff --git a/console/service/internal/controllers/cluster/get_cluster_default_name.go b/console/service/internal/controllers/cluster/get_cluster_default_name.go new file mode 100644 index 000000000..1288d2f26 --- /dev/null +++ b/console/service/internal/controllers/cluster/get_cluster_default_name.go @@ -0,0 +1,34 @@ +package cluster + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type getClusterDefaultNameHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewGetClusterDefaultNameHandler(db storage.IStorage, log zerolog.Logger) cluster.GetClustersDefaultNameHandler { + return &getClusterDefaultNameHandler{ + db: db, + log: log, + } +} + +func (h *getClusterDefaultNameHandler) Handle(param cluster.GetClustersDefaultNameParams) middleware.Responder { + name, err := h.db.GetDefaultClusterName(param.HTTPRequest.Context()) + if err != nil { + return cluster.NewGetClustersDefaultNameBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return cluster.NewGetClustersDefaultNameOK().WithPayload(&models.ResponseClusterDefaultName{ + Name: name, + }) +} diff --git a/console/service/internal/controllers/cluster/get_clusters.go b/console/service/internal/controllers/cluster/get_clusters.go new file mode 100644 index 000000000..88cc1968c --- /dev/null +++ b/console/service/internal/controllers/cluster/get_clusters.go @@ -0,0 +1,108 @@ +package cluster + +import ( + "context" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/cluster" + "time" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type getClustersHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewGetClustersHandler(db storage.IStorage, log zerolog.Logger) cluster.GetClustersHandler { + return &getClustersHandler{ + db: db, + log: log, + } +} + +func (h *getClustersHandler) Handle(param cluster.GetClustersParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + + project, err := h.db.GetProject(param.HTTPRequest.Context(), param.ProjectID) + if err != nil { + return cluster.NewGetClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + clusters, meta, err := h.db.GetClusters(param.HTTPRequest.Context(), &storage.GetClustersReq{ + ProjectID: param.ProjectID, + Name: param.Name, + SortBy: param.SortBy, + Status: param.Status, + Location: param.Location, + ServerCount: param.ServerCount, + PostgresVersion: param.PostgresVersion, + EnvironmentID: func() *int64 { + if param.Environment == nil { + return nil + } + environment, err := h.db.GetEnvironmentByName(param.HTTPRequest.Context(), *param.Environment) + if err != nil { + localLog.Error().Err(err).Msg("failed to get environment from db") + + return nil + } + + return &environment.ID + }(), + CreatedAtFrom: 
(*time.Time)(param.CreatedAtFrom), + CreatedAtTo: (*time.Time)(param.CreatedAtTo), + Limit: param.Limit, + Offset: param.Offset, + }) + if err != nil { + return cluster.NewGetClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + clustersResp := models.ResponseClustersInfo{ + Data: make([]*models.ClusterInfo, 0, len(clusters)), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + } + + cache := make(map[int64]string) + for _, cl := range clusters { + servers, err := h.db.GetClusterServers(param.HTTPRequest.Context(), cl.ID) + if err != nil { + return cluster.NewGetClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + environmentCode, err := h.getEnvironmentCode(param.HTTPRequest.Context(), cl.EnvironmentID, cache) + if err != nil { + return cluster.NewGetClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + clustersResp.Data = append(clustersResp.Data, convert.ClusterToSwagger(&cl, servers, environmentCode, project.Name)) + } + + return cluster.NewGetClustersOK().WithPayload(&clustersResp) +} + +func (h *getClustersHandler) getEnvironmentCode(ctx context.Context, environmentID int64, cache map[int64]string) (string, error) { + code, ok := cache[environmentID] + if ok { + return code, nil + } + + environment, err := h.db.GetEnvironment(ctx, environmentID) + if err != nil { + return "", err + } + + cache[environmentID] = environment.Name + + return environment.Name, nil +} diff --git a/console/service/internal/controllers/cluster/post_cluster.go b/console/service/internal/controllers/cluster/post_cluster.go new file mode 100644 index 000000000..b67ac6984 --- /dev/null +++ b/console/service/internal/controllers/cluster/post_cluster.go @@ -0,0 +1,249 @@ +package cluster + +import ( + "encoding/json" + "fmt" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/watcher" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/models" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/cluster" + "strconv" + "strings" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" + "github.com/segmentio/asm/base64" + "go.openly.dev/pointy" +) + +type postClusterHandler struct { + db storage.IStorage + dockerManager xdocker.IManager + logCollector watcher.LogCollector + log zerolog.Logger + cfg *configuration.Config +} + +func NewPostClusterHandler(db storage.IStorage, dockerManager xdocker.IManager, logCollector watcher.LogCollector, cfg *configuration.Config, log zerolog.Logger) cluster.PostClustersHandler { + return &postClusterHandler{ + db: db, + dockerManager: dockerManager, + logCollector: logCollector, + log: log, + cfg: cfg, + } +} + +func (h *postClusterHandler) Handle(param cluster.PostClustersParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + oldCluster, err := h.db.GetClusterByName(param.HTTPRequest.Context(), param.Body.Name) + if err != nil { + localLog.Warn().Err(err).Msg("can't get cluster by name") + } + if oldCluster != nil { + localLog.Trace().Any("old_cluster", oldCluster).Msg("cluster already exists") + + return 
cluster.NewPostClustersBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("cluster %s already exists", param.Body.Name), controllers.BaseError)) + } + + var ( + secretEnvs []string + secretID *int64 + paramLocation ParamLocation + ) + if param.Body.AuthInfo != nil { + secretEnvs, paramLocation, err = getSecretEnvs(param.HTTPRequest.Context(), h.log, h.db, param.Body.AuthInfo.SecretID, h.cfg.EncryptionKey) + if err != nil { + localLog.Error().Err(err).Msg("failed to get secret") + + return cluster.NewPostClustersBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("failed to get secret: %s", err.Error()), controllers.BaseError)) + } + secretID = &param.Body.AuthInfo.SecretID + localLog.Trace().Strs("secretEnvs", secretEnvs).Msg("got secret") + } else { + localLog.Debug().Msg("AuthInfo is nil, secret is expected in envs from web") + } + + ansibleLogEnv := h.getAnsibleLogEnv(param.Body.Name) + localLog.Trace().Strs("file_log", ansibleLogEnv).Msg("got file log name") + + if paramLocation == EnvParamLocation { + param.Body.Envs = append(param.Body.Envs, secretEnvs...) + } else if paramLocation == ExtraVarsParamLocation { + param.Body.ExtraVars = append(param.Body.ExtraVars, secretEnvs...) + } + param.Body.Envs = append(param.Body.Envs, ansibleLogEnv...) + param.Body.ExtraVars = append(param.Body.ExtraVars, "patroni_cluster_name="+param.Body.Name) + + h.addProxySettings(&param, localLog) + + const ( + LocationExtraVar = "server_location" + CloudProviderExtraVar = "cloud_provider" + ServersExtraVar = "server_count" + PostgreSqlVersionExtraVar = "postgresql_version" + InventoryJsonEnv = "ANSIBLE_INVENTORY_JSON" + ) + + var ( + serverCount int + inventoryJsonVal []byte + ) + + if getValFromVars(param.Body.ExtraVars, CloudProviderExtraVar) == "" { + inventoryJsonVal = []byte(getValFromVars(param.Body.Envs, InventoryJsonEnv)) + var inventoryJson InventoryJson + err = json.Unmarshal(inventoryJsonVal, &inventoryJson) + if err != nil { + localLog.Debug().Err(err).Str("inventory_json_val", string(inventoryJsonVal)).Msg("failed to parse inventory json, trying base64 decode") + inventoryJsonVal, err = base64.StdEncoding.DecodeString(string(inventoryJsonVal)) + if err != nil { + localLog.Debug().Err(err).Msg("failed to base64 decode inventory json") + inventoryJsonVal = nil // reset to nil so the DB insert stays valid + } else { + err = json.Unmarshal(inventoryJsonVal, &inventoryJson) + if err != nil { + localLog.Debug().Err(err).Str("inventory_json_val", string(inventoryJsonVal)).Msg("failed to parse inventory json") + inventoryJsonVal = nil // reset to nil so the DB insert stays valid + } else { + serverCount = len(inventoryJson.All.Children.Master.Hosts) + len(inventoryJson.All.Children.Replica.Hosts) + } + } + } else { + serverCount = len(inventoryJson.All.Children.Master.Hosts) + len(inventoryJson.All.Children.Replica.Hosts) + } + } else { + serverCount = getIntValFromVars(param.Body.ExtraVars, ServersExtraVar) + } + + createdCluster, err := h.db.CreateCluster(param.HTTPRequest.Context(), &storage.CreateClusterReq{ + ProjectID: param.Body.ProjectID, + EnvironmentID: param.Body.EnvironmentID, + Name: param.Body.Name, + Description: param.Body.Description, + SecretID: secretID, + ExtraVars: param.Body.ExtraVars, + Location: getValFromVars(param.Body.ExtraVars, LocationExtraVar), + ServerCount: serverCount, + PostgreSqlVersion: getIntValFromVars(param.Body.ExtraVars, PostgreSqlVersionExtraVar), + Status: "deploying", + Inventory: inventoryJsonVal, + }) + if err != nil { + return
cluster.NewPostClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + localLog.Info().Any("cluster", createdCluster).Msg("cluster was created") + + defer func() { + if err != nil { + _, err = h.db.UpdateCluster(param.HTTPRequest.Context(), &storage.UpdateClusterReq{ + ID: createdCluster.ID, + Status: pointy.String(storage.ClusterStatusFailed), + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to update cluster") + } + } + }() + + var dockerId xdocker.InstanceID + dockerId, err = h.dockerManager.ManageCluster(param.HTTPRequest.Context(), &xdocker.ManageClusterConfig{ + Envs: param.Body.Envs, + ExtraVars: param.Body.ExtraVars, + Mounts: []xdocker.Mount{ + { + DockerPath: ansibleLogDir, + HostPath: h.cfg.Docker.LogDir, + }, + }, + }) + if err != nil { + return cluster.NewPostClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + localLog.Info().Str("docker_id", string(dockerId)).Msg("docker was started") + + var createdOperation *storage.Operation + createdOperation, err = h.db.CreateOperation(param.HTTPRequest.Context(), &storage.CreateOperationReq{ + ProjectID: param.Body.ProjectID, + ClusterID: createdCluster.ID, + DockerCode: string(dockerId), + Type: storage.OperationTypeDeploy, + Cid: cid, + }) + if err != nil { + return cluster.NewPostClustersBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + localLog.Info().Any("operation", createdOperation).Msg("operation was created") + h.logCollector.StoreInDb(createdOperation.ID, dockerId, cid) + + return cluster.NewPostClustersOK().WithPayload(&models.ResponseClusterCreate{ + ClusterID: createdCluster.ID, + OperationID: createdOperation.ID, + }) +} + +func (h *postClusterHandler) addProxySettings(param *cluster.PostClustersParams, localLog zerolog.Logger) { + const proxySettingName = "proxy_env" + proxySetting, err := h.db.GetSettingByName(param.HTTPRequest.Context(), proxySettingName) + if err != nil { + localLog.Warn().Err(err).Msg("failed to get proxy setting") + } + if proxySetting != nil { + proxySettingVal, err := json.Marshal(proxySetting.Value) + if err != nil { + localLog.Error().Any("proxy_env", proxySetting.Value).Err(err).Msg("failed to marshal proxy_env") + } else { + param.Body.ExtraVars = append(param.Body.ExtraVars, proxySettingName+"="+string(proxySettingVal)) + localLog.Info().Str("proxy_env", string(proxySettingVal)).Msg("proxy_env was added to --extra-vars") + } + } +} + +const ansibleLogDir = "/tmp/ansible" + +func (h *postClusterHandler) getAnsibleLogEnv(clusterName string) []string { + return []string{"ANSIBLE_JSON_LOG_FILE=" + ansibleLogDir + "/" + clusterName + ".json"} +} + +// getValFromVars finds the first "key=value" entry whose name matches key (case-insensitive prefix match) and returns its value. +func getValFromVars(vars []string, key string) string { + for _, extraVar := range vars { + if strings.HasPrefix(strings.ToLower(extraVar), strings.ToLower(key)) { + keyVal := strings.SplitN(extraVar, "=", 2) // split on the first '=' only, so values containing '=' (e.g. base64 padding) are preserved + if len(keyVal) != 2 { + return "" + } + + return keyVal[1] + } + } + + return "" +} + +func getIntValFromVars(vars []string, key string) int { + valStr := getValFromVars(vars, key) + valInt, err := strconv.Atoi(valStr) + if err != nil { + return 0 + } + + return valInt +} + +type InventoryJson struct { + All struct { + Children struct { + Master struct { + Hosts map[string]interface{} `json:"hosts"` + } `json:"master"` + Replica struct { + Hosts map[string]interface{} `json:"hosts"` + } `json:"replica"` + } `json:"children"` + } `json:"all"` +} diff --git
a/console/service/internal/controllers/cluster/post_cluster_refresh.go b/console/service/internal/controllers/cluster/post_cluster_refresh.go new file mode 100644 index 000000000..445b70b07 --- /dev/null +++ b/console/service/internal/controllers/cluster/post_cluster_refresh.go @@ -0,0 +1,41 @@ +package cluster + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/watcher" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type postClusterRefreshHandler struct { + db storage.IStorage + log zerolog.Logger + clusterWatcher watcher.ClusterWatcher +} + +func NewPostClusterRefreshHandler(db storage.IStorage, log zerolog.Logger, clusterWatcher watcher.ClusterWatcher) cluster.PostClustersIDRefreshHandler { + return &postClusterRefreshHandler{ + db: db, + log: log, + clusterWatcher: clusterWatcher, + } +} + +func (h *postClusterRefreshHandler) Handle(param cluster.PostClustersIDRefreshParams) middleware.Responder { + cl, err := h.db.GetCluster(param.HTTPRequest.Context(), param.ID) + if err != nil { + return cluster.NewPostClustersIDRefreshBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + h.clusterWatcher.HandleCluster(param.HTTPRequest.Context(), cl) + + resp, err := getClusterInfo(param.HTTPRequest.Context(), h.db, cl) + if err != nil { + return cluster.NewPostClustersIDRefreshBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return cluster.NewPostClustersIDRefreshOK().WithPayload(resp) +} diff --git a/console/service/internal/controllers/cluster/remove_cluster.go b/console/service/internal/controllers/cluster/remove_cluster.go new file mode 100644 index 000000000..1453d1f80 --- /dev/null +++ b/console/service/internal/controllers/cluster/remove_cluster.go @@ -0,0 +1,88 @@ +package cluster + +import ( + "encoding/base64" + "encoding/json" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/watcher" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/cluster" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type removeClusterHandler struct { + db storage.IStorage + dockerManager xdocker.IManager + logCollector watcher.LogCollector + log zerolog.Logger + cfg *configuration.Config +} + +func NewRemoveClusterHandler(db storage.IStorage, dockerManager xdocker.IManager, logCollector watcher.LogCollector, cfg *configuration.Config, log zerolog.Logger) cluster.PostClustersIDRemoveHandler { + return &removeClusterHandler{ + db: db, + dockerManager: dockerManager, + logCollector: logCollector, + log: log, + cfg: cfg, + } +} + +func (h *removeClusterHandler) Handle(param cluster.PostClustersIDRemoveParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + clusterInfo, err := h.db.GetCluster(param.HTTPRequest.Context(), param.ID) + if err != nil { + return cluster.NewPostClustersIDRemoveBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + var extraVars []string + + err = json.Unmarshal(clusterInfo.ExtraVars, &extraVars) + if err != nil { + return 
cluster.NewPostClustersIDRemoveBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + extraVars = append(extraVars, "state=absent") + + var ( + envs []string + paramLocation ParamLocation + ) + if clusterInfo.SecretID != nil { + envs, paramLocation, err = getSecretEnvs(param.HTTPRequest.Context(), h.log, h.db, *clusterInfo.SecretID, h.cfg.EncryptionKey) + if err != nil { + return cluster.NewPostClustersIDRemoveBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + if paramLocation == ExtraVarsParamLocation { + extraVars = append(extraVars, envs...) + } + } + envs = append(envs, "patroni_cluster_name="+clusterInfo.Name) + if len(clusterInfo.Inventory) != 0 { + envs = append(envs, "ANSIBLE_INVENTORY_JSON="+base64.StdEncoding.EncodeToString(clusterInfo.Inventory)) + } + localLog.Trace().Strs("envs", envs).Msg("got envs") + + dockerId, err := h.dockerManager.ManageCluster(param.HTTPRequest.Context(), &xdocker.ManageClusterConfig{ + Envs: envs, + ExtraVars: extraVars, + }) + if err != nil { + return cluster.NewPostClustersIDRemoveBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + localLog.Trace().Str("docker_id", string(dockerId)).Msg("docker was started") + + err = h.db.DeleteCluster(param.HTTPRequest.Context(), clusterInfo.ID) + if err != nil { + return cluster.NewPostClustersIDRemoveBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + h.logCollector.PrintToConsole(dockerId, cid) + + return cluster.NewPostClustersIDRemoveNoContent() +} diff --git a/console/service/internal/controllers/cluster/utils.go b/console/service/internal/controllers/cluster/utils.go new file mode 100644 index 000000000..7dc32f46f --- /dev/null +++ b/console/service/internal/controllers/cluster/utils.go @@ -0,0 +1,99 @@ +package cluster + +import ( + "context" + "encoding/json" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/pkg/tracer" + + "github.com/rs/zerolog" +) + +type ParamLocation uint8 + +const ( + UnknownParamLocation ParamLocation = 0 + EnvParamLocation ParamLocation = 1 + ExtraVarsParamLocation ParamLocation = 2 +) + +func getSecretEnvs(ctx context.Context, log zerolog.Logger, db storage.IStorage, secretID int64, secretKey string) ([]string, ParamLocation, error) { + localLog := log.With().Str("cid", ctx.Value(tracer.CtxCidKey{}).(string)).Logger() + secretView, err := db.GetSecret(ctx, secretID) + if err != nil { + return nil, UnknownParamLocation, err + } + localLog.Trace().Any("secret_view", secretView).Msg("got secret view from db") + secretVal, err := db.GetSecretVal(ctx, secretID, secretKey) + if err != nil { + return nil, UnknownParamLocation, err + } + localLog.Trace().Msgf("secretVal %s", string(secretVal)) + + switch models.SecretType(secretView.Type) { + case models.SecretTypeAws: + var sec models.RequestSecretValueAws + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"AWS_ACCESS_KEY_ID=" + sec.AWSACCESSKEYID, "AWS_SECRET_ACCESS_KEY=" + sec.AWSSECRETACCESSKEY}, EnvParamLocation, nil + case models.SecretTypeGcp: + var sec models.RequestSecretValueGcp + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"GCP_SERVICE_ACCOUNT_CONTENTS=" + sec.GCPSERVICEACCOUNTCONTENTS}, EnvParamLocation, nil + case models.SecretTypeAzure: + var sec 
models.RequestSecretValueAzure + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{ + "AZURE_SUBSCRIPTION_ID=" + sec.AZURESUBSCRIPTIONID, + "AZURE_CLIENT_ID=" + sec.AZURECLIENTID, + "AZURE_SECRET=" + sec.AZURESECRET, + "AZURE_TENANT=" + sec.AZURETENANT, + }, EnvParamLocation, nil + case models.SecretTypeDigitalocean: + var sec models.RequestSecretValueDigitalOcean + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"DO_API_TOKEN=" + sec.DOAPITOKEN}, EnvParamLocation, nil + case models.SecretTypeHetzner: + var sec models.RequestSecretValueHetzner + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"HCLOUD_API_TOKEN=" + sec.HCLOUDAPITOKEN}, EnvParamLocation, nil + case models.SecretTypeSSHKey: + var sec models.RequestSecretValueSSHKey + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"SSH_PRIVATE_KEY_CONTENT=" + sec.SSHPRIVATEKEY}, EnvParamLocation, nil + case models.SecretTypePassword: + var sec models.RequestSecretValuePassword + err = json.Unmarshal(secretVal, &sec) + if err != nil { + return nil, UnknownParamLocation, err + } + + return []string{"ansible_user=" + sec.USERNAME, "ansible_ssh_pass=" + sec.PASSWORD, "ansible_sudo_pass=" + sec.PASSWORD}, ExtraVarsParamLocation, nil + default: + return nil, UnknownParamLocation, nil + } +} diff --git a/console/service/internal/controllers/dictionary/get_database_extensions.go b/console/service/internal/controllers/dictionary/get_database_extensions.go new file mode 100644 index 000000000..92e7c42d0 --- /dev/null +++ b/console/service/internal/controllers/dictionary/get_database_extensions.go @@ -0,0 +1,42 @@ +package dictionary + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/dictionary" + + "github.com/go-openapi/runtime/middleware" +) + +type getDbExtensionsHandler struct { + db storage.IStorage +} + +func NewGetDbExtensionsHandler(db storage.IStorage) dictionary.GetDatabaseExtensionsHandler { + return &getDbExtensionsHandler{ + db: db, + } +} + +func (h *getDbExtensionsHandler) Handle(param dictionary.GetDatabaseExtensionsParams) middleware.Responder { + extensions, meta, err := h.db.GetExtensions(param.HTTPRequest.Context(), &storage.GetExtensionsReq{ + Type: param.ExtensionType, + PostgresVersion: param.PostgresVersion, + Limit: param.Limit, + Offset: param.Offset, + }) + if err != nil { + return dictionary.NewGetDatabaseExtensionsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return dictionary.NewGetDatabaseExtensionsOK().WithPayload(&models.ResponseDatabaseExtensions{ + Data: convert.DbExtensionsToSwagger(extensions), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/dictionary/get_external_deployments.go b/console/service/internal/controllers/dictionary/get_external_deployments.go new file mode 100644 index 000000000..e3a83e3dc --- /dev/null +++ b/console/service/internal/controllers/dictionary/get_external_deployments.go @@ -0,0 +1,47 @@ +package dictionary + +import ( + 
"postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/dictionary" + + "github.com/go-openapi/runtime/middleware" +) + +type getExternalDeploymentsHandler struct { + db storage.IStorage +} + +func NewGetExternalDeploymentsHandler(db storage.IStorage) dictionary.GetExternalDeploymentsHandler { + return &getExternalDeploymentsHandler{ + db: db, + } +} + +func (h *getExternalDeploymentsHandler) Handle(param dictionary.GetExternalDeploymentsParams) middleware.Responder { + cloudProviders, metaPagination, err := h.db.GetCloudProviders(param.HTTPRequest.Context(), param.Limit, param.Offset) + if err != nil { + return dictionary.NewGetExternalDeploymentsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + resp := &models.ResponseDeploymentsInfo{ + Data: make([]*models.ResponseDeploymentInfo, 0, len(cloudProviders)), + Meta: &models.MetaPagination{ + Count: &metaPagination.Count, + Limit: &metaPagination.Limit, + Offset: &metaPagination.Offset, + }, + } + for _, cloudProvider := range cloudProviders { + cloudProviderInfo, err := h.db.GetCloudProviderInfo(param.HTTPRequest.Context(), cloudProvider.Code) + if err != nil { + return dictionary.NewGetDatabaseExtensionsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + resp.Data = append(resp.Data, convert.ProviderInfoToSwagger(cloudProviderInfo, cloudProvider.Description, cloudProvider.ProviderImage)) + } + + return dictionary.NewGetExternalDeploymentsOK().WithPayload(resp) +} diff --git a/console/service/internal/controllers/dictionary/get_postgres_versions.go b/console/service/internal/controllers/dictionary/get_postgres_versions.go new file mode 100644 index 000000000..c8af12728 --- /dev/null +++ b/console/service/internal/controllers/dictionary/get_postgres_versions.go @@ -0,0 +1,32 @@ +package dictionary + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/dictionary" + + "github.com/go-openapi/runtime/middleware" +) + +type getPostgresVersionsHandler struct { + db storage.IStorage +} + +func NewGetPostgresVersions(db storage.IStorage) dictionary.GetPostgresVersionsHandler { + return &getPostgresVersionsHandler{ + db: db, + } +} + +func (h *getPostgresVersionsHandler) Handle(param dictionary.GetPostgresVersionsParams) middleware.Responder { + postgresVersions, err := h.db.GetPostgresVersions(param.HTTPRequest.Context()) + if err != nil { + return dictionary.NewGetPostgresVersionsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return dictionary.NewGetPostgresVersionsOK().WithPayload(&models.ResponsePostgresVersions{ + Data: convert.PostgresVersions(postgresVersions), + }) +} diff --git a/console/service/internal/controllers/environment/delete_environment.go b/console/service/internal/controllers/environment/delete_environment.go new file mode 100644 index 000000000..b21234701 --- /dev/null +++ b/console/service/internal/controllers/environment/delete_environment.go @@ -0,0 +1,41 @@ +package environment + +import ( + "errors" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/tracer" 
+ "postgresql-cluster-console/restapi/operations/environment" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type deleteEnvironmentsHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewDeleteEnvironmentsHandler(db storage.IStorage, log zerolog.Logger) environment.DeleteEnvironmentsIDHandler { + return &deleteEnvironmentsHandler{ + db: db, + log: log, + } +} + +func (h *deleteEnvironmentsHandler) Handle(param environment.DeleteEnvironmentsIDParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + isUsed, err := h.db.CheckEnvironmentIsUsed(param.HTTPRequest.Context(), param.ID) + if err != nil { + localLog.Warn().Err(err).Msg("failed to check that environment is used") + } else if isUsed { + return environment.NewDeleteEnvironmentsIDBadRequest().WithPayload(controllers.MakeErrorPayload(errors.New("The environment is used"), controllers.BaseError)) + } + err = h.db.DeleteEnvironment(param.HTTPRequest.Context(), param.ID) + if err != nil { + return environment.NewDeleteEnvironmentsIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return environment.NewDeleteEnvironmentsIDNoContent() +} diff --git a/console/service/internal/controllers/environment/get_environments.go b/console/service/internal/controllers/environment/get_environments.go new file mode 100644 index 000000000..976cc76a2 --- /dev/null +++ b/console/service/internal/controllers/environment/get_environments.go @@ -0,0 +1,37 @@ +package environment + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/environment" + + "github.com/go-openapi/runtime/middleware" +) + +type getEnvironmentsHandler struct { + db storage.IStorage +} + +func NewGetEnvironmentsHandler(db storage.IStorage) environment.GetEnvironmentsHandler { + return &getEnvironmentsHandler{ + db: db, + } +} + +func (h *getEnvironmentsHandler) Handle(param environment.GetEnvironmentsParams) middleware.Responder { + environments, meta, err := h.db.GetEnvironments(param.HTTPRequest.Context(), param.Limit, param.Offset) + if err != nil { + return environment.NewGetEnvironmentsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return environment.NewGetEnvironmentsOK().WithPayload(&models.ResponseEnvironmentsList{ + Data: convert.EnvironmentsToSwagger(environments), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/environment/post_environment.go b/console/service/internal/controllers/environment/post_environment.go new file mode 100644 index 000000000..6fdef8eac --- /dev/null +++ b/console/service/internal/controllers/environment/post_environment.go @@ -0,0 +1,45 @@ +package environment + +import ( + "fmt" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/environment" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type postEnvironmentsHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func 
NewPostEnvironmentsHandler(db storage.IStorage, log zerolog.Logger) environment.PostEnvironmentsHandler { + return &postEnvironmentsHandler{ + db: db, + log: log, + } +} + +func (h *postEnvironmentsHandler) Handle(param environment.PostEnvironmentsParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + checkEnv, err := h.db.GetEnvironmentByName(param.HTTPRequest.Context(), param.Body.Name) + if err != nil { + localLog.Warn().Err(err).Msg("failed to check environment name exists") + } else if checkEnv != nil { + return environment.NewPostEnvironmentsBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("The environment named %q already exists", param.Body.Name), controllers.BaseError)) + } + env, err := h.db.CreateEnvironment(param.HTTPRequest.Context(), &storage.AddEnvironmentReq{ + Name: param.Body.Name, + Description: param.Body.Description, + }) + if err != nil { + return environment.NewPostEnvironmentsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return environment.NewPostEnvironmentsOK().WithPayload(convert.EnvironmentToSwagger(env)) +} diff --git a/console/service/internal/controllers/errors.go b/console/service/internal/controllers/errors.go new file mode 100644 index 000000000..916d930d1 --- /dev/null +++ b/console/service/internal/controllers/errors.go @@ -0,0 +1,5 @@ +package controllers + +const ( + BaseError = int64(100) +) diff --git a/console/service/internal/controllers/operation/get_operation_log.go b/console/service/internal/controllers/operation/get_operation_log.go new file mode 100644 index 000000000..c4449a153 --- /dev/null +++ b/console/service/internal/controllers/operation/get_operation_log.go @@ -0,0 +1,35 @@ +package operation + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/operation" + + "github.com/go-openapi/runtime/middleware" +) + +type getOperationLogHandler struct { + db storage.IStorage +} + +func NewGetOperationLogHandler(db storage.IStorage) operation.GetOperationsIDLogHandler { + return &getOperationLogHandler{ + db: db, + } +} + +func (h *getOperationLogHandler) Handle(param operation.GetOperationsIDLogParams) middleware.Responder { + op, err := h.db.GetOperation(param.HTTPRequest.Context(), param.ID) + if err != nil { + return operation.NewGetOperationsIDLogBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + var logMessage string + if op.Log != nil { + logMessage = *op.Log + } + + return operation.NewGetOperationsIDLogOK().WithPayload(logMessage).WithContentType("text/plain").WithXLogCompleted(func() bool { + return op.Status != storage.OperationStatusInProgress + }()) +} diff --git a/console/service/internal/controllers/operation/get_operations.go b/console/service/internal/controllers/operation/get_operations.go new file mode 100644 index 000000000..7fbb6f5cc --- /dev/null +++ b/console/service/internal/controllers/operation/get_operations.go @@ -0,0 +1,48 @@ +package operation + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/operation" + "time" + + "github.com/go-openapi/runtime/middleware" +) + +type getOperationsHandler struct { + db
storage.IStorage +} + +func NewGetOperationsHandler(db storage.IStorage) operation.GetOperationsHandler { + return &getOperationsHandler{ + db: db, + } +} + +func (h *getOperationsHandler) Handle(param operation.GetOperationsParams) middleware.Responder { + operations, meta, err := h.db.GetOperations(param.HTTPRequest.Context(), &storage.GetOperationsReq{ + ProjectID: param.ProjectID, + StartedFrom: time.Time(param.StartDate), + EndedTill: time.Time(param.EndDate), + ClusterName: param.ClusterName, + Type: param.Type, + Status: param.Status, + SortBy: param.SortBy, + Limit: param.Limit, + Offset: param.Offset, + }) + if err != nil { + return operation.NewGetOperationsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return operation.NewGetOperationsOK().WithPayload(&models.ResponseOperationsList{ + Data: convert.OperationsViewToSwagger(operations), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/project/delete_project.go b/console/service/internal/controllers/project/delete_project.go new file mode 100644 index 000000000..17c8eaa92 --- /dev/null +++ b/console/service/internal/controllers/project/delete_project.go @@ -0,0 +1,62 @@ +package project + +import ( + "fmt" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/project" + "strings" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type deleteProjectHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewDeleteProjectHandler(db storage.IStorage, log zerolog.Logger) project.DeleteProjectsIDHandler { + return &deleteProjectHandler{ + db: db, + log: log, + } +} + +func (h *deleteProjectHandler) Handle(param project.DeleteProjectsIDParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + checkClusters, _, err := h.db.GetClusters(param.HTTPRequest.Context(), &storage.GetClustersReq{ + ProjectID: param.ID, + }) + if err != nil { + localLog.Warn().Err(err).Msg("failed to check that project is used") + } else if len(checkClusters) != 0 { + return project.NewDeleteProjectsIDBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("The project is used by %d cluster(s) (%s)", len(checkClusters), getClustersNameTitle(checkClusters)), controllers.BaseError)) + } + err = h.db.DeleteProject(param.HTTPRequest.Context(), param.ID) + if err != nil { + return project.NewDeleteProjectsIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return project.NewDeleteProjectsIDNoContent() +} + +func getClustersNameTitle(clusters []storage.Cluster) string { + const maxSize = 3 + title := strings.Builder{} + for i, cl := range clusters { + if i >= maxSize { + title.WriteString(",...") + + return title.String() + } + if i != 0 { + title.WriteString(",") + } + title.WriteString(cl.Name) + } + + return title.String() +} diff --git a/console/service/internal/controllers/project/get_projects.go b/console/service/internal/controllers/project/get_projects.go new file mode 100644 index 000000000..810e65c8b --- /dev/null +++ b/console/service/internal/controllers/project/get_projects.go @@ -0,0 +1,37 @@ +package project + +import ( + "postgresql-cluster-console/internal/controllers" + 
"postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/project" + + "github.com/go-openapi/runtime/middleware" +) + +type getProjectsHandler struct { + db storage.IStorage +} + +func NewGetProjectsHandler(db storage.IStorage) project.GetProjectsHandler { + return &getProjectsHandler{ + db: db, + } +} + +func (h *getProjectsHandler) Handle(param project.GetProjectsParams) middleware.Responder { + projects, meta, err := h.db.GetProjects(param.HTTPRequest.Context(), param.Limit, param.Offset) + if err != nil { + return project.NewGetProjectsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return project.NewGetProjectsOK().WithPayload(&models.ResponseProjectsList{ + Data: convert.ProjectsToSwagger(projects), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/project/path_project.go b/console/service/internal/controllers/project/path_project.go new file mode 100644 index 000000000..4cc548241 --- /dev/null +++ b/console/service/internal/controllers/project/path_project.go @@ -0,0 +1,29 @@ +package project + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/project" + + "github.com/go-openapi/runtime/middleware" +) + +type patchProjectHandler struct { + db storage.IStorage +} + +func NewPatchProjectHandler(db storage.IStorage) project.PatchProjectsIDHandler { + return &patchProjectHandler{ + db: db, + } +} + +func (h *patchProjectHandler) Handle(param project.PatchProjectsIDParams) middleware.Responder { + updatedProject, err := h.db.UpdateProject(param.HTTPRequest.Context(), param.ID, param.Body.Name, param.Body.Description) + if err != nil { + return project.NewPatchProjectsIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return project.NewPatchProjectsIDOK().WithPayload(convert.ProjectToSwagger(updatedProject)) +} diff --git a/console/service/internal/controllers/project/post_project.go b/console/service/internal/controllers/project/post_project.go new file mode 100644 index 000000000..042867b9d --- /dev/null +++ b/console/service/internal/controllers/project/post_project.go @@ -0,0 +1,43 @@ +package project + +import ( + "fmt" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/project" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type postProjectHandler struct { + db storage.IStorage + log zerolog.Logger +} + +func NewPostProjectHandler(db storage.IStorage, log zerolog.Logger) project.PostProjectsHandler { + return &postProjectHandler{ + db: db, + log: log, + } +} + +func (h *postProjectHandler) Handle(param project.PostProjectsParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + checkProject, err := h.db.GetProjectByName(param.HTTPRequest.Context(), param.Body.Name) + if err != nil { + localLog.Warn().Err(err).Msg("failed to check project name exists") + } else if checkProject != nil { + return 
project.NewPostProjectsBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("The project named %q already exists", param.Body.Name), controllers.BaseError)) + } + + createdProject, err := h.db.CreateProject(param.HTTPRequest.Context(), param.Body.Name, param.Body.Description) + if err != nil { + return project.NewPostProjectsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return project.NewPostProjectsOK().WithPayload(convert.ProjectToSwagger(createdProject)) +} diff --git a/console/service/internal/controllers/secret/delete_secret.go b/console/service/internal/controllers/secret/delete_secret.go new file mode 100644 index 000000000..50bf5164a --- /dev/null +++ b/console/service/internal/controllers/secret/delete_secret.go @@ -0,0 +1,28 @@ +package secret + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/secret" + + "github.com/go-openapi/runtime/middleware" +) + +type deleteSecretHandler struct { + db storage.IStorage +} + +func NewDeleteSecretHandler(db storage.IStorage) secret.DeleteSecretsIDHandler { + return &deleteSecretHandler{ + db: db, + } +} + +func (h *deleteSecretHandler) Handle(param secret.DeleteSecretsIDParams) middleware.Responder { + err := h.db.DeleteSecret(param.HTTPRequest.Context(), param.ID) + if err != nil { + return secret.NewDeleteSecretsIDBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return secret.NewDeleteSecretsIDNoContent() +} diff --git a/console/service/internal/controllers/secret/get_secrets.go b/console/service/internal/controllers/secret/get_secrets.go new file mode 100644 index 000000000..5b2498e9c --- /dev/null +++ b/console/service/internal/controllers/secret/get_secrets.go @@ -0,0 +1,44 @@ +package secret + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/secret" + + "github.com/go-openapi/runtime/middleware" +) + +type getSecretHandler struct { + db storage.IStorage +} + +func NewGetSecretHandler(db storage.IStorage) secret.GetSecretsHandler { + return &getSecretHandler{ + db: db, + } +} + +func (h *getSecretHandler) Handle(param secret.GetSecretsParams) middleware.Responder { + secrets, meta, err := h.db.GetSecrets(param.HTTPRequest.Context(), &storage.GetSecretsReq{ + ProjectID: param.ProjectID, + Name: param.Name, + Type: param.Type, + SortBy: param.SortBy, + Limit: param.Limit, + Offset: param.Offset, + }) + if err != nil { + return secret.NewGetSecretsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return secret.NewGetSecretsOK().WithPayload(&models.ResponseSecretInfoList{ + Data: convert.SecretsViewToSwagger(secrets), + Meta: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/secret/post_secret.go b/console/service/internal/controllers/secret/post_secret.go new file mode 100644 index 000000000..efeb2da95 --- /dev/null +++ b/console/service/internal/controllers/secret/post_secret.go @@ -0,0 +1,77 @@ +package secret + +import ( + "encoding/json" + "fmt" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert"
+ "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/pkg/tracer" + "postgresql-cluster-console/restapi/operations/secret" + + "github.com/go-openapi/runtime/middleware" + "github.com/rs/zerolog" +) + +type postSecretHandler struct { + db storage.IStorage + log zerolog.Logger + cfg *configuration.Config +} + +func NewPostSecretHandler(db storage.IStorage, log zerolog.Logger, cfg *configuration.Config) secret.PostSecretsHandler { + return &postSecretHandler{ + db: db, + log: log, + cfg: cfg, + } +} + +func (h *postSecretHandler) Handle(param secret.PostSecretsParams) middleware.Responder { + cid := param.HTTPRequest.Context().Value(tracer.CtxCidKey{}).(string) + localLog := h.log.With().Str("cid", cid).Logger() + checkSecret, err := h.db.GetSecretByName(param.HTTPRequest.Context(), param.Body.Name) + if err != nil { + localLog.Warn().Err(err).Msg("failed to check secret name exists") + } else if checkSecret != nil { + return secret.NewPostSecretsBadRequest().WithPayload(controllers.MakeErrorPayload(fmt.Errorf("The secret named %q already exists", param.Body.Name), controllers.BaseError)) + } + + var ( + value []byte + ) + switch param.Body.Type { + case models.SecretTypeAws: + value, err = json.Marshal(param.Body.Value.Aws) + case models.SecretTypeGcp: + value, err = json.Marshal(param.Body.Value.Gcp) + case models.SecretTypeHetzner: + value, err = json.Marshal(param.Body.Value.Hetzner) + case models.SecretTypeSSHKey: + value, err = json.Marshal(param.Body.Value.SSHKey) + case models.SecretTypeDigitalocean: + value, err = json.Marshal(param.Body.Value.Digitalocean) + case models.SecretTypeAzure: + value, err = json.Marshal(param.Body.Value.Azure) + case models.SecretTypePassword: + value, err = json.Marshal(param.Body.Value.Password) + } + if err != nil { + return secret.NewPostSecretsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + createdSecret, err := h.db.CreateSecret(param.HTTPRequest.Context(), &storage.AddSecretReq{ + ProjectID: param.Body.ProjectID, + Type: string(param.Body.Type), + Name: param.Body.Name, + Value: value, + SecretKey: h.cfg.EncryptionKey, + }) + if err != nil { + return secret.NewPostSecretsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return secret.NewPostSecretsOK().WithPayload(convert.SecretViewToSwagger(createdSecret)) +} diff --git a/console/service/internal/controllers/setting/get_settings.go b/console/service/internal/controllers/setting/get_settings.go new file mode 100644 index 000000000..3a8fa13bf --- /dev/null +++ b/console/service/internal/controllers/setting/get_settings.go @@ -0,0 +1,41 @@ +package setting + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi/operations/setting" + + "github.com/go-openapi/runtime/middleware" +) + +type getSettingsHandler struct { + db storage.IStorage +} + +func NewGetSettingsHandler(db storage.IStorage) setting.GetSettingsHandler { + return &getSettingsHandler{ + db: db, + } +} + +func (h *getSettingsHandler) Handle(param setting.GetSettingsParams) middleware.Responder { + settings, meta, err := h.db.GetSettings(param.HTTPRequest.Context(), &storage.GetSettingsReq{ + Name: param.Name, + Limit: param.Limit, + Offset: param.Offset, + }) + if err != nil { + return 
setting.NewGetSettingsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return setting.NewGetSettingsOK().WithPayload(&models.ResponseSettings{ + Data: convert.SettingsToSwagger(settings), + Mete: &models.MetaPagination{ + Count: &meta.Count, + Limit: &meta.Limit, + Offset: &meta.Offset, + }, + }) +} diff --git a/console/service/internal/controllers/setting/patch_setting.go b/console/service/internal/controllers/setting/patch_setting.go new file mode 100644 index 000000000..d14c7c109 --- /dev/null +++ b/console/service/internal/controllers/setting/patch_setting.go @@ -0,0 +1,29 @@ +package setting + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/setting" + + "github.com/go-openapi/runtime/middleware" +) + +type patchSettingHandler struct { + db storage.IStorage +} + +func NewPatchSettingHandler(db storage.IStorage) setting.PatchSettingsNameHandler { + return &patchSettingHandler{ + db: db, + } +} + +func (h *patchSettingHandler) Handle(param setting.PatchSettingsNameParams) middleware.Responder { + s, err := h.db.UpdateSetting(param.HTTPRequest.Context(), param.Name, param.Body.Value) + if err != nil { + return setting.NewPatchSettingsNameBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return setting.NewPatchSettingsNameOK().WithPayload(convert.SettingToSwagger(s)) +} diff --git a/console/service/internal/controllers/setting/post_setting.go b/console/service/internal/controllers/setting/post_setting.go new file mode 100644 index 000000000..cc7b69017 --- /dev/null +++ b/console/service/internal/controllers/setting/post_setting.go @@ -0,0 +1,29 @@ +package setting + +import ( + "postgresql-cluster-console/internal/controllers" + "postgresql-cluster-console/internal/convert" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/restapi/operations/setting" + + "github.com/go-openapi/runtime/middleware" +) + +type postSettingHandler struct { + db storage.IStorage +} + +func NewPostSettingHandler(db storage.IStorage) setting.PostSettingsHandler { + return &postSettingHandler{ + db: db, + } +} + +func (h *postSettingHandler) Handle(param setting.PostSettingsParams) middleware.Responder { + s, err := h.db.CreateSetting(param.HTTPRequest.Context(), param.Body.Name, param.Body.Value) + if err != nil { + return setting.NewPostSettingsBadRequest().WithPayload(controllers.MakeErrorPayload(err, controllers.BaseError)) + } + + return setting.NewPostSettingsOK().WithPayload(convert.SettingToSwagger(s)) +} diff --git a/console/service/internal/controllers/utils.go b/console/service/internal/controllers/utils.go new file mode 100644 index 000000000..d47960e72 --- /dev/null +++ b/console/service/internal/controllers/utils.go @@ -0,0 +1,11 @@ +package controllers + +import "postgresql-cluster-console/models" + +func MakeErrorPayload(err error, code int64) *models.ResponseError { + return &models.ResponseError{ + Code: code, + Description: err.Error(), + Title: err.Error(), + } +} diff --git a/console/service/internal/convert/clusters.go b/console/service/internal/convert/clusters.go new file mode 100644 index 000000000..93024aa1c --- /dev/null +++ b/console/service/internal/convert/clusters.go @@ -0,0 +1,46 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + 
"github.com/go-openapi/strfmt" +) + +func ClusterToSwagger(cl *storage.Cluster, servers []storage.Server, environmentCode, projectCode string) *models.ClusterInfo { + clusterInfo := &models.ClusterInfo{ + ConnectionInfo: cl.ConnectionInfo, + CreationTime: strfmt.DateTime(cl.CreatedAt), + ClusterLocation: func() string { + if cl.Location != nil { + return *cl.Location + } + + return "" + }(), + Environment: environmentCode, + ID: cl.ID, + Servers: make([]*models.ClusterInfoInstance, 0, len(servers)), + Name: cl.Name, + Description: cl.Description, + PostgresVersion: cl.PostgreVersion, + ProjectName: projectCode, + Status: cl.Status, + } + + for _, server := range servers { + clusterInfo.Servers = append(clusterInfo.Servers, &models.ClusterInfoInstance{ + ID: server.ID, + IP: server.IpAddress.String(), + Lag: server.Lag, + Name: server.Name, + PendingRestart: server.PendingRestart, + Role: server.Role, + Status: server.Status, + Tags: server.Tags, + Timeline: server.Timeline, + }) + } + + return clusterInfo +} diff --git a/console/service/internal/convert/database_extensions.go b/console/service/internal/convert/database_extensions.go new file mode 100644 index 000000000..8d82928a6 --- /dev/null +++ b/console/service/internal/convert/database_extensions.go @@ -0,0 +1,27 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" +) + +func DbExtensionToSwagger(ext *storage.Extension) *models.ResponseDatabaseExtension { + return &models.ResponseDatabaseExtension{ + Contrib: ext.Contrib, + Description: ext.Description, + Image: ext.Image, + Name: ext.Name, + PostgresMaxVersion: ext.PostgresMaxVersion, + PostgresMinVersion: ext.PostgresMinVersion, + URL: ext.Url, + } +} + +func DbExtensionsToSwagger(exts []storage.Extension) []*models.ResponseDatabaseExtension { + resp := make([]*models.ResponseDatabaseExtension, 0, len(exts)) + for _, ext := range exts { + resp = append(resp, DbExtensionToSwagger(&ext)) + } + + return resp +} diff --git a/console/service/internal/convert/environments.go b/console/service/internal/convert/environments.go new file mode 100644 index 000000000..d7e053636 --- /dev/null +++ b/console/service/internal/convert/environments.go @@ -0,0 +1,34 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func EnvironmentToSwagger(env *storage.Environment) *models.ResponseEnvironment { + return &models.ResponseEnvironment{ + CreatedAt: strfmt.DateTime(env.CreatedAt), + Description: env.Description, + ID: env.ID, + Name: env.Name, + UpdatedAt: func() *strfmt.DateTime { + if env.UpdatedAt == nil { + return nil + } + updated := strfmt.DateTime(*env.UpdatedAt) + + return &updated + }(), + } +} + +func EnvironmentsToSwagger(envs []storage.Environment) []*models.ResponseEnvironment { + resp := make([]*models.ResponseEnvironment, 0, len(envs)) + for _, env := range envs { + resp = append(resp, EnvironmentToSwagger(&env)) + } + + return resp +} diff --git a/console/service/internal/convert/external_deployments.go b/console/service/internal/convert/external_deployments.go new file mode 100644 index 000000000..ac5819929 --- /dev/null +++ b/console/service/internal/convert/external_deployments.go @@ -0,0 +1,116 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + "sort" + + "github.com/go-openapi/strfmt" + "go.openly.dev/pointy" +) + +func 
ProviderInfoToSwagger(providerInfo *storage.CloudProviderInfo, description, image string) *models.ResponseDeploymentInfo { + resp := &models.ResponseDeploymentInfo{ + AvatarURL: image, + CloudRegions: nil, + Code: providerInfo.Code, + Description: description, + Volumes: nil, + InstanceTypes: &models.ResponseDeploymentInfoInstanceTypes{}, + } + + cloudRegions := make(map[string][]*models.DeploymentInfoCloudRegionDatacentersItems0) + for _, cloudRegion := range providerInfo.CloudRegions { + datacenterRegion := &models.DeploymentInfoCloudRegionDatacentersItems0{ + Code: cloudRegion.RegionName, + Location: cloudRegion.Description, + } + cloudImage := findCloudImage(providerInfo.CloudImages, cloudRegion.RegionName) + if cloudImage == nil { + cloudImage = findCloudImage(providerInfo.CloudImages, "all") + } + if cloudImage != nil { + datacenterRegion.CloudImage = &models.DeploymentCloudImage{ + Arch: cloudImage.Arch, + Image: cloudImage.Image, + OsName: cloudImage.OsName, + OsVersion: cloudImage.OsVersion, + UpdatedAt: strfmt.DateTime(cloudImage.UpdatedAt), + } + } + cloudRegions[cloudRegion.RegionGroup] = append(cloudRegions[cloudRegion.RegionGroup], datacenterRegion) + } + + mapKeys := make([]string, 0, len(cloudRegions)) + for k := range cloudRegions { + mapKeys = append(mapKeys, k) + } + sort.Strings(mapKeys) + + for _, k := range mapKeys { + resp.CloudRegions = append(resp.CloudRegions, &models.DeploymentInfoCloudRegion{ + Code: k, + Datacenters: cloudRegions[k], + Name: k, + }) + } + + for _, instance := range providerInfo.CloudInstances { + switch instance.InstanceGroup { + case storage.InstanceTypeSmall: + resp.InstanceTypes.Small = append(resp.InstanceTypes.Small, &models.DeploymentInstanceType{ + Code: instance.InstanceName, + CPU: instance.Cpu, + SharedCPU: instance.SharedCpu, + PriceHourly: instance.PriceHourly, + PriceMonthly: instance.PriceMonthly, + Currency: instance.Currency, + RAM: instance.Ram, + }) + case storage.InstanceTypeMedium: + resp.InstanceTypes.Medium = append(resp.InstanceTypes.Medium, &models.DeploymentInstanceType{ + Code: instance.InstanceName, + CPU: instance.Cpu, + SharedCPU: instance.SharedCpu, + PriceHourly: instance.PriceHourly, + PriceMonthly: instance.PriceMonthly, + Currency: instance.Currency, + RAM: instance.Ram, + }) + case storage.InstanceTypeLarge: + resp.InstanceTypes.Large = append(resp.InstanceTypes.Large, &models.DeploymentInstanceType{ + Code: instance.InstanceName, + CPU: instance.Cpu, + SharedCPU: instance.SharedCpu, + PriceHourly: instance.PriceHourly, + PriceMonthly: instance.PriceMonthly, + Currency: instance.Currency, + RAM: instance.Ram, + }) + } + } + + for _, cloudVolume := range providerInfo.CloudVolumes { + resp.Volumes = append(resp.Volumes, &models.ResponseDeploymentInfoVolumesItems0{ + Currency: cloudVolume.Currency, + MaxSize: cloudVolume.VolumeMaxSize, + MinSize: cloudVolume.VolumeMinSize, + PriceMonthly: cloudVolume.PriceMonthly, + VolumeDescription: cloudVolume.VolumeDescription, + VolumeType: cloudVolume.VolumeType, + IsDefault: pointy.Bool(cloudVolume.IsDefault), + }) + } + + return resp +} + +func findCloudImage(images []storage.CloudImage, regionName string) *storage.CloudImage { + for i, image := range images { + if image.Region == regionName { + return &images[i] + } + } + + return nil +} diff --git a/console/service/internal/convert/operations.go b/console/service/internal/convert/operations.go new file mode 100644 index 000000000..76f00bbed --- /dev/null +++ b/console/service/internal/convert/operations.go @@ -0,0
+1,36 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func OperationViewToSwagger(op *storage.OperationView) *models.ResponseOperation { + return &models.ResponseOperation{ + ClusterName: op.Cluster, + Environment: op.Environment, + Finished: func() *strfmt.DateTime { + if op.Finished == nil { + return nil + } + finished := strfmt.DateTime(*op.Finished) + + return &finished + }(), + ID: op.ID, + Started: strfmt.DateTime(op.Started), + Status: op.Status, + Type: op.Type, + } +} + +func OperationsViewToSwagger(ops []storage.OperationView) []*models.ResponseOperation { + resp := make([]*models.ResponseOperation, 0, len(ops)) + for _, op := range ops { + resp = append(resp, OperationViewToSwagger(&op)) + } + + return resp +} diff --git a/console/service/internal/convert/postgres_versions.go b/console/service/internal/convert/postgres_versions.go new file mode 100644 index 000000000..afe996613 --- /dev/null +++ b/console/service/internal/convert/postgres_versions.go @@ -0,0 +1,25 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func PostgresVersion(pv *storage.PostgresVersion) *models.ResponsePostgresVersion { + return &models.ResponsePostgresVersion{ + EndOfLife: strfmt.Date(pv.EndOfLife), + MajorVersion: pv.MajorVersion, + ReleaseDate: strfmt.Date(pv.ReleaseDate), + } +} + +func PostgresVersions(pvs []storage.PostgresVersion) []*models.ResponsePostgresVersion { + resp := make([]*models.ResponsePostgresVersion, 0, len(pvs)) + for _, pv := range pvs { + resp = append(resp, PostgresVersion(&pv)) + } + + return resp +} diff --git a/console/service/internal/convert/projects.go b/console/service/internal/convert/projects.go new file mode 100644 index 000000000..43812e99f --- /dev/null +++ b/console/service/internal/convert/projects.go @@ -0,0 +1,34 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func ProjectToSwagger(prj *storage.Project) *models.ResponseProject { + return &models.ResponseProject{ + CreatedAt: strfmt.DateTime(prj.CreatedAt), + Description: prj.Description, + ID: prj.ID, + Name: prj.Name, + UpdatedAt: func() *strfmt.DateTime { + if prj.UpdatedAt == nil { + return nil + } + updated := strfmt.DateTime(*prj.UpdatedAt) + + return &updated + }(), + } +} + +func ProjectsToSwagger(projects []storage.Project) []*models.ResponseProject { + resp := make([]*models.ResponseProject, 0, len(projects)) + for _, prj := range projects { + resp = append(resp, ProjectToSwagger(&prj)) + } + + return resp +} diff --git a/console/service/internal/convert/secret.go b/console/service/internal/convert/secret.go new file mode 100644 index 000000000..059fda7b8 --- /dev/null +++ b/console/service/internal/convert/secret.go @@ -0,0 +1,37 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func SecretViewToSwagger(secret *storage.SecretView) *models.ResponseSecretInfo { + return &models.ResponseSecretInfo{ + CreatedAt: strfmt.DateTime(secret.CreatedAt), + ID: secret.ID, + IsUsed: secret.IsUsed, + Name: secret.Name, + ProjectID: secret.ProjectID, + Type: models.SecretType(secret.Type), + UpdatedAt: func() *strfmt.DateTime { + if secret.UpdatedAt == nil { + return nil + } + updated 
:= strfmt.DateTime(*secret.UpdatedAt) + + return &updated + }(), + UsedByClusters: secret.UsedByClusters, + } +} + +func SecretsViewToSwagger(secrets []storage.SecretView) []*models.ResponseSecretInfo { + resp := make([]*models.ResponseSecretInfo, 0, len(secrets)) + for _, sec := range secrets { + resp = append(resp, SecretViewToSwagger(&sec)) + } + + return resp +} diff --git a/console/service/internal/convert/settings.go b/console/service/internal/convert/settings.go new file mode 100644 index 000000000..5e62fe0e4 --- /dev/null +++ b/console/service/internal/convert/settings.go @@ -0,0 +1,34 @@ +package convert + +import ( + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/models" + + "github.com/go-openapi/strfmt" +) + +func SettingToSwagger(s *storage.Setting) *models.ResponseSetting { + return &models.ResponseSetting{ + CreatedAt: strfmt.DateTime(s.CreatedAt), + ID: s.ID, + Name: s.Name, + UpdatedAt: func() *strfmt.DateTime { + if s.UpdatedAt == nil { + return nil + } + updated := strfmt.DateTime(*s.UpdatedAt) + + return &updated + }(), + Value: s.Value, + } +} + +func SettingsToSwagger(settings []storage.Setting) []*models.ResponseSetting { + resp := make([]*models.ResponseSetting, 0, len(settings)) + for _, s := range settings { + resp = append(resp, SettingToSwagger(&s)) + } + + return resp +} diff --git a/console/service/internal/db/db.go b/console/service/internal/db/db.go new file mode 100644 index 000000000..30dfed93b --- /dev/null +++ b/console/service/internal/db/db.go @@ -0,0 +1,31 @@ +package db + +import ( + "context" + "fmt" + "postgresql-cluster-console/internal/configuration" + "time" + + "github.com/jackc/pgx/v5/pgxpool" +) + +func NewDbPool(cfg *configuration.Config) (*pgxpool.Pool, error) { + connString := fmt.Sprintf("postgres://%s:%s@%s:%d/%s", + cfg.Db.User, cfg.Db.Password, cfg.Db.Host, cfg.Db.Port, cfg.Db.DbName) + poolConfig, err := pgxpool.ParseConfig(connString) + if err != nil { + return nil, err + } + //poolConfig.ConnConfig.PreferSimpleProtocol = true //(don't need simple protocol https://github.com/jackc/pgx/issues/650) + poolConfig.ConnConfig.Tracer = NewTracerZerolog() + poolConfig.MaxConns = cfg.Db.MaxConns + poolConfig.HealthCheckPeriod = time.Minute * 10 + if cfg.Db.MaxConnLifeTime != 0 { + poolConfig.MaxConnLifetime = cfg.Db.MaxConnLifeTime + } + if cfg.Db.MaxConnIdleTime != 0 { + poolConfig.MaxConnIdleTime = cfg.Db.MaxConnIdleTime + } + + return pgxpool.NewWithConfig(context.Background(), poolConfig) +} diff --git a/console/service/internal/db/tracer.go b/console/service/internal/db/tracer.go new file mode 100644 index 000000000..8389942ce --- /dev/null +++ b/console/service/internal/db/tracer.go @@ -0,0 +1,124 @@ +package db + +import ( + "context" + "encoding/hex" + "fmt" + "postgresql-cluster-console/pkg/tracer" + "strings" + "time" + + "github.com/gdex-lab/go-render/render" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +type ( + traceCtxKey struct{} + traceCtxValue struct { + startTime time.Time + queryId string + } + tracerZerolog struct{} +) + +func NewTracerZerolog() pgx.QueryTracer { + return tracerZerolog{} +} + +func (t tracerZerolog) TraceQueryStart( + ctx context.Context, + conn *pgx.Conn, + data pgx.TraceQueryStartData, +) context.Context { + now := time.Now() + queryId := uuid.New().String() + localLog := t.makeTraceLogger(ctx, queryId) + + localLog.Debug().Str("sql", strings.Map(func(r rune) rune { + switch r { + case 0x000A, 0x0009, 
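// tabs and every common line-break rune (LF, CR, VT, FF, NEL, U+2028, U+2029) are mapped to -1, i.e. removed, so the SQL statement is logged on a single line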
0x000B, 0x000C, 0x000D, 0x0085, 0x2028, 0x2029: + return -1 + default: + return r + } + }, data.SQL)).Str("args", logQueryArgs(data.Args)).Msg("TraceQueryStart") + + return context.WithValue(ctx, traceCtxKey{}, &traceCtxValue{startTime: now, queryId: queryId}) +} + +func (t tracerZerolog) TraceQueryEnd( + ctx context.Context, + conn *pgx.Conn, + data pgx.TraceQueryEndData, +) { + traceValues, ok := ctx.Value(traceCtxKey{}).(*traceCtxValue) + if !ok { + return + } + + localLog := t.makeTraceLogger(ctx, traceValues.queryId) + msg := fmt.Sprintf("TraceQueryEnd duration: %s", time.Since(traceValues.startTime)) + if data.Err != nil { + localLog.Error().Err(data.Err).Msg(msg) + } else { + localLog.Debug().Msg(msg) + } +} + +func (t tracerZerolog) makeTraceLogger(ctx context.Context, queryId string) zerolog.Logger { + cid := getCid(ctx) + logCtx := log.With().Str("query_id", queryId) + if len(cid) != 0 { + logCtx = logCtx.Str("cid", cid) + } + + return logCtx.Logger() +} + +func getCid(ctx context.Context) string { + cid, ok := ctx.Value(tracer.CtxCidKey{}).(string) + if !ok { + return uuid.New().String() + } + + return cid +} + +func logQueryArgs(args []any) string { + paramsStr := strings.Builder{} + paramsStr.WriteString("(") + + for i, a := range args { + switch v := a.(type) { + case []byte: + if len(v) < 64 { + a = hex.EncodeToString(v) + } else { + a = fmt.Sprintf("%x (truncated %d bytes)", v[:64], len(v)-64) + } + case string: + if len(v) > 64 { + a = fmt.Sprintf("%s (truncated %d bytes)", v[:64], len(v)-64) + } + } + if i != 0 { // write the separator before every argument except the first, so the output reads "(a,b,c)" + paramsStr.WriteString(",") + } + + if stringer, ok := a.(fmt.Stringer); ok { + paramsStr.WriteString(stringer.String()) + } else { + paramsStr.WriteString(render.Render(a)) + } + } + + paramsStr.WriteString(")") + + return paramsStr.String() +} diff --git a/console/service/internal/service/service.go b/console/service/internal/service/service.go new file mode 100644 index 000000000..51507420e --- /dev/null +++ b/console/service/internal/service/service.go @@ -0,0 +1,128 @@ +package service + +import ( + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/controllers/cluster" + "postgresql-cluster-console/internal/controllers/dictionary" + "postgresql-cluster-console/internal/controllers/environment" + "postgresql-cluster-console/internal/controllers/operation" + "postgresql-cluster-console/internal/controllers/project" + "postgresql-cluster-console/internal/controllers/secret" + "postgresql-cluster-console/internal/controllers/setting" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/watcher" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/models" + "postgresql-cluster-console/restapi" + "postgresql-cluster-console/restapi/operations" + "postgresql-cluster-console/restapi/operations/system" + + "github.com/go-openapi/runtime/middleware" + + "github.com/go-openapi/loads" + "github.com/jessevdk/go-flags" + "github.com/rs/zerolog/log" +) + +type IService interface { + Serve() error +} + +type httpService struct { + srv *restapi.Server +} + +func NewService( + cfg *configuration.Config, + version string, + db storage.IStorage, + dockerManager xdocker.IManager, + logCollector watcher.LogCollector, + clusterWatcher watcher.ClusterWatcher, +) (IService, error) { + swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "2.0") + if err != nil { + return nil, err + } + api := 
operations.NewPgConsoleAPI(swaggerSpec) + srv := restapi.NewServer(api) + + srv.Host = cfg.Http.Host + srv.Port = cfg.Http.Port + srv.ReadTimeout = cfg.Http.ReadTimeout + srv.WriteTimeout = cfg.Http.WriteTimeout + restapi.Token = cfg.Authorization.Token + + localLog := log.With().Str("module", "http_server").Logger() + api.Logger = func(s string, i ...interface{}) { + localLog.Debug().Msgf(s, i...) + } + + if cfg.Https.IsUsed { + srv.EnabledListeners = append(srv.EnabledListeners, "https") + srv.TLSHost = cfg.Https.Host + srv.TLSPort = cfg.Https.Port + srv.TLSReadTimeout = cfg.Http.ReadTimeout + srv.TLSWriteTimeout = cfg.Http.WriteTimeout + srv.TLSCACertificate = flags.Filename(cfg.Https.CACert) + srv.TLSCertificate = flags.Filename(cfg.Https.ServerCert) + srv.TLSCertificateKey = flags.Filename(cfg.Https.ServerKey) + } + + api.DictionaryGetExternalDeploymentsHandler = dictionary.NewGetExternalDeploymentsHandler(db) + api.DictionaryGetDatabaseExtensionsHandler = dictionary.NewGetDbExtensionsHandler(db) + api.DictionaryGetPostgresVersionsHandler = dictionary.NewGetPostgresVersions(db) + + // environment + api.EnvironmentGetEnvironmentsHandler = environment.NewGetEnvironmentsHandler(db) + api.EnvironmentPostEnvironmentsHandler = environment.NewPostEnvironmentsHandler(db, log.Logger) + api.EnvironmentDeleteEnvironmentsIDHandler = environment.NewDeleteEnvironmentsHandler(db, log.Logger) + + // setting + api.SettingPostSettingsHandler = setting.NewPostSettingHandler(db) + api.SettingGetSettingsHandler = setting.NewGetSettingsHandler(db) + api.SettingPatchSettingsNameHandler = setting.NewPatchSettingHandler(db) + + // project + api.ProjectPostProjectsHandler = project.NewPostProjectHandler(db, log.Logger) + api.ProjectGetProjectsHandler = project.NewGetProjectsHandler(db) + api.ProjectDeleteProjectsIDHandler = project.NewDeleteProjectHandler(db, log.Logger) + api.ProjectPatchProjectsIDHandler = project.NewPatchProjectHandler(db) + + // secret + api.SecretPostSecretsHandler = secret.NewPostSecretHandler(db, log.Logger, cfg) + api.SecretGetSecretsHandler = secret.NewGetSecretHandler(db) + api.SecretDeleteSecretsIDHandler = secret.NewDeleteSecretHandler(db) + + // cluster + api.ClusterPostClustersHandler = cluster.NewPostClusterHandler(db, dockerManager, logCollector, cfg, log.Logger) + api.ClusterDeleteClustersIDHandler = cluster.NewDeleteClusterHandler(db) + api.OperationGetOperationsHandler = operation.NewGetOperationsHandler(db) + api.OperationGetOperationsIDLogHandler = operation.NewGetOperationLogHandler(db) + api.ClusterGetClustersHandler = cluster.NewGetClustersHandler(db, log.Logger) + api.ClusterGetClustersIDHandler = cluster.NewGetClusterHandler(db, log.Logger) + api.ClusterGetClustersDefaultNameHandler = cluster.NewGetClusterDefaultNameHandler(db, log.Logger) + api.ClusterPostClustersIDRemoveHandler = cluster.NewRemoveClusterHandler(db, dockerManager, logCollector, cfg, log.Logger) + api.ClusterDeleteServersIDHandler = cluster.NewDeleteServerHandler(db, log.Logger) + api.ClusterPostClustersIDRefreshHandler = cluster.NewPostClusterRefreshHandler(db, log.Logger, clusterWatcher) + + api.SystemGetVersionHandler = system.GetVersionHandlerFunc(func(params system.GetVersionParams) middleware.Responder { + return system.NewGetVersionOK().WithPayload(&models.ResponseVersion{ + Version: version, + }) + }) + + srv.ConfigureAPI() + + return &httpService{ + srv: srv, + }, nil +} + +func (s *httpService) Serve() error { + return s.srv.Serve() +} diff --git a/console/service/internal/storage/cluster_flags.go b/console/service/internal/storage/cluster_flags.go new file mode 100644 index 000000000..d2e824b5a --- /dev/null +++ b/console/service/internal/storage/cluster_flags.go @@ -0,0 +1,16 @@ +package storage + +import "go.openly.dev/pointy" + +const ( + patroniConnectStatusMaskSet = uint32(0x1) + patroniConnectStatusMaskRemove = ^patroniConnectStatusMaskSet // 0xfffffffe: clears exactly the patroni-connect status bit and leaves every other flag bit intact +) + +func SetPatroniConnectStatus(oldMask uint32, status uint32) *uint32 { + return pointy.Uint32((oldMask & patroniConnectStatusMaskRemove) | (status & patroniConnectStatusMaskSet)) +} + +func GetPatroniConnectStatus(mask uint32) uint32 { + return mask & patroniConnectStatusMaskSet +} diff --git a/console/service/internal/storage/cluster_flags_test.go b/console/service/internal/storage/cluster_flags_test.go new file mode 100644 index 000000000..f78075ec5 --- /dev/null +++ b/console/service/internal/storage/cluster_flags_test.go @@ -0,0 +1,13 @@ +package storage + +import ( + "gotest.tools/v3/assert" + "testing" +) + +func TestClusterFlags(t *testing.T) { + assert.Equal(t, uint32(1), *SetPatroniConnectStatus(0, 1)) + assert.Equal(t, uint32(1), *SetPatroniConnectStatus(1, 1)) + assert.Equal(t, uint32(0x11), *SetPatroniConnectStatus(0x10, 1)) + assert.Equal(t, uint32(0), *SetPatroniConnectStatus(1, 0)) +} diff --git a/console/service/internal/storage/consts.go b/console/service/internal/storage/consts.go new file mode 100644 index 000000000..7d0514a19 --- /dev/null +++ b/console/service/internal/storage/consts.go @@ -0,0 +1,55 @@ +package storage + +const ( + DefaultLimit = 20 + InstanceTypeSmall = "Small Size" + InstanceTypeMedium = "Medium Size" + InstanceTypeLarge = "Large Size" + + OperationStatusInProgress = "in_progress" + OperationStatusSuccess = "success" + OperationStatusFailed = "failed" + + OperationTypeDeploy = "deploy" + + ClusterStatusFailed = "failed" + ClusterStatusHealthy = "healthy" + ClusterStatusUnhealthy = "unhealthy" + ClusterStatusDegraded = "degraded" + ClusterStatusReady = "ready" + ClusterStatusUnavailable = "unavailable" +) + +var ( + secretSortFields = map[string]string{ + "name": "secret_name", + "id": "secret_id", + "type": "secret_type", + "created_at": "created_at", + "updated_at": "updated_at", + } + + clusterSortFields = map[string]string{ + "name": "cluster_name", + "id": "cluster_id", + "created_at": "created_at", + "updated_at": "updated_at", + "environment": "environment_id", + "status": "cluster_status", + "project": "project_id", + "location": "cluster_location", + "server_count": "server_count", + "postgres_version": "postgres_version", + } + + operationSortFields = map[string]string{ + "cluster_name": "cluster", + "type": "type", + "status": "status", + "id": "id", + "created_at": "created_at", + "updated_at": "updated_at", + "cluster": "cluster", + "environment": "environment", + } +) diff --git a/console/service/internal/storage/db_storage.go b/console/service/internal/storage/db_storage.go new file mode 100644 index 000000000..9a21db2da --- /dev/null +++ b/console/service/internal/storage/db_storage.go @@ -0,0 +1,825 @@ +package storage + +import ( + "context" + "errors" + "strconv" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +type dbStorage struct { + db *pgxpool.Pool +} + +func NewDbStorage(db *pgxpool.Pool) IStorage { + return &dbStorage{ + db: db, + } +} + +func (s 
*dbStorage) GetCloudProviders(ctx context.Context, limit, offset *int64) ([]CloudProvider, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if limit != nil { + curLimit = *limit + } + if offset != nil { + curOffset = *offset + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from cloud_providers") + if err != nil { + return nil, nil, err + } + + cloudProviders, err := QueryRowsToStruct[CloudProvider](ctx, s.db, "select * from cloud_providers order by provider_name limit $1 offset $2", curLimit, curOffset) + if err != nil { + return nil, nil, err + } + + return cloudProviders, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetCloudProviderInfo(ctx context.Context, providerCode string) (*CloudProviderInfo, error) { + cloudInstances, err := QueryRowsToStruct[CloudInstance](ctx, s.db, "select * from cloud_instances where cloud_provider = $1 order by cpu, ram", providerCode) + if err != nil { + return nil, err + } + + cloudRegions, err := QueryRowsToStruct[CloudRegion](ctx, s.db, "select * from cloud_regions where cloud_provider = $1 order by region_name", providerCode) + if err != nil { + return nil, err + } + + cloudVolumes, err := QueryRowsToStruct[CloudVolume](ctx, s.db, "select * from cloud_volumes where cloud_provider = $1", providerCode) + if err != nil { + return nil, err + } + + cloudImages, err := QueryRowsToStruct[CloudImage](ctx, s.db, "select * from cloud_images where cloud_provider = $1", providerCode) + if err != nil { + return nil, err + } + + return &CloudProviderInfo{ + Code: providerCode, + CloudRegions: cloudRegions, + CloudInstances: cloudInstances, + CloudVolumes: cloudVolumes, + CloudImages: cloudImages, + }, nil +} + +func (s *dbStorage) GetPostgresVersions(ctx context.Context) ([]PostgresVersion, error) { + postgresVersions, err := QueryRowsToStruct[PostgresVersion](ctx, s.db, "select * from postgres_versions order by major_version") + if err != nil { + return nil, err + } + + return postgresVersions, nil +} + +func (s *dbStorage) CreateSetting(ctx context.Context, name string, value interface{}) (*Setting, error) { + setting, err := QueryRowToStruct[Setting](ctx, s.db, + `insert into settings(setting_name, setting_value) values($1, $2) returning *`, + name, value) + if err != nil { + return nil, err + } + + return setting, nil +} + +func (s *dbStorage) GetSettings(ctx context.Context, req *GetSettingsReq) ([]Setting, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if req.Limit != nil { + curLimit = *req.Limit + } + if req.Offset != nil { + curOffset = *req.Offset + } + + var ( + extraWhere string + extraArgsCurPosition = 1 + ) + extraArgs := []interface{}{} + { + if req.Name != nil { + extraWhere = " where setting_name = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.Name) + extraArgsCurPosition++ + } + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from settings"+extraWhere, extraArgs...) + if err != nil { + return nil, nil, err + } + + limit := " limit $" + strconv.Itoa(extraArgsCurPosition) + " offset $" + strconv.Itoa(extraArgsCurPosition+1) + extraArgs = append(extraArgs, curLimit, curOffset) + + settings, err := QueryRowsToStruct[Setting](ctx, s.db, "select * from settings "+extraWhere+" order by id"+limit, extraArgs...) 
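+ // e.g. with req.Name set this runs "select count(*) from settings where setting_name = $1" followed by "select * from settings where setting_name = $1 order by id limit $2 offset $3"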
+ if err != nil { + return nil, nil, err + } + + return settings, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetSettingByName(ctx context.Context, name string) (*Setting, error) { + setting, err := QueryRowToStruct[Setting](ctx, s.db, "select * from settings where setting_name = $1", name) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + return nil, err + } + + return setting, nil +} + +func (s *dbStorage) UpdateSetting(ctx context.Context, name string, value interface{}) (*Setting, error) { + setting, err := QueryRowToStruct[Setting](ctx, s.db, + `update settings set + setting_value = $1 + where setting_name = $2 + returning *`, + value, name) + if err != nil { + return nil, err + } + + return setting, nil +} + +func (s *dbStorage) CreateProject(ctx context.Context, name, description string) (*Project, error) { + project, err := QueryRowToStruct[Project](ctx, s.db, + `insert into projects(project_name, project_description) values($1, $2) returning *`, + name, description) + if err != nil { + return nil, err + } + + return project, nil +} + +func (s *dbStorage) GetProjects(ctx context.Context, limit, offset *int64) ([]Project, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if limit != nil { + curLimit = *limit + } + if offset != nil { + curOffset = *offset + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from projects") + if err != nil { + return nil, nil, err + } + + projects, err := QueryRowsToStruct[Project](ctx, s.db, "select * from projects order by project_id limit $1 offset $2", curLimit, curOffset) + if err != nil { + return nil, nil, err + } + + return projects, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetProject(ctx context.Context, id int64) (*Project, error) { + project, err := QueryRowToStruct[Project](ctx, s.db, "select * from projects where project_id = $1", id) + if err != nil { + return nil, err + } + + return project, nil +} + +func (s *dbStorage) GetProjectByName(ctx context.Context, name string) (*Project, error) { + project, err := QueryRowToStruct[Project](ctx, s.db, "select * from projects where project_name = $1", name) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + return nil, err + } + + return project, nil +} + +func (s *dbStorage) DeleteProject(ctx context.Context, id int64) error { + _, err := s.db.Exec(ctx, "delete from projects where project_id=$1", id) + + return err +} + +func (s *dbStorage) UpdateProject(ctx context.Context, id int64, name, description *string) (*Project, error) { + project, err := QueryRowToStruct[Project](ctx, s.db, + `update projects set + project_name = coalesce($1, project_name), + project_description = coalesce($2, project_description) + where project_id = $3 + returning *`, + name, description, id) + if err != nil { + return nil, err + } + + return project, nil +} + +func (s *dbStorage) GetEnvironments(ctx context.Context, limit, offset *int64) ([]Environment, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if limit != nil { + curLimit = *limit + } + if offset != nil { + curOffset = *offset + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from environments") + if err != nil { + return nil, nil, err + } + + environments, err := QueryRowsToStruct[Environment](ctx, s.db, "select * from environments 
order by environment_id limit $1 offset $2", curLimit, curOffset) + if err != nil { + return nil, nil, err + } + + return environments, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetEnvironment(ctx context.Context, id int64) (*Environment, error) { + environment, err := QueryRowToStruct[Environment](ctx, s.db, "select * from environments where environment_id = $1", id) + if err != nil { + return nil, err + } + + return environment, nil +} + +func (s *dbStorage) GetEnvironmentByName(ctx context.Context, name string) (*Environment, error) { + environment, err := QueryRowToStruct[Environment](ctx, s.db, "select * from environments where environment_name = $1", name) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + return nil, err + } + + return environment, nil +} + +func (s *dbStorage) CreateEnvironment(ctx context.Context, req *AddEnvironmentReq) (*Environment, error) { + environment, err := QueryRowToStruct[Environment](ctx, s.db, "insert into environments(environment_name, environment_description) values($1, $2) returning *", + req.Name, req.Description) + if err != nil { + return nil, err + } + + return environment, nil +} + +func (s *dbStorage) DeleteEnvironment(ctx context.Context, id int64) error { + _, err := s.db.Exec(ctx, "delete from environments where environment_id=$1", id) + + return err +} + +func (s *dbStorage) CheckEnvironmentIsUsed(ctx context.Context, id int64) (bool, error) { + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from clusters where environment_id = $1", id) + if err != nil { + return false, err + } + + return count != 0, nil +} + +func (s *dbStorage) GetSecrets(ctx context.Context, req *GetSecretsReq) ([]SecretView, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if req.Limit != nil { + curLimit = *req.Limit + } + if req.Offset != nil { + curOffset = *req.Offset + } + + var ( + extraWhere string + extraArgsCurPosition = 2 + ) + extraArgs := []interface{}{req.ProjectID} + { + if req.Name != nil { + extraWhere = " and secret_name = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.Name) + extraArgsCurPosition++ + } + if req.Type != nil { + extraWhere += " and secret_type = $" + strconv.Itoa(extraArgsCurPosition) + extraArgsCurPosition++ + extraArgs = append(extraArgs, req.Type) + } + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from secrets where project_id = $1"+extraWhere, extraArgs...) + if err != nil { + return nil, nil, err + } + + orderBy := OrderByConverter(req.SortBy, "secret_id", secretSortFields) + + limit := " limit $" + strconv.Itoa(extraArgsCurPosition) + " offset $" + strconv.Itoa(extraArgsCurPosition+1) + extraArgs = append(extraArgs, curLimit, curOffset) + + secrets, err := QueryRowsToStruct[SecretView](ctx, s.db, "select * from v_secrets_list where project_id = $1 "+extraWhere+" order by "+orderBy+limit, extraArgs...) 
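+ // user-supplied sort keys pass through the secretSortFields allowlist in OrderByConverter, so only known column names can ever reach the ORDER BY clause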
+ if err != nil { + return nil, nil, err + } + + return secrets, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetSecret(ctx context.Context, id int64) (*SecretView, error) { + sec, err := QueryRowToStruct[SecretView](ctx, s.db, "select * from v_secrets_list where secret_id = $1", id) + if err != nil { + return nil, err + } + + return sec, nil +} + +func (s *dbStorage) GetSecretByName(ctx context.Context, name string) (*SecretView, error) { + sec, err := QueryRowToStruct[SecretView](ctx, s.db, "select * from v_secrets_list where secret_name = $1", name) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + return nil, err + } + + return sec, nil +} + +func (s *dbStorage) CreateSecret(ctx context.Context, req *AddSecretReq) (*SecretView, error) { + secretID, err := QueryRowToScalar[int64](ctx, s.db, "select * from add_secret($1, $2, $3, $4, $5)", + req.ProjectID, req.Type, req.Name, req.Value, req.SecretKey) + if err != nil { + return nil, err + } + + secret, err := QueryRowToStruct[SecretView](ctx, s.db, "select * from v_secrets_list where secret_id = $1 ", secretID) + if err != nil { + return nil, err + } + + return secret, err +} + +func (s *dbStorage) UpdateSecret(ctx context.Context, req *EditSecretReq) (*SecretView, error) { + return nil, nil +} + +func (s *dbStorage) DeleteSecret(ctx context.Context, id int64) error { + _, err := s.db.Exec(ctx, "delete from secrets where secret_id=$1", id) + + return err +} + +func (s *dbStorage) GetSecretVal(ctx context.Context, id int64, secretKey string) ([]byte, error) { + secretVal, err := QueryRowToScalar[[]byte](ctx, s.db, "select * from get_secret($1, $2)", + id, secretKey) + if err != nil { + return nil, err + } + + return secretVal, nil +} + +func (s *dbStorage) GetExtensions(ctx context.Context, req *GetExtensionsReq) ([]Extension, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if req.Limit != nil { + curLimit = *req.Limit + } + if req.Offset != nil { + curOffset = *req.Offset + } + + subQuery := ` WHERE (e.postgres_min_version IS NULL OR e.postgres_min_version::float <= $1) + AND (e.postgres_max_version IS NULL OR e.postgres_max_version::float >= $1) + AND ($2 = 'all' OR ($2 = 'contrib' AND e.contrib = true) OR ($2 = 'third_party' AND e.contrib = false))` + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from extensions as e "+subQuery, req.PostgresVersion, req.Type) + if err != nil { + return nil, nil, err + } + + extensions, err := QueryRowsToStruct[Extension](ctx, s.db, "select * from extensions as e"+subQuery+ + "ORDER BY e.contrib, e.extension_image IS NULL, e.extension_name limit $3 offset $4", + req.PostgresVersion, req.Type, curLimit, curOffset) + if err != nil { + return nil, nil, err + } + + return extensions, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) CreateCluster(ctx context.Context, req *CreateClusterReq) (*Cluster, error) { + cluster, err := QueryRowToStruct[Cluster](ctx, s.db, `insert into clusters(project_id, environment_id, cluster_name, cluster_description, secret_id, extra_vars, cluster_status, cluster_location, server_count, postgres_version, inventory) + values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) returning *`, req.ProjectID, req.EnvironmentID, req.Name, req.Description, req.SecretID, req.ExtraVars, req.Status, req.Location, req.ServerCount, req.PostgreSqlVersion, req.Inventory) + if err != 
nil { + return nil, err + } + + return cluster, nil +} + +func (s *dbStorage) UpdateCluster(ctx context.Context, req *UpdateClusterReq) (*Cluster, error) { + cluster, err := QueryRowToStruct[Cluster](ctx, s.db, + `update clusters + set connection_info = coalesce($1, connection_info), + cluster_status = coalesce($2, cluster_status), + flags = coalesce($3, flags) + where cluster_id = $4 returning *`, + req.ConnectionInfo, req.Status, req.Flags, req.ID) + if err != nil { + return nil, err + } + + return cluster, nil +} + +func (s *dbStorage) GetDefaultClusterName(ctx context.Context) (string, error) { + name, err := QueryRowToScalar[string](ctx, s.db, "select * from get_cluster_name()") + if err != nil { + return "", err + } + + return name, nil +} + +func (s *dbStorage) CreateOperation(ctx context.Context, req *CreateOperationReq) (*Operation, error) { + operation, err := QueryRowToStruct[Operation](ctx, s.db, `insert into operations(project_id, cluster_id, docker_code, operation_type, operation_status, cid) + values($1, $2, $3, $4, $5, $6) returning *`, req.ProjectID, req.ClusterID, req.DockerCode, req.Type, OperationStatusInProgress, req.Cid) + if err != nil { + return nil, err + } + + return operation, nil +} + +func (s *dbStorage) GetClusterByName(ctx context.Context, name string) (*Cluster, error) { + cluster, err := QueryRowToStruct[Cluster](ctx, s.db, "select * from clusters where cluster_name = $1", name) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + return nil, err + } + + return cluster, nil +} + +func (s *dbStorage) GetCluster(ctx context.Context, id int64) (*Cluster, error) { + cluster, err := QueryRowToStruct[Cluster](ctx, s.db, "select * from clusters where cluster_id = $1", id) + if err != nil { + return nil, err + } + + return cluster, nil +} + +func (s *dbStorage) GetClusters(ctx context.Context, req *GetClustersReq) ([]Cluster, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if req.Limit != nil { + curLimit = *req.Limit + } + if req.Offset != nil { + curOffset = *req.Offset + } + + var ( + extraWhere string + extraArgsCurPosition = 2 + ) + extraArgs := []interface{}{req.ProjectID} + { + if req.Name != nil { + extraWhere += " and cluster_name = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.Name) + extraArgsCurPosition++ + } + if req.Status != nil { + extraWhere += " and cluster_status = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.Status) + extraArgsCurPosition++ + } + if req.Location != nil { + extraWhere += " and cluster_location = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.Location) + extraArgsCurPosition++ + } + if req.EnvironmentID != nil { + extraWhere += " and environment_id = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.EnvironmentID) + extraArgsCurPosition++ + } + if req.ServerCount != nil { + extraWhere += " and server_count = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.ServerCount) + extraArgsCurPosition++ + } + if req.PostgresVersion != nil { + extraWhere += " and postgres_version = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.PostgresVersion) + extraArgsCurPosition++ + } + if req.CreatedAtFrom != nil { + extraWhere += " and created_at >= $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.CreatedAtFrom) + extraArgsCurPosition++ + } + if req.CreatedAtTo != nil { + extraWhere 
+= " and created_at <= $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.CreatedAtTo) + extraArgsCurPosition++ + } + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from clusters where project_id = $1 and deleted_at is null"+extraWhere, extraArgs...) + if err != nil { + return nil, nil, err + } + + orderBy := OrderByConverter(req.SortBy, "cluster_id", clusterSortFields) + + limit := " limit $" + strconv.Itoa(extraArgsCurPosition) + " offset $" + strconv.Itoa(extraArgsCurPosition+1) + extraArgs = append(extraArgs, curLimit, curOffset) + + clusters, err := QueryRowsToStruct[Cluster](ctx, s.db, "select * from clusters where project_id = $1 and deleted_at is null"+extraWhere+" order by "+orderBy+limit, + extraArgs...) + if err != nil { + return nil, nil, err + } + + return clusters, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) DeleteCluster(ctx context.Context, id int64) error { + _, err := s.db.Exec(ctx, "delete from operations where cluster_id = $1", id) + if err != nil { + return err + } + + _, err = s.db.Exec(ctx, "delete from servers where cluster_id = $1", id) + if err != nil { + return err + } + + _, err = s.db.Exec(ctx, "delete from clusters where cluster_id = $1", id) + if err != nil { + return err + } + + return nil +} + +func (s *dbStorage) DeleteClusterSoft(ctx context.Context, id int64) error { + query := ` + update clusters + set + deleted_at = current_timestamp, + secret_id = null, + cluster_name = cluster_name || '_deleted_' || to_char(current_timestamp, 'yyyymmddhh24miss') + where + cluster_id = $1 + ` + _, err := s.db.Exec(ctx, query, id) + + return err +} + +func (s *dbStorage) DeleteServer(ctx context.Context, id int64) error { + _, err := s.db.Exec(ctx, "delete from servers where server_id = $1", id) + if err != nil { + return err + } + + return nil +} + +func (s *dbStorage) GetInProgressOperations(ctx context.Context, from time.Time) ([]Operation, error) { + operations, err := QueryRowsToStruct[Operation](ctx, s.db, "select * from operations where operation_status = $1 and created_at > $2", + OperationStatusInProgress, from) + if err != nil { + return nil, err + } + + return operations, nil +} + +func (s *dbStorage) UpdateOperation(ctx context.Context, req *UpdateOperationReq) (*Operation, error) { + operation, err := QueryRowToStruct[Operation](ctx, s.db, + `update operations + set operation_status = coalesce($1, operation_status), + operation_log = case when $2::text is null then operation_log else concat(operation_log, CHR(10), $2::text) end + where id = $3 returning id, project_id, cluster_id, docker_code, cid, operation_type, operation_status, null, created_at, updated_at`, + req.Status, req.Logs, req.ID) + if err != nil { + return nil, err + } + + return operation, nil +} + +func (s *dbStorage) GetOperations(ctx context.Context, req *GetOperationsReq) ([]OperationView, *MetaPagination, error) { + var ( + curOffset = int64(0) + curLimit = int64(DefaultLimit) + ) + if req.Limit != nil { + curLimit = *req.Limit + } + if req.Offset != nil { + curOffset = *req.Offset + } + + subQuery := `WHERE project_id = $1 and started >= $2 and started <= $3` + + var ( + extraWhere string + extraArgsCurPosition = 4 + ) + extraArgs := []interface{}{req.ProjectID, req.StartedFrom, req.EndedTill} + { + if req.ClusterName != nil { + extraWhere = " and cluster = $" + strconv.Itoa(extraArgsCurPosition) + extraArgs = append(extraArgs, req.ClusterName) + extraArgsCurPosition++ + } 
+ if req.Type != nil { + extraWhere += " and type = $" + strconv.Itoa(extraArgsCurPosition) + extraArgsCurPosition++ + extraArgs = append(extraArgs, req.Type) + } + if req.Status != nil { + extraWhere += " and status = $" + strconv.Itoa(extraArgsCurPosition) + extraArgsCurPosition++ + extraArgs = append(extraArgs, req.Status) + } + if req.Environment != nil { + extraWhere += " and environment = $" + strconv.Itoa(extraArgsCurPosition) + extraArgsCurPosition++ + extraArgs = append(extraArgs, req.Environment) + } + } + + count, err := QueryRowToScalar[int64](ctx, s.db, "select count(*) from v_operations "+subQuery+extraWhere, extraArgs...) + if err != nil { + return nil, nil, err + } + + orderBy := OrderByConverter(req.SortBy, "id DESC", operationSortFields) + + limit := " limit $" + strconv.Itoa(extraArgsCurPosition) + " offset $" + strconv.Itoa(extraArgsCurPosition+1) + extraArgs = append(extraArgs, curLimit, curOffset) + + operations, err := QueryRowsToStruct[OperationView](ctx, s.db, "select * from v_operations "+subQuery+extraWhere+ + " order by "+orderBy+limit, + extraArgs...) + if err != nil { + return nil, nil, err + } + + return operations, &MetaPagination{ + Limit: curLimit, + Offset: curOffset, + Count: count, + }, nil +} + +func (s *dbStorage) GetOperation(ctx context.Context, id int64) (*Operation, error) { + operation, err := QueryRowToStruct[Operation](ctx, s.db, "select * from operations where id = $1", id) + if err != nil { + return nil, err + } + + return operation, nil +} + +func (s *dbStorage) CreateServer(ctx context.Context, req *CreateServerReq) (*Server, error) { + server, err := QueryRowToStruct[Server](ctx, s.db, `insert into servers(cluster_id, server_name, server_location, ip_address) + values($1, $2, $3, $4) returning *`, req.ClusterID, req.ServerName, req.ServerLocation, req.IpAddress) + if err != nil { + return nil, err + } + + return server, nil +} + +func (s *dbStorage) GetServer(ctx context.Context, id int64) (*Server, error) { + server, err := QueryRowToStruct[Server](ctx, s.db, "select * from servers where server_id = $1", id) + if err != nil { + return nil, err + } + + return server, nil +} + +func (s *dbStorage) GetClusterServers(ctx context.Context, clusterID int64) ([]Server, error) { + servers, err := QueryRowsToStruct[Server](ctx, s.db, "select * from servers where cluster_id = $1", clusterID) + if err != nil { + return nil, err + } + + return servers, nil +} + +func (s *dbStorage) UpdateServer(ctx context.Context, req *UpdateServerReq) (*Server, error) { + server, err := QueryRowToStruct[Server](ctx, s.db, + `insert into servers(cluster_id, ip_address, server_name, server_role, server_status, timeline, lag, tags, pending_restart) + values($1, $2, $3, $4, $5, $6, $7, $8, $9) on conflict(cluster_id, ip_address) do update + set server_name = case when EXCLUDED.server_name = '' then servers.server_name else EXCLUDED.server_name end, + server_role = coalesce(EXCLUDED.server_role, servers.server_role), + server_status = coalesce(EXCLUDED.server_status, servers.server_status), + timeline = coalesce(EXCLUDED.timeline, servers.timeline), + lag = EXCLUDED.lag, + tags = coalesce(EXCLUDED.tags, servers.tags), + pending_restart = coalesce(EXCLUDED.pending_restart, servers.pending_restart) returning *`, + req.ClusterID, req.IpAddress, req.Name, req.Role, req.Status, req.Timeline, req.Lag, req.Tags, req.PendingRestart) + if err != nil { + return nil, err + } + + return server, nil +} + +func (s *dbStorage) ResetServer(ctx context.Context, clusterID int64, ipAddress 
string) (*Server, error) { + server, err := QueryRowToStruct[Server](ctx, s.db, + `update servers set + server_role = 'N/A', + server_status = 'N/A', + timeline = null, + lag = null, + tags = null where cluster_id = $1 and ip_address = $2 returning *`, + clusterID, ipAddress) + + if err != nil { + return nil, err + } + + return server, nil +} diff --git a/console/service/internal/storage/istorage.go b/console/service/internal/storage/istorage.go new file mode 100644 index 000000000..ed27adcc9 --- /dev/null +++ b/console/service/internal/storage/istorage.go @@ -0,0 +1,68 @@ +package storage + +import ( + "context" + "time" +) + +type IStorage interface { + GetCloudProviders(ctx context.Context, limit, offset *int64) ([]CloudProvider, *MetaPagination, error) + GetCloudProviderInfo(ctx context.Context, providerCode string) (*CloudProviderInfo, error) + GetExtensions(ctx context.Context, req *GetExtensionsReq) ([]Extension, *MetaPagination, error) + GetPostgresVersions(ctx context.Context) ([]PostgresVersion, error) + + // environment + GetEnvironments(ctx context.Context, limit, offset *int64) ([]Environment, *MetaPagination, error) + GetEnvironment(ctx context.Context, id int64) (*Environment, error) + GetEnvironmentByName(ctx context.Context, name string) (*Environment, error) + CreateEnvironment(ctx context.Context, req *AddEnvironmentReq) (*Environment, error) + DeleteEnvironment(ctx context.Context, id int64) error + CheckEnvironmentIsUsed(ctx context.Context, id int64) (bool, error) + + // setting + CreateSetting(ctx context.Context, name string, value interface{}) (*Setting, error) + GetSettings(ctx context.Context, req *GetSettingsReq) ([]Setting, *MetaPagination, error) + GetSettingByName(ctx context.Context, name string) (*Setting, error) + UpdateSetting(ctx context.Context, name string, value interface{}) (*Setting, error) + + // project + CreateProject(ctx context.Context, name, description string) (*Project, error) + GetProjects(ctx context.Context, limit, offset *int64) ([]Project, *MetaPagination, error) + GetProject(ctx context.Context, id int64) (*Project, error) + GetProjectByName(ctx context.Context, name string) (*Project, error) + DeleteProject(ctx context.Context, id int64) error + UpdateProject(ctx context.Context, id int64, name, description *string) (*Project, error) + + // secrets + GetSecrets(ctx context.Context, req *GetSecretsReq) ([]SecretView, *MetaPagination, error) + GetSecret(ctx context.Context, id int64) (*SecretView, error) + GetSecretByName(ctx context.Context, name string) (*SecretView, error) + CreateSecret(ctx context.Context, req *AddSecretReq) (*SecretView, error) + DeleteSecret(ctx context.Context, id int64) error + GetSecretVal(ctx context.Context, id int64, secretKey string) ([]byte, error) + + // cluster + CreateCluster(ctx context.Context, req *CreateClusterReq) (*Cluster, error) + GetCluster(ctx context.Context, id int64) (*Cluster, error) + GetClusters(ctx context.Context, req *GetClustersReq) ([]Cluster, *MetaPagination, error) + GetDefaultClusterName(ctx context.Context) (string, error) + DeleteCluster(ctx context.Context, id int64) error + DeleteClusterSoft(ctx context.Context, id int64) error + DeleteServer(ctx context.Context, id int64) error + GetClusterByName(ctx context.Context, name string) (*Cluster, error) + UpdateCluster(ctx context.Context, req *UpdateClusterReq) (*Cluster, error) + + // operation + CreateOperation(ctx context.Context, req *CreateOperationReq) (*Operation, error) + GetOperations(ctx context.Context, req 
*GetOperationsReq) ([]OperationView, *MetaPagination, error) + GetOperation(ctx context.Context, id int64) (*Operation, error) + UpdateOperation(ctx context.Context, req *UpdateOperationReq) (*Operation, error) + GetInProgressOperations(ctx context.Context, from time.Time) ([]Operation, error) + + // server + CreateServer(ctx context.Context, req *CreateServerReq) (*Server, error) + GetServer(ctx context.Context, id int64) (*Server, error) + GetClusterServers(ctx context.Context, clusterID int64) ([]Server, error) + UpdateServer(ctx context.Context, req *UpdateServerReq) (*Server, error) + ResetServer(ctx context.Context, clusterID int64, ipAddress string) (*Server, error) +} diff --git a/console/service/internal/storage/models.go b/console/service/internal/storage/models.go new file mode 100644 index 000000000..17e28f98d --- /dev/null +++ b/console/service/internal/storage/models.go @@ -0,0 +1,312 @@ +package storage + +import ( + "net" + "time" +) + +type CloudProvider struct { + Code string + Description string + ProviderImage string +} + +type CloudRegion struct { + ProviderCode string + RegionGroup string + RegionName string + Description string +} + +type CloudInstance struct { + ProviderCode string + InstanceGroup string + InstanceName string + Arch string + Cpu int64 + Ram int64 + PriceHourly float64 + PriceMonthly float64 + Currency string + UpdatedAt time.Time + SharedCpu bool +} + +type CloudImage struct { + ProviderCode string + Region string + Image interface{} + Arch string + OsName string + OsVersion string + UpdatedAt time.Time +} + +type CloudVolume struct { + ProviderCode string + VolumeType string + VolumeDescription string + VolumeMinSize int64 + VolumeMaxSize int64 + PriceMonthly float64 + Currency string + IsDefault bool + UpdatedAt time.Time +} + +type CloudProviderInfo struct { + Code string + CloudRegions []CloudRegion + CloudInstances []CloudInstance + CloudVolumes []CloudVolume + CloudImages []CloudImage +} + +type PostgresVersion struct { + MajorVersion int64 + ReleaseDate time.Time + EndOfLife time.Time +} + +type Setting struct { + ID int64 + Name string + Value interface{} + CreatedAt time.Time + UpdatedAt *time.Time +} + +type GetSettingsReq struct { + Name *string + + Limit *int64 + Offset *int64 +} + +type MetaPagination struct { + Limit int64 + Offset int64 + Count int64 +} + +type Project struct { + ID int64 + Name string + Description *string + CreatedAt time.Time + UpdatedAt *time.Time +} + +type Environment struct { + ID int64 + Name string + Description *string + CreatedAt time.Time + UpdatedAt *time.Time +} + +type AddEnvironmentReq struct { + Name string + Description string +} + +type SecretView struct { + ProjectID int64 + ID int64 + Name string + Type string + CreatedAt time.Time + UpdatedAt *time.Time + IsUsed bool + UsedByClusters *string +} + +type GetSecretsReq struct { + ProjectID int64 + Name *string + Type *string + SortBy *string + + Limit *int64 + Offset *int64 +} + +type AddSecretReq struct { + ProjectID int64 + Type string + Name string + Value []byte + SecretKey string +} + +type EditSecretReq struct { + ProjectID int64 + Type *string + Name *string + Value []byte + SecretKey string +} + +type Extension struct { + Name string + Description *string + Url *string + Image *string + PostgresMinVersion *string + PostgresMaxVersion *string + Contrib bool +} + +type GetExtensionsReq struct { + Type *string + PostgresVersion *string + + Limit *int64 + Offset *int64 +} + +type Cluster struct { + ID int64 + ProjectID int64 + EnvironmentID 
int64 + SecretID *int64 + Name string + Status string + Description string + Location *string + ConnectionInfo interface{} + ExtraVars []byte + Inventory []byte + ServersCount int32 + PostgreVersion int32 + CreatedAt time.Time + UpdatedAt *time.Time + DeletedAt *time.Time + Flags uint32 +} + +type GetClustersReq struct { + ProjectID int64 + Name *string + SortBy *string + Status *string + Location *string + ServerCount *int64 + PostgresVersion *int64 + EnvironmentID *int64 + CreatedAtFrom *time.Time + CreatedAtTo *time.Time + + Limit *int64 + Offset *int64 +} + +type CreateClusterReq struct { + ProjectID int64 + EnvironmentID int64 + Name string + Description string + SecretID *int64 + ExtraVars []string + Location string + ServerCount int + PostgreSqlVersion int + Status string + Inventory []byte +} + +type UpdateClusterReq struct { + ID int64 + ConnectionInfo interface{} + Status *string + Flags *uint32 +} + +type Operation struct { + ID int64 + ProjectID int64 + ClusterID int64 + DockerCode string + Cid string + Type string + Status string + Log *string + CreatedAt time.Time + UpdatedAt *time.Time +} + +type OperationView struct { + ProjectID int64 + ClusterID int64 + ID int64 + Started time.Time + Finished *time.Time + Type string + Status string + Cluster string + Environment string +} + +type CreateOperationReq struct { + ProjectID int64 + ClusterID int64 + DockerCode string + Type string + Cid string +} + +type UpdateOperationReq struct { + ID int64 + Status *string + Logs *string +} + +type GetOperationsReq struct { + ProjectID int64 + StartedFrom time.Time + EndedTill time.Time + ClusterName *string + Type *string + Status *string + Environment *string + SortBy *string + + Limit *int64 + Offset *int64 +} + +type Server struct { + ID int64 + ClusterID int64 + Name string + Location *string + Role string + Status string + IpAddress net.IP + Timeline *int64 + Lag *int64 + Tags interface{} + PendingRestart *bool + CreatedAt time.Time + UpdatedAt *time.Time +} + +type CreateServerReq struct { + ClusterID int64 + ServerName string + ServerLocation *string + IpAddress string +} + +type UpdateServerReq struct { + ClusterID int64 + IpAddress string + + Name string + Role *string + Status *string + Timeline *int64 + Lag *int64 + Tags interface{} + PendingRestart *bool +} diff --git a/console/service/internal/storage/utils.go b/console/service/internal/storage/utils.go new file mode 100644 index 000000000..36796700b --- /dev/null +++ b/console/service/internal/storage/utils.go @@ -0,0 +1,114 @@ +package storage + +import ( + "context" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "strings" +) + +func QueryRowsToStruct[output any]( + ctx context.Context, + pool *pgxpool.Pool, + query string, + args ...any, +) ([]output, error) { + rows, err := pool.Query(ctx, query, args...) + if err != nil { + return nil, err + } + + return pgx.CollectRows(rows, pgx.RowToStructByPos[output]) +} + +func QueryRowsToAddrStruct[output any]( + ctx context.Context, + pool *pgxpool.Pool, + query string, + args ...any, +) ([]*output, error) { + rows, err := pool.Query(ctx, query, args...) + if err != nil { + return nil, err + } + + return pgx.CollectRows(rows, pgx.RowToAddrOfStructByPos[output]) +} + +func QueryRowToStruct[output any]( + ctx context.Context, + pool *pgxpool.Pool, + query string, + args ...any, +) (*output, error) { + rows, err := pool.Query(ctx, query, args...) 
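+ // pgx v5's CollectOneRow (below) reads the first row, closes the row set, and returns pgx.ErrNoRows on an empty result; callers such as GetSettingByName map that error to a nil record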
+ if err != nil { + return nil, err + } + + var res output + res, err = pgx.CollectOneRow(rows, pgx.RowToStructByPos[output]) + if err != nil { + return nil, err + } + + return &res, nil +} + +func QueryRowToScalar[scalar any]( + ctx context.Context, + pool *pgxpool.Pool, + query string, + args ...any, +) (scalar, error) { + rows, err := pool.Query(ctx, query, args...) + if err != nil { + var value scalar + return value, err + } + + return pgx.CollectOneRow(rows, pgx.RowTo[scalar]) +} + +func QueryRowToScalarAddr[scalar any]( + ctx context.Context, + pool *pgxpool.Pool, + query string, + args ...any, +) (*scalar, error) { + rows, err := pool.Query(ctx, query, args...) + if err != nil { + return nil, err + } + + return pgx.CollectOneRow(rows, pgx.RowToAddrOf[scalar]) +} + +func OrderByConverter(sortByFromApi *string, defaultField string, convMap map[string]string) string { + orderBy := strings.Builder{} + if sortByFromApi != nil { + sortByFields := strings.Split(*sortByFromApi, ",") + for _, sortBy := range sortByFields { + if len(sortBy) == 0 { + continue + } + order := "ASC" + if sortBy[0] == '-' { + order = "DESC" + sortBy = sortBy[1:] + } + tableField := convMap[sortBy] + if len(tableField) != 0 { + if orderBy.Len() != 0 { + orderBy.WriteString(",") + } + orderBy.WriteString(tableField + " " + order) + } + } + } + if orderBy.Len() == 0 { + return defaultField + } + + return orderBy.String() +} diff --git a/console/service/internal/watcher/cluster_watcher.go b/console/service/internal/watcher/cluster_watcher.go new file mode 100644 index 000000000..bbfcc6e24 --- /dev/null +++ b/console/service/internal/watcher/cluster_watcher.go @@ -0,0 +1,292 @@ +package watcher + +import ( + "context" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/pkg/patroni" + "postgresql-cluster-console/pkg/tracer" + "sync" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "go.openly.dev/pointy" + "golang.org/x/sync/semaphore" +) + +type ClusterWatcher interface { + Run() + Stop() + HandleCluster(ctx context.Context, cl *storage.Cluster) +} + +type clusterWatcher struct { + db storage.IStorage + isRun bool + log zerolog.Logger + cfg *configuration.Config + patroniCli patroni.IClient + + ctx context.Context + done context.CancelFunc + wg sync.WaitGroup +} + +func NewServerWatcher(db storage.IStorage, patroniCli patroni.IClient, cfg *configuration.Config) ClusterWatcher { + return &clusterWatcher{ + db: db, + cfg: cfg, + patroniCli: patroniCli, + log: log.Logger.With().Str("module", "cluster_watcher").Logger(), + } +} + +func (sw *clusterWatcher) Run() { + if sw.isRun { + return + } + sw.isRun = true + + sw.ctx, sw.done = context.WithCancel(context.Background()) + sw.wg.Add(1) + go func() { + sw.loop() + sw.wg.Done() + }() + sw.log.Info().Msg("run") +} + +func (sw *clusterWatcher) Stop() { + sw.log.Info().Msg("stopping") + sw.done() + sw.wg.Wait() + sw.isRun = false + sw.log.Info().Msg("stopped") +} + +func (sw *clusterWatcher) loop() { + timer := time.NewTimer(sw.cfg.ClusterWatcher.RunEvery) + defer timer.Stop() + + for { + select { + case <-sw.ctx.Done(): + sw.log.Info().Msg("loop is done") + + return + case <-timer.C: + sw.doWork() + timer.Reset(sw.cfg.ClusterWatcher.RunEvery) + } + } +} + +func (sw *clusterWatcher) doWork() { + sw.log.Trace().Msg("doWork started") + defer sw.log.Trace().Msg("doWork was done") + ctx := context.WithValue(sw.ctx, tracer.CtxCidKey{}, uuid.New().String()) + 
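// the fresh correlation id (cid) stored in the context is picked up by the pgx tracer (see getCid), tying together all queries issued during this pass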
projects, _, err := sw.db.GetProjects(ctx, pointy.Int64(1000), pointy.Int64(0)) + if err != nil { + sw.log.Error().Err(err).Msg("failed to get projects") + + return + } + sem := semaphore.NewWeighted(sw.cfg.ClusterWatcher.PoolSize) + for _, pr := range projects { + sw.handleProject(ctx, &pr, sem) + } + _ = sem.Acquire(ctx, sw.cfg.ClusterWatcher.PoolSize) // wait for all workers to finish +} + +func (sw *clusterWatcher) handleProject(ctx context.Context, pr *storage.Project, sem *semaphore.Weighted) { + localLog := sw.log.With().Str("project", pr.Name).Logger() + localLog.Trace().Msg("started to handle project") + defer localLog.Trace().Msg("project was handled") + + var ( + offset = int64(0) + limit = int64(100) // fetch clusters in pages of 100 + ) + for { + if ctx.Err() != nil { + return + } + + clusters, _, err := sw.db.GetClusters(ctx, &storage.GetClustersReq{ + ProjectID: pr.ID, + Limit: &limit, + Offset: &offset, + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to get clusters") + + return // retrying immediately would spin on a persistent error; the next watcher pass will try again + } + if len(clusters) == 0 { + localLog.Trace().Msg("all clusters were handled") + + return + } + + for _, cl := range clusters { + err = sem.Acquire(ctx, 1) + if err != nil { + localLog.Error().Err(err).Msg("failed to acquire semaphore") + + return + } + cl := cl // copy for async handling + go func() { + sw.HandleCluster(ctx, &cl) + sem.Release(1) + }() + } + offset += limit + } +} + +func (sw *clusterWatcher) HandleCluster(ctx context.Context, cl *storage.Cluster) { + localLog := sw.log.With().Str("cluster", cl.Name).Logger() + cid, ok := ctx.Value(tracer.CtxCidKey{}).(string) + if ok { + localLog = localLog.With().Str("cid", cid).Logger() + } + localLog.Trace().Msg("started to handle cluster") + defer localLog.Trace().Msg("cluster was handled") + + servers, err := sw.db.GetClusterServers(ctx, cl.ID) + if err != nil { + localLog.Error().Err(err).Msg("failed to get servers by cluster") + + return + } + + sw.handleClusterServers(ctx, cl, servers) +} + +func (sw *clusterWatcher) handleClusterServers(ctx context.Context, cl *storage.Cluster, clusterServers []storage.Server) { + localLog := sw.log.With().Str("cluster", cl.Name).Logger() + cid, ok := ctx.Value(tracer.CtxCidKey{}).(string) + if ok { + localLog = localLog.With().Str("cid", cid).Logger() + } + localLog.Trace().Msg("started to handle cluster servers") + defer localLog.Trace().Msg("cluster servers were handled") + + // map with the old cluster topology; entries still false at the end belong to servers patroni no longer reports + serversMap := make(map[string]bool) + for _, s := range clusterServers { + serversMap[s.IpAddress.String()] = false + } + + patroniHealthCheck := false + for _, s := range clusterServers { + if ctx.Err() != nil { + return + } + + clusterInfo, err := sw.patroniCli.GetClusterInfo(ctx, s.IpAddress.String()) + if err != nil { + localLog.Debug().Err(err).Msg("failed to get patroni info") + + continue + } + localLog.Trace().Any("cluster_info", &clusterInfo).Msg("got cluster info") + patroniHealthCheck = true + + const ( + stateRunning = "running" + stateStreaming = "streaming" + ) + healthyServers := int32(0) + + for _, serverInfo := range clusterInfo.Members { + var lag *int64 + switch l := serverInfo.Lag.(type) { + case int64: + lag = &l + case uint64: + lag = pointy.Int64(int64(l)) + case int8: + lag = pointy.Int64(int64(l)) + case uint8: + lag = pointy.Int64(int64(l)) + case int16: + lag = pointy.Int64(int64(l)) + case uint16: + lag = pointy.Int64(int64(l)) + case int: + lag = pointy.Int64(int64(l)) + case uint: + lag = pointy.Int64(int64(l)) + case int32: + lag = pointy.Int64(int64(l)) + case uint32: + lag = 
pointy.Int64(int64(l)) + case float64: + lag = pointy.Int64(int64(l)) + default: + localLog.Trace().Type("lag_type", l).Msg("unknown lag type") + } + updatedServer, err := sw.db.UpdateServer(ctx, &storage.UpdateServerReq{ + ClusterID: cl.ID, + IpAddress: serverInfo.Host, + Name: serverInfo.Name, + Role: &serverInfo.Role, + Status: &serverInfo.State, + Timeline: &serverInfo.Timeline, + Lag: lag, + Tags: &serverInfo.Tags, + PendingRestart: &serverInfo.PendingRestart, + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to update server") + } else { + localLog.Trace().Any("server", updatedServer).Msg("server was updated") + serversMap[serverInfo.Host] = true + } + if serverInfo.State == stateRunning || serverInfo.State == stateStreaming { + healthyServers++ + } + } + var status string + if len(clusterInfo.Members) < int(cl.ServersCount) { + status = storage.ClusterStatusDegraded + } else if healthyServers < cl.ServersCount { + status = storage.ClusterStatusUnhealthy + } else { + status = storage.ClusterStatusHealthy + } + _, err = sw.db.UpdateCluster(ctx, &storage.UpdateClusterReq{ + ID: cl.ID, + Status: &status, + Flags: storage.SetPatroniConnectStatus(cl.Flags, 1), + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to update cluster status") + } + break + } + if !patroniHealthCheck && storage.GetPatroniConnectStatus(cl.Flags) == 1 { + _, err := sw.db.UpdateCluster(ctx, &storage.UpdateClusterReq{ + ID: cl.ID, + Status: pointy.String(storage.ClusterStatusUnavailable), + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to update cluster status") + } + } + + for ipAddress, updated := range serversMap { + if !updated { + updatedServer, err := sw.db.ResetServer(ctx, cl.ID, ipAddress) + if err != nil { + localLog.Error().Err(err).Msg("failed to update unknown server") + } else { + localLog.Trace().Any("server", updatedServer).Msg("unknown server was updated") + } + } + } +} diff --git a/console/service/internal/watcher/consts.go b/console/service/internal/watcher/consts.go new file mode 100644 index 000000000..ce1856a17 --- /dev/null +++ b/console/service/internal/watcher/consts.go @@ -0,0 +1,10 @@ +package watcher + +const ( + ContainerStatusExited = "exited" + ContainerStatusRemoving = "removing" + ContainerStatusDead = "dead" + + LogFieldSystemInfo = "System info" + LogFieldConnectionInfo = "deploy_finish : Connection info" +) diff --git a/console/service/internal/watcher/log_collector.go b/console/service/internal/watcher/log_collector.go new file mode 100644 index 000000000..14c0bbb2b --- /dev/null +++ b/console/service/internal/watcher/log_collector.go @@ -0,0 +1,94 @@ +package watcher + +import ( + "context" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/pkg/tracer" + "sync" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +type LogCollector interface { + StoreInDb(operationID int64, dockerCode xdocker.InstanceID, cid string) + PrintToConsole(dockerCode xdocker.InstanceID, cid string) + Stop() +} + +type logCollector struct { + db storage.IStorage + dockerManager xdocker.IManager + isRun bool + log zerolog.Logger + + ctx context.Context + done context.CancelFunc + wg sync.WaitGroup +} + +func NewLogCollector(db storage.IStorage, dockerManager xdocker.IManager) LogCollector { + lc := &logCollector{ + db: db, + dockerManager: dockerManager, + log: log.Logger.With().Str("module", "log_collector").Logger(), + } + lc.ctx, lc.done = 
diff --git a/console/service/internal/watcher/consts.go b/console/service/internal/watcher/consts.go new file mode 100644 index 000000000..ce1856a17 --- /dev/null +++ b/console/service/internal/watcher/consts.go @@ -0,0 +1,10 @@ +package watcher + +const ( + ContainerStatusExited = "exited" + ContainerStatusRemoving = "removing" + ContainerStatusDead = "dead" + + LogFieldSystemInfo = "System info" + LogFieldConnectionInfo = "deploy_finish : Connection info" +) diff --git a/console/service/internal/watcher/log_collector.go b/console/service/internal/watcher/log_collector.go new file mode 100644 index 000000000..14c0bbb2b --- /dev/null +++ b/console/service/internal/watcher/log_collector.go @@ -0,0 +1,94 @@ +package watcher + +import ( + "context" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/pkg/tracer" + "sync" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +type LogCollector interface { + StoreInDb(operationID int64, dockerCode xdocker.InstanceID, cid string) + PrintToConsole(dockerCode xdocker.InstanceID, cid string) + Stop() +} + +type logCollector struct { + db storage.IStorage + dockerManager xdocker.IManager + isRun bool + log zerolog.Logger + + ctx context.Context + done context.CancelFunc + wg sync.WaitGroup +} + +func NewLogCollector(db storage.IStorage, dockerManager xdocker.IManager) LogCollector { + lc := &logCollector{ + db: db, + dockerManager: dockerManager, + log: log.Logger.With().Str("module", "log_collector").Logger(), + } + lc.ctx, lc.done = context.WithCancel(context.Background()) + + return lc +} + +func (lc *logCollector) StoreInDb(operationID int64, dockerCode xdocker.InstanceID, cid string) { + lc.wg.Add(1) + go func() { + // deferred first, so the WaitGroup is released even if the worker panics + defer func() { + lc.wg.Done() + lc.log.Debug().Str("cid", cid).Int64("operation_id", operationID).Msg("finished") + }() + lc.log.Debug().Str("cid", cid).Int64("operation_id", operationID).Msg("log collector started") + lc.storeLogsFromContainer(operationID, dockerCode, cid) + }() +} + +func (lc *logCollector) PrintToConsole(dockerCode xdocker.InstanceID, cid string) { + lc.wg.Add(1) + go func() { + // deferred first, so the WaitGroup is released even if the worker panics + defer func() { + lc.wg.Done() + lc.log.Debug().Str("cid", cid).Msg("finished") + }() + lc.log.Debug().Str("cid", cid).Msg("log collector started") + lc.printLogsFromContainer(dockerCode, cid) + }() +} + +func (lc *logCollector) Stop() { + lc.log.Info().Msg("stopping") + lc.done() + lc.wg.Wait() + lc.log.Info().Msg("stopped") +} + +func (lc *logCollector) storeLogsFromContainer(operationID int64, dockerCode xdocker.InstanceID, cid string) { + ctx := context.WithValue(lc.ctx, tracer.CtxCidKey{}, cid) + lc.log.Trace().Msg("storeLogsFromContainer called") + lc.dockerManager.StoreContainerLogs(ctx, dockerCode, func(logMessage string) { + lc.log.Trace().Str("cid", cid).Str("proc", "storeLogsFromContainer").Msg(logMessage) + _, err := lc.db.UpdateOperation(ctx, &storage.UpdateOperationReq{ + ID: operationID, + Logs: &logMessage, + }) + if err != nil { + lc.log.Error().Err(err).Int64("operation_id", operationID).Msg("failed to update log") + } + }) +} + +func (lc *logCollector) printLogsFromContainer(dockerCode xdocker.InstanceID, cid string) { + ctx := context.WithValue(lc.ctx, tracer.CtxCidKey{}, cid) + lc.log.Trace().Msg("printLogsFromContainer called") + lc.dockerManager.StoreContainerLogs(ctx, dockerCode, func(logMessage string) { + lc.log.Trace().Str("cid", cid).Msg(logMessage) + }) +}
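The collector's shutdown contract is the part worth internalizing: every goroutine registers with the WaitGroup before it starts, defers `Done` so a panic cannot strand `Stop()`, and watches a shared cancelable context. A stripped-down sketch of that lifecycle, assuming only the standard library (`collector` and the tick loop are illustrative):

```
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type collector struct {
	ctx  context.Context
	stop context.CancelFunc
	wg   sync.WaitGroup
}

func newCollector() *collector {
	c := &collector{}
	c.ctx, c.stop = context.WithCancel(context.Background())
	return c
}

func (c *collector) Collect(id int) {
	c.wg.Add(1) // register before the goroutine starts
	go func() {
		defer c.wg.Done() // deferred first so a panic still releases the group
		for {
			select {
			case <-c.ctx.Done():
				return
			case <-time.After(10 * time.Millisecond):
				fmt.Println("worker", id, "tick")
			}
		}
	}()
}

// Stop cancels the shared context, then blocks until all workers drain.
func (c *collector) Stop() {
	c.stop()
	c.wg.Wait()
}

func main() {
	c := newCollector()
	c.Collect(1)
	c.Collect(2)
	time.Sleep(30 * time.Millisecond)
	c.Stop()
}
```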
diff --git a/console/service/internal/watcher/log_watcher.go b/console/service/internal/watcher/log_watcher.go new file mode 100644 index 000000000..7827a1c0d --- /dev/null +++ b/console/service/internal/watcher/log_watcher.go @@ -0,0 +1,210 @@ +package watcher + +import ( + "context" + "encoding/json" + "os" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/pkg/tracer" + "sync" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +type LogWatcher interface { + Run() + Stop() +} + +type logWatcher struct { + db storage.IStorage + dockerManager xdocker.IManager + isRun bool + log zerolog.Logger + cfg *configuration.Config + + ctx context.Context + done context.CancelFunc + wg sync.WaitGroup +} + +func NewLogWatcher(db storage.IStorage, dockerManager xdocker.IManager, cfg *configuration.Config) LogWatcher { + return &logWatcher{ + db: db, + dockerManager: dockerManager, + cfg: cfg, + log: log.Logger.With().Str("module", "log_watcher").Logger(), + } +} + +func (lw *logWatcher) Run() { + if lw.isRun { + return + } + lw.isRun = true + + lw.ctx, lw.done = context.WithCancel(context.Background()) + lw.wg.Add(1) + go func() { + lw.loop() + lw.wg.Done() + }() + lw.log.Info().Msg("run") +} + +func (lw *logWatcher) Stop() { + lw.log.Info().Msg("stopping") + lw.done() + lw.wg.Wait() + lw.isRun = false + lw.log.Info().Msg("stopped") +} + +func (lw *logWatcher) loop() { + timer := time.NewTimer(lw.cfg.LogWatcher.RunEvery) + defer timer.Stop() + + for { + select { + case <-lw.ctx.Done(): + lw.log.Info().Msg("loop is done") + + return + case <-timer.C: + lw.doWork() + timer.Reset(lw.cfg.LogWatcher.RunEvery) + } + } +} + +func (lw *logWatcher) doWork() { + lw.log.Debug().Msg("starting to collect info about operations performed on clusters") + operations, err := lw.db.GetInProgressOperations(lw.ctx, time.Now().Add(-lw.cfg.LogWatcher.AnalyzePast)) + if err != nil { + lw.log.Error().Err(err).Msg("failed to get in_progress operations") + + return + } + for _, op := range operations { + localLog := lw.log.With().Str("cid", op.Cid).Int64("operation_id", op.ID).Logger() + localLog.Trace().Msg("starting to collect info") + + opCtx := context.WithValue(lw.ctx, tracer.CtxCidKey{}, op.Cid) + containerStatus, err := lw.dockerManager.GetStatus(opCtx, xdocker.InstanceID(op.DockerCode)) + if err != nil { + localLog.Error().Err(err).Msg("failed to get container status") + continue + } + localLog.Trace().Str("container_status", containerStatus).Msg("got container status") + switch containerStatus { + case ContainerStatusExited, ContainerStatusDead, ContainerStatusRemoving: + lw.collectContainerLog(opCtx, &op, localLog) + err = lw.dockerManager.RemoveContainer(opCtx, xdocker.InstanceID(op.DockerCode)) + if err != nil { + localLog.Error().Err(err).Msg("failed to remove container") + } + default: + localLog.Trace().Msg("skipped") + } + } +} + +func (lw *logWatcher) collectContainerLog(ctx context.Context, op *storage.Operation, log zerolog.Logger) { + clusterInfo, err := lw.db.GetCluster(ctx, op.ClusterID) + if err != nil { + log.Error().Err(err).Msg("failed to get cluster from db") + + return + } + + fileLog := lw.cfg.Docker.LogDir + "/" + clusterInfo.Name + ".json" + fLog, err := os.Open(fileLog) + if err != nil { + log.Error().Err(err).Str("file_name", fileLog).Msg("can't open file with log") + + return + } + defer func() { + _ = fLog.Close() // do not leak the file handle + }() + + var logs []LogEntity + jsonDec := json.NewDecoder(fLog) + err = jsonDec.Decode(&logs) + if err != nil { + log.Error().Err(err).Msg("failed to decode file log") + + return + } + + var status string + for _, logEntity := range logs { + switch logEntity.Task { + case LogFieldSystemInfo: + var serverInfo SystemInfo + err = mapstructure.Decode(logEntity.Msg, &serverInfo) + if err != nil { + log.Error().Err(err).Any("msg", logEntity.Msg).Msg("failed to decode system_info") + continue + } + + createdServer, err := lw.db.CreateServer(ctx, &storage.CreateServerReq{ + ClusterID: clusterInfo.ID, + ServerName: serverInfo.ServerName, + ServerLocation: serverInfo.ServerLocation, + IpAddress: serverInfo.IpAddress, + }) + if err != nil { + log.Error().Err(err).Msg("failed to store server to db") + + continue + } + log.Trace().Any("server", createdServer).Msg("server was created") + case LogFieldConnectionInfo: + _, err := lw.db.UpdateCluster(ctx, &storage.UpdateClusterReq{ + ID: op.ClusterID, + ConnectionInfo: logEntity.Msg, + }) + if err != nil { + log.Error().Err(err).Msg("failed to update cluster") + + continue + } + } + if logEntity.Summary != nil { + status = logEntity.Status + } + } + if len(status) == 0 { + log.Warn().Msg("summary not found in logs") + + status = storage.OperationStatusFailed + } + updatedOperation, err := lw.db.UpdateOperation(ctx, &storage.UpdateOperationReq{ + ID: op.ID, + Status: &status, + }) + if err != nil { + log.Error().Err(err).Msg("failed to update operation status in db") + } else { + log.Trace().Any("operation", updatedOperation).Msg("operation was updated in db") + } + + // set cluster status + if status == storage.OperationStatusFailed 
{ + status = storage.ClusterStatusFailed + } else { + status = storage.ClusterStatusReady + } + updatedCluster, err := lw.db.UpdateCluster(ctx, &storage.UpdateClusterReq{ + ID: op.ClusterID, + Status: &status, + }) + if err != nil { + log.Error().Err(err).Msg("failed to update cluster status in db") + } else { + log.Trace().Any("cluster", updatedCluster).Msg("cluster was updated in db") + } +} diff --git a/console/service/internal/watcher/models.go b/console/service/internal/watcher/models.go new file mode 100644 index 000000000..e20cce630 --- /dev/null +++ b/console/service/internal/watcher/models.go @@ -0,0 +1,15 @@ +package watcher + +type LogEntity struct { + Task string `json:"task"` + Failed bool `json:"failed"` + Msg interface{} `json:"msg"` + Summary interface{} `json:"summary,omitempty"` + Status string `json:"status"` +} + +type SystemInfo struct { + ServerLocation *string `json:"server_location,omitempty" mapstructure:"server_location"` + ServerName string `json:"server_name" mapstructure:"server_name"` + IpAddress string `json:"ip_address" mapstructure:"ip_address"` +} diff --git a/console/service/internal/xdocker/images.go b/console/service/internal/xdocker/images.go new file mode 100644 index 000000000..0ab09496c --- /dev/null +++ b/console/service/internal/xdocker/images.go @@ -0,0 +1,7 @@ +package xdocker + +const ( + playbookCreateCluster = "deploy_pgcluster.yml" + + entryPoint = "ansible-playbook" +) diff --git a/console/service/internal/xdocker/imanager.go b/console/service/internal/xdocker/imanager.go new file mode 100644 index 000000000..0c609e0ea --- /dev/null +++ b/console/service/internal/xdocker/imanager.go @@ -0,0 +1,23 @@ +package xdocker + +import "context" + +type InstanceID string +type ManageClusterConfig struct { + Envs []string + ExtraVars []string + Mounts []Mount +} + +type Mount struct { + DockerPath string + HostPath string +} + +type IManager interface { + ManageCluster(ctx context.Context, req *ManageClusterConfig) (InstanceID, error) + GetStatus(ctx context.Context, id InstanceID) (string, error) + StoreContainerLogs(ctx context.Context, id InstanceID, store func(logMessage string)) + PreloadImage(ctx context.Context) + RemoveContainer(ctx context.Context, id InstanceID) error +} diff --git a/console/service/internal/xdocker/manager.go b/console/service/internal/xdocker/manager.go new file mode 100644 index 000000000..470a06fc8 --- /dev/null +++ b/console/service/internal/xdocker/manager.go @@ -0,0 +1,159 @@ +package xdocker + +import ( + "bufio" + "context" + "net/http" + "postgresql-cluster-console/pkg/tracer" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/client" + "github.com/goombaio/namegenerator" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +type dockerManager struct { + cli *client.Client + log zerolog.Logger + image string +} + +func NewDockerManager(host string, image string) (IManager, error) { + var rt http.RoundTripper + rt, err := NewRoundTripperLog(host, log.Logger.With().Str("module", "docker_client").Logger()) + if err != nil { + return nil, err + } + cli, err := client.NewClientWithOpts( + client.WithHost(host), + client.WithHTTPClient(&http.Client{ + Transport: rt, + }), + client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + + return &dockerManager{ + cli: cli, + log: log.Logger.With().Str("module", "docker_manager").Logger(), + image: image, + }, nil +} + +func (m *dockerManager) ManageCluster(ctx 
context.Context, config *ManageClusterConfig) (InstanceID, error) { + localLog := m.log.With().Str("cid", ctx.Value(tracer.CtxCidKey{}).(string)).Logger() + err := m.pullImage(ctx, m.image) + if err != nil { + return "", err + } + + resp, err := m.cli.ContainerCreate(ctx, + &container.Config{ + Image: m.image, + Tty: true, + Env: config.Envs, + Cmd: func() []string { + cmd := []string{entryPoint, playbookCreateCluster} + for _, vars := range config.ExtraVars { + cmd = append(cmd, "--extra-vars", vars) + } + + return cmd + }(), + Entrypoint: nil, + }, &container.HostConfig{ + NetworkMode: "host", + Mounts: func() []mount.Mount { + var mounts []mount.Mount + for _, mountPath := range config.Mounts { + mounts = append(mounts, mount.Mount{ + Type: "bind", + Source: mountPath.HostPath, + Target: mountPath.DockerPath, + }) + } + + return mounts + }(), + }, nil, nil, namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()).Generate()) + + if err != nil { + return "", err + } + + localLog.Trace().Str("id", resp.ID).Msg("container was created") + if len(resp.Warnings) != 0 { + localLog.Warn().Strs("warnings", resp.Warnings).Msg("warnings during container creation") + } + + err = m.cli.ContainerStart(ctx, resp.ID, container.StartOptions{}) + if err != nil { + errRem := m.cli.ContainerRemove(ctx, resp.ID, container.RemoveOptions{}) + if errRem != nil { + localLog.Error().Err(errRem).Msg("failed to remove container after error on start") + } + + return "", err + } + + return InstanceID(resp.ID), nil +} + +func (m *dockerManager) PreloadImage(ctx context.Context) { + _ = m.pullImage(ctx, m.image) +} + +func (m *dockerManager) GetStatus(ctx context.Context, id InstanceID) (string, error) { + inspectRes, err := m.cli.ContainerInspect(ctx, string(id)) + if err != nil { + return "", err + } + + return inspectRes.State.Status, nil +} + +func (m *dockerManager) StoreContainerLogs(ctx context.Context, id InstanceID, store func(logMessage string)) { + localLog := m.log.With().Str("cid", ctx.Value(tracer.CtxCidKey{}).(string)).Logger() + localLog.Trace().Msg("StoreContainerLogs called") + hijackedCon, err := m.cli.ContainerAttach(ctx, string(id), container.AttachOptions{ + Stream: true, + Stdin: false, + Stdout: true, + Stderr: true, + DetachKeys: "", + Logs: true, + }) + if err != nil { + localLog.Error().Err(err).Msg("failed to get container logs") + + return + } + localLog.Trace().Msg("got container logs") + defer func() { + hijackedCon.Close() + }() + + scanner := bufio.NewScanner(hijackedCon.Reader) + localLog.Trace().Msg("starting to scan logs") + for { + if ctx.Err() != nil { + localLog.Error().Err(ctx.Err()).Msg("ctx error") + break + } + if !scanner.Scan() { + localLog.Trace().Err(scanner.Err()).Msg("scanner scan returned false") + break + } + s := scanner.Text() + + store(s) + } +} + +func (m *dockerManager) RemoveContainer(ctx context.Context, id InstanceID) error { + return m.cli.ContainerRemove(ctx, string(id), container.RemoveOptions{}) +}
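One caveat with `StoreContainerLogs` above: `bufio.Scanner` aborts with `bufio.ErrTooLong` once a single line exceeds its token limit (64 KiB by default), which verbose `ansible-playbook` output can plausibly exceed. If that ever bites, the standard remedy is `Scanner.Buffer`; a small sketch with a stand-in reader (the buffer sizes are illustrative):

```
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the attached container stream.
	r := strings.NewReader("line one\nline two\n")

	scanner := bufio.NewScanner(r)
	// Start with a 64 KiB buffer and allow single lines up to 1 MiB.
	scanner.Buffer(make([]byte, 64*1024), 1024*1024)

	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```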
diff --git a/console/service/internal/xdocker/manager_utils.go b/console/service/internal/xdocker/manager_utils.go new file mode 100644 index 000000000..50e265555 --- /dev/null +++ b/console/service/internal/xdocker/manager_utils.go @@ -0,0 +1,44 @@ +package xdocker + +import ( + "context" + "io" + "postgresql-cluster-console/pkg/tracer" + "strings" + + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/errdefs" +) + +func (m *dockerManager) pullImage(ctx context.Context, dockerImage string) error { + localLog := m.log.With().Str("cid", ctx.Value(tracer.CtxCidKey{}).(string)).Logger() + inspectRes, _, err := m.cli.ImageInspectWithRaw(ctx, dockerImage) + if err != nil { + if !errdefs.IsNotFound(err) { + localLog.Error().Err(err).Msg("failed to inspect docker image") + + return err + } + } + if err == nil && inspectRes.ID != "" { + return nil // image already exists locally + } + out, err := m.cli.ImagePull(ctx, dockerImage, image.PullOptions{}) + if err != nil { + localLog.Error().Err(err).Str("docker_image", dockerImage).Msg("failed to pull docker image") + + return err + } + defer func() { + err = out.Close() + if err != nil { + localLog.Warn().Err(err).Msg("failed to close image_pull output") + } + }() + + buf := strings.Builder{} + _, _ = io.Copy(&buf, out) + localLog.Trace().Str("log", buf.String()).Msg("pull image") + + return nil +} diff --git a/console/service/internal/xdocker/round_tripper_log.go b/console/service/internal/xdocker/round_tripper_log.go new file mode 100644 index 000000000..8e761b2fd --- /dev/null +++ b/console/service/internal/xdocker/round_tripper_log.go @@ -0,0 +1,101 @@ +package xdocker + +import ( + "bytes" + "io" + "net/http" + "postgresql-cluster-console/pkg/tracer" + + "github.com/docker/docker/client" + "github.com/docker/go-connections/sockets" + "github.com/rs/zerolog" +) + +type roundTripperLog struct { + http.Transport + log zerolog.Logger +} + +func NewRoundTripperLog(host string, log zerolog.Logger) (http.RoundTripper, error) { + rt := &roundTripperLog{ + log: log, + } + + hostURL, err := client.ParseHostURL(host) + if err != nil { + return nil, err + } + + err = sockets.ConfigureTransport(&rt.Transport, hostURL.Scheme, hostURL.Host) + if err != nil { + return nil, err + } + + return rt, nil +} + +func (rt *roundTripperLog) RoundTrip(request *http.Request) (*http.Response, error) { + var ( + copyBody io.ReadCloser + err error + ) + localLog := rt.log.With().Str("cid", request.Context().Value(tracer.CtxCidKey{}).(string)).Logger() + if request.Body != nil { + copyBody, err = request.GetBody() + if err != nil { + localLog.Error().Err(err).Msg("failed to GetBody") + } else { + defer func() { + err = copyBody.Close() + if err != nil { + localLog.Error().Err(err).Msg("failed to close copy of body") + } + }() + body, err := io.ReadAll(copyBody) + if err != nil { + localLog.Error().Err(err).Msg("failed to ReadAll request body") + } else { + localLog.Trace().Str("url", request.URL.Path).Str("host", request.URL.Host).Str("method", request.Method).Str("body", string(body)).Msg("request body") + } + } + } else { + localLog.Trace().Str("url", request.URL.Path).Str("host", request.URL.Host).Str("method", request.Method).Msg("request") + } + + res, err := rt.Transport.RoundTrip(request) + if err != nil { + localLog.Error().Err(err).Msg("failed to RoundTrip") + } else { + var respBody io.ReadCloser + respBody, res.Body, err = drainBody(res.Body) + if err != nil { + localLog.Error().Err(err).Msg("failed to drain body") + } else { + defer func() { + err = respBody.Close() + if err != nil { + localLog.Error().Err(err).Msg("failed to close response body") + } + }() + body, err := io.ReadAll(respBody) + if err != nil { + localLog.Error().Err(err).Msg("failed to ReadAll response body") + } else { + localLog.Trace().Str("url", request.URL.Path).Str("host", request.URL.Host).Str("body", string(body)).Msg("response body") + } + } + } + + return res, err +} + +func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { + var buf bytes.Buffer + if _, err = buf.ReadFrom(b); err != nil { + return 
nil, b, err + } + if err = b.Close(); err != nil { + return nil, b, err + } + return io.NopCloser(&buf), io.NopCloser(bytes.NewReader(buf.Bytes())), nil +} diff --git a/console/service/main.go b/console/service/main.go new file mode 100644 index 000000000..4dd6bfe72 --- /dev/null +++ b/console/service/main.go @@ -0,0 +1,111 @@ +package main + +import ( + "context" + _ "embed" + "fmt" + "os" + "postgresql-cluster-console/internal/configuration" + "postgresql-cluster-console/internal/db" + "postgresql-cluster-console/internal/service" + "postgresql-cluster-console/internal/storage" + "postgresql-cluster-console/internal/watcher" + "postgresql-cluster-console/internal/xdocker" + "postgresql-cluster-console/migrations" + "postgresql-cluster-console/pkg/patroni" + "postgresql-cluster-console/pkg/tracer" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +//go:embed VERSION +var Version string + +const appName = "pg_console" + +func init() { + log.Logger = zerolog.New(os.Stdout).With(). + Timestamp(). + Str("app", appName). + Str("version", Version). + Logger() +} + +func main() { + if len(os.Args) > 1 { + configuration.PrintUsage() + + return + } + + cfg, err := configuration.ReadConfig() + if err != nil { + fmt.Print(err.Error()) + + return + } + log.Info().Interface("config", cfg).Msg("config was parsed") + + l, err := zerolog.ParseLevel(cfg.Logger.Level) + if err != nil { + log.Error().Str("log_level", cfg.Logger.Level).Msg("unknown log level") + } else { + zerolog.SetGlobalLevel(l) + log.Info().Str("log_level", cfg.Logger.Level).Msg("log level was set") + } + + dbPool, err := db.NewDbPool(cfg) + if err != nil { + log.Error().Err(err).Msg("failed to create db pool") + + return + } + + err = migrations.Migrate(dbPool, cfg.Db.MigrationDir) + if err != nil { + log.Error().Err(err).Msg("failed to make db migration") + + return + } + + str := storage.NewDbStorage(dbPool) + dockerManager, err := xdocker.NewDockerManager(cfg.Docker.Host, cfg.Docker.Image) + if err != nil { + log.Error().Err(err).Msg("failed to create docker manager") + + return + } + + ctx, cancel := context.WithCancel(context.WithValue(context.Background(), tracer.CtxCidKey{}, "")) + go func() { + log.Info().Msgf("preload docker image: %s", cfg.Docker.Image) + dockerManager.PreloadImage(ctx) + }() + defer cancel() + + logWatcher := watcher.NewLogWatcher(str, dockerManager, cfg) + logWatcher.Run() + defer logWatcher.Stop() + + logAggregator := watcher.NewLogCollector(str, dockerManager) + defer logAggregator.Stop() + + clusterWatcher := watcher.NewServerWatcher(str, patroni.NewClient(log.Logger), cfg) + clusterWatcher.Run() + defer clusterWatcher.Stop() + + s, err := service.NewService(cfg, Version, str, dockerManager, logAggregator, clusterWatcher) + if err != nil { + log.Error().Err(err).Msg("failed to create service") + + return + } + + err = s.Serve() + if err != nil { + log.Error().Err(err).Msg("service was finished with error") + } else { + log.Info().Msg("service was successfully stopped") + } +} diff --git a/console/service/middleware/authorization.go b/console/service/middleware/authorization.go new file mode 100644 index 000000000..cea51bc5a --- /dev/null +++ b/console/service/middleware/authorization.go @@ -0,0 +1,34 @@ +package middleware + +import ( + "encoding/json" + "fmt" + "net/http" + "postgresql-cluster-console/models" + "strings" +) + +func Authorization(token string, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + const ( + headerName = 
"Authorization" + schemeName = "Bearer" + ) + tokenVal := r.Header.Get(headerName) + tokenValSplit := strings.Split(tokenVal, " ") + if len(tokenValSplit) != 2 || tokenValSplit[0] != schemeName || tokenValSplit[1] != token { + w.Header().Add("content-type", "application/json") + w.WriteHeader(http.StatusUnauthorized) + resp, _ := json.Marshal(&models.ResponseError{ + Code: http.StatusUnauthorized, + Description: fmt.Sprintf("token [%s] invalid", tokenVal), + Title: "Invalid token", + }) + _, _ = w.Write(resp) + + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/console/service/middleware/cid.go b/console/service/middleware/cid.go new file mode 100644 index 000000000..7ca07de06 --- /dev/null +++ b/console/service/middleware/cid.go @@ -0,0 +1,30 @@ +package middleware + +import ( + "context" + "net/http" + "postgresql-cluster-console/pkg/tracer" + + "github.com/google/uuid" +) + +func SetCorrelationId(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cid := getCid(r) + if r.Header.Get(XCorrID) == "" { + r.Header.Set(XCorrID, cid) + } + + ctx := context.WithValue(r.Context(), tracer.CtxCidKey{}, cid) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getCid(r *http.Request) string { + cid := r.Header.Get(XCorrID) + if cid != "" { + return cid + } + + return uuid.New().String() +} diff --git a/console/service/middleware/cors.go b/console/service/middleware/cors.go new file mode 100644 index 000000000..82d415ade --- /dev/null +++ b/console/service/middleware/cors.go @@ -0,0 +1,19 @@ +package middleware + +import "net/http" + +func CORS(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + w.Header().Set("Access-Control-Allow-Credentials", "true") + w.Header().Set("Access-Control-Allow-Methods", " GET, POST, OPTIONS, PATCH, DELETE, PUT") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Expose-Headers", "X-Log-Completed, X-Cluster-Id") + w.Header().Set("Access-Control-Allow-Headers", "Authorization, Access-Control-Allow-Origin, Access-Control-Allow-Headers, Origin,Accept, "+ + "X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers, X-Log-Completed, X-Cluster-Id") + + if r.Method != http.MethodOptions { + next.ServeHTTP(w, r) + } + }) +} diff --git a/console/service/middleware/request_log.go b/console/service/middleware/request_log.go new file mode 100644 index 000000000..2d0d52eef --- /dev/null +++ b/console/service/middleware/request_log.go @@ -0,0 +1,166 @@ +package middleware + +import ( + "bytes" + "encoding/json" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "io" + "net/http" + "time" +) + +const XCorrID = "X-Correlation-Id" + +const responseWriterBodyLimit = 10000 + +type responseWriter struct { + http.ResponseWriter + body []byte + statusCode int + bodyOverflow bool +} + +func GetResponseWriterCode(w http.ResponseWriter) int { + if wNew, ok := w.(*responseWriter); ok { + return wNew.statusCode + } + return 0 +} + +func (r *responseWriter) Write(b []byte) (int, error) { + if len(r.body) < responseWriterBodyLimit { + maxWriteLen := responseWriterBodyLimit - len(r.body) + if len(b) > maxWriteLen { + r.body = append(r.body, b[:maxWriteLen]...) + r.bodyOverflow = true + } else { + r.body = append(r.body, b...) 
diff --git a/console/service/middleware/request_log.go b/console/service/middleware/request_log.go new file mode 100644 index 000000000..2d0d52eef --- /dev/null +++ b/console/service/middleware/request_log.go @@ -0,0 +1,166 @@ +package middleware + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +const XCorrID = "X-Correlation-Id" + +const responseWriterBodyLimit = 10000 + +type responseWriter struct { + http.ResponseWriter + body []byte + statusCode int + bodyOverflow bool +} + +func GetResponseWriterCode(w http.ResponseWriter) int { + if wNew, ok := w.(*responseWriter); ok { + return wNew.statusCode + } + return 0 +} + +func (r *responseWriter) Write(b []byte) (int, error) { + if len(r.body) < responseWriterBodyLimit { + maxWriteLen := responseWriterBodyLimit - len(r.body) + if len(b) > maxWriteLen { + r.body = append(r.body, b[:maxWriteLen]...) + r.bodyOverflow = true + } else { + r.body = append(r.body, b...) + } + } + + return r.ResponseWriter.Write(b) +} + +func (r *responseWriter) WriteHeader(statusCode int) { + r.statusCode = statusCode + r.ResponseWriter.WriteHeader(statusCode) +} + +func (r *responseWriter) zerologResponse(log zerolog.Logger) { + body := r.prepareBodyForLog() + + log.Debug(). + Any("body", body). + Any("headers", r.Header()). + Int("status", r.getStatusCode()).Msg("[zerologResponse] Response was sent") +} + +func (r *responseWriter) getStatusCode() int { + if r.statusCode == 0 { + return 200 + } + + return r.statusCode +} + +func (r *responseWriter) prepareBodyForLog() any { + var body map[string]interface{} + if r.bodyOverflow { + // truncated bodies are returned as raw bytes and skip secret masking + return r.body + } + _ = json.Unmarshal(r.body, &body) + replaceFields(body, secretFields) + + return body +} + +var secretFields = map[string]string{ + "AWS_SECRET_ACCESS_KEY": "***", + "GCP_SERVICE_ACCOUNT_CONTENTS": "***", + "HCLOUD_API_TOKEN": "***", + "SSH_PRIVATE_KEY": "***", + "DO_API_TOKEN": "***", + "PASSWORD": "***", + "password": "***", + "AZURE_SECRET": "***", +} + +func RequestZeroLog(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cid := r.Header.Get(XCorrID) + clog := log.With(). + Str("cid", cid). + Str("method", r.Method). + Str("path", r.URL.String()). + Str("protocol", r.Proto). + Int64("request_length", r.ContentLength). + Logger() + + var ( + body []byte + bodyInt map[string]interface{} + bodyCopy io.ReadCloser + err error + ) + + if r.Body != nil && r.Body != http.NoBody { + bodyCopy, r.Body, err = drainBody(r.Body) + if err == nil { + body, err = io.ReadAll(bodyCopy) + if err != nil { + clog.Error().Err(err).Msg("[RequestZeroLog] read body error") + } + } else { + clog.Error().Err(err).Msg("[RequestZeroLog] drainBody failed") + } + } + + err = json.Unmarshal(body, &bodyInt) + if err != nil { + clog.Debug(). + Any("headers", r.Header). + Any("query", r.URL.Query()). + Bytes("body", body). + Msg("[RequestLog] request accepted") + } else { + replaceFields(bodyInt, secretFields) + clog.Debug(). + Any("headers", r.Header). + Any("query", r.URL.Query()). + Any("body", bodyInt). + Msg("[RequestLog] request accepted") + } + + w.Header().Set(XCorrID, cid) + + start := time.Now() + wExt := &responseWriter{ResponseWriter: w} + next.ServeHTTP(wExt, r) + duration := time.Since(start) + + wExt.zerologResponse(clog) + + clog.Debug(). + Int("status", wExt.getStatusCode()). + Dur("handle_time", duration). // total handling time (cf. nginx request_time) + Int("response_length", len(wExt.body)). + Msg("[RequestLog] request completed") + }) +} + +// drainBody reads all of b to memory and then returns two equivalent +// ReadClosers yielding the same bytes. +// +// It returns an error if the initial slurp of all bytes fails. It does not attempt +// to make the returned ReadClosers have identical error-matching behavior. +func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { + var buf bytes.Buffer + if _, err = buf.ReadFrom(b); err != nil { + return nil, b, err + } + if err = b.Close(); err != nil { + return nil, b, err + } + return io.NopCloser(&buf), io.NopCloser(bytes.NewReader(buf.Bytes())), nil +} diff --git a/console/service/middleware/utils.go b/console/service/middleware/utils.go new file mode 100644 index 000000000..02170315e --- /dev/null +++ b/console/service/middleware/utils.go @@ -0,0 +1,33 @@ +package middleware + +import "reflect" + +func replaceFields(data map[string]interface{}, replacements map[string]string) { + for key, val := range data { + if replacement, ok := replacements[key]; ok { + data[key] = replacement + } else { + valReflect := reflect.ValueOf(val) + switch valReflect.Kind() { + case reflect.Map: + if innerMap, ok := val.(map[string]interface{}); ok { + replaceFields(innerMap, replacements) + } + case reflect.Slice: + for j := 0; j < valReflect.Len(); j++ { + elemVal := valReflect.Index(j) + if innerElemMap, ok := elemVal.Interface().(map[string]interface{}); ok { + replaceFields(innerElemMap, replacements) + } + } + case reflect.Ptr: + if !valReflect.IsNil() { + elem := valReflect.Elem().Interface() + if innerMap, ok := elem.(map[string]interface{}); ok { + replaceFields(innerMap, replacements) + } + } + } + } + } +}
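`replaceFields` mutates the decoded body in place and recurses through nested maps, slices, and pointers. A usage sketch, written as a test in the same `middleware` package so the unexported function is in scope (the payload is made up):

```
package middleware

import (
	"encoding/json"
	"testing"
)

func TestReplaceFieldsMasksNestedSecrets(t *testing.T) {
	var body map[string]interface{}
	if err := json.Unmarshal([]byte(`{"name":"demo","auth":{"password":"p@ss"}}`), &body); err != nil {
		t.Fatal(err)
	}

	replaceFields(body, map[string]string{"password": "***"})

	// The nested secret is masked in place.
	auth := body["auth"].(map[string]interface{})
	if auth["password"] != "***" {
		t.Fatalf("secret was not masked: %#v", body)
	}
	// Non-secret keys are left untouched.
	if body["name"] != "demo" {
		t.Fatalf("non-secret key was changed: %#v", body)
	}
}
```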
diff --git a/console/service/migrations/goose_logger.go b/console/service/migrations/goose_logger.go new file mode 100644 index 000000000..fac91c5c6 --- /dev/null +++ b/console/service/migrations/goose_logger.go @@ -0,0 +1,22 @@ +package migrations + +import ( + "github.com/pressly/goose/v3" + "github.com/rs/zerolog" +) + +type zeroLogAdapter struct { + log zerolog.Logger +} + +func NewZeroLogAdapter(log zerolog.Logger) goose.Logger { + return zeroLogAdapter{log: log.With().Str("module", "goose").Logger()} +} + +// NOTE: goose expects Fatalf to abort; this adapter only logs and leaves shutdown to the caller. +func (l zeroLogAdapter) Fatalf(format string, v ...interface{}) { + l.log.Error().Msgf(format, v...) +} + +func (l zeroLogAdapter) Printf(format string, v ...interface{}) { + l.log.Info().Msgf(format, v...) 
+} diff --git a/console/service/migrations/migrate.go b/console/service/migrations/migrate.go new file mode 100644 index 000000000..7dc85064c --- /dev/null +++ b/console/service/migrations/migrate.go @@ -0,0 +1,18 @@ +package migrations + +import ( + "context" + + "github.com/rs/zerolog/log" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/jackc/pgx/v5/stdlib" + "github.com/pressly/goose/v3" +) + +func Migrate(dbPool *pgxpool.Pool, migrationDir string) error { + db := stdlib.OpenDBFromPool(dbPool) + goose.SetLogger(NewZeroLogAdapter(log.Logger)) + + return goose.RunContext(context.Background(), "up", db, migrationDir) +} diff --git a/console/service/pkg/patroni/client.go b/console/service/pkg/patroni/client.go new file mode 100644 index 000000000..95c92d835 --- /dev/null +++ b/console/service/pkg/patroni/client.go @@ -0,0 +1,103 @@ +package patroni + +import ( + "context" + "encoding/json" + "io" + "net/http" + "postgresql-cluster-console/pkg/tracer" + "time" + + "github.com/rs/zerolog" +) + +type IClient interface { + GetMonitoringInfo(ctx context.Context, host string) (*MonitoringInfo, error) + GetClusterInfo(ctx context.Context, host string) (*ClusterInfo, error) +} + +type pClient struct { + log zerolog.Logger + httpClient *http.Client +} + +func NewClient(log zerolog.Logger) IClient { + return pClient{ + log: log, + httpClient: &http.Client{ + Timeout: time.Second, + }, + } +} + +func (c pClient) GetMonitoringInfo(ctx context.Context, host string) (*MonitoringInfo, error) { + cid := ctx.Value(tracer.CtxCidKey{}).(string) + localLog := c.log.With().Str("cid", cid).Logger() + url := "http://" + host + ":8008/patroni" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + localLog.Trace().Str("request", "GET "+url).Msg("call request") + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + derr := resp.Body.Close() + if derr != nil { + localLog.Error().Err(derr).Msg("failed to close body") + } + }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + localLog.Trace().Str("response", string(body)).Msg("got response") + + var monitoringInfo MonitoringInfo + err = json.Unmarshal(body, &monitoringInfo) + if err != nil { + return nil, err + } + + return &monitoringInfo, nil +} + +func (c pClient) GetClusterInfo(ctx context.Context, host string) (*ClusterInfo, error) { + cid := ctx.Value(tracer.CtxCidKey{}).(string) + localLog := c.log.With().Str("cid", cid).Logger() + url := "http://" + host + ":8008/cluster" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + localLog.Trace().Str("request", "GET "+url).Msg("call request") + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + derr := resp.Body.Close() + if derr != nil { + localLog.Error().Err(derr).Msg("failed to close body") + } + }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + localLog.Trace().Str("response", string(body)).Msg("got response") + + var clusterInfo ClusterInfo + err = json.Unmarshal(body, &clusterInfo) + if err != nil { + return nil, err + } + + return &clusterInfo, nil +} diff --git a/console/service/pkg/patroni/models.go b/console/service/pkg/patroni/models.go new file mode 100644 index 000000000..e06fb8057 --- /dev/null +++ b/console/service/pkg/patroni/models.go @@ -0,0 +1,20 @@ +package patroni + +type MonitoringInfo struct { + State string 
`json:"state"` + Role string `json:"role"` + ServerVersion int `json:"server_version"` +} + +type ClusterInfo struct { + Members []struct { + Name string `json:"name"` + Role string `json:"role"` + State string `json:"state"` + Host string `json:"host"` + Timeline int64 `json:"timeline"` + Lag interface{} `json:"lag"` + Tags interface{} `json:"tags"` + PendingRestart bool `json:"pending_restart"` + } `json:"members"` +} diff --git a/console/service/pkg/tracer/cid.go b/console/service/pkg/tracer/cid.go new file mode 100644 index 000000000..d364d0c3a --- /dev/null +++ b/console/service/pkg/tracer/cid.go @@ -0,0 +1,3 @@ +package tracer + +type CtxCidKey struct{} diff --git a/console/service/restapi/configure_pg_console.go b/console/service/restapi/configure_pg_console.go new file mode 100644 index 000000000..0a2eb5af4 --- /dev/null +++ b/console/service/restapi/configure_pg_console.go @@ -0,0 +1,131 @@ +// This file is safe to edit. Once it exists it will not be overwritten + +package restapi + +import ( + "crypto/tls" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + + localmid "postgresql-cluster-console/middleware" + "postgresql-cluster-console/restapi/operations" + "postgresql-cluster-console/restapi/operations/cluster" + "postgresql-cluster-console/restapi/operations/dictionary" + "postgresql-cluster-console/restapi/operations/system" +) + +//go:generate swagger generate server --target ../../pg_console --name PgConsole --spec ../api/swagger.yaml --principal interface{} --exclude-main + +func configureFlags(api *operations.PgConsoleAPI) { + // api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... } +} + +func configureAPI(api *operations.PgConsoleAPI) http.Handler { + // configure the api here + api.ServeError = errors.ServeError + + // Set your custom logger if needed. 
Default one is log.Printf + // Expected interface func(string, ...interface{}) + // + // Example: + // api.Logger = log.Printf + + api.UseSwaggerUI() + // To continue using redoc as your UI, uncomment the following line + // api.UseRedoc() + + api.JSONConsumer = runtime.JSONConsumer() + + api.JSONProducer = runtime.JSONProducer() + + if api.ClusterGetClustersHandler == nil { + api.ClusterGetClustersHandler = cluster.GetClustersHandlerFunc(func(params cluster.GetClustersParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.GetClusters has not yet been implemented") + }) + } + if api.ClusterGetClustersIDHandler == nil { + api.ClusterGetClustersIDHandler = cluster.GetClustersIDHandlerFunc(func(params cluster.GetClustersIDParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.GetClustersID has not yet been implemented") + }) + } + if api.DictionaryGetDatabaseExtensionsHandler == nil { + api.DictionaryGetDatabaseExtensionsHandler = dictionary.GetDatabaseExtensionsHandlerFunc(func(params dictionary.GetDatabaseExtensionsParams) middleware.Responder { + return middleware.NotImplemented("operation dictionary.GetDatabaseExtensions has not yet been implemented") + }) + } + if api.DictionaryGetExternalDeploymentsHandler == nil { + api.DictionaryGetExternalDeploymentsHandler = dictionary.GetExternalDeploymentsHandlerFunc(func(params dictionary.GetExternalDeploymentsParams) middleware.Responder { + return middleware.NotImplemented("operation dictionary.GetExternalDeployments has not yet been implemented") + }) + } + if api.SystemGetVersionHandler == nil { + api.SystemGetVersionHandler = system.GetVersionHandlerFunc(func(params system.GetVersionParams) middleware.Responder { + return middleware.NotImplemented("operation system.GetVersion has not yet been implemented") + }) + } + if api.ClusterPostClustersHandler == nil { + api.ClusterPostClustersHandler = cluster.PostClustersHandlerFunc(func(params cluster.PostClustersParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClusters has not yet been implemented") + }) + } + if api.ClusterPostClustersIDReinitHandler == nil { + api.ClusterPostClustersIDReinitHandler = cluster.PostClustersIDReinitHandlerFunc(func(params cluster.PostClustersIDReinitParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClustersIDReinit has not yet been implemented") + }) + } + if api.ClusterPostClustersIDReloadHandler == nil { + api.ClusterPostClustersIDReloadHandler = cluster.PostClustersIDReloadHandlerFunc(func(params cluster.PostClustersIDReloadParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClustersIDReload has not yet been implemented") + }) + } + if api.ClusterPostClustersIDRestartHandler == nil { + api.ClusterPostClustersIDRestartHandler = cluster.PostClustersIDRestartHandlerFunc(func(params cluster.PostClustersIDRestartParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClustersIDRestart has not yet been implemented") + }) + } + if api.ClusterPostClustersIDStartHandler == nil { + api.ClusterPostClustersIDStartHandler = cluster.PostClustersIDStartHandlerFunc(func(params cluster.PostClustersIDStartParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClustersIDStart has not yet been implemented") + }) + } + if api.ClusterPostClustersIDStopHandler == nil { + api.ClusterPostClustersIDStopHandler = 
cluster.PostClustersIDStopHandlerFunc(func(params cluster.PostClustersIDStopParams) middleware.Responder { + return middleware.NotImplemented("operation cluster.PostClustersIDStop has not yet been implemented") + }) + } + + api.PreServerShutdown = func() {} + + api.ServerShutdown = func() {} + + return setupGlobalMiddleware(api.Serve(setupMiddlewares)) +} + +// The TLS configuration before HTTPS server starts. +func configureTLS(tlsConfig *tls.Config) { + // Make all necessary changes to the TLS configuration here. +} + +// As soon as server is initialized but not run yet, this function will be called. +// If you need to modify a config, store server instance to stop it individually later, this is the place. +// This function can be called multiple times, depending on the number of serving schemes. +// scheme value will be set accordingly: "http", "https" or "unix". +func configureServer(s *http.Server, scheme, addr string) { +} + +// The middleware configuration is for the handler executors. These do not apply to the swagger.json document. +// The middleware executes after routing but before authentication, binding and validation. +func setupMiddlewares(handler http.Handler) http.Handler { + return handler +} + +var Token string + +// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document. +// So this is a good place to plug in a panic handling middleware, logging and metrics. +func setupGlobalMiddleware(handler http.Handler) http.Handler { + return localmid.SetCorrelationId(localmid.CORS(localmid.RequestZeroLog(localmid.Authorization(Token, handler)))) +} diff --git a/console/supervisord.conf b/console/supervisord.conf new file mode 100644 index 000000000..5dfdce2bf --- /dev/null +++ b/console/supervisord.conf @@ -0,0 +1,41 @@ +[supervisord] +nodaemon=true +user=root +pidfile=/var/run/supervisord.pid +logfile=/var/log/supervisor/supervisord.log +childlogdir=/var/log/supervisor + +[unix_http_server] +file=/var/run/supervisor.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl=unix:///var/run/supervisor.sock + +[program:pg-console-db] +command=/pg_start.sh +startsecs=5 +priority=100 +autostart=true +stdout_logfile=/var/log/supervisor/pg-console-db-stdout.log +stderr_logfile=/var/log/supervisor/pg-console-db-stderr.log + +[program:pg-console-api] +command=/usr/local/bin/pg-console +startsecs=5 +startretries=3 +priority=200 +autostart=true +stdout_logfile=/var/log/supervisor/pg-console-api-stdout.log +stderr_logfile=/var/log/supervisor/pg-console-api-stderr.log + +[program:pg-console-ui] +command=/bin/bash -c "/usr/share/nginx/html/env.sh && /usr/sbin/nginx -g 'daemon off;'" +startsecs=5 +startretries=3 +priority=300 +autostart=true +stdout_logfile=/var/log/supervisor/pg-console-ui-stdout.log +stderr_logfile=/var/log/supervisor/pg-console-ui-stderr.log diff --git a/console/ui/.env b/console/ui/.env new file mode 100644 index 000000000..f89b2563f --- /dev/null +++ b/console/ui/.env @@ -0,0 +1,6 @@ +VITE_API_URL=http://localhost:8080/api/v1/ +VITE_AUTH_TOKEN=auth_token +VITE_CLUSTERS_POLLING_INTERVAL=60000 +VITE_CLUSTER_OVERVIEW_POLLING_INTERVAL=60000 +VITE_OPERATIONS_POLLING_INTERVAL=60000 +VITE_OPERATION_LOGS_POLLING_INTERVAL=10000 \ No newline at end of file diff --git a/console/ui/.env.production b/console/ui/.env.production new file mode 100644 index 000000000..78376365b --- /dev/null +++ b/console/ui/.env.production @@ -0,0 +1,6 @@ 
+VITE_API_URL=REPLACE_ME_WITH_API_URL +VITE_AUTH_TOKEN=REPLACE_ME_WITH_AUTH_TOKEN +VITE_CLUSTERS_POLLING_INTERVAL=REPLACE_ME_WITH_CLUSTERS_POLLING_INTERVAL +VITE_CLUSTER_OVERVIEW_POLLING_INTERVAL=REPLACE_ME_WITH_CLUSTER_OVERVIEW_POLLING_INTERVAL +VITE_OPERATIONS_POLLING_INTERVAL=REPLACE_ME_WITH_OPERATIONS_POLLING_INTERVAL +VITE_OPERATION_LOGS_POLLING_INTERVAL=REPLACE_ME_WITH_OPERATION_LOGS_POLLING_INTERVAL \ No newline at end of file diff --git a/console/ui/.eslintrc.cjs b/console/ui/.eslintrc.cjs new file mode 100644 index 000000000..ca2a2f7c8 --- /dev/null +++ b/console/ui/.eslintrc.cjs @@ -0,0 +1,28 @@ +module.exports = { + root: true, + env: {browser: true, es2020: true}, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended-type-checked', + 'plugin:react-hooks/recommended', + 'plugin:@typescript-eslint/stylistic-type-checked', + 'plugin:react/recommended', + 'plugin:react/jsx-runtime' + ], + ignorePatterns: ['dist', '.eslintrc.cjs'], + parser: '@typescript-eslint/parser', + parserOptions: { + ecmaVersion: 'latest', + sourceType: 'module', + project: ['./tsconfig.json', './tsconfig.node.json'], + tsconfigRootDir: __dirname, + }, + plugins: ['react-refresh'], + rules: { + 'react-refresh/only-export-components': [ + 'warn', + {allowConstantExport: true}, + ], + '@typescript-eslint/no-misused-promises': 'off' + }, +} diff --git a/console/ui/.gitignore b/console/ui/.gitignore new file mode 100644 index 000000000..a547bf36d --- /dev/null +++ b/console/ui/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/console/ui/.prettierignore b/console/ui/.prettierignore new file mode 100644 index 000000000..c2ed210f5 --- /dev/null +++ b/console/ui/.prettierignore @@ -0,0 +1,6 @@ +build/ +dist/ +node_modules/ +public/ +package.json +yarn.lock diff --git a/console/ui/.prettierrc b/console/ui/.prettierrc new file mode 100644 index 000000000..e47db5689 --- /dev/null +++ b/console/ui/.prettierrc @@ -0,0 +1,12 @@ +{ + "printWidth": 120, + "tabWidth": 2, + "useTabs": false, + "semi": true, + "singleQuote": true, + "trailingComma": "all", + "bracketSameLine": true, + "arrowParens": "always", + "endOfLine": "auto", + "bracketSpacing": true +} diff --git a/console/ui/Dockerfile b/console/ui/Dockerfile new file mode 100644 index 000000000..9f9d98fb4 --- /dev/null +++ b/console/ui/Dockerfile @@ -0,0 +1,20 @@ +FROM node:20-bookworm AS builder + +WORKDIR /usr/src/pg-console + +COPY console/ui/ . + +RUN yarn install --frozen-lockfile --network-timeout 1000000 && yarn vite build + +FROM nginx:1.26-bookworm AS runtime +LABEL maintainer="Vitaliy Kukharik vitabaks@gmail.com" + +WORKDIR /usr/share/nginx/html + +COPY --from=builder /usr/src/pg-console/dist ./ +COPY console/ui/nginx/nginx.conf /etc/nginx/ +COPY console/ui/env.sh console/ui/.env console/ui/.env.production ./ + +RUN chmod +x ./env.sh + +CMD ["/bin/bash", "-c", "/usr/share/nginx/html/env.sh && nginx -g \"daemon off;\""] diff --git a/console/ui/README.md b/console/ui/README.md new file mode 100644 index 000000000..fc6076354 --- /dev/null +++ b/console/ui/README.md @@ -0,0 +1,120 @@ +# Autobase Console UI + +The UI part of autobase console. This project provides a user-friendly web interface for managing, monitoring, and configuring Postgres clusters. 
+ +## Features + +- **Cluster management**: Create Postgres clusters for multiple cloud providers or your own machines. +- **Cluster overview**: View general information and the status of a Postgres cluster. +- **Operations**: View cluster operations and deployment logs. +- **Projects**: Create multiple projects with different clusters. +- **Environments**: Create multiple environments for clusters. +- **Settings**: Use proxy servers to deploy clusters (optional). +- **Secrets**: Easily manage multiple credentials, including cloud secrets, SSH keys, and passwords. + +## Installation + +To run this project locally, follow these steps: + +1. **Clone the repository** + +``` +git clone https://github.com/vitabaks/autobase.git +cd autobase/console/ui +``` + +2. **Install dependencies** + +```yarn install``` + +3. **Start the development server** + +```yarn run dev``` + +## Usage + +### Running the App in Development Mode + +1. Ensure you have installed all dependencies with ```yarn install```. +2. Start the development server with ```yarn run dev```. +3. The app should open in your browser automatically. If it doesn't, open your browser and navigate + to http://localhost:5173. + +### Building for Production + +To create a production build: + +```yarn run build``` + +The optimized build will be output to the `dist` folder. You can then serve it with any static server. + +## Technology Stack + +**UI:** + +- React +- Redux Toolkit (RTK Query for data fetching) +- React Router v6 +- Vite (development and build tool) +- Material UI (UI kit) +- Material React Table V2 +- React-toastify + +**Deployment:** + +- Docker (included Dockerfile for quick deployment) +- Nginx (project configuration included) + +## Configuration + +Several environment variables configure the UI: + +| KEY | DEFAULT | DESCRIPTION | |----------------------------------------------|------------------------------|-------------------------------------------------------------| | PG_CONSOLE_API_URL | http://localhost:8080/api/v1 | Default API URL the frontend sends requests to. | | PG_CONSOLE_AUTHORIZATION_TOKEN | auth_token | Reference auth token that will be used for login. | | PG_CONSOLE_CLUSTERS_POLLING_INTERVAL | 60000 | Clusters table refresh interval in milliseconds. | | PG_CONSOLE_CLUSTER_OVERVIEW_POLLING_INTERVAL | 60000 | Cluster overview refresh interval in milliseconds. | | PG_CONSOLE_OPERATIONS_POLLING_INTERVAL | 60000 | Operations table refresh interval in milliseconds. | | PG_CONSOLE_OPERATION_LOGS_POLLING_INTERVAL | 10000 | Operation logs refresh interval in milliseconds. | + +## Architecture + +The UI follows the [Feature-Sliced Design](https://feature-sliced.design/) v2 approach to its architecture. +This design pattern divides the application into distinct layers and slices, each with a specific role and +responsibility, to promote isolation, reusability, and easy maintenance. + +### Feature-Sliced Design Overview + +#### Layers + +1. **App Layer** + + - Description: This is the top-level layer, responsible for initializing the application, setting up providers (like + routers, states, etc.), and global styles. + - Contents: + - App: Main application component that integrates all providers and initializes the app. + - providers: Context providers such as Redux Provider, Router, Theme, etc. + - styles: Global styles and theming. + +2. **Pages Layer** + + - Description: Represents the application screens or pages. Each page can consist of multiple features and/or entities.
+ - Contents: Page components like AddCluster, Login, 404, etc. + +3. **Features Layer** + + - Description: This layer contains interactive components such as buttons, modals, etc. + - Contents: Feature components like AddSecret, LogoutButton, OperationsTableRowActions, etc. + +4. **Entities Layer** + + - Description: Contains core business entities of the application. Reusable form parts are also modeled as + entities. + - Contents: Entities like SidebarItem, SecretFormBlock, etc. + +5. **Shared Layer** + + - Description: This is the foundational layer. It includes utilities, shared components, constants, and other reusable + elements that can be used across features, entities, or pages. + - Contents: Common components (CopyIcon, DefaultTable, Spinner), constants and types, utility functions. diff --git a/console/ui/env.sh b/console/ui/env.sh new file mode 100644 index 000000000..494f457d3 --- /dev/null +++ b/console/ui/env.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +SEARCH_DIR="/usr/share/nginx/html/" + +# Set default values for environment variables if they are not set +export VITE_API_URL=${VITE_API_URL:-${PG_CONSOLE_API_URL:-"http://localhost:8080/api/v1"}} +export VITE_AUTH_TOKEN=${VITE_AUTH_TOKEN:-${PG_CONSOLE_AUTHORIZATION_TOKEN:-"auth_token"}} +export VITE_CLUSTERS_POLLING_INTERVAL=${PG_CONSOLE_CLUSTERS_POLLING_INTERVAL:-"60000"} +export VITE_CLUSTER_OVERVIEW_POLLING_INTERVAL=${PG_CONSOLE_CLUSTER_OVERVIEW_POLLING_INTERVAL:-"60000"} +export VITE_OPERATIONS_POLLING_INTERVAL=${PG_CONSOLE_OPERATIONS_POLLING_INTERVAL:-"60000"} +export VITE_OPERATION_LOGS_POLLING_INTERVAL=${PG_CONSOLE_OPERATION_LOGS_POLLING_INTERVAL:-"10000"} + +# Find all .js files in the specified directory and replace placeholders with the environment variable values +find "${SEARCH_DIR}" -type f -name '*.js' -exec sed -i -e " + s|REPLACE_ME_WITH_API_URL|${VITE_API_URL}|g; + s|REPLACE_ME_WITH_AUTH_TOKEN|${VITE_AUTH_TOKEN}|g; + s|REPLACE_ME_WITH_CLUSTERS_POLLING_INTERVAL|${VITE_CLUSTERS_POLLING_INTERVAL}|g; + s|REPLACE_ME_WITH_CLUSTER_OVERVIEW_POLLING_INTERVAL|${VITE_CLUSTER_OVERVIEW_POLLING_INTERVAL}|g; + s|REPLACE_ME_WITH_OPERATIONS_POLLING_INTERVAL|${VITE_OPERATIONS_POLLING_INTERVAL}|g; + s|REPLACE_ME_WITH_OPERATION_LOGS_POLLING_INTERVAL|${VITE_OPERATION_LOGS_POLLING_INTERVAL}|g; +" {} \; diff --git a/console/ui/index.html b/console/ui/index.html new file mode 100644 index 000000000..4976b2a65 --- /dev/null +++ b/console/ui/index.html @@ -0,0 +1,13 @@ +<!-- HTML markup was lost during extraction; only the document title "Autobase for PostgreSQL®" is recoverable -->
+ + + diff --git a/console/ui/nginx/nginx.conf b/console/ui/nginx/nginx.conf new file mode 100644 index 000000000..7f2a9fb30 --- /dev/null +++ b/console/ui/nginx/nginx.conf @@ -0,0 +1,48 @@ +worker_processes auto; + +events { + worker_connections 8000; + multi_accept on; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + server { + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PATCH, DELETE, PUT' always; + add_header 'Access-Control-Allow-Headers' 'Authorization, Access-Control-Allow-Origin, Access-Control-Allow-Headers, Origin,Accept, X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers,X-Auth-Token' always; + + listen 80; + access_log /var/log/nginx/access.log; + + root /usr/share/nginx/html; + index index.html index.htm; + + location / { + try_files $uri $uri/ /index.html; + sendfile off; + add_header Last-Modified $date_gmt; + add_header Cache-Control 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; + if_modified_since off; + expires off; + etag off; + proxy_no_cache 1; + proxy_cache_bypass 1; + } + + location ~* \.(?:css|js)$ { + try_files $uri =404; + expires 1y; + access_log off; + add_header Cache-Control "public"; + } + + location ~ ^.+\..+$ { + try_files $uri =404; + } + } +} + diff --git a/console/ui/package.json b/console/ui/package.json new file mode 100644 index 000000000..1b4a82a06 --- /dev/null +++ b/console/ui/package.json @@ -0,0 +1,77 @@ +{ + "name": "postgresql-cluster-console-ui", + "private": true, + "version": "2.2.0", + "type": "module", + "scripts": { + "dev": "vite --open", + "build": "vite build", + "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "tsc && vite build && vite preview", + "serve": "vite build && serve ./dist", + "apiGen": "npx @rtk-query/codegen-openapi src/shared/api/apiConfig.ts", + "vitest": "vitest run", + "vitest:watch": "vitest" + }, + "dependencies": { + "@emotion/react": "^11.14.0", + "@emotion/styled": "^11.14.0", + "@fontsource/roboto": "^5.2.5", + "@hookform/resolvers": "^3.10.0", + "@monaco-editor/react": "^4.7.0", + "@mui/icons-material": "^5.17.1", + "@mui/lab": "^5.0.0-alpha.176", + "@mui/material": "^5.17.1", + "@mui/x-data-grid": "^7.28.1", + "@mui/x-date-pickers": "^7.28.0", + "@reduxjs/toolkit": "^2.6.1", + "date-fns": "^3.6.0", + "i18next": "^23.16.8", + "i18next-browser-languagedetector": "^7.2.2", + "i18next-fs-backend": "^2.6.0", + "i18next-http-backend": "^2.7.3", + "ip-regex": "^5.0.0", + "material-react-table": "^2.13.3", + "normalize.css": "^8.0.1", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "react-error-boundary": "^4.1.2", + "react-hook-form": "^7.54.2", + "react-i18next": "^14.1.3", + "react-lazylog": "^4.5.3", + "react-redux": "^9.2.0", + "react-router-dom": "^6.30.0", + "react-toastify": "^10.0.6", + "yup": "^1.6.1" + }, + "devDependencies": { + "@faker-js/faker": "^8.4.1", + "@rtk-query/codegen-openapi": "^1.2.0", + "@testing-library/dom": "^10.4.0", + "@testing-library/jest-dom": "^6.6.3", + "@testing-library/react": "^15.0.7", + "@testing-library/user-event": "^14.6.1", + "@types/node": "^20.17.25", + "@types/react": "^18.3.19", + "@types/react-dom": "^18.3.5", + "@types/react-lazylog": "^4.5.4", + "@typescript-eslint/eslint-plugin": "^7.18.0", + "@typescript-eslint/parser": "^7.18.0", + "@vitejs/plugin-react": "^4.3.4", + "@vitejs/plugin-react-swc": "^3.8.1", + "autoprefixer": "^10.4.21", + "esbuild-plugin-react-virtualized": "^1.0.4", + "esbuild-runner": "^2.2.2", + "eslint": "^8.57.1", + "eslint-plugin-react": "^7.37.4", + "eslint-plugin-react-hooks": "^4.6.2", + "eslint-plugin-react-refresh": "^0.4.19", + "jsdom": "^24.1.3", + "prettier": "^3.5.3", + "sass": "^1.86.0", + "typescript": "^5.8.2", + "vite": "^5.4.14", + "vite-plugin-svgr": "^4.3.0", + "vitest": "^1.6.1" + } +} diff --git a/console/ui/src/app/App.tsx b/console/ui/src/app/App.tsx new file mode 100644 index 000000000..3683722fb --- /dev/null +++ b/console/ui/src/app/App.tsx @@ -0,0 +1,24 @@ +import { FC } from 'react'; +import { ThemeProvider } from '@mui/material'; +import theme from '@shared/theme/theme.ts'; +import Router from '@app/router/Router.tsx'; +import { Provider } from 'react-redux'; +import { ToastContainer } from 'react-toastify'; +import { LocalizationProvider } from '@mui/x-date-pickers'; +import { AdapterDateFns } from '@mui/x-date-pickers/AdapterDateFnsV3'; +import { store } from '@app/redux/store/store.ts'; + +const App: FC = () => { + return ( + {/* JSX markup was lost in extraction; reconstructed from the imports above */} + <Provider store={store}> + <ThemeProvider theme={theme}> + <LocalizationProvider dateAdapter={AdapterDateFns}> + <Router /> + <ToastContainer /> + </LocalizationProvider> + </ThemeProvider> + </Provider> + ); +}; + +export default App; diff --git a/console/ui/src/app/layout/index.ts b/console/ui/src/app/layout/index.ts new file mode 100644 index 000000000..7c9056883 --- /dev/null +++ b/console/ui/src/app/layout/index.ts @@ -0,0 +1,3 @@ +import Layout from './ui'; + +export default Layout; diff --git a/console/ui/src/app/layout/ui/index.tsx b/console/ui/src/app/layout/ui/index.tsx new file mode 100644 index 000000000..e3f98fe5f --- /dev/null +++ b/console/ui/src/app/layout/ui/index.tsx @@ -0,0 +1,16 @@ +import { FC } from 'react'; +import Sidebar from '@widgets/sidebar'; +import Header from '@widgets/header'; +import Main from '@widgets/main'; + +const Layout: 
FC = () => { + return ( + <> + <Header /> + <Sidebar /> + <Main /> + </>
+ ); +}; + +export default Layout; diff --git a/console/ui/src/app/main.tsx b/console/ui/src/app/main.tsx new file mode 100644 index 000000000..530783d4a --- /dev/null +++ b/console/ui/src/app/main.tsx @@ -0,0 +1,16 @@ +import React from 'react'; +import ReactDOM from 'react-dom/client'; +import App from './App.tsx'; +import 'normalize.css/normalize.css'; +import '@shared/i18n/i18n.ts'; +import '@fontsource/roboto/300.css'; +import '@fontsource/roboto/400.css'; +import '@fontsource/roboto/500.css'; +import '@fontsource/roboto/700.css'; +import 'react-toastify/dist/ReactToastify.min.css'; + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +); diff --git a/console/ui/src/app/redux/slices/projectSlice/projectSelectors.ts b/console/ui/src/app/redux/slices/projectSlice/projectSelectors.ts new file mode 100644 index 000000000..0db80f572 --- /dev/null +++ b/console/ui/src/app/redux/slices/projectSlice/projectSelectors.ts @@ -0,0 +1,3 @@ +import { RootState } from '@app/redux/store/store.ts'; + +export const selectCurrentProject = (state: RootState) => state.project.currentProject; diff --git a/console/ui/src/app/redux/slices/projectSlice/projectSlice.ts b/console/ui/src/app/redux/slices/projectSlice/projectSlice.ts new file mode 100644 index 000000000..64ac1dfd9 --- /dev/null +++ b/console/ui/src/app/redux/slices/projectSlice/projectSlice.ts @@ -0,0 +1,24 @@ +import { createSlice, PayloadAction } from '@reduxjs/toolkit'; + +interface ProjectSliceState { + currentProject: string | null; +} + +const initialState: ProjectSliceState = { + currentProject: localStorage.getItem('currentProject') ?? '', +}; + +export const projectSlice = createSlice({ + name: 'project', + initialState, + reducers: { + setProject: (state: ProjectSliceState, action: PayloadAction) => { + state.currentProject = action.payload; + localStorage.setItem('currentProject', action.payload); + }, + }, +}); + +export const { setProject } = projectSlice.actions; + +export default projectSlice.reducer; diff --git a/console/ui/src/app/redux/store/hooks.ts b/console/ui/src/app/redux/store/hooks.ts new file mode 100644 index 000000000..5cdf32436 --- /dev/null +++ b/console/ui/src/app/redux/store/hooks.ts @@ -0,0 +1,6 @@ +import { useDispatch, useSelector } from 'react-redux'; +import type { AppDispatch, RootState } from './store'; + +// Use throughout your app instead of plain `useDispatch` and `useSelector` +export const useAppDispatch = useDispatch.withTypes(); +export const useAppSelector = useSelector.withTypes(); diff --git a/console/ui/src/app/redux/store/store.ts b/console/ui/src/app/redux/store/store.ts new file mode 100644 index 000000000..b80a2787b --- /dev/null +++ b/console/ui/src/app/redux/store/store.ts @@ -0,0 +1,71 @@ +import { Action, configureStore, isRejectedWithValue, Middleware, ThunkAction } from '@reduxjs/toolkit'; +import { operationsApi } from '@shared/api/api/operations'; +import { clustersApi } from '@shared/api/api/clusters.ts'; +import { environmentsApi } from '@shared/api/api/environments.ts'; +import { projectsApi } from '@shared/api/api/projects.ts'; +import { secretsApi } from '@shared/api/api/secrets.ts'; +import { settingsApi } from '@shared/api/api/settings.ts'; +import { otherApi } from '@shared/api/api/other.ts'; +import { projectSlice } from '@app/redux/slices/projectSlice/projectSlice.ts'; +import { baseApi } from '@shared/api/baseApi.ts'; +import { toast } from 'react-toastify'; +import { setupListeners } from '@reduxjs/toolkit/query'; + +export const rtkQueryErrorLogger: 
Middleware = () => (next) => (action) => { + if (isRejectedWithValue(action)) { + toast.error(action.payload?.data?.message || action.payload.data?.title); + console.error(action.payload?.data); + } + return next(action); +}; + +// `combineSlices` automatically combines the reducers using +// their `reducerPath`s, therefore we no longer need to call `combineReducers`. +const rootReducer = { + [baseApi.reducerPath]: baseApi.reducer, + [clustersApi.reducerPath]: clustersApi.reducer, + [environmentsApi.reducerPath]: environmentsApi.reducer, + [operationsApi.reducerPath]: operationsApi.reducer, + [projectsApi.reducerPath]: projectsApi.reducer, + [secretsApi.reducerPath]: secretsApi.reducer, + [settingsApi.reducerPath]: settingsApi.reducer, + [otherApi.reducerPath]: otherApi.reducer, + project: projectSlice.reducer, +}; + +// Infer the `RootState` type from the root reducer +export type RootState = ReturnType; + +export const makeStore = (preloadedState?: Partial) => { + const store = configureStore({ + reducer: rootReducer, + // Adding the api middleware enables caching, invalidation, polling, + // and other useful features of `rtk-query`. + middleware: (getDefaultMiddleware) => { + return getDefaultMiddleware().concat( + baseApi.middleware, + clustersApi.middleware, + environmentsApi.middleware, + operationsApi.middleware, + projectsApi.middleware, + secretsApi.middleware, + settingsApi.middleware, + otherApi.middleware, + rtkQueryErrorLogger, + ); + }, + preloadedState, + }); + // configure listeners using the provided defaults + // optional, but required for `refetchOnFocus`/`refetchOnReconnect` behaviors + setupListeners(store.dispatch); + return store; +}; + +export const store = makeStore(); + +// Infer the type of `store` +export type AppStore = typeof store; +// Infer the `AppDispatch` type from the store itself +export type AppDispatch = AppStore['dispatch']; +export type AppThunk = ThunkAction; diff --git a/console/ui/src/app/router/PrivateRouterWrapper.tsx b/console/ui/src/app/router/PrivateRouterWrapper.tsx new file mode 100644 index 000000000..fddeee181 --- /dev/null +++ b/console/ui/src/app/router/PrivateRouterWrapper.tsx @@ -0,0 +1,24 @@ +import { Navigate, Outlet, useLocation } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { FC, useEffect } from 'react'; +import { toast } from 'react-toastify'; +import { useTranslation } from 'react-i18next'; +import { AUTH_TOKEN } from '@shared/config/constants.ts'; + +const PrivateRouteWrapper: FC = () => { + const { t } = useTranslation('toasts'); + const location = useLocation(); + + useEffect(() => { + const token = localStorage.getItem('token'); + if (token && token !== AUTH_TOKEN) toast.error(t('invalidToken')); + }, [localStorage.getItem('token')]); + + return localStorage.getItem('token') === AUTH_TOKEN ? 
( + + ) : ( + + ); +}; + +export default PrivateRouteWrapper; diff --git a/console/ui/src/app/router/Router.tsx b/console/ui/src/app/router/Router.tsx new file mode 100644 index 000000000..4a78e298a --- /dev/null +++ b/console/ui/src/app/router/Router.tsx @@ -0,0 +1,49 @@ +import { FC, lazy, Suspense } from 'react'; +import { + createBrowserRouter, + createRoutesFromElements, + Navigate, + Outlet, + Route, + RouterProvider, +} from 'react-router-dom'; +import Layout from '../layout'; +import ClustersRoutes from '@app/router/routerConfig/ClustersRoutes.tsx'; +import OperationsRoutes from '@app/router/routerConfig/OperationsRoutes.tsx'; +import SettingsRoutes from '@app/router/routerConfig/SettingsRoutes.tsx'; +import RouterPaths from '@app/router/routerPathsConfig'; +import PrivateRouteWrapper from '@app/router/PrivateRouterWrapper.tsx'; +import Spinner from '@shared/ui/spinner'; + +const Login = lazy(() => import('@pages/login')); +const Page404 = lazy(() => import('@pages/404')); + +const Router: FC = () => { + const routes = createRoutesFromElements( + }> + + + }> + } /> + }> + }> + {ClustersRoutes()} + {OperationsRoutes()} + {SettingsRoutes()} + + + } /> + } /> + {/* anything that starts with "/" i.e. "/any-page" */} + , + ); + + const browserRouter = createBrowserRouter(routes); + + return ; +}; + +export default Router; diff --git a/console/ui/src/app/router/routerConfig/ClustersRoutes.tsx b/console/ui/src/app/router/routerConfig/ClustersRoutes.tsx new file mode 100644 index 000000000..29e0f62c4 --- /dev/null +++ b/console/ui/src/app/router/routerConfig/ClustersRoutes.tsx @@ -0,0 +1,37 @@ +import { lazy } from 'react'; +import { Navigate, Route } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; + +const Clusters = lazy(() => import('@pages/clusters')); +const AddCluster = lazy(() => import('@pages/add-cluster')); +const OverviewCluster = lazy(() => import('@pages/overview-cluster')); + +const ClustersRoutes = () => ( + + {/*redirects to "clusters" when opening homepage*/} + } /> + + } /> + } + /> + } + /> + + +); + +export default ClustersRoutes; diff --git a/console/ui/src/app/router/routerConfig/OperationsRoutes.tsx b/console/ui/src/app/router/routerConfig/OperationsRoutes.tsx new file mode 100644 index 000000000..fda368640 --- /dev/null +++ b/console/ui/src/app/router/routerConfig/OperationsRoutes.tsx @@ -0,0 +1,29 @@ +import { lazy } from 'react'; +import { Route } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; +import OperationLog from '@pages/operation-log'; + +const Operations = lazy(() => import('@pages/operations')); + +const OperationsRoutes = () => ( + + + } /> + `${data.operationId}`, + }, + }} + element={} + /> + + +); + +export default OperationsRoutes; diff --git a/console/ui/src/app/router/routerConfig/SettingsRoutes.tsx b/console/ui/src/app/router/routerConfig/SettingsRoutes.tsx new file mode 100644 index 000000000..3b013d146 --- /dev/null +++ b/console/ui/src/app/router/routerConfig/SettingsRoutes.tsx @@ -0,0 +1,28 @@ +import { lazy } from 'react'; +import { Navigate, Route } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; + +const Settings = lazy(() => import('@pages/settings')); +const SettingsForm = lazy(() => import('@widgets/settings-form')); +const SecretsTable = lazy(() => import('@widgets/secrets-table/ui')); +const ProjectsTable = lazy(() => import('@widgets/projects-table')); +const EnvironmentsTable = lazy(() => import('@widgets/environments-table')); + 
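For orientation, every route module in this diff follows the same code-splitting recipe: page components are wrapped in React.lazy and rendered under the Suspense boundary that Router.tsx declares with the shared Spinner fallback. A minimal self-contained sketch of that recipe (ExamplePage and the '/example' path are illustrative, not part of this codebase):

```tsx
import { FC, lazy, Suspense } from 'react';
import { Route, Routes } from 'react-router-dom';

// Hypothetical page module; React.lazy requires a default export.
const ExamplePage = lazy(() => import('./ExamplePage'));

const ExampleRoutes: FC = () => (
  // The fallback renders while the page's chunk is downloading.
  <Suspense fallback={<div>Loading…</div>}>
    <Routes>
      <Route path="/example" element={<ExamplePage />} />
    </Routes>
  </Suspense>
);

export default ExampleRoutes;
```

Splitting at the page level keeps the initial bundle small while leaving the route tree fully declarative.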
+const SettingsRoutes = () => ( + + }> + }> + } /> + } /> + } /> + } /> + + +); + +export default SettingsRoutes; diff --git a/console/ui/src/app/router/routerPathsConfig/index.ts b/console/ui/src/app/router/routerPathsConfig/index.ts new file mode 100644 index 000000000..3563dc35a --- /dev/null +++ b/console/ui/src/app/router/routerPathsConfig/index.ts @@ -0,0 +1,20 @@ +import routerClustersPathsConfig from '@app/router/routerPathsConfig/routerClustersPathsConfig.ts'; +import routerOperationsPathsConfig from '@app/router/routerPathsConfig/routerOperationsPathsConfig.ts'; +import routerSettingsPathsConfig from '@app/router/routerPathsConfig/routerSettingsPathsConfig.ts'; + +/* + Combines route paths into one config + */ +const RouterPaths = { + login: { + absolutePath: 'login', + }, + notFound: { + absolutePath: 'notFound', + }, + clusters: routerClustersPathsConfig, + operations: routerOperationsPathsConfig, + settings: routerSettingsPathsConfig, +} as const; + +export default RouterPaths; diff --git a/console/ui/src/app/router/routerPathsConfig/routerClustersPathsConfig.ts b/console/ui/src/app/router/routerPathsConfig/routerClustersPathsConfig.ts new file mode 100644 index 000000000..a75dc7858 --- /dev/null +++ b/console/ui/src/app/router/routerPathsConfig/routerClustersPathsConfig.ts @@ -0,0 +1,13 @@ +const routerClustersPathsConfig = { + absolutePath: '/clusters', + add: { + absolutePath: '/clusters/add', + relativePath: 'add', + }, + overview: { + absolutePath: '/clusters/:clusterId/overview', + relativePath: ':clusterId/overview', + }, +}; + +export default routerClustersPathsConfig; diff --git a/console/ui/src/app/router/routerPathsConfig/routerOperationsPathsConfig.ts b/console/ui/src/app/router/routerPathsConfig/routerOperationsPathsConfig.ts new file mode 100644 index 000000000..f8cdb3398 --- /dev/null +++ b/console/ui/src/app/router/routerPathsConfig/routerOperationsPathsConfig.ts @@ -0,0 +1,9 @@ +const routerOperationsPathsConfig = { + absolutePath: '/operations', + log: { + absolutePath: '/operations/:operationId/log', + relativePath: ':operationId/log', + }, +}; + +export default routerOperationsPathsConfig; diff --git a/console/ui/src/app/router/routerPathsConfig/routerSettingsPathsConfig.ts b/console/ui/src/app/router/routerPathsConfig/routerSettingsPathsConfig.ts new file mode 100644 index 000000000..7df9c59a0 --- /dev/null +++ b/console/ui/src/app/router/routerPathsConfig/routerSettingsPathsConfig.ts @@ -0,0 +1,21 @@ +const routerSettingsPathsConfig = { + absolutePath: '/settings', + general: { + absolutePath: '/settings/general', + relativePath: 'general', + }, + secrets: { + absolutePath: '/settings/secrets', + relativePath: 'secrets', + }, + projects: { + absolutePath: '/settings/projects', + relativePath: 'projects', + }, + environments: { + absolutePath: '/settings/environments', + relativePath: 'environments', + }, +}; + +export default routerSettingsPathsConfig; diff --git a/console/ui/src/app/vite-env.d.ts b/console/ui/src/app/vite-env.d.ts new file mode 100644 index 000000000..b1f45c786 --- /dev/null +++ b/console/ui/src/app/vite-env.d.ts @@ -0,0 +1,2 @@ +/// +/// diff --git a/console/ui/src/entities/authentification-method-form-block/index.ts b/console/ui/src/entities/authentification-method-form-block/index.ts new file mode 100644 index 000000000..7aa00aa2c --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/index.ts @@ -0,0 +1,3 @@ +import AuthenticationMethodFormBlock from '@entities/authentification-method-form-block/ui'; + +export 
default AuthenticationMethodFormBlock; diff --git a/console/ui/src/entities/authentification-method-form-block/model/constants.ts b/console/ui/src/entities/authentification-method-form-block/model/constants.ts new file mode 100644 index 000000000..1a05b07e0 --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/model/constants.ts @@ -0,0 +1,16 @@ +import { TFunction } from 'i18next'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; + +export const authenticationMethods = (t: TFunction) => + Object.freeze([ + { + id: AUTHENTICATION_METHODS.SSH, + name: t('sshKey', { ns: 'clusters' }), + description: t('sshKeyAuthDescription', { ns: 'clusters' }), + }, + { + id: AUTHENTICATION_METHODS.PASSWORD, + name: t('password', { ns: 'shared' }), + description: t('passwordAuthDescription', { ns: 'clusters' }), + }, + ]); diff --git a/console/ui/src/entities/authentification-method-form-block/ui/AuthenticationFormPart.tsx b/console/ui/src/entities/authentification-method-form-block/ui/AuthenticationFormPart.tsx new file mode 100644 index 000000000..2fb7f2c21 --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/ui/AuthenticationFormPart.tsx @@ -0,0 +1,45 @@ +import React, { FC } from 'react'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import SshMethodFormPart from '@entities/authentification-method-form-block/ui/SshMethodFormPart.tsx'; +import PasswordMethodFormPart from '@entities/authentification-method-form-block/ui/PasswordMethodFormPart.tsx'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; +import { TextField } from '@mui/material'; +import { useTranslation } from 'react-i18next'; + +const AuthenticationFormPart: FC = () => { + const { t } = useTranslation('shared'); + const { + control, + watch, + formState: { errors }, + } = useFormContext(); + + const watchAuthenticationMethod = watch(CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD); + const watchIsSaveToConsole = watch(CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE); + + return ( + <> + ( + + )} + /> + {watchAuthenticationMethod === AUTHENTICATION_METHODS.SSH ? 
: } + + ); +}; + +export default AuthenticationFormPart; diff --git a/console/ui/src/entities/authentification-method-form-block/ui/PasswordMethodFormPart.tsx b/console/ui/src/entities/authentification-method-form-block/ui/PasswordMethodFormPart.tsx new file mode 100644 index 000000000..fb3a3746c --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/ui/PasswordMethodFormPart.tsx @@ -0,0 +1,37 @@ +import React, { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { TextField } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const PasswordMethodFormPart: FC = () => { + const { t } = useTranslation('shared'); + + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + <> + ( + + )} + /> + + ); +}; + +export default PasswordMethodFormPart; diff --git a/console/ui/src/entities/authentification-method-form-block/ui/SshMethodFormPart.tsx b/console/ui/src/entities/authentification-method-form-block/ui/SshMethodFormPart.tsx new file mode 100644 index 000000000..57aaaae53 --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/ui/SshMethodFormPart.tsx @@ -0,0 +1,40 @@ +import React, { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { TextField } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const SshMethodFormPart: FC = () => { + const { t } = useTranslation(['clusters', 'shared', 'settings']); + + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + <> + ( + + )} + /> + + ); +}; + +export default SshMethodFormPart; diff --git a/console/ui/src/entities/authentification-method-form-block/ui/index.tsx b/console/ui/src/entities/authentification-method-form-block/ui/index.tsx new file mode 100644 index 000000000..6c0a27fde --- /dev/null +++ b/console/ui/src/entities/authentification-method-form-block/ui/index.tsx @@ -0,0 +1,187 @@ +import React, { useEffect } from 'react'; +import { Box, Checkbox, FormControlLabel, MenuItem, Radio, Stack, TextField, Typography } from '@mui/material'; +import { authenticationMethods } from '@entities/authentification-method-form-block/model/constants.ts'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { useGetSecretsQuery } from '@shared/api/api/secrets.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import AuthenticationFormPart from '@entities/authentification-method-form-block/ui/AuthenticationFormPart.tsx'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; + +const AuthenticationMethodFormBlock: React.FC = () => { + const { t } = useTranslation(['clusters', 'shared', 'settings']); + + const { + control, + watch, + resetField, + setValue, + formState: { errors }, + } = useFormContext(); + + const currentProject = useAppSelector(selectCurrentProject); + + const watchAuthenticationMethod = 
watch(CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD); + const watchIsSaveToConsole = watch(CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE); + const watchIsUseDefinedSecret = watch(CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET); + + const secrets = useGetSecretsQuery({ type: watchAuthenticationMethod, projectId: currentProject }); + + useEffect(() => { + resetField(CLUSTER_FORM_FIELD_NAMES.SECRET_ID); + }, [watchIsUseDefinedSecret, watchAuthenticationMethod]); + + useEffect(() => { + setValue(CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET, !!secrets.data?.data?.length); + }, [secrets.data?.data?.length]); + + return ( + + + {t('authenticationMethod', { ns: 'clusters' })} + + + + ( + <> + {authenticationMethods(t).map((method) => ( + onChange(method.id)}> + + + {method.name} + {method.description} + + + ))} + + )} + /> + + {secrets.data?.data?.length ? ( + <> + ( + + {[t('yes', { ns: 'shared' }), t('no', { ns: 'shared' })].map((option) => ( + + {option} + + ))} + + )} + /> + {watchIsUseDefinedSecret ? ( + <> + {watchAuthenticationMethod === AUTHENTICATION_METHODS.SSH ? ( + ( + + )} + /> + ) : null} + ( + + {secrets.data.data.map((secret) => ( + + {secret?.name} + + ))} + + )} + /> + + ) : ( + + )} + + ) : ( + + )} + {(secrets.data?.data?.length && !watchIsUseDefinedSecret) || !secrets.data?.data?.length ? ( + <> + {watchIsSaveToConsole ? ( + ( + + )} + /> + ) : null} + ( + } + checked={value as boolean} + onChange={onChange} + label={t('saveToConsole')} + /> + )} + /> + + ) : null} + + + ); +}; + +export default AuthenticationMethodFormBlock; diff --git a/console/ui/src/entities/breadcumb-item/index.ts b/console/ui/src/entities/breadcumb-item/index.ts new file mode 100644 index 000000000..5e908adaa --- /dev/null +++ b/console/ui/src/entities/breadcumb-item/index.ts @@ -0,0 +1,3 @@ +import BreadcrumbsItem from '@entities/breadcumb-item/ui'; + +export default BreadcrumbsItem; diff --git a/console/ui/src/entities/breadcumb-item/model/types.ts b/console/ui/src/entities/breadcumb-item/model/types.ts new file mode 100644 index 000000000..1d1de41e9 --- /dev/null +++ b/console/ui/src/entities/breadcumb-item/model/types.ts @@ -0,0 +1,4 @@ +export interface BreadcrumbsItemProps { + label: string; + path: string; +} diff --git a/console/ui/src/entities/breadcumb-item/ui/index.tsx b/console/ui/src/entities/breadcumb-item/ui/index.tsx new file mode 100644 index 000000000..a164727fa --- /dev/null +++ b/console/ui/src/entities/breadcumb-item/ui/index.tsx @@ -0,0 +1,11 @@ +import { FC } from 'react'; +import { BreadcrumbsItemProps } from '@entities/breadcumb-item/model/types.ts'; +import { Link } from 'react-router-dom'; + +const BreadcrumbsItem: FC = ({ label, path }) => ( + + {label} + +); + +export default BreadcrumbsItem; diff --git a/console/ui/src/entities/cluster-cloud-provider-block/index.ts b/console/ui/src/entities/cluster-cloud-provider-block/index.ts new file mode 100644 index 000000000..cd7e326e9 --- /dev/null +++ b/console/ui/src/entities/cluster-cloud-provider-block/index.ts @@ -0,0 +1,3 @@ +import ClusterFormCloudProviderBox from '@entities/cluster-cloud-provider-block/ui'; + +export default ClusterFormCloudProviderBox; diff --git a/console/ui/src/entities/cluster-cloud-provider-block/model/types.ts b/console/ui/src/entities/cluster-cloud-provider-block/model/types.ts new file mode 100644 index 000000000..e4466d11e --- /dev/null +++ b/console/ui/src/entities/cluster-cloud-provider-block/model/types.ts @@ -0,0 +1,6 @@ +import { ReactElement } from 'react'; + +export interface 
ClusterFormCloudProviderBoxProps { + children?: ReactElement; + isActive?: boolean; +} diff --git a/console/ui/src/entities/cluster-cloud-provider-block/ui/index.tsx b/console/ui/src/entities/cluster-cloud-provider-block/ui/index.tsx new file mode 100644 index 000000000..6c5797e65 --- /dev/null +++ b/console/ui/src/entities/cluster-cloud-provider-block/ui/index.tsx @@ -0,0 +1,23 @@ +import { FC } from 'react'; +import { ClusterFormCloudProviderBoxProps } from '@entities/cluster-cloud-provider-block/model/types.ts'; +import SelectableBox from '@shared/ui/selectable-box'; + +const ClusterFormCloudProviderBox: FC = ({ children, isActive, ...props }) => { + return ( + + {children} + + ); +}; + +export default ClusterFormCloudProviderBox; diff --git a/console/ui/src/entities/cluster-description-block/index.ts b/console/ui/src/entities/cluster-description-block/index.ts new file mode 100644 index 000000000..8670af328 --- /dev/null +++ b/console/ui/src/entities/cluster-description-block/index.ts @@ -0,0 +1,3 @@ +import ClusterDescriptionBlock from '@entities/cluster-description-block/ui'; + +export default ClusterDescriptionBlock; diff --git a/console/ui/src/entities/cluster-description-block/ui/index.tsx b/console/ui/src/entities/cluster-description-block/ui/index.tsx new file mode 100644 index 000000000..54fb36a6c --- /dev/null +++ b/console/ui/src/entities/cluster-description-block/ui/index.tsx @@ -0,0 +1,35 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Box, TextField, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +const ClusterDescriptionBlock: React.FC = () => { + const { t } = useTranslation('clusters'); + const { control } = useFormContext(); + + return ( + + + {t('description')} + + ( + + )} + /> + + ); +}; + +export default ClusterDescriptionBlock; diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/assets/aws.svg b/console/ui/src/entities/cluster-form-cloud-region-block/assets/aws.svg new file mode 100644 index 000000000..b3049e1b1 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/assets/aws.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/assets/azure.svg b/console/ui/src/entities/cluster-form-cloud-region-block/assets/azure.svg new file mode 100644 index 000000000..3948dff06 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/assets/azure.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/assets/digitalocean.svg b/console/ui/src/entities/cluster-form-cloud-region-block/assets/digitalocean.svg new file mode 100644 index 000000000..b27205b5b --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/assets/digitalocean.svg @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/assets/gcp.svg b/console/ui/src/entities/cluster-form-cloud-region-block/assets/gcp.svg new file mode 100644 index 000000000..973d10529 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/assets/gcp.svg @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/assets/hetzner.svg 
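The small form blocks in this diff (the description block above, the cluster-name block, the secret and authentication parts) all bind MUI inputs to react-hook-form state through the same Controller pattern. A stripped-down sketch of that binding, using an illustrative field name rather than one of the real CLUSTER_FORM_FIELD_NAMES constants:

```tsx
import { FC } from 'react';
import { TextField } from '@mui/material';
import { Controller, useFormContext } from 'react-hook-form';

// 'description' is a placeholder field name for illustration only.
const DescriptionField: FC = () => {
  const { control } = useFormContext(); // requires an enclosing <FormProvider>

  return (
    <Controller
      control={control}
      name="description"
      defaultValue=""
      render={({ field: { value, onChange } }) => (
        <TextField value={value} onChange={onChange} multiline minRows={3} fullWidth />
      )}
    />
  );
};

export default DescriptionField;
```

Because each block only calls useFormContext, it stays decoupled from any particular form instance and can be composed freely inside the cluster form's FormProvider.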
b/console/ui/src/entities/cluster-form-cloud-region-block/assets/hetzner.svg new file mode 100644 index 000000000..328106191 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/assets/hetzner.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/index.ts b/console/ui/src/entities/cluster-form-cloud-region-block/index.ts new file mode 100644 index 000000000..eab9a9bd2 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/index.ts @@ -0,0 +1,3 @@ +import CloudFormRegionBlock from '@entities/cluster-form-cloud-region-block/ui'; + +export default CloudFormRegionBlock; diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/lib/hooks.tsx b/console/ui/src/entities/cluster-form-cloud-region-block/lib/hooks.tsx new file mode 100644 index 000000000..f179eb74d --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/lib/hooks.tsx @@ -0,0 +1,15 @@ +import AWSIcon from '../assets/aws.svg'; +import GCPIcon from '../assets/gcp.svg'; +import AzureIcon from '../assets/azure.svg'; +import DigitalOceanIcon from '../assets/digitalocean.svg'; +import HetznerIcon from '../assets/hetzner.svg'; +import { PROVIDERS } from '@shared/config/constants.ts'; + +export const useNameIconProvidersMap = () => ({ + // TODO: refactor into moving from hooks to constant + [PROVIDERS.AWS]: AWSIcon, + [PROVIDERS.GCP]: GCPIcon, + [PROVIDERS.AZURE]: AzureIcon, + [PROVIDERS.DIGITAL_OCEAN]: DigitalOceanIcon, + [PROVIDERS.HETZNER]: HetznerIcon, +}); diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/model/types.ts b/console/ui/src/entities/cluster-form-cloud-region-block/model/types.ts new file mode 100644 index 000000000..502161210 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/model/types.ts @@ -0,0 +1,5 @@ +import { DeploymentInfoCloudRegion } from '@shared/api/api/other.ts'; + +export interface CloudFormRegionBlockProps { + regions: DeploymentInfoCloudRegion[]; +} diff --git a/console/ui/src/entities/cluster-form-cloud-region-block/ui/index.tsx b/console/ui/src/entities/cluster-form-cloud-region-block/ui/index.tsx new file mode 100644 index 000000000..709eaf2f2 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cloud-region-block/ui/index.tsx @@ -0,0 +1,81 @@ +import { FC, SyntheticEvent } from 'react'; +import { TabContext, TabList, TabPanel } from '@mui/lab'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { Box, Divider, Stack, Tab, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import ClusterFormRegionConfigBox from '@widgets/cluster-form/ui/ClusterFormRegionConfigBox.tsx'; + +const CloudFormRegionBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { control, watch, setValue } = useFormContext(); + + const watchProvider = watch(CLUSTER_FORM_FIELD_NAMES.PROVIDER); + const regionWatch = watch(CLUSTER_FORM_FIELD_NAMES.REGION); + + const regions = watchProvider?.cloud_regions ?? 
[]; + + const handleRegionChange = + (onChange: (...event: never[]) => void) => (e: SyntheticEvent, value: string) => { + onChange(value); + setValue( + CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG, + regions?.find((region) => region.code === value)?.datacenters?.[0], + ); + }; + + const handleRegionConfigChange = (onChange: (...event: never[]) => void, value: string) => () => { + onChange(value); + }; + + return ( + + + {t('selectCloudRegion')} + + + { + return ( + + {regions.map((region) => ( + + ))} + + ); + }} + /> + + { + return ( + <> + {regions.map((region) => ( + + + {region.datacenters.map((config) => ( + + ))} + + + ))} + + ); + }} + /> + + + ); +}; + +export default CloudFormRegionBlock; diff --git a/console/ui/src/entities/cluster-form-cluster-name-block/index.ts b/console/ui/src/entities/cluster-form-cluster-name-block/index.ts new file mode 100644 index 000000000..79a75cb09 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cluster-name-block/index.ts @@ -0,0 +1,3 @@ +import ClusterFormClusterNameBlock from '@entities/cluster-form-cluster-name-block/ui'; + +export default ClusterFormClusterNameBlock; diff --git a/console/ui/src/entities/cluster-form-cluster-name-block/ui/index.tsx b/console/ui/src/entities/cluster-form-cluster-name-block/ui/index.tsx new file mode 100644 index 000000000..2f7519643 --- /dev/null +++ b/console/ui/src/entities/cluster-form-cluster-name-block/ui/index.tsx @@ -0,0 +1,37 @@ +import React from 'react'; +import { Box, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +const ClusterFormClusterNameBlock: React.FC = () => { + const { t } = useTranslation('clusters'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + + {t('clusterName')}* + + ( + + )} + /> + + ); +}; + +export default ClusterFormClusterNameBlock; diff --git a/console/ui/src/entities/cluster-form-environment-block/index.ts b/console/ui/src/entities/cluster-form-environment-block/index.ts new file mode 100644 index 000000000..051571223 --- /dev/null +++ b/console/ui/src/entities/cluster-form-environment-block/index.ts @@ -0,0 +1,3 @@ +import ClusterFormEnvironmentBlock from '@entities/cluster-form-environment-block/ui'; + +export default ClusterFormEnvironmentBlock; diff --git a/console/ui/src/entities/cluster-form-environment-block/model/types.ts b/console/ui/src/entities/cluster-form-environment-block/model/types.ts new file mode 100644 index 000000000..3cf7df829 --- /dev/null +++ b/console/ui/src/entities/cluster-form-environment-block/model/types.ts @@ -0,0 +1,5 @@ +import { ResponseEnvironment } from '@shared/api/api/environments.ts'; + +export interface EnvironmentBlockProps { + environments: ResponseEnvironment[]; +} diff --git a/console/ui/src/entities/cluster-form-environment-block/ui/index.tsx b/console/ui/src/entities/cluster-form-environment-block/ui/index.tsx new file mode 100644 index 000000000..6b4286fb2 --- /dev/null +++ b/console/ui/src/entities/cluster-form-environment-block/ui/index.tsx @@ -0,0 +1,34 @@ +import { FC } from 'react'; +import { Box, MenuItem, Select, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { EnvironmentBlockProps } from 
'@entities/cluster-form-environment-block/model/types.ts'; + +const ClusterFormEnvironmentBlock: FC = ({ environments }) => { + const { t } = useTranslation('shared'); + const { control } = useFormContext(); + + return ( + + + {t('environment')} + + ( + + )} + /> + + ); +}; + +export default ClusterFormEnvironmentBlock; diff --git a/console/ui/src/entities/cluster-form-instances-amount-block/assets/instancesIcon.svg b/console/ui/src/entities/cluster-form-instances-amount-block/assets/instancesIcon.svg new file mode 100644 index 000000000..be31ac321 --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-amount-block/assets/instancesIcon.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/console/ui/src/entities/cluster-form-instances-amount-block/index.ts b/console/ui/src/entities/cluster-form-instances-amount-block/index.ts new file mode 100644 index 000000000..6bc1e3a93 --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-amount-block/index.ts @@ -0,0 +1,3 @@ +import InstancesAmountBlock from '@entities/cluster-form-instances-amount-block/ui'; + +export default InstancesAmountBlock; diff --git a/console/ui/src/entities/cluster-form-instances-amount-block/ui/index.tsx b/console/ui/src/entities/cluster-form-instances-amount-block/ui/index.tsx new file mode 100644 index 000000000..e8c64732e --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-amount-block/ui/index.tsx @@ -0,0 +1,42 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Box, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import ClusterSliderBox from '@shared/ui/slider-box'; +import ServersIcon from '@shared/assets/serversIcon.svg?react'; + +const InstancesAmountBlock: FC = () => { + const { t } = useTranslation('clusters'); + + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + + {t('numberOfInstances')} + + ( + } + error={errors[CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]} + /> + )} + /> + + ); +}; + +export default InstancesAmountBlock; diff --git a/console/ui/src/entities/cluster-form-instances-block/index.ts b/console/ui/src/entities/cluster-form-instances-block/index.ts new file mode 100644 index 000000000..6fbdf71ba --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-block/index.ts @@ -0,0 +1,3 @@ +import CloudFormInstancesBlock from '@entities/cluster-form-instances-block/ui'; + +export default CloudFormInstancesBlock; diff --git a/console/ui/src/entities/cluster-form-instances-block/model/types.ts b/console/ui/src/entities/cluster-form-instances-block/model/types.ts new file mode 100644 index 000000000..b560ec09a --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-block/model/types.ts @@ -0,0 +1,9 @@ +import { DeploymentInstanceType } from '@shared/api/api/other.ts'; + +export interface CloudFormInstancesBlockProps { + instances: { + small?: DeploymentInstanceType[]; + medium?: DeploymentInstanceType[]; + large?: DeploymentInstanceType[]; + }; +} diff --git a/console/ui/src/entities/cluster-form-instances-block/ui/index.tsx b/console/ui/src/entities/cluster-form-instances-block/ui/index.tsx new file mode 100644 index 000000000..3183228a5 --- /dev/null +++ b/console/ui/src/entities/cluster-form-instances-block/ui/index.tsx @@ -0,0 +1,79 @@ +import { FC } from 'react'; +import { TabContext, TabList, TabPanel } from '@mui/lab'; +import { 
CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { Box, Divider, Stack, Tab, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import ClusterFromInstanceConfigBox from '@entities/cluster-instance-config-box'; + +const CloudFormInstancesBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { control, watch, setValue } = useFormContext(); + + const watchInstanceType = watch(CLUSTER_FORM_FIELD_NAMES.INSTANCE_TYPE); + + const watchProvider = watch(CLUSTER_FORM_FIELD_NAMES.PROVIDER); + + const instances = watchProvider?.instance_types ?? []; + + const handleInstanceTypeChange = (onChange: (...event: any[]) => void) => (_: any, value: string) => { + onChange(value); + setValue(CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG, instances?.[value]?.[0]); + }; + + const handleInstanceConfigChange = (onChange: (...event: any[]) => void, value: string) => () => { + onChange(value); + }; + + return ( + + + {t('selectInstanceType')} + + + { + return ( + + {Object.entries(instances)?.map(([key, value]) => + value ? : null, + )} + + ); + }} + /> + + { + return ( + <> + {Object.entries(instances).map(([key, configs]) => ( + + + {configs?.map((config) => ( + + ))} + + + ))} + + ); + }} + /> + + + ); +}; + +export default CloudFormInstancesBlock; diff --git a/console/ui/src/entities/cluster-info/index.ts b/console/ui/src/entities/cluster-info/index.ts new file mode 100644 index 000000000..bbce068bb --- /dev/null +++ b/console/ui/src/entities/cluster-info/index.ts @@ -0,0 +1,3 @@ +import ClusterInfo from '@entities/cluster-info/ui'; + +export default ClusterInfo; diff --git a/console/ui/src/entities/cluster-info/lib/hooks.tsx b/console/ui/src/entities/cluster-info/lib/hooks.tsx new file mode 100644 index 000000000..599e76772 --- /dev/null +++ b/console/ui/src/entities/cluster-info/lib/hooks.tsx @@ -0,0 +1,40 @@ +import { useTranslation } from 'react-i18next'; +import { Typography } from '@mui/material'; +import { ClusterInfoProps } from '@entities/cluster-info/model/types.ts'; + +export const useGetClusterInfoConfig = ({ + postgresVersion, + clusterName, + description, + environment, + location, +}: ClusterInfoProps) => { + const { t } = useTranslation(['clusters', 'shared']); + + return [ + { + title: t('postgresVersion', { ns: 'clusters' }), + children: {postgresVersion}, + }, + { + title: t('clusterName', { ns: 'clusters' }), + children: {clusterName}, + }, + { + title: t('description', { ns: 'shared' }), + children: {description ?? '---'}, + }, + { + title: t('environment', { ns: 'shared' }), + children: {environment}, + }, + ...(location + ? 
[ + { + title: t('location', { ns: 'clusters' }), + children: {location}, + }, + ] + : []), + ]; +}; diff --git a/console/ui/src/entities/cluster-info/model/types.ts b/console/ui/src/entities/cluster-info/model/types.ts new file mode 100644 index 000000000..ca8b3e36c --- /dev/null +++ b/console/ui/src/entities/cluster-info/model/types.ts @@ -0,0 +1,7 @@ +export interface ClusterInfoProps { + postgresVersion?: number; + clusterName?: string; + description?: string; + environment?: string; + location?: string; +} diff --git a/console/ui/src/entities/cluster-info/ui/index.tsx b/console/ui/src/entities/cluster-info/ui/index.tsx new file mode 100644 index 000000000..ca9b24e10 --- /dev/null +++ b/console/ui/src/entities/cluster-info/ui/index.tsx @@ -0,0 +1,34 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Accordion, AccordionDetails, AccordionSummary, Typography } from '@mui/material'; +import ExpandMoreIcon from '@mui/icons-material/ExpandMore'; +import { ClusterInfoProps } from '@entities/cluster-info/model/types.ts'; +import EditNoteOutlinedIcon from '@mui/icons-material/EditNoteOutlined'; +import { useGetClusterInfoConfig } from '@entities/cluster-info/lib/hooks.tsx'; +import InfoCardBody from '@shared/ui/info-card-body'; + +const ClusterInfo: FC = ({ postgresVersion, clusterName, description, environment, location }) => { + const { t } = useTranslation(['clusters', 'shared']); + + const config = useGetClusterInfoConfig({ + postgresVersion, + clusterName, + description, + environment, + location, + }); + + return ( + + }> + + {t('clusterInfo')} + + + + + + ); +}; + +export default ClusterInfo; diff --git a/console/ui/src/entities/cluster-instance-config-box/index.ts b/console/ui/src/entities/cluster-instance-config-box/index.ts new file mode 100644 index 000000000..9c916eca5 --- /dev/null +++ b/console/ui/src/entities/cluster-instance-config-box/index.ts @@ -0,0 +1,3 @@ +import ClusterFromInstanceConfigBox from '@entities/cluster-instance-config-box/ui'; + +export default ClusterFromInstanceConfigBox; diff --git a/console/ui/src/entities/cluster-instance-config-box/model/types.ts b/console/ui/src/entities/cluster-instance-config-box/model/types.ts new file mode 100644 index 000000000..b02047b62 --- /dev/null +++ b/console/ui/src/entities/cluster-instance-config-box/model/types.ts @@ -0,0 +1,7 @@ +import { ClusterFormSelectableBoxProps } from '@shared/ui/selectable-box/model/types.ts'; + +export interface ClusterFromInstanceConfigBoxProps extends ClusterFormSelectableBoxProps { + name: string; + cpu: string; + ram: string; +} diff --git a/console/ui/src/entities/cluster-instance-config-box/ui/index.tsx b/console/ui/src/entities/cluster-instance-config-box/ui/index.tsx new file mode 100644 index 000000000..75fdad778 --- /dev/null +++ b/console/ui/src/entities/cluster-instance-config-box/ui/index.tsx @@ -0,0 +1,32 @@ +import { FC } from 'react'; +import { ClusterFromInstanceConfigBoxProps } from '@entities/cluster-instance-config-box/model/types.ts'; +import SelectableBox from '@shared/ui/selectable-box'; +import { Box, Stack, Typography } from '@mui/material'; +import RamIcon from '@shared/assets/ramIcon.svg?react'; +import CpuIcon from '@shared/assets/cpuIcon.svg?react'; + +const ClusterFromInstanceConfigBox: FC = ({ + name, + cpu, + ram, + isActive, + ...props +}) => ( + + + {name} + + + + + {cpu} CPU + + + + {ram} GB RAM + + + +); + +export default ClusterFromInstanceConfigBox; diff --git 
a/console/ui/src/entities/cluster-name-description-block/index.ts b/console/ui/src/entities/cluster-name-description-block/index.ts new file mode 100644 index 000000000..d150a3dc7 --- /dev/null +++ b/console/ui/src/entities/cluster-name-description-block/index.ts @@ -0,0 +1,3 @@ +import ClusterNameDescriptionBlock from '@entities/cluster-name-description-block/ui'; + +export default ClusterNameDescriptionBlock; diff --git a/console/ui/src/entities/cluster-name-description-block/ui/index.tsx b/console/ui/src/entities/cluster-name-description-block/ui/index.tsx new file mode 100644 index 000000000..efef382dc --- /dev/null +++ b/console/ui/src/entities/cluster-name-description-block/ui/index.tsx @@ -0,0 +1,35 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Box, TextField, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +const ClusterNameDescriptionBlock: React.FC = () => { + const { t } = useTranslation('clusters'); + const { control } = useFormContext(); + + return ( + + + {t('description')} + + ( + + )} + /> + + ); +}; + +export default ClusterNameDescriptionBlock; diff --git a/console/ui/src/entities/connection-info/assets/eyeIcon.svg b/console/ui/src/entities/connection-info/assets/eyeIcon.svg new file mode 100644 index 000000000..08e322d38 --- /dev/null +++ b/console/ui/src/entities/connection-info/assets/eyeIcon.svg @@ -0,0 +1,4 @@ + + + + diff --git a/console/ui/src/entities/connection-info/index.ts b/console/ui/src/entities/connection-info/index.ts new file mode 100644 index 000000000..880b22c06 --- /dev/null +++ b/console/ui/src/entities/connection-info/index.ts @@ -0,0 +1,3 @@ +import ConnectionInfo from '@entities/connection-info/ui'; + +export default ConnectionInfo; diff --git a/console/ui/src/entities/connection-info/lib/hooks.tsx b/console/ui/src/entities/connection-info/lib/hooks.tsx new file mode 100644 index 000000000..3ab9bdd48 --- /dev/null +++ b/console/ui/src/entities/connection-info/lib/hooks.tsx @@ -0,0 +1,65 @@ +import { useTranslation } from 'react-i18next'; +import { Stack, Typography } from '@mui/material'; +import { useState } from 'react'; +import CopyIcon from '@shared/ui/copy-icon'; +import EyeIcon from '../assets/eyeIcon.svg?react'; +import ConnectionInfoRowContainer from '@entities/connection-info/ui/ConnectionInfoRowConteiner.tsx'; +import { ConnectionInfoProps } from '@entities/connection-info/model/types.ts'; + +export const useGetConnectionInfoConfig = ({ connectionInfo }: { connectionInfo: ConnectionInfoProps }) => { + const { t } = useTranslation(['clusters', 'shared']); + const [isPasswordHidden, setIsPasswordHidden] = useState(true); + + const togglePasswordVisibility = () => setIsPasswordHidden((prev) => !prev); + + const renderCollection = (collection: string | object, defaultLabel: string) => { + return ['string', 'number'].includes(typeof collection) + ? [ + { + title: defaultLabel, + children: ( + + {collection} + + ), + }, + ] + : typeof collection === 'object' + ? Object.entries(collection)?.map(([key, value]) => ({ + title: `${defaultLabel} ${key}`, + children: ( + + {value} + + ), + })) ?? [] + : []; + }; + + return [ + ...(connectionInfo?.address ? renderCollection(connectionInfo.address, t('address', { ns: 'shared' })) : []), + ...(connectionInfo?.port ? 
renderCollection(connectionInfo.port, t('port', { ns: 'clusters' })) : []), + { + title: t('user', { ns: 'shared' }), + children: ( + + {connectionInfo?.superuser} + + ), + }, + { + title: t('password', { ns: 'shared' }), + children: ( + + + {isPasswordHidden ? connectionInfo?.password?.replace(/./g, '*') : connectionInfo?.password} + + + + + + + ), + }, + ]; +}; diff --git a/console/ui/src/entities/connection-info/model/types.ts b/console/ui/src/entities/connection-info/model/types.ts new file mode 100644 index 000000000..13a0db1a0 --- /dev/null +++ b/console/ui/src/entities/connection-info/model/types.ts @@ -0,0 +1,14 @@ +import { ReactNode } from 'react'; + +export interface ConnectionInfoProps { + connectionInfo?: { + address?: string | Record; + port?: string | Record; + superuser?: string; + password?: string; + }; +} + +export interface ConnectionInfoRowContainerProps { + children: ReactNode; +} diff --git a/console/ui/src/entities/connection-info/ui/ConnectionInfoRowConteiner.tsx b/console/ui/src/entities/connection-info/ui/ConnectionInfoRowConteiner.tsx new file mode 100644 index 000000000..2b2214e2b --- /dev/null +++ b/console/ui/src/entities/connection-info/ui/ConnectionInfoRowConteiner.tsx @@ -0,0 +1,13 @@ +import { FC } from 'react'; +import { Stack } from '@mui/material'; +import { ConnectionInfoRowContainerProps } from '@entities/connection-info/model/types.ts'; + +const ConnectionInfoRowContainer: FC = ({ children }) => { + return ( + + {children} + + ); +}; + +export default ConnectionInfoRowContainer; diff --git a/console/ui/src/entities/connection-info/ui/index.tsx b/console/ui/src/entities/connection-info/ui/index.tsx new file mode 100644 index 000000000..e15e1797d --- /dev/null +++ b/console/ui/src/entities/connection-info/ui/index.tsx @@ -0,0 +1,28 @@ +import { FC } from 'react'; +import { Accordion, AccordionDetails, AccordionSummary, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import ExpandMoreIcon from '@mui/icons-material/ExpandMore'; +import { ConnectionInfoProps } from '@entities/connection-info/model/types.ts'; +import PowerOutlinedIcon from '@mui/icons-material/PowerOutlined'; +import { useGetConnectionInfoConfig } from '@entities/connection-info/lib/hooks.tsx'; +import InfoCardBody from '@shared/ui/info-card-body'; + +const ConnectionInfo: FC = ({ connectionInfo }) => { + const { t } = useTranslation(['clusters', 'shared']); + + const config = useGetConnectionInfoConfig({ connectionInfo }); + + return ( + + }> + + {t('connectionInfo')} + + + + + + ); +}; + +export default ConnectionInfo; diff --git a/console/ui/src/entities/database-servers-block/index.ts b/console/ui/src/entities/database-servers-block/index.ts new file mode 100644 index 000000000..b9a1caa2e --- /dev/null +++ b/console/ui/src/entities/database-servers-block/index.ts @@ -0,0 +1,3 @@ +import DatabaseServersBlock from '@entities/database-servers-block/ui'; + +export default DatabaseServersBlock; diff --git a/console/ui/src/entities/database-servers-block/model/types.ts b/console/ui/src/entities/database-servers-block/model/types.ts new file mode 100644 index 000000000..08dd769ca --- /dev/null +++ b/console/ui/src/entities/database-servers-block/model/types.ts @@ -0,0 +1,6 @@ +import { UseFieldArrayRemove } from 'react-hook-form'; + +export interface DatabaseServerBlockProps { + index: number; + remove?: UseFieldArrayRemove; +} diff --git a/console/ui/src/entities/database-servers-block/ui/DatabaseServerBox.tsx 
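One detail worth calling out in the connection-info hook above: renderCollection accepts either a scalar or a keyed object, so the same card renders a single endpoint or per-host endpoints. Both of the following connectionInfo shapes are valid inputs (all values illustrative):

```ts
import { ConnectionInfoProps } from '@entities/connection-info/model/types.ts';

// Scalar form: one address/port pair for the whole cluster.
const single: ConnectionInfoProps['connectionInfo'] = {
  address: '10.0.0.10',
  port: '5432',
  superuser: 'postgres',
  password: 'example-password',
};

// Keyed form: one row per endpoint; each row is titled with the
// translated label followed by the key (e.g. the address label plus "primary").
const perEndpoint: ConnectionInfoProps['connectionInfo'] = {
  address: { primary: '10.0.0.10', replica: '10.0.0.11' },
  port: { primary: '5432', replica: '5432' },
  superuser: 'postgres',
  password: 'example-password',
};
```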
b/console/ui/src/entities/database-servers-block/ui/DatabaseServerBox.tsx new file mode 100644 index 000000000..074907f70 --- /dev/null +++ b/console/ui/src/entities/database-servers-block/ui/DatabaseServerBox.tsx @@ -0,0 +1,81 @@ +import { FC } from 'react'; +import { DatabaseServerBlockProps } from '@entities/database-servers-block/model/types.ts'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { Card, IconButton, Stack, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import CloseIcon from '@mui/icons-material/Close'; + +const DatabaseServerBox: FC = ({ index, remove }) => { + const { t } = useTranslation(['clusters', 'shared']); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + {remove ? ( + + + + ) : null} + + {`${t('server', { ns: 'clusters' })} ${index + 1}`} + ( + + )} + /> + ( + + )} + /> + ( + + )} + /> + + + ); +}; + +export default DatabaseServerBox; diff --git a/console/ui/src/entities/database-servers-block/ui/index.tsx b/console/ui/src/entities/database-servers-block/ui/index.tsx new file mode 100644 index 000000000..570d9709f --- /dev/null +++ b/console/ui/src/entities/database-servers-block/ui/index.tsx @@ -0,0 +1,40 @@ +import { FC } from 'react'; +import { useFieldArray } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import DatabaseServerBox from '@entities/database-servers-block/ui/DatabaseServerBox.tsx'; +import { Box, Button, Stack, Typography } from '@mui/material'; +import AddIcon from '@mui/icons-material/Add'; +import { useTranslation } from 'react-i18next'; + +const DatabaseServersBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { fields, append, remove } = useFieldArray({ + name: CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS, + }); + + const removeServer = (index: number) => () => remove(index); + + return ( + + + {t('databaseServers')} + + + + {fields.map((field, index) => ( + + ))} + + + + + ); +}; + +export default DatabaseServersBlock; diff --git a/console/ui/src/entities/load-balancers-block/index.ts b/console/ui/src/entities/load-balancers-block/index.ts new file mode 100644 index 000000000..3006f3b89 --- /dev/null +++ b/console/ui/src/entities/load-balancers-block/index.ts @@ -0,0 +1,3 @@ +import LoadBalancersBlock from '@entities/load-balancers-block/ui'; + +export default LoadBalancersBlock; diff --git a/console/ui/src/entities/load-balancers-block/ui/index.tsx b/console/ui/src/entities/load-balancers-block/ui/index.tsx new file mode 100644 index 000000000..5295688e0 --- /dev/null +++ b/console/ui/src/entities/load-balancers-block/ui/index.tsx @@ -0,0 +1,32 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Box, Checkbox, Stack, Tooltip, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import HelpOutlineIcon from '@mui/icons-material/HelpOutline'; + +const LoadBalancersBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { control } = useFormContext(); + + return ( + + + {t('loadBalancers')} + + + {t('haproxyLoadBalancer')} + + + + } + /> + + + ); +}; + +export default LoadBalancersBlock; diff --git a/console/ui/src/entities/postgres-version-block/index.ts 
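DatabaseServersBlock above is a textbook useFieldArray consumer: append pushes a new empty server card and remove(index) drops one, while each card reads its fields through the shared form context. A minimal standalone sketch of the same mechanics (field and component names are illustrative):

```tsx
import { FC } from 'react';
import { FormProvider, useFieldArray, useForm } from 'react-hook-form';

type ServersForm = { servers: { hostname: string; ipAddress: string }[] };

const ServersExample: FC = () => {
  const methods = useForm<ServersForm>({
    defaultValues: { servers: [{ hostname: '', ipAddress: '' }] },
  });
  const { fields, append, remove } = useFieldArray({
    control: methods.control,
    name: 'servers',
  });

  return (
    <FormProvider {...methods}>
      {fields.map((field, index) => (
        // field.id is a stable key generated by react-hook-form.
        <div key={field.id}>
          <input {...methods.register(`servers.${index}.hostname`)} placeholder="hostname" />
          <input {...methods.register(`servers.${index}.ipAddress`)} placeholder="IP address" />
          <button type="button" onClick={() => remove(index)}>remove</button>
        </div>
      ))}
      <button type="button" onClick={() => append({ hostname: '', ipAddress: '' })}>
        add server
      </button>
    </FormProvider>
  );
};

export default ServersExample;
```

Note that the real block omits control because it runs inside the cluster form's FormProvider, where useFieldArray falls back to the context form.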
b/console/ui/src/entities/postgres-version-block/index.ts new file mode 100644 index 000000000..d11dc47b0 --- /dev/null +++ b/console/ui/src/entities/postgres-version-block/index.ts @@ -0,0 +1,3 @@ +import PostgresVersionBox from '@entities/postgres-version-block/ui'; + +export default PostgresVersionBox; diff --git a/console/ui/src/entities/postgres-version-block/model/types.ts b/console/ui/src/entities/postgres-version-block/model/types.ts new file mode 100644 index 000000000..845ecc4c8 --- /dev/null +++ b/console/ui/src/entities/postgres-version-block/model/types.ts @@ -0,0 +1,5 @@ +import { ResponsePostgresVersion } from '@shared/api/api/other.ts'; + +export interface PostgresVersionBlockProps { + postgresVersions: ResponsePostgresVersion[]; +} diff --git a/console/ui/src/entities/postgres-version-block/ui/index.tsx b/console/ui/src/entities/postgres-version-block/ui/index.tsx new file mode 100644 index 000000000..23bc1a952 --- /dev/null +++ b/console/ui/src/entities/postgres-version-block/ui/index.tsx @@ -0,0 +1,43 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Box, MenuItem, Select, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { PostgresVersionBlockProps } from '@entities/postgres-version-block/model/types.ts'; + +const PostgresVersionBox: FC = ({ postgresVersions }) => { + const { t } = useTranslation('clusters'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + + {t('postgresVersion')} + + ( + + )} + /> + + ); +}; + +export default PostgresVersionBox; diff --git a/console/ui/src/entities/providers-block/index.ts b/console/ui/src/entities/providers-block/index.ts new file mode 100644 index 000000000..7c5ed31e0 --- /dev/null +++ b/console/ui/src/entities/providers-block/index.ts @@ -0,0 +1,3 @@ +import ClusterFormProvidersBlock from '@entities/providers-block/ui'; + +export default ClusterFormProvidersBlock; diff --git a/console/ui/src/entities/providers-block/model/types.ts b/console/ui/src/entities/providers-block/model/types.ts new file mode 100644 index 000000000..cf51b516b --- /dev/null +++ b/console/ui/src/entities/providers-block/model/types.ts @@ -0,0 +1,10 @@ +import { ReactElement } from 'react'; + +export interface ProvidersBlockProps { + providers: { code?: string; description?: string }[]; +} + +export interface ClusterFormCloudProviderBoxProps { + children?: ReactElement; + isActive?: boolean; +} diff --git a/console/ui/src/entities/providers-block/ui/ClusterFormCloudProviderBox.tsx b/console/ui/src/entities/providers-block/ui/ClusterFormCloudProviderBox.tsx new file mode 100644 index 000000000..eb86ba3bb --- /dev/null +++ b/console/ui/src/entities/providers-block/ui/ClusterFormCloudProviderBox.tsx @@ -0,0 +1,23 @@ +import { FC } from 'react'; +import SelectableBox from '@shared/ui/selectable-box'; +import { ClusterFormCloudProviderBoxProps } from '@entities/providers-block/model/types.ts'; + +const ClusterFormCloudProviderBox: FC = ({ children, isActive, ...props }) => { + return ( + + {children} + + ); +}; + +export default ClusterFormCloudProviderBox; diff --git a/console/ui/src/entities/providers-block/ui/index.tsx b/console/ui/src/entities/providers-block/ui/index.tsx new file mode 100644 index 000000000..bbecb6df2 --- /dev/null +++ b/console/ui/src/entities/providers-block/ui/index.tsx @@ -0,0 +1,81 @@ +import { FC } from 'react'; +import 
{ Box, Stack, Tooltip, Typography } from '@mui/material'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { useTranslation } from 'react-i18next'; +import { useNameIconProvidersMap } from '@entities/cluster-form-cloud-region-block/lib/hooks.tsx'; +import ErrorOutlineOutlinedIcon from '@mui/icons-material/ErrorOutlineOutlined'; +import ServersIcon from '@shared/assets/serversIcon.svg?react'; +import theme from '@shared/theme/theme.ts'; +import { ProvidersBlockProps } from '@entities/providers-block/model/types.ts'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import ClusterFormCloudProviderBox from '@entities/providers-block/ui/ClusterFormCloudProviderBox.tsx'; + +const ClusterFormProvidersBlock: FC = ({ providers }) => { + const { t } = useTranslation('clusters'); + const { control, reset } = useFormContext(); + + const nameIconProvidersMap = useNameIconProvidersMap(); + + const handleProviderChange = (value) => () => { + reset((values) => ({ + ...values, + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: value, + [CLUSTER_FORM_FIELD_NAMES.REGION]: value?.cloud_regions?.[0]?.code, + [CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]: value?.cloud_regions?.[0]?.datacenters?.[0], + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_TYPE]: 'small', + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]: value?.instance_types?.small?.[0], + [CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]: + value?.volumes?.find((volume) => volume?.is_default)?.min_size < 100 + ? 100 + : value?.volumes?.find((volume) => volume?.is_default)?.min_size, + })); + }; + + return ( + + + {t('selectDeploymentDestination')} + + ( + + {providers.map((provider) => ( + + {provider.description} + + ))} + + + + + + + + + + {t('yourOwn')} + + + {t('machines')} + + + + + + + )} + /> + + ); +}; + +export default ClusterFormProvidersBlock; diff --git a/console/ui/src/entities/secret-form-block/index.ts b/console/ui/src/entities/secret-form-block/index.ts new file mode 100644 index 000000000..f2fd1ffa4 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/index.ts @@ -0,0 +1,3 @@ +import SecretFormBlock from '@entities/secret-form-block/ui'; + +export default SecretFormBlock; diff --git a/console/ui/src/entities/secret-form-block/lib/functions.ts b/console/ui/src/entities/secret-form-block/lib/functions.ts new file mode 100644 index 000000000..b00615d25 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/lib/functions.ts @@ -0,0 +1,118 @@ +import { PROVIDERS } from '@shared/config/constants.ts'; +import AwsSecretBlock from '@entities/secret-form-block/ui/AwsSecret.tsx'; +import GcpSecretBlock from '@entities/secret-form-block/ui/GcpSecret.tsx'; +import AzureSecretBlock from '@entities/secret-form-block/ui/AzureSecret.tsx'; +import DoSecretBlock from '@entities/secret-form-block/ui/DigitalOceanSecret.tsx'; +import HetznerSecretBlock from '@entities/secret-form-block/ui/HetznerSecret.tsx'; +import SshKeySecretBlock from '@entities/secret-form-block/ui/SshKeySecret.tsx'; +import PasswordSecretBlock from '@entities/secret-form-block/ui/PasswordSecret.tsx'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import { SecretFormValues } from '@entities/secret-form-block/model/types.ts'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +export const getAddSecretFormContentByType = (type: string) => { + switch (type) { + case PROVIDERS.AWS: + return { + translationKey: 
'settingsAwsSecretInfo', + link: 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html', + formComponent: AwsSecretBlock, + }; + case PROVIDERS.GCP: + return { + translationKey: 'settingsGcpSecretInfo', + link: 'https://cloud.google.com/iam/docs/keys-create-delete', + formComponent: GcpSecretBlock, + }; + case PROVIDERS.AZURE: + return { + translationKey: 'settingsAzureSecretInfo', + link: 'https://learn.microsoft.com/en-us/azure/developer/ansible/create-ansible-service-principal?tabs=azure-cli', + formComponent: AzureSecretBlock, + }; + case PROVIDERS.DIGITAL_OCEAN: + return { + translationKey: 'settingsDoSecretInfo', + link: 'https://docs.digitalocean.com/reference/api/create-personal-access-token/', + formComponent: DoSecretBlock, + }; + case PROVIDERS.HETZNER: + return { + translationKey: 'settingsHetznerSecretInfo', + link: 'https://docs.hetzner.com/cloud/api/getting-started/generating-api-token/', + formComponent: HetznerSecretBlock, + }; + case AUTHENTICATION_METHODS.SSH: + return { + translationKey: 'settingsSshKeySecretInfo', + formComponent: SshKeySecretBlock, + }; + default: + return { + translationKey: 'settingsPasswordSecretInfo', + formComponent: PasswordSecretBlock, + }; + } +}; + +export const getSecretBodyFromValues = (values: SecretFormValues) => { + switch (values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]) { + case PROVIDERS.AWS: + return { + [PROVIDERS.AWS]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_ACCESS_KEY_ID]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_ACCESS_KEY_ID], + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_SECRET_ACCESS_KEY]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_SECRET_ACCESS_KEY], + }, + }; + case PROVIDERS.GCP: + return { + [PROVIDERS.GCP]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.GCP_SERVICE_ACCOUNT_CONTENTS]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.GCP_SERVICE_ACCOUNT_CONTENTS], + }, + }; + case PROVIDERS.DIGITAL_OCEAN: + return { + [PROVIDERS.DIGITAL_OCEAN]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.DO_API_TOKEN]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.DO_API_TOKEN], + }, + }; + case PROVIDERS.AZURE: + return { + [PROVIDERS.AZURE]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SUBSCRIPTION_ID]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SUBSCRIPTION_ID], + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_CLIENT_ID]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_CLIENT_ID], + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SECRET]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SECRET], + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_TENANT]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_TENANT], + }, + }; + case PROVIDERS.HETZNER: + return { + [PROVIDERS.HETZNER]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.HCLOUD_API_TOKEN]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.HCLOUD_API_TOKEN], + }, + }; + case AUTHENTICATION_METHODS.SSH: + return { + [AUTHENTICATION_METHODS.SSH]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY]: + values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY], + }, + }; + case AUTHENTICATION_METHODS.PASSWORD: + return { + [AUTHENTICATION_METHODS.PASSWORD]: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME]: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME], + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD]: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD], + }, + }; + } +}; diff --git a/console/ui/src/entities/secret-form-block/model/constants.ts 
b/console/ui/src/entities/secret-form-block/model/constants.ts new file mode 100644 index 000000000..dcb23babe --- /dev/null +++ b/console/ui/src/entities/secret-form-block/model/constants.ts @@ -0,0 +1,30 @@ +export const SECRET_MODAL_CONTENT_CLOUD_PROVIDERS_FORM_FIELD_NAMES = Object.freeze({ + // changing names or keys might break 'secrets' POST request + AWS_ACCESS_KEY_ID: 'AWS_ACCESS_KEY_ID', + AWS_SECRET_ACCESS_KEY: 'AWS_SECRET_ACCESS_KEY', + GCP_SERVICE_ACCOUNT_CONTENTS: 'GCP_SERVICE_ACCOUNT_CONTENTS', + DO_API_TOKEN: 'DO_API_TOKEN', + AZURE_SUBSCRIPTION_ID: 'AZURE_SUBSCRIPTION_ID', + AZURE_CLIENT_ID: 'AZURE_CLIENT_ID', + AZURE_SECRET: 'AZURE_SECRET', + AZURE_TENANT: 'AZURE_TENANT', + HCLOUD_API_TOKEN: 'HCLOUD_API_TOKEN', +}); + +export const SECRET_MODAL_CONTENT_LOCAL_FORM_FIELDS = Object.freeze({ + SSH_PRIVATE_KEY: 'SSH_PRIVATE_KEY', + USERNAME: 'USERNAME', + PASSWORD: 'PASSWORD', +}); + +export const SECRET_MODAL_CONTENT_BODY_FORM_FIELDS = Object.freeze({ + ...SECRET_MODAL_CONTENT_CLOUD_PROVIDERS_FORM_FIELD_NAMES, + ...SECRET_MODAL_CONTENT_LOCAL_FORM_FIELDS, +}); + +export const SECRET_MODAL_CONTENT_FORM_FIELD_NAMES = Object.freeze({ + // changing names might break 'secrets' POST request + SECRET_TYPE: 'type', + SECRET_NAME: 'name', + ...SECRET_MODAL_CONTENT_BODY_FORM_FIELDS, +}); diff --git a/console/ui/src/entities/secret-form-block/model/types.ts b/console/ui/src/entities/secret-form-block/model/types.ts new file mode 100644 index 000000000..5657eee4c --- /dev/null +++ b/console/ui/src/entities/secret-form-block/model/types.ts @@ -0,0 +1,28 @@ +import { PROVIDERS } from '@shared/config/constants.ts'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +export interface SecretFormBlockProps { + secretType: (typeof PROVIDERS)[keyof typeof PROVIDERS]; + isAdditionalInfoDisplayed?: boolean; +} + +export interface SecretFormValues { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME]: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_ACCESS_KEY_ID]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_SECRET_ACCESS_KEY]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.GCP_SERVICE_ACCOUNT_CONTENTS]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.DO_API_TOKEN]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SUBSCRIPTION_ID]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_CLIENT_ID]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SECRET]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_TENANT]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.HCLOUD_API_TOKEN]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME]?: string; + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD]?: string; +} + +export interface SecretModalContentProps { + secretType: string; +} diff --git a/console/ui/src/entities/secret-form-block/ui/AwsSecret.tsx b/console/ui/src/entities/secret-form-block/ui/AwsSecret.tsx new file mode 100644 index 000000000..a6f4c1653 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/AwsSecret.tsx @@ -0,0 +1,49 @@ +import { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const AwsSecretBlock: FC = () => { + const { + control, + formState: { 
errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + ( + + )} + /> + + ); +}; + +export default AwsSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/AzureSecret.tsx b/console/ui/src/entities/secret-form-block/ui/AzureSecret.tsx new file mode 100644 index 000000000..e0cc1ec2d --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/AzureSecret.tsx @@ -0,0 +1,79 @@ +import { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const AzureSecretBlock: FC = () => { + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + ( + + )} + /> + ( + + )} + /> + ( + + )} + /> + + ); +}; + +export default AzureSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/DigitalOceanSecret.tsx b/console/ui/src/entities/secret-form-block/ui/DigitalOceanSecret.tsx new file mode 100644 index 000000000..92406140d --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/DigitalOceanSecret.tsx @@ -0,0 +1,34 @@ +import { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const DoSecretBlock: FC = () => { + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + + ); +}; + +export default DoSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/GcpSecret.tsx b/console/ui/src/entities/secret-form-block/ui/GcpSecret.tsx new file mode 100644 index 000000000..b3bdc4824 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/GcpSecret.tsx @@ -0,0 +1,36 @@ +import { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const GcpSecretBlock: FC = () => { + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + + ); +}; + +export default GcpSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/HetznerSecret.tsx b/console/ui/src/entities/secret-form-block/ui/HetznerSecret.tsx new file mode 100644 index 000000000..1eae92059 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/HetznerSecret.tsx @@ -0,0 +1,34 @@ +import React from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const HetznerSecretBlock: React.FC = () => { + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + + ); +}; + +export default HetznerSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/PasswordSecret.tsx b/console/ui/src/entities/secret-form-block/ui/PasswordSecret.tsx new file mode 100644 index 000000000..2127a1cdc --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/PasswordSecret.tsx @@ -0,0 +1,52 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from 
'@mui/material'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const PasswordSecretBlock: React.FC = () => { + const { t } = useTranslation('shared'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + ( + + )} + /> + + ); +}; + +export default PasswordSecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/SshKeySecret.tsx b/console/ui/src/entities/secret-form-block/ui/SshKeySecret.tsx new file mode 100644 index 000000000..88dc63ca2 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/SshKeySecret.tsx @@ -0,0 +1,38 @@ +import { FC } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { Stack, TextField } from '@mui/material'; +import { useTranslation } from 'react-i18next'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const SshKeySecretBlock: FC = () => { + const { t } = useTranslation('settings'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + ( + + )} + /> + + ); +}; + +export default SshKeySecretBlock; diff --git a/console/ui/src/entities/secret-form-block/ui/index.tsx b/console/ui/src/entities/secret-form-block/ui/index.tsx new file mode 100644 index 000000000..8bdf97780 --- /dev/null +++ b/console/ui/src/entities/secret-form-block/ui/index.tsx @@ -0,0 +1,30 @@ +import React from 'react'; +import { Trans, useTranslation } from 'react-i18next'; +import { getAddSecretFormContentByType } from '@entities/secret-form-block/lib/functions.ts'; +import { Link, Stack, Typography } from '@mui/material'; +import { SecretFormBlockProps } from '@entities/secret-form-block/model/types.ts'; + +const SecretFormBlock: React.FC = ({ secretType, isAdditionalInfoDisplayed = false }) => { + const { t } = useTranslation('settings'); + + const content = getAddSecretFormContentByType(secretType); + + return ( + + + {content.link ? ( + + + + ) : ( + t(content.translationKey) + )} + + + {isAdditionalInfoDisplayed ? 
( + {t('settingsConfidentialDataStore')} + ) : null} + + ); +}; +export default SecretFormBlock; diff --git a/console/ui/src/entities/settings-proxy-block/index.ts b/console/ui/src/entities/settings-proxy-block/index.ts new file mode 100644 index 000000000..f1e0d2d02 --- /dev/null +++ b/console/ui/src/entities/settings-proxy-block/index.ts @@ -0,0 +1,3 @@ +import SettingsProxyBlock from '@entities/settings-proxy-block/ui'; + +export default SettingsProxyBlock; diff --git a/console/ui/src/entities/settings-proxy-block/model/constants.ts b/console/ui/src/entities/settings-proxy-block/model/constants.ts new file mode 100644 index 000000000..c87aad173 --- /dev/null +++ b/console/ui/src/entities/settings-proxy-block/model/constants.ts @@ -0,0 +1,4 @@ +export const SETTINGS_FORM_FIELDS_NAMES = Object.freeze({ + HTTP_PROXY: 'http_proxy', + HTTPS_PROXY: 'https_proxy', +}); diff --git a/console/ui/src/entities/settings-proxy-block/model/types.ts b/console/ui/src/entities/settings-proxy-block/model/types.ts new file mode 100644 index 000000000..9217a87b8 --- /dev/null +++ b/console/ui/src/entities/settings-proxy-block/model/types.ts @@ -0,0 +1,6 @@ +import { SETTINGS_FORM_FIELDS_NAMES } from '@entities/settings-proxy-block/model/constants.ts'; + +export interface SettingsFormValues { + [SETTINGS_FORM_FIELDS_NAMES.HTTP_PROXY]: string; + [SETTINGS_FORM_FIELDS_NAMES.HTTPS_PROXY]: string; +} diff --git a/console/ui/src/entities/settings-proxy-block/ui/index.tsx b/console/ui/src/entities/settings-proxy-block/ui/index.tsx new file mode 100644 index 000000000..6ae6ccd74 --- /dev/null +++ b/console/ui/src/entities/settings-proxy-block/ui/index.tsx @@ -0,0 +1,44 @@ +import React from 'react'; +import { Stack, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { SETTINGS_FORM_FIELDS_NAMES } from '@entities/settings-proxy-block/model/constants.ts'; + +const SettingsProxyBlock: React.FC = () => { + const { t } = useTranslation('settings'); + + const { control } = useFormContext(); + + return ( + + + {t('proxyServer')} + + {t('proxyServerInfo')} + + ( + + http_proxy + + + )} + /> + ( + + https_proxy + + + )} + /> + + + ); +}; + +export default SettingsProxyBlock; diff --git a/console/ui/src/entities/sidebar-item/index.ts b/console/ui/src/entities/sidebar-item/index.ts new file mode 100644 index 000000000..cdfcd55a4 --- /dev/null +++ b/console/ui/src/entities/sidebar-item/index.ts @@ -0,0 +1,3 @@ +import SidebarItem from './ui'; + +export default SidebarItem; diff --git a/console/ui/src/entities/sidebar-item/model/types.ts b/console/ui/src/entities/sidebar-item/model/types.ts new file mode 100644 index 000000000..91a81cae3 --- /dev/null +++ b/console/ui/src/entities/sidebar-item/model/types.ts @@ -0,0 +1,8 @@ +export interface SidebarItemProps { + path: string; + label: string; + icon?: Element; + isActive?: string; + isCollapsed?: boolean; + target?: string; +} diff --git a/console/ui/src/entities/sidebar-item/ui/SidebarItemContent.tsx b/console/ui/src/entities/sidebar-item/ui/SidebarItemContent.tsx new file mode 100644 index 000000000..1645c0890 --- /dev/null +++ b/console/ui/src/entities/sidebar-item/ui/SidebarItemContent.tsx @@ -0,0 +1,36 @@ +import { FC } from 'react'; +import { Link } from 'react-router-dom'; +import { ListItemButton, ListItemIcon, ListItemText } from '@mui/material'; +import theme from '@shared/theme/theme.ts'; +import { SidebarItemProps } from 
'@entities/sidebar-item/model/types.ts'; + +const SidebarItemContent: FC = ({ + path, + label, + icon: SidebarIcon, + isActive, + target, + isCollapsed, +}) => { + return ( + + + {SidebarIcon ? : null} + + {!isCollapsed ? : null} + + ); +}; + +export default SidebarItemContent; diff --git a/console/ui/src/entities/sidebar-item/ui/index.tsx b/console/ui/src/entities/sidebar-item/ui/index.tsx new file mode 100644 index 000000000..0231b2ec1 --- /dev/null +++ b/console/ui/src/entities/sidebar-item/ui/index.tsx @@ -0,0 +1,36 @@ +import { FC } from 'react'; +import { SidebarItemProps } from '../model/types.ts'; +import { Box, ListItem, Tooltip, useTheme } from '@mui/material'; +import SidebarItemContent from '@entities/sidebar-item/ui/SidebarItemContent.tsx'; + +const SidebarItem: FC = ({ path, label, icon, isActive, isCollapsed = false, target, ...props }) => { + const theme = useTheme(); + + return ( + + {isCollapsed ? ( + + + + + + ) : ( + + )} + + ); +}; + +export default SidebarItem; diff --git a/console/ui/src/entities/ssh-key-block/index.ts b/console/ui/src/entities/ssh-key-block/index.ts new file mode 100644 index 000000000..f9d2e7755 --- /dev/null +++ b/console/ui/src/entities/ssh-key-block/index.ts @@ -0,0 +1,3 @@ +import ClusterFormSshKeyBlock from '@entities/ssh-key-block/ui'; + +export default ClusterFormSshKeyBlock; diff --git a/console/ui/src/entities/ssh-key-block/ui/index.tsx b/console/ui/src/entities/ssh-key-block/ui/index.tsx new file mode 100644 index 000000000..0944603cc --- /dev/null +++ b/console/ui/src/entities/ssh-key-block/ui/index.tsx @@ -0,0 +1,40 @@ +import { FC } from 'react'; +import { Box, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +const ClusterFormSshKeyBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + + {t('sshPublicKey')}* + + ( + + )} + /> + + ); +}; + +export default ClusterFormSshKeyBlock; diff --git a/console/ui/src/entities/storage-block/index.ts b/console/ui/src/entities/storage-block/index.ts new file mode 100644 index 000000000..ca4917b81 --- /dev/null +++ b/console/ui/src/entities/storage-block/index.ts @@ -0,0 +1,3 @@ +import StorageBlock from '@entities/storage-block/ui'; + +export default StorageBlock; diff --git a/console/ui/src/entities/storage-block/lib/functions.ts b/console/ui/src/entities/storage-block/lib/functions.ts new file mode 100644 index 000000000..e69de29bb diff --git a/console/ui/src/entities/storage-block/ui/index.tsx b/console/ui/src/entities/storage-block/ui/index.tsx new file mode 100644 index 000000000..47b93cbf9 --- /dev/null +++ b/console/ui/src/entities/storage-block/ui/index.tsx @@ -0,0 +1,50 @@ +import { FC } from 'react'; +import { Box, Typography } from '@mui/material'; +import ClusterSliderBox from '@shared/ui/slider-box'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import StorageIcon from '@shared/assets/storageIcon.svg?react'; + +const StorageBlock: FC = () => { + const { t } = useTranslation('clusters'); + + const { + control, + watch, + formState: { errors }, + } = useFormContext(); + + const watchProvider = watch(CLUSTER_FORM_FIELD_NAMES.PROVIDER); + + const 
storage = watchProvider?.volumes?.find((volume) => volume?.is_default) ?? {}; + + return ( + + + {t('dataDiskStorage')} + + ( + } + error={errors[CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]} + /> + )} + /> + + ); +}; + +export default StorageBlock; diff --git a/console/ui/src/entities/vip-address-block/index.ts b/console/ui/src/entities/vip-address-block/index.ts new file mode 100644 index 000000000..ac25bc850 --- /dev/null +++ b/console/ui/src/entities/vip-address-block/index.ts @@ -0,0 +1,3 @@ +import VipAddressBlock from '@entities/vip-address-block/ui'; + +export default VipAddressBlock; diff --git a/console/ui/src/entities/vip-address-block/ui/index.tsx b/console/ui/src/entities/vip-address-block/ui/index.tsx new file mode 100644 index 000000000..2bf7aebf7 --- /dev/null +++ b/console/ui/src/entities/vip-address-block/ui/index.tsx @@ -0,0 +1,38 @@ +import React, { FC } from 'react'; +import { Box, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { Controller, useFormContext } from 'react-hook-form'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +const VipAddressBlock: FC = () => { + const { t } = useTranslation('clusters'); + const { + control, + formState: { errors }, + } = useFormContext(); + + return ( + + + {t('clusterVipAddress')} + + ( + + )} + /> + + ); +}; + +export default VipAddressBlock; diff --git a/console/ui/src/features/add-environment/index.ts b/console/ui/src/features/add-environment/index.ts new file mode 100644 index 000000000..4cd74ca65 --- /dev/null +++ b/console/ui/src/features/add-environment/index.ts @@ -0,0 +1,3 @@ +import AddEnvironment from '@features/add-environment/ui'; + +export default AddEnvironment; diff --git a/console/ui/src/features/add-environment/ui/index.tsx b/console/ui/src/features/add-environment/ui/index.tsx new file mode 100644 index 000000000..e4d746bc6 --- /dev/null +++ b/console/ui/src/features/add-environment/ui/index.tsx @@ -0,0 +1,41 @@ +import React, { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { toast } from 'react-toastify'; +import SettingsAddEntity from '@shared/ui/settings-add-entity/ui'; +import { usePostEnvironmentsMutation } from '@shared/api/api/environments.ts'; +import { AddEntityFormValues } from '@shared/ui/settings-add-entity/model/types.ts'; +import { ADD_ENTITY_FORM_NAMES } from '@shared/ui/settings-add-entity/model/constants.ts'; + +const AddEnvironment: FC = () => { + const { t } = useTranslation('settings'); + + const [postEnvironmentTrigger, postEnvironmentTriggerState] = usePostEnvironmentsMutation(); + + const onSubmit = async (values: AddEntityFormValues) => { + await postEnvironmentTrigger({ + requestEnvironment: { + name: values[ADD_ENTITY_FORM_NAMES.NAME], + description: values[ADD_ENTITY_FORM_NAMES.DESCRIPTION], + }, + }).unwrap(); + toast.success( + t('environmentSuccessfullyCreated', { + ns: 'toasts', + environmentName: values[ADD_ENTITY_FORM_NAMES.NAME], + }), + ); + }; + + return ( + + ); +}; + +export default AddEnvironment; diff --git a/console/ui/src/features/add-project/index.ts b/console/ui/src/features/add-project/index.ts new file mode 100644 index 000000000..5b56920db --- /dev/null +++ b/console/ui/src/features/add-project/index.ts @@ -0,0 +1,3 @@ +import AddProject from '@features/add-project/ui'; + +export default AddProject; diff --git a/console/ui/src/features/add-project/model/constants.ts b/console/ui/src/features/add-project/model/constants.ts new file mode 100644 
index 000000000..e7ca72c01 --- /dev/null +++ b/console/ui/src/features/add-project/model/constants.ts @@ -0,0 +1,4 @@ +export const PROJECT_FORM_NAMES = Object.freeze({ + NAME: 'name', + DESCRIPTION: 'description', +}); diff --git a/console/ui/src/features/add-project/model/types.ts b/console/ui/src/features/add-project/model/types.ts new file mode 100644 index 000000000..427c0c177 --- /dev/null +++ b/console/ui/src/features/add-project/model/types.ts @@ -0,0 +1,6 @@ +import { PROJECT_FORM_NAMES } from '@features/add-project/model/constants.ts'; + +export interface ProjectFormValues { + [PROJECT_FORM_NAMES.NAME]: string; + [PROJECT_FORM_NAMES.DESCRIPTION]: string; +} diff --git a/console/ui/src/features/add-project/model/validation.ts b/console/ui/src/features/add-project/model/validation.ts new file mode 100644 index 000000000..0ecdb5d9f --- /dev/null +++ b/console/ui/src/features/add-project/model/validation.ts @@ -0,0 +1,9 @@ +import * as yup from 'yup'; +import { TFunction } from 'i18next'; +import { PROJECT_FORM_NAMES } from '@features/add-project/model/constants.ts'; + +export const AddProjectFormSchema = (t: TFunction) => + yup.object({ + [PROJECT_FORM_NAMES.NAME]: yup.string().required(t('requiredField', { ns: 'validation' })), + [PROJECT_FORM_NAMES.DESCRIPTION]: yup.string(), + }); diff --git a/console/ui/src/features/add-project/ui/index.tsx b/console/ui/src/features/add-project/ui/index.tsx new file mode 100644 index 000000000..18ae968d8 --- /dev/null +++ b/console/ui/src/features/add-project/ui/index.tsx @@ -0,0 +1,54 @@ +import React, { FC } from 'react'; +import { useForm } from 'react-hook-form'; +import { useTranslation } from 'react-i18next'; +import { usePostProjectsMutation } from '@shared/api/api/projects.ts'; +import { yupResolver } from '@hookform/resolvers/yup'; + +import { ProjectFormValues } from '@features/add-project/model/types.ts'; +import { toast } from 'react-toastify'; +import { AddProjectFormSchema } from '@features/add-project/model/validation.ts'; +import SettingsAddEntity from '@shared/ui/settings-add-entity/ui'; +import { ADD_ENTITY_FORM_NAMES } from '@shared/ui/settings-add-entity/model/constants.ts'; + +const AddProject: FC = () => { + const { t } = useTranslation(['settings', 'toasts']); + + const [postProjectTrigger, postProjectTriggerState] = usePostProjectsMutation(); + + const { + control, + handleSubmit, + formState: { isValid, isSubmitting }, + } = useForm({ + mode: 'all', + resolver: yupResolver(AddProjectFormSchema(t)), + }); + + const onSubmit = async (values: ProjectFormValues) => { + await postProjectTrigger({ + requestProjectCreate: { + name: values[ADD_ENTITY_FORM_NAMES.NAME], + description: values[ADD_ENTITY_FORM_NAMES.DESCRIPTION], + }, + }).unwrap(); + toast.success( + t('projectSuccessfullyCreated', { + ns: 'toasts', + projectName: values[ADD_ENTITY_FORM_NAMES.NAME], + }), + ); + }; + + return ( + + ); +}; + +export default AddProject; diff --git a/console/ui/src/features/add-secret/index.ts b/console/ui/src/features/add-secret/index.ts new file mode 100644 index 000000000..bb2061a68 --- /dev/null +++ b/console/ui/src/features/add-secret/index.ts @@ -0,0 +1,3 @@ +import SettingsAddSecret from '@features/add-secret/ui'; + +export default SettingsAddSecret; diff --git a/console/ui/src/features/add-secret/model/constants.ts b/console/ui/src/features/add-secret/model/constants.ts new file mode 100644 index 000000000..3001d4304 --- /dev/null +++ b/console/ui/src/features/add-secret/model/constants.ts @@ -0,0 +1,6 @@ +import {
SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +export const ADD_SECRET_FORM_FIELD_NAMES = Object.freeze({ + SECRET_NAME: 'secretName', + ...SECRET_MODAL_CONTENT_FORM_FIELD_NAMES, +}); diff --git a/console/ui/src/features/add-secret/model/types.ts b/console/ui/src/features/add-secret/model/types.ts new file mode 100644 index 000000000..750b1b2e0 --- /dev/null +++ b/console/ui/src/features/add-secret/model/types.ts @@ -0,0 +1,7 @@ +import { ADD_SECRET_FORM_FIELD_NAMES } from '@features/add-secret/model/constants.ts'; + +import { SecretFormValues } from '@entities/secret-form-block/model/types.ts'; + +export interface AddSecretFormValues extends SecretFormValues { + [ADD_SECRET_FORM_FIELD_NAMES.SECRET_NAME]: string; +} diff --git a/console/ui/src/features/add-secret/model/validation.ts b/console/ui/src/features/add-secret/model/validation.ts new file mode 100644 index 000000000..a03daa589 --- /dev/null +++ b/console/ui/src/features/add-secret/model/validation.ts @@ -0,0 +1,49 @@ +import * as yup from 'yup'; +import { TFunction } from 'i18next'; +import { PROVIDERS } from '@shared/config/constants.ts'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const requiredField = ({ valueToBeRequired, t }: { valueToBeRequired: string; t: TFunction }) => + yup + .mixed() + .when(SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE, ([secretType]) => + secretType === valueToBeRequired + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : yup.mixed().optional(), + ); + +export const AddSecretFormSchema = (t: TFunction) => + yup.object({ + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]: yup.string().required(), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME]: yup + .string() + .required(t('requiredField', { ns: 'validation' })), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_ACCESS_KEY_ID]: requiredField({ valueToBeRequired: PROVIDERS.AWS, t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AWS_SECRET_ACCESS_KEY]: requiredField({ + valueToBeRequired: PROVIDERS.AWS, + t, + }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.GCP_SERVICE_ACCOUNT_CONTENTS]: requiredField({ + valueToBeRequired: PROVIDERS.GCP, + t, + }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SUBSCRIPTION_ID]: requiredField({ + valueToBeRequired: PROVIDERS.AZURE, + t, + }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_CLIENT_ID]: requiredField({ valueToBeRequired: PROVIDERS.AZURE, t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_SECRET]: requiredField({ valueToBeRequired: PROVIDERS.AZURE, t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.AZURE_TENANT]: requiredField({ valueToBeRequired: PROVIDERS.AZURE, t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.DO_API_TOKEN]: requiredField({ + valueToBeRequired: PROVIDERS.DIGITAL_OCEAN, + t, + }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.HCLOUD_API_TOKEN]: requiredField({ + valueToBeRequired: PROVIDERS.HETZNER, + t, + }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY]: requiredField({ valueToBeRequired: 'ssh_key', t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME]: requiredField({ valueToBeRequired: 'password', t }), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD]: requiredField({ valueToBeRequired: 'password', t }), + }); diff --git a/console/ui/src/features/add-secret/ui/index.tsx b/console/ui/src/features/add-secret/ui/index.tsx new file mode 100644 index 000000000..328c93d2e --- /dev/null +++ b/console/ui/src/features/add-secret/ui/index.tsx @@ -0,0 
+1,150 @@ +import React, { useState } from 'react'; +import { Button, Card, CircularProgress, MenuItem, Modal, Select, Stack, TextField, Typography } from '@mui/material'; +import AddBoxOutlinedIcon from '@mui/icons-material/AddBoxOutlined'; +import { useTranslation } from 'react-i18next'; +import { Controller, FormProvider, useForm } from 'react-hook-form'; +import { yupResolver } from '@hookform/resolvers/yup'; +import { AddSecretFormSchema } from '@features/add-secret/model/validation.ts'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { usePostSecretsMutation } from '@shared/api/api/secrets.ts'; +import { LoadingButton } from '@mui/lab'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import { toast } from 'react-toastify'; +import SecretFormBlock from '@entities/secret-form-block/ui'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; +import { SecretFormValues } from '@entities/secret-form-block/model/types.ts'; +import { getSecretBodyFromValues } from '@entities/secret-form-block/lib/functions.ts'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; + +const SettingsAddSecret: React.FC = () => { + const { t } = useTranslation(['settings', 'validation', 'toasts']); + const currentProject = useAppSelector(selectCurrentProject); + + const [isModalOpen, setIsModalOpen] = useState(false); + + const handleModalOpenState = (isOpen: boolean) => () => setIsModalOpen(isOpen); + + const [postSecretTrigger, postSecretTriggerState] = usePostSecretsMutation(); + + const methods = useForm({ + mode: 'all', + resolver: yupResolver(AddSecretFormSchema(t)), + defaultValues: { + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]: '', + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME]: '', + }, + }); + + const watchType = methods.watch(SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE); + + const onSubmit = async (values: SecretFormValues) => { + try { + if (currentProject) { + await postSecretTrigger({ + requestSecretCreate: { + project_id: Number(currentProject), + name: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME], + type: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE], + value: getSecretBodyFromValues(values), + }, + }).unwrap(); + methods.reset(); + toast.success( + t('secretSuccessfullyCreated', { + ns: 'toasts', + secretName: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME], + }), + ); + setIsModalOpen(false); + } + } catch (e) { + handleRequestErrorCatch(e); + } + }; + + const { isValid, isSubmitting } = methods.formState; + + return ( + <> + + + +
+ + + + {t('addSecret', { ns: 'settings' })} + + + {t('secretType', { ns: 'settings' })} + ( + + )} + /> + + + {t('secretName', { ns: 'settings' })}* + ( + + )} + /> + + {watchType ? ( + + + } + loading={isSubmitting || postSecretTriggerState.isLoading}> + {t('addSecret')} + + + ) : null} + + +
+
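// Usage sketch for the submit handler above: getSecretBodyFromValues() (from
// @entities/secret-form-block/lib/functions.ts, earlier in this diff) nests the
// credential fields under the provider key before the 'secrets' POST request.
// Sample literals below are assumptions, including PROVIDERS.AWS === 'aws'.
import { getSecretBodyFromValues } from '@entities/secret-form-block/lib/functions.ts';

const sampleBody = getSecretBodyFromValues({
  type: 'aws', // SECRET_TYPE picks the switch branch
  name: 'prod-aws', // SECRET_NAME travels beside the body, not inside it
  AWS_ACCESS_KEY_ID: 'AKIA...',
  AWS_SECRET_ACCESS_KEY: '...',
});
// sampleBody === { aws: { AWS_ACCESS_KEY_ID: 'AKIA...', AWS_SECRET_ACCESS_KEY: '...' } }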
+
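// Validation sketch: the requiredField() helper in
// @features/add-secret/model/validation.ts makes a credential field mandatory
// only while SECRET_TYPE matches its provider. The same idea reduced to plain
// yup, with hypothetical literal keys in place of the shared constants:
import * as yup from 'yup';

const sketchSchema = yup.object({
  type: yup.string().required(),
  AWS_ACCESS_KEY_ID: yup
    .mixed()
    .when('type', ([type]) => (type === 'aws' ? yup.string().required() : yup.mixed().optional())),
});
// sketchSchema.isValid({ type: 'hetzner' }) resolves true: the AWS key is not demanded.
// sketchSchema.isValid({ type: 'aws' }) resolves false until AWS_ACCESS_KEY_ID is filled.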
+ + ); +}; + +export default SettingsAddSecret; diff --git a/console/ui/src/features/bradcrumbs/hooks/useBreadcrumbs.tsx b/console/ui/src/features/bradcrumbs/hooks/useBreadcrumbs.tsx new file mode 100644 index 000000000..b322dbf3f --- /dev/null +++ b/console/ui/src/features/bradcrumbs/hooks/useBreadcrumbs.tsx @@ -0,0 +1,19 @@ +import { useMatches } from 'react-router-dom'; +import { useTranslation } from 'react-i18next'; + +const useBreadcrumbs = (): { label: string; path: string }[] => { + const { t } = useTranslation(); + const matches = useMatches(); + + return matches + .filter((match: any) => Boolean(match?.handle?.breadcrumb)) + .map((match) => ({ + label: + typeof match.handle.breadcrumb.label === 'function' + ? match.handle.breadcrumb.label({ ...match.params }) + : t(match.handle.breadcrumb.label, { ns: match.handle.breadcrumb.ns }), + path: match.handle.breadcrumb?.path ?? match.pathname, + })); +}; + +export default useBreadcrumbs; diff --git a/console/ui/src/features/bradcrumbs/index.ts b/console/ui/src/features/bradcrumbs/index.ts new file mode 100644 index 000000000..3c2d1b2b0 --- /dev/null +++ b/console/ui/src/features/bradcrumbs/index.ts @@ -0,0 +1,3 @@ +import Breadcrumbs from '@/features/bradcrumbs/ui'; + +export default Breadcrumbs; diff --git a/console/ui/src/features/bradcrumbs/ui/index.tsx b/console/ui/src/features/bradcrumbs/ui/index.tsx new file mode 100644 index 000000000..8e5eb62ae --- /dev/null +++ b/console/ui/src/features/bradcrumbs/ui/index.tsx @@ -0,0 +1,35 @@ +import { FC } from 'react'; +import BreadcrumbsItem from '@entities/breadcumb-item'; +import useBreadcrumbs from '@/features/bradcrumbs/hooks/useBreadcrumbs.tsx'; +import { Breadcrumbs as MaterialBreadcrumbs, Icon, Typography } from '@mui/material'; +import RouterPaths from '@app/router/routerPathsConfig'; +import HomeOutlinedIcon from '@mui/icons-material/HomeOutlined'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; +import { Link } from 'react-router-dom'; + +const Breadcrumbs: FC = () => { + const breadcrumbs = useBreadcrumbs(); + + return ( + + + + + + + {breadcrumbs.map((breadcrumb, index) => + index === breadcrumbs.length - 1 ? 
( + + {breadcrumb.label} + + ) : ( + + ), + )} + + ); +}; + +export default Breadcrumbs; diff --git a/console/ui/src/features/cluster-secret-modal/index.ts b/console/ui/src/features/cluster-secret-modal/index.ts new file mode 100644 index 000000000..1e2556575 --- /dev/null +++ b/console/ui/src/features/cluster-secret-modal/index.ts @@ -0,0 +1,3 @@ +import ClusterSecretModal from '@features/cluster-secret-modal/ui'; + +export default ClusterSecretModal; diff --git a/console/ui/src/features/cluster-secret-modal/lib/functions.ts b/console/ui/src/features/cluster-secret-modal/lib/functions.ts new file mode 100644 index 000000000..5e2864857 --- /dev/null +++ b/console/ui/src/features/cluster-secret-modal/lib/functions.ts @@ -0,0 +1,169 @@ +import { RequestClusterCreate } from '@shared/api/api/clusters.ts'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { PROVIDER_CODE_TO_ANSIBLE_USER_MAP } from '@features/cluster-secret-modal/model/constants.ts'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import { ClusterFormValues } from '@features/cluster-secret-modal/model/types.ts'; + +import { + SECRET_MODAL_CONTENT_BODY_FORM_FIELDS, + SECRET_MODAL_CONTENT_FORM_FIELD_NAMES, +} from '@entities/secret-form-block/model/constants.ts'; + +export const getCommonExtraVars = (values: ClusterFormValues) => ({ + postgresql_version: values[CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION], + patroni_cluster_name: values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME], +}); + +export const getCloudProviderExtraVars = (values: ClusterFormValues) => ({ + cloud_provider: values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code, + server_type: values[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG].code, + server_location: values[CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG].code, + server_count: values[CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT], + volume_size: values[CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT], + ssh_public_keys: values[CLUSTER_FORM_FIELD_NAMES.SSH_PUBLIC_KEY].split('\n').map((key) => `'${key}'`), + ansible_user: PROVIDER_CODE_TO_ANSIBLE_USER_MAP[values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code], + ...getCommonExtraVars(values), + ...values[CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG].cloud_image.image, +}); + +export const getLocalMachineExtraVars = (values: ClusterFormValues, secretId?: number) => ({ + ...(values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_VIP_ADDRESS] + ? { cluster_vip: values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_VIP_ADDRESS] } + : {}), + ...(values[CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER] ? { with_haproxy_load_balancing: true } : {}), + ...(!secretId && + !values[CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET] && + values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD] === AUTHENTICATION_METHODS.PASSWORD + ? { + ansible_user: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME], + ansible_ssh_pass: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD], + } + : {}), + ...getCommonExtraVars(values), +}); + +export const getLocalMachineEnvs = (values: ClusterFormValues, secretId?: number) => ({ + ...(values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD] === AUTHENTICATION_METHODS.SSH && + !values[CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET] && + !secretId + ? 
{ + SSH_PRIVATE_KEY_CONTENT: btoa(values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY]), + } + : {}), + ANSIBLE_INVENTORY_JSON: btoa( + JSON.stringify({ + all: { + vars: { + ansible_user: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME], + ...(values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD] === AUTHENTICATION_METHODS.PASSWORD + ? { + ansible_ssh_pass: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD], + ansible_sudo_pass: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD], + } + : {}), + }, + children: { + balancers: { + hosts: values[CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER] + ? values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS].reduce( + (acc, server) => ({ + ...acc, + [server[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]]: { + ansible_host: server[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS], + }, + }), + {}, + ) + : {}, + }, + consul_instances: { + hosts: {}, + }, + etcd_cluster: { + hosts: values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS].reduce( + (acc, server) => ({ + ...acc, + [server[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]]: { + ansible_host: server[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS], + }, + }), + {}, + ), + }, + master: { + hosts: { + [values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS][0][CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]]: { + hostname: values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS][0][CLUSTER_FORM_FIELD_NAMES.HOSTNAME], + ansible_host: values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS][0][CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS], + server_location: + values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]?.[0]?.[CLUSTER_FORM_FIELD_NAMES.LOCATION], + postgresql_exists: false, + }, + }, + }, + ...(values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS].length > 1 + ? { + replica: { + hosts: values[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS].slice(1).reduce( + (acc, server) => ({ + ...acc, + [server[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]]: { + hostname: server?.[CLUSTER_FORM_FIELD_NAMES.HOSTNAME], + ansible_host: server?.[CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS], + server_location: server?.[CLUSTER_FORM_FIELD_NAMES.LOCATION], + postgresql_exists: false, + }, + }), + {}, + ), + }, + } + : {}), + postgres_cluster: { + children: { + master: {}, + replica: {}, + }, + }, + }, + }, + }), + ), +}); + +const convertObjectToRequiredFormat = (object: Record<string, unknown>) => { + return Object.entries(object).reduce((acc: string[], [key, value]) => [...acc, `${key}=${value}`], []); +}; + +export const mapFormValuesToRequestFields = ({ + values, + secretId, + projectId, + envs, +}: { + values: ClusterFormValues; + secretId?: number; + projectId: number; + envs?: object; +}): RequestClusterCreate => ({ + project_id: projectId, + name: values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME], + environment_id: values[CLUSTER_FORM_FIELD_NAMES.ENVIRONMENT_ID], + description: values[CLUSTER_FORM_FIELD_NAMES.DESCRIPTION], + ...(secretId ? { auth_info: { secret_id: secretId } } : {}), + ...(values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code === PROVIDERS.LOCAL + ? { envs: convertObjectToRequiredFormat(getLocalMachineEnvs(values, secretId)) } + : envs && values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code !== PROVIDERS.LOCAL + ? { + envs: convertObjectToRequiredFormat( + Object.fromEntries(Object.entries(envs).filter(([key]) => SECRET_MODAL_CONTENT_BODY_FORM_FIELDS?.[key])), + ), + } + : {}), + extra_vars: convertObjectToRequiredFormat( + values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code === PROVIDERS.LOCAL + ?
getLocalMachineExtraVars(values, secretId) + : getCloudProviderExtraVars(values), + ), +}); diff --git a/console/ui/src/features/cluster-secret-modal/model/constants.ts b/console/ui/src/features/cluster-secret-modal/model/constants.ts new file mode 100644 index 000000000..04ee1d8df --- /dev/null +++ b/console/ui/src/features/cluster-secret-modal/model/constants.ts @@ -0,0 +1,16 @@ +import { PROVIDERS } from '@shared/config/constants.ts'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +export const CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES = Object.freeze({ + ...SECRET_MODAL_CONTENT_FORM_FIELD_NAMES, + IS_SAVE_TO_CONSOLE: 'isSaveToConsole', +}); + +export const PROVIDER_CODE_TO_ANSIBLE_USER_MAP = Object.freeze({ + [PROVIDERS.AWS]: 'ubuntu', + [PROVIDERS.GCP]: 'root', + [PROVIDERS.AZURE]: 'azureadmin', + [PROVIDERS.DIGITAL_OCEAN]: 'root', + [PROVIDERS.HETZNER]: 'root', +}); diff --git a/console/ui/src/features/cluster-secret-modal/model/types.ts b/console/ui/src/features/cluster-secret-modal/model/types.ts new file mode 100644 index 000000000..48f055a98 --- /dev/null +++ b/console/ui/src/features/cluster-secret-modal/model/types.ts @@ -0,0 +1,53 @@ +import { CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES } from '@features/cluster-secret-modal/model/constants.ts'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { + DeploymentInfoCloudRegion, + DeploymentInstanceType, + ResponseDeploymentInfo, +} from '@shared/api/api/deployments.ts'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import { ClusterDatabaseServer } from '@widgets/cluster-form/model/types.ts'; +import { SecretFormValues } from '@entities/secret-form-block/model/types.ts'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +export interface ClusterSecretModalProps { + isClusterFormSubmitting?: boolean; + isClusterFormDisabled?: boolean; +} + +export interface ClusterSecretModalFormValues extends SecretFormValues { + [CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES.IS_SAVE_TO_CONSOLE]: boolean; +} + +interface ClusterCloudProviderFormValues { + [CLUSTER_FORM_FIELD_NAMES.REGION]?: string; + [CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]?: DeploymentInfoCloudRegion; + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_TYPE]?: 'small' | 'medium' | 'large'; + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?: DeploymentInstanceType; + [CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]?: number; + [CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]?: number; + [CLUSTER_FORM_FIELD_NAMES.SSH_PUBLIC_KEY]?: string; +} + +interface ClusterLocalMachineProviderFormValues + extends Pick< + SecretFormValues, + | typeof SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME + | typeof SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD + | typeof SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY + > { + [CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]?: ClusterDatabaseServer[]; + [CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD]?: (typeof AUTHENTICATION_METHODS)[keyof typeof AUTHENTICATION_METHODS]; + [CLUSTER_FORM_FIELD_NAMES.SECRET_KEY_NAME]?: string; + [CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE]?: boolean; + [CLUSTER_FORM_FIELD_NAMES.CLUSTER_VIP_ADDRESS]?: string; + [CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER]?: boolean; +} + +export interface ClusterFormValues extends ClusterCloudProviderFormValues, ClusterLocalMachineProviderFormValues { + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: ResponseDeploymentInfo; + [CLUSTER_FORM_FIELD_NAMES.ENVIRONMENT_ID]: number; +
[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]: string; + [CLUSTER_FORM_FIELD_NAMES.DESCRIPTION]: string; + [CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]: number; +} diff --git a/console/ui/src/features/cluster-secret-modal/model/validation.ts b/console/ui/src/features/cluster-secret-modal/model/validation.ts new file mode 100644 index 000000000..e69de29bb diff --git a/console/ui/src/features/cluster-secret-modal/ui/index.tsx b/console/ui/src/features/cluster-secret-modal/ui/index.tsx new file mode 100644 index 000000000..1bfeb6804 --- /dev/null +++ b/console/ui/src/features/cluster-secret-modal/ui/index.tsx @@ -0,0 +1,219 @@ +import { FC, useRef, useState } from 'react'; +import { + Box, + Button, + Card, + Checkbox, + CircularProgress, + FormControlLabel, + MenuItem, + Modal, + Stack, + TextField, +} from '@mui/material'; +import { Controller, FormProvider, useForm, useFormContext } from 'react-hook-form'; +import { useTranslation } from 'react-i18next'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { generateAbsoluteRouterPath, handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { useNavigate } from 'react-router-dom'; +import { ClusterSecretModalFormValues, ClusterSecretModalProps } from '@features/cluster-secret-modal/model/types.ts'; +import { LoadingButton } from '@mui/lab'; +import { useGetSecretsQuery, usePostSecretsMutation } from '@shared/api/api/secrets.ts'; +import { CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES } from '@features/cluster-secret-modal/model/constants.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { toast } from 'react-toastify'; +import { mapFormValuesToRequestFields } from '@features/cluster-secret-modal/lib/functions.ts'; +import { usePostClustersMutation } from '@shared/api/api/clusters.ts'; +import SecretFormBlock from '@entities/secret-form-block'; + +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; +import { getSecretBodyFromValues } from '@entities/secret-form-block/lib/functions.ts'; + +const ClusterSecretModal: FC = ({ isClusterFormDisabled = false }) => { + const { t } = useTranslation(['clusters', 'shared', 'toasts']); + const navigate = useNavigate(); + const createSecretResultRef = useRef(null); // ref is used for case when user saves secret and uses its ID to create cluster + + const currentProject = useAppSelector(selectCurrentProject); + + const [isModalOpen, setIsModalOpen] = useState(false); + + const { watch, getValues } = useFormContext(); + + const watchProvider = watch(CLUSTER_FORM_FIELD_NAMES.PROVIDER); + + const secrets = useGetSecretsQuery({ type: watchProvider?.code, projectId: currentProject }); + + const [addSecretTrigger, addSecretTriggerState] = usePostSecretsMutation(); + const [addClusterTrigger, addClusterTriggerState] = usePostClustersMutation(); + + const methods = useForm(); + + const watchIsSaveToConsole = methods.watch(CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES.IS_SAVE_TO_CONSOLE); + + const handleModalOpenState = (isOpen: boolean) => () => setIsModalOpen(isOpen); + + const cancelHandler = () => navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + + const onSubmit = async (values: ClusterSecretModalFormValues) => { + const clusterFormValues = getValues(); + try { + if (values[CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES.IS_SAVE_TO_CONSOLE] 
&& !createSecretResultRef?.current) { + createSecretResultRef.current = await addSecretTrigger({ + requestSecretCreate: { + project_id: Number(currentProject), + type: clusterFormValues[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code, + name: values[CLUSTER_SECRET_MODAL_FORM_FIELD_NAMES.SECRET_NAME], + value: getSecretBodyFromValues({ + ...values, + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]: clusterFormValues.provider.code, + }), + }, + }).unwrap(); + toast.success( + t('secretSuccessfullyCreated', { + ns: 'toasts', + secretName: values[SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_NAME], + }), + ); + } + if (!secrets.data?.data?.length && !createSecretResultRef?.current?.id) { + await addClusterTrigger({ + requestClusterCreate: mapFormValuesToRequestFields({ + values: clusterFormValues, + envs: values, + projectId: Number(currentProject), + }), + }).unwrap(); + } else { + await addClusterTrigger({ + requestClusterCreate: mapFormValuesToRequestFields({ + values: clusterFormValues, + secretId: createSecretResultRef.current?.id ?? values[CLUSTER_FORM_FIELD_NAMES.SECRET_ID], + projectId: Number(currentProject), + }), + }).unwrap(); + } + toast.success( + t('clusterSuccessfullyCreated', { + ns: 'toasts', + clusterName: clusterFormValues[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME], + }), + ); + navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + } catch (e) { + handleRequestErrorCatch(e); + } finally { + setIsModalOpen(false); + } + }; + + const { isValid, isDirty, isSubmitting } = methods.formState; + + return ( + + + {t('createCluster', { ns: 'clusters' })} + + + + +
+ + + {secrets.data?.data?.length > 1 ? ( + ( + + {secrets.data.data.map((secret) => ( + + {secret?.name} + + ))} + + )} + /> + ) : ( + <> + + {watchIsSaveToConsole ? ( + ( + + )} + /> + ) : null} + ( + } + checked={value} + onChange={onChange} + label={t('saveToConsole', { ns: 'clusters' })} + /> + )} + /> + + )} + } + fullWidth={false}> + {t('createCluster', { ns: 'clusters' })} + + + +
+
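// Wire-format sketch: convertObjectToRequiredFormat() in
// @features/cluster-secret-modal/lib/functions.ts flattens the envs/extra_vars
// objects into the 'KEY=value' string array that RequestClusterCreate carries.
// The equivalent one-liner, with assumed sample values:
const extraVars = Object.entries({
  postgresql_version: 16,
  patroni_cluster_name: 'demo-cluster',
  server_count: 3,
}).map(([key, value]) => `${key}=${value}`);
// extraVars === ['postgresql_version=16', 'patroni_cluster_name=demo-cluster', 'server_count=3']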
+
+
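// Control-flow recap of the onSubmit handler above, as descriptive comments:
// 1) With "save to console" checked, the secret is created first and cached in
//    createSecretResultRef, so a retried submit does not create a duplicate:
//      createSecretResultRef.current = await addSecretTrigger({ ... }).unwrap();
// 2) The cluster request then references the stored secret via
//    auth_info: { secret_id } (mapFormValuesToRequestFields adds it when a
//    secretId is passed) instead of shipping credentials inline as envs:
//      await addClusterTrigger({ requestClusterCreate: mapFormValuesToRequestFields({
//        values, secretId: createSecretResultRef.current?.id, projectId }) }).unwrap();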
+ +
+ ); +}; + +export default ClusterSecretModal; diff --git a/console/ui/src/features/clusters-overview-table-row-actions/index.ts b/console/ui/src/features/clusters-overview-table-row-actions/index.ts new file mode 100644 index 000000000..11ec5d22e --- /dev/null +++ b/console/ui/src/features/clusters-overview-table-row-actions/index.ts @@ -0,0 +1,3 @@ +import ClustersOverviewTableRowActions from '@features/clusters-overview-table-row-actions/ui'; + +export default ClustersOverviewTableRowActions; diff --git a/console/ui/src/features/clusters-overview-table-row-actions/ui/index.tsx b/console/ui/src/features/clusters-overview-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..624ec646f --- /dev/null +++ b/console/ui/src/features/clusters-overview-table-row-actions/ui/index.tsx @@ -0,0 +1,47 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import { ListItemIcon, MenuItem } from '@mui/material'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import { toast } from 'react-toastify'; +import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline'; +import { useDeleteServersByIdMutation } from '@shared/api/api/other.ts'; +import { CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES } from '@widgets/cluster-overview-table/model/constants.ts'; +import { useLazyGetClustersByIdQuery } from '@shared/api/api/clusters.ts'; +import { useParams } from 'react-router-dom'; + +const ClustersOverviewTableRowActions: FC = ({ closeMenu, row }) => { + const { t } = useTranslation(['shared', 'toasts']); + const { clusterId } = useParams(); + + const [removeServerTrigger] = useDeleteServersByIdMutation(); + const [getClusterTrigger] = useLazyGetClustersByIdQuery(); + + const handleButtonClick = async () => { + try { + await removeServerTrigger({ id: row.original[CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ID] }).unwrap(); + toast.success( + t('serverSuccessfullyRemoved', { + ns: 'toasts', + serverName: row.original[CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.NAME], + }), + ); + await getClusterTrigger({ id: clusterId }); + } catch (e) { + handleRequestErrorCatch(e); + } finally { + closeMenu(); + } + }; + + return [ + + + + + {t('removeFromList', { ns: 'shared' })} + , + ]; +}; + +export default ClustersOverviewTableRowActions; diff --git a/console/ui/src/features/clusters-table-buttons/index.ts b/console/ui/src/features/clusters-table-buttons/index.ts new file mode 100644 index 000000000..d8302b69b --- /dev/null +++ b/console/ui/src/features/clusters-table-buttons/index.ts @@ -0,0 +1,3 @@ +import ClustersTableButtons from '@features/clusters-table-buttons/ui'; + +export default ClustersTableButtons; diff --git a/console/ui/src/features/clusters-table-buttons/model/types.ts b/console/ui/src/features/clusters-table-buttons/model/types.ts new file mode 100644 index 000000000..ed84d60ba --- /dev/null +++ b/console/ui/src/features/clusters-table-buttons/model/types.ts @@ -0,0 +1,3 @@ +export interface ClustersTableButtonsProps { + refetch: () => void; +} diff --git a/console/ui/src/features/clusters-table-buttons/ui/index.tsx b/console/ui/src/features/clusters-table-buttons/ui/index.tsx new file mode 100644 index 000000000..3d12a84ac --- /dev/null +++ b/console/ui/src/features/clusters-table-buttons/ui/index.tsx @@ -0,0 +1,35 @@ +import { useTranslation } from 'react-i18next'; +import { Button, Stack } from '@mui/material'; +import { useNavigate } from 'react-router-dom'; +import { generateAbsoluteRouterPath } 
from '@shared/lib/functions.ts'; +import RouterPaths from '@app/router/routerPathsConfig'; +import RefreshIcon from '@mui/icons-material/Refresh'; +import AddIcon from '@mui/icons-material/Add'; +import { ClustersTableButtonsProps } from '@features/clusters-table-buttons/model/types.ts'; +import { FC } from 'react'; + +const ClustersTableButtons: FC = ({ refetch }) => { + const { t } = useTranslation(['clusters, shared']); + const navigate = useNavigate(); + + const handleRefresh = () => { + refetch(); + }; + + const handleCreateCluster = () => { + navigate(generateAbsoluteRouterPath(RouterPaths.clusters.add.absolutePath)); + }; + + return ( + + + + + ); +}; + +export default ClustersTableButtons; diff --git a/console/ui/src/features/clusters-table-row-actions/index.ts b/console/ui/src/features/clusters-table-row-actions/index.ts new file mode 100644 index 000000000..84a49ed2d --- /dev/null +++ b/console/ui/src/features/clusters-table-row-actions/index.ts @@ -0,0 +1,3 @@ +import ClustersTableRowActions from '@features/clusters-table-row-actions/ui'; + +export default ClustersTableRowActions; diff --git a/console/ui/src/features/clusters-table-row-actions/model/types.ts b/console/ui/src/features/clusters-table-row-actions/model/types.ts new file mode 100644 index 000000000..84135902e --- /dev/null +++ b/console/ui/src/features/clusters-table-row-actions/model/types.ts @@ -0,0 +1,5 @@ +export interface ClustersTableRemoveButtonProps { + clusterId: number; + clusterName: string; + closeMenu: () => void; +} diff --git a/console/ui/src/features/clusters-table-row-actions/ui/ClusterTableRemoveButton.tsx b/console/ui/src/features/clusters-table-row-actions/ui/ClusterTableRemoveButton.tsx new file mode 100644 index 000000000..a168dc9be --- /dev/null +++ b/console/ui/src/features/clusters-table-row-actions/ui/ClusterTableRemoveButton.tsx @@ -0,0 +1,71 @@ +import { FC, useState } from 'react'; +import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline'; +import { + Button, + CircularProgress, + Dialog, + DialogActions, + DialogContent, + DialogContentText, + DialogTitle, + Stack, + Typography, +} from '@mui/material'; +import { LoadingButton } from '@mui/lab'; +import { useTranslation } from 'react-i18next'; +import { ClustersTableRemoveButtonProps } from '@features/clusters-table-row-actions/model/types.ts'; +import { useDeleteClustersByIdMutation } from '@shared/api/api/clusters.ts'; +import { toast } from 'react-toastify'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; + +const ClustersTableRemoveButton: FC = ({ clusterId, clusterName, closeMenu }) => { + const { t } = useTranslation(['clusters', 'shared']); + + const [isModalOpen, setIsModalOpen] = useState(false); + + const [removeClusterTrigger, removeClusterTriggerState] = useDeleteClustersByIdMutation(); + + const handleModalOpenState = (state: boolean) => () => { + setIsModalOpen(state); + if (!state) closeMenu(); + }; + + const handleButtonClick = async () => { + try { + await removeClusterTrigger({ id: clusterId }); + closeMenu(); + toast.success(t('clusterSuccessfullyRemoved', { ns: 'toasts', clusterName })); + } catch (e) { + handleRequestErrorCatch(e); + } + }; + + return ( + <> + + + {t('deleteClusterModalHeader', { ns: 'clusters', clusterName })} + + {t('deleteClusterModalBody', { ns: 'clusters', clusterName })} + + + + } + loading={removeClusterTriggerState.isLoading}> + {t('delete', { ns: 'shared' })} + + + + + ); +}; + +export default ClustersTableRemoveButton; diff --git 
a/console/ui/src/features/clusters-table-row-actions/ui/index.tsx b/console/ui/src/features/clusters-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..4fcfda388 --- /dev/null +++ b/console/ui/src/features/clusters-table-row-actions/ui/index.tsx @@ -0,0 +1,14 @@ +import { FC } from 'react'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import ClustersTableRemoveButton from '@features/clusters-table-row-actions/ui/ClusterTableRemoveButton.tsx'; + +const ClustersTableRowActions: FC = ({ closeMenu, row }) => [ + , +]; + +export default ClustersTableRowActions; diff --git a/console/ui/src/features/environments-table-row-actions/ui/index.tsx b/console/ui/src/features/environments-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..1e7bbad94 --- /dev/null +++ b/console/ui/src/features/environments-table-row-actions/ui/index.tsx @@ -0,0 +1,42 @@ +import { FC } from 'react'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import { useTranslation } from 'react-i18next'; +import { toast } from 'react-toastify'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import { ListItemIcon, MenuItem } from '@mui/material'; +import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline'; +import { useDeleteEnvironmentsByIdMutation } from '@shared/api/api/environments.ts'; +import { ENVIRONMENTS_TABLE_COLUMN_NAMES } from '@widgets/environments-table/model/constants.ts'; + +const EnvironmentsTableRowActions: FC = ({ closeMenu, row }) => { + const { t } = useTranslation(['shared', 'toasts']); + + const [removeEnvironmentTrigger] = useDeleteEnvironmentsByIdMutation(); + + const handleButtonClick = async () => { + try { + await removeEnvironmentTrigger({ id: row.original[ENVIRONMENTS_TABLE_COLUMN_NAMES.ID] }).unwrap(); + toast.success( + t('environmentSuccessfullyRemoved', { + ns: 'toasts', + environmentName: row.original[ENVIRONMENTS_TABLE_COLUMN_NAMES.NAME], + }), + ); + } catch (e) { + handleRequestErrorCatch(e); + } finally { + closeMenu(); + } + }; + + return [ + + + + + {t('delete')} + , + ]; +}; + +export default EnvironmentsTableRowActions; diff --git a/console/ui/src/features/logout-button/index.ts b/console/ui/src/features/logout-button/index.ts new file mode 100644 index 000000000..5940e264f --- /dev/null +++ b/console/ui/src/features/logout-button/index.ts @@ -0,0 +1,3 @@ +import LogoutButton from '@features/logout-button/ui'; + +export default LogoutButton; diff --git a/console/ui/src/features/logout-button/ui/index.tsx b/console/ui/src/features/logout-button/ui/index.tsx new file mode 100644 index 000000000..1c6d206d7 --- /dev/null +++ b/console/ui/src/features/logout-button/ui/index.tsx @@ -0,0 +1,23 @@ +import { FC } from 'react'; +import Logout from '@shared/assets/logoutIcon.svg?react'; +import { Icon } from '@mui/material'; +import { useNavigate } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; + +const LogoutButton: FC = () => { + const navigate = useNavigate(); + + const handleLogout = () => { + localStorage.removeItem('token'); + navigate(generateAbsoluteRouterPath(RouterPaths.login.absolutePath)); + }; + + return ( + + + + ); +}; + +export default LogoutButton; diff --git a/console/ui/src/features/operations-table-buttons/index.ts b/console/ui/src/features/operations-table-buttons/index.ts new file mode 100644 index 000000000..1e48e3d8c --- /dev/null +++ 
b/console/ui/src/features/operations-table-buttons/index.ts
@@ -0,0 +1,3 @@
+import OperationsTableButtons from '@features/operations-table-buttons/ui';
+
+export default OperationsTableButtons;
diff --git a/console/ui/src/features/operations-table-buttons/lib/functions.ts b/console/ui/src/features/operations-table-buttons/lib/functions.ts
new file mode 100644
index 000000000..d342c7c15
--- /dev/null
+++ b/console/ui/src/features/operations-table-buttons/lib/functions.ts
@@ -0,0 +1,61 @@
+import { startOfDay } from 'date-fns/startOfDay';
+import { subDays } from 'date-fns/subDays';
+import { subMonths } from 'date-fns/subMonths';
+import { subYears } from 'date-fns/subYears';
+import { TFunction } from 'i18next';
+import { DATE_RANGE_VALUES } from '@features/operations-table-buttons/model/constants.ts';
+
+export const formatOperationsDate = (date: Date) => startOfDay(date).toISOString();
+
+export const getOperationsTimeNameValue = (name: (typeof DATE_RANGE_VALUES)[keyof typeof DATE_RANGE_VALUES]) => {
+  let value = '';
+
+  switch (name) {
+    case DATE_RANGE_VALUES.LAST_DAY:
+      value = formatOperationsDate(subDays(new Date(), 1));
+      break;
+    case DATE_RANGE_VALUES.LAST_WEEK:
+      value = formatOperationsDate(subDays(new Date(), 7));
+      break;
+    case DATE_RANGE_VALUES.LAST_MONTH:
+      value = formatOperationsDate(subMonths(new Date(), 1));
+      break;
+    case DATE_RANGE_VALUES.LAST_THREE_MONTHS:
+      value = formatOperationsDate(subMonths(new Date(), 3));
+      break;
+    case DATE_RANGE_VALUES.LAST_SIX_MONTHS:
+      value = formatOperationsDate(subMonths(new Date(), 6));
+      break;
+    case DATE_RANGE_VALUES.LAST_YEAR:
+      value = formatOperationsDate(subYears(new Date(), 1));
+      break;
+  }
+  return { name, value };
+};
+
+export const getOperationsDateRangeVariants = (t: TFunction) => [
+  {
+    label: t('lastDay', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_DAY,
+  },
+  {
+    label: t('lastWeek', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_WEEK,
+  },
+  {
+    label: t('lastMonth', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_MONTH,
+  },
+  {
+    label: t('lastThreeMonths', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_THREE_MONTHS,
+  },
+  {
+    label: t('lastSixMonths', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_SIX_MONTHS,
+  },
+  {
+    label: t('lastYear', { ns: 'operations' }),
+    value: DATE_RANGE_VALUES.LAST_YEAR,
+  },
+];
diff --git a/console/ui/src/features/operations-table-buttons/model/constants.ts b/console/ui/src/features/operations-table-buttons/model/constants.ts
new file mode 100644
index 000000000..091945239
--- /dev/null
+++ b/console/ui/src/features/operations-table-buttons/model/constants.ts
@@ -0,0 +1,8 @@
+export const DATE_RANGE_VALUES = Object.freeze({
+  LAST_DAY: 'lastDay',
+  LAST_WEEK: 'lastWeek',
+  LAST_MONTH: 'lastMonth',
+  LAST_THREE_MONTHS: 'lastThreeMonths',
+  LAST_SIX_MONTHS: 'lastSixMonths',
+  LAST_YEAR: 'lastYear',
+});
diff --git a/console/ui/src/features/operations-table-buttons/model/types.ts b/console/ui/src/features/operations-table-buttons/model/types.ts
new file mode 100644
index 000000000..c29b36eaf
--- /dev/null
+++ b/console/ui/src/features/operations-table-buttons/model/types.ts
@@ -0,0 +1,5 @@
+export interface OperationsTableButtonsProps {
+  refetch: () => void;
+  startDate: { name: string; value: string };
+  setStartDate: (date: { name: string; value: string }) => void;
+}
diff --git a/console/ui/src/features/operations-table-buttons/ui/index.tsx b/console/ui/src/features/operations-table-buttons/ui/index.tsx
new file mode 100644
index 000000000..06e55fe71
--- /dev/null
+++ 
b/console/ui/src/features/operations-table-buttons/ui/index.tsx @@ -0,0 +1,53 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { Button, InputAdornment, MenuItem, Stack, TextField } from '@mui/material'; +import RefreshIcon from '@mui/icons-material/Refresh'; +import { OperationsTableButtonsProps } from '@features/operations-table-buttons/model/types.ts'; +import CalendarClockIcon from '@shared/assets/calendarClockICon.svg?react'; +import { + getOperationsDateRangeVariants, + getOperationsTimeNameValue, +} from '@features/operations-table-buttons/lib/functions.ts'; + +const OperationsTableButtons: FC = ({ refetch, startDate, setStartDate }) => { + const { t } = useTranslation('operations'); + + const rangeOptions = getOperationsDateRangeVariants(t); + + const handleChange = (e) => { + setStartDate(getOperationsTimeNameValue(e.target.value)); + }; + + const handleRefresh = () => { + refetch(); + }; + + return ( + + + + + ), + }}> + {rangeOptions.map((option) => ( + + {option.label} + + ))} + + + + ); +}; + +export default OperationsTableButtons; diff --git a/console/ui/src/features/operations-table-row-actions/index.ts b/console/ui/src/features/operations-table-row-actions/index.ts new file mode 100644 index 000000000..cbce8e88f --- /dev/null +++ b/console/ui/src/features/operations-table-row-actions/index.ts @@ -0,0 +1,3 @@ +import OperationsTableRowActions from '@features/operations-table-row-actions/ui'; + +export default OperationsTableRowActions; diff --git a/console/ui/src/features/operations-table-row-actions/ui/index.tsx b/console/ui/src/features/operations-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..0fdca6667 --- /dev/null +++ b/console/ui/src/features/operations-table-row-actions/ui/index.tsx @@ -0,0 +1,25 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { MenuItem } from '@mui/material'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import { useNavigate } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; + +const OperationsTableRowActions: FC = ({ closeMenu, row }) => { + const { t } = useTranslation('operations'); + const navigate = useNavigate(); + + const handleButtonClick = () => { + navigate(generateAbsoluteRouterPath(RouterPaths.operations.log.absolutePath, { operationId: row.original.id })); + closeMenu(); + }; + + return [ + + {t('showDetails')} + , + ]; +}; + +export default OperationsTableRowActions; diff --git a/console/ui/src/features/pojects-table-row-actions/index.ts b/console/ui/src/features/pojects-table-row-actions/index.ts new file mode 100644 index 000000000..8c4e9a64d --- /dev/null +++ b/console/ui/src/features/pojects-table-row-actions/index.ts @@ -0,0 +1,3 @@ +import ProjectsTableRowActions from '@features/pojects-table-row-actions/ui'; + +export default ProjectsTableRowActions; diff --git a/console/ui/src/features/pojects-table-row-actions/ui/index.tsx b/console/ui/src/features/pojects-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..6cffc382d --- /dev/null +++ b/console/ui/src/features/pojects-table-row-actions/ui/index.tsx @@ -0,0 +1,48 @@ +import { FC } from 'react'; +import { ListItemIcon, MenuItem } from '@mui/material'; +import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline'; +import { useTranslation } from 'react-i18next'; +import { useDeleteProjectsByIdMutation } from 
'@shared/api/api/projects.ts'; +import { toast } from 'react-toastify'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import { PROJECTS_TABLE_COLUMN_NAMES } from '@widgets/projects-table/model/constants.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; + +const ProjectsTableRowActions: FC = ({ closeMenu, row }) => { + const { t } = useTranslation(['shared', 'toasts']); + + const currentProject = useAppSelector(selectCurrentProject); + + const [removeProjectTrigger] = useDeleteProjectsByIdMutation(); + + const handleButtonClick = async () => { + try { + if (Number(currentProject) === row.original[PROJECTS_TABLE_COLUMN_NAMES.ID]) + throw t('cannotRemoveActiveProject', { ns: 'toasts' }); + await removeProjectTrigger({ id: row.original[PROJECTS_TABLE_COLUMN_NAMES.ID] }).unwrap(); + toast.success( + t('projectSuccessfullyRemoved', { + ns: 'toasts', + projectName: row.original[PROJECTS_TABLE_COLUMN_NAMES.NAME], + }), + ); + } catch (e) { + handleRequestErrorCatch(e); + } finally { + closeMenu(); + } + }; + + return [ + + + + + {t('delete')} + , + ]; +}; + +export default ProjectsTableRowActions; diff --git a/console/ui/src/features/settings-table-buttons/index.ts b/console/ui/src/features/settings-table-buttons/index.ts new file mode 100644 index 000000000..aadca66d8 --- /dev/null +++ b/console/ui/src/features/settings-table-buttons/index.ts @@ -0,0 +1,3 @@ +import SettingsTableButtons from '@features/settings-table-buttons/ui'; + +export default SettingsTableButtons; diff --git a/console/ui/src/features/settings-table-buttons/lib/functions.ts b/console/ui/src/features/settings-table-buttons/lib/functions.ts new file mode 100644 index 000000000..906dd06ef --- /dev/null +++ b/console/ui/src/features/settings-table-buttons/lib/functions.ts @@ -0,0 +1,3 @@ +export const handleDelete = () => {}; +export const handleEdit = () => {}; +export const handleAddSecret = () => {}; diff --git a/console/ui/src/features/settings-table-buttons/ui/index.tsx b/console/ui/src/features/settings-table-buttons/ui/index.tsx new file mode 100644 index 000000000..f451c805c --- /dev/null +++ b/console/ui/src/features/settings-table-buttons/ui/index.tsx @@ -0,0 +1,16 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { Stack } from '@mui/material'; +import SettingsAddSecret from '@features/add-secret'; + +const SettingsTableButtons: React.FC = () => { + const { t } = useTranslation(['shared', 'settings']); + + return ( + + + + ); +}; + +export default SettingsTableButtons; diff --git a/console/ui/src/features/settings-table-row-actions/index.ts b/console/ui/src/features/settings-table-row-actions/index.ts new file mode 100644 index 000000000..3a966bb65 --- /dev/null +++ b/console/ui/src/features/settings-table-row-actions/index.ts @@ -0,0 +1,3 @@ +import SettingsTableRowActions from '@features/settings-table-row-actions/ui'; + +export default SettingsTableRowActions; diff --git a/console/ui/src/features/settings-table-row-actions/model/constants.ts b/console/ui/src/features/settings-table-row-actions/model/constants.ts new file mode 100644 index 000000000..ceb434bcc --- /dev/null +++ b/console/ui/src/features/settings-table-row-actions/model/constants.ts @@ -0,0 +1 @@ +export const SECRET_TOAST_DISPLAY_CLUSTERS_LIMIT = 10; diff --git 
a/console/ui/src/features/settings-table-row-actions/ui/index.tsx b/console/ui/src/features/settings-table-row-actions/ui/index.tsx new file mode 100644 index 000000000..dcc4a1423 --- /dev/null +++ b/console/ui/src/features/settings-table-row-actions/ui/index.tsx @@ -0,0 +1,57 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { ListItemIcon, MenuItem } from '@mui/material'; +import { useDeleteSecretsByIdMutation } from '@shared/api/api/secrets.ts'; +import { TableRowActionsProps } from '@shared/model/types.ts'; +import DeleteOutlineIcon from '@mui/icons-material/DeleteOutline'; +import { toast } from 'react-toastify'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import { SECRETS_TABLE_COLUMN_NAMES } from '@widgets/secrets-table/model/constants.ts'; +import { SECRET_TOAST_DISPLAY_CLUSTERS_LIMIT } from '@features/settings-table-row-actions/model/constants.ts'; + +const SettingsTableRowActions: FC = ({ closeMenu, row }) => { + const { t } = useTranslation(['shared', 'toasts']); + + const [removeSecretTrigger] = useDeleteSecretsByIdMutation(); + + const handleButtonClick = async () => { + try { + if (row.original[SECRETS_TABLE_COLUMN_NAMES.USED].toString() === 'true') { + const usingClusterList = row.original[SECRETS_TABLE_COLUMN_NAMES.USED_BY]?.split(', '); + toast.warning( + t('secretsSecretIsUsed', { + ns: 'toasts', + count: usingClusterList?.length, + clusterNames: + usingClusterList?.length > SECRET_TOAST_DISPLAY_CLUSTERS_LIMIT + ? `${[...usingClusterList.slice(0, SECRET_TOAST_DISPLAY_CLUSTERS_LIMIT), '...'].join(', ')}` + : row.original[SECRETS_TABLE_COLUMN_NAMES.USED_BY], + }), + ); + } else { + await removeSecretTrigger({ id: row.original[SECRETS_TABLE_COLUMN_NAMES.ID] }).unwrap(); + toast.success( + t('secretSuccessfullyRemoved', { + ns: 'toasts', + secretName: row.original[SECRETS_TABLE_COLUMN_NAMES.NAME], + }), + ); + } + } catch (e) { + handleRequestErrorCatch(e); + } finally { + closeMenu(); + } + }; + + return [ + + + + + {t('delete')} + , + ]; +}; + +export default SettingsTableRowActions; diff --git a/console/ui/src/pages/404/index.ts b/console/ui/src/pages/404/index.ts new file mode 100644 index 000000000..29e8d7e6a --- /dev/null +++ b/console/ui/src/pages/404/index.ts @@ -0,0 +1,3 @@ +import Page404 from '@pages/404/ui'; + +export default Page404; diff --git a/console/ui/src/pages/404/ui/illustration.tsx b/console/ui/src/pages/404/ui/illustration.tsx new file mode 100644 index 000000000..00fc35061 --- /dev/null +++ b/console/ui/src/pages/404/ui/illustration.tsx @@ -0,0 +1,14 @@ +import { ComponentPropsWithoutRef } from 'react'; + +const Illustration = (props: ComponentPropsWithoutRef<'svg'>) => { + return ( + + + + ); +}; + +export default Illustration; diff --git a/console/ui/src/pages/404/ui/index.tsx b/console/ui/src/pages/404/ui/index.tsx new file mode 100644 index 000000000..d11064428 --- /dev/null +++ b/console/ui/src/pages/404/ui/index.tsx @@ -0,0 +1,39 @@ +import { FC } from 'react'; +import Illustration from '@pages/404/ui/illustration.tsx'; +import { useTranslation } from 'react-i18next'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; +import { useNavigate } from 'react-router-dom'; +import { Box, Button, Container, Typography } from '@mui/material'; +import theme from '@shared/theme/theme.ts'; +import { grey } from '@mui/material/colors'; + +const Page404: FC = () => { + const { t } = useTranslation('shared'); + const 
navigate = useNavigate(); + + const handleReturnButton = () => navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + + return ( + + + + + + {t('404Title')} + + + {t('404Text')} + + + + + + + + ); +}; + +export default Page404; diff --git a/console/ui/src/pages/add-cluster/index.ts b/console/ui/src/pages/add-cluster/index.ts new file mode 100644 index 000000000..b4ea872b3 --- /dev/null +++ b/console/ui/src/pages/add-cluster/index.ts @@ -0,0 +1,3 @@ +import AddCluster from '@pages/add-cluster/ui'; + +export default AddCluster; diff --git a/console/ui/src/pages/add-cluster/ui/index.tsx b/console/ui/src/pages/add-cluster/ui/index.tsx new file mode 100644 index 000000000..102b3724a --- /dev/null +++ b/console/ui/src/pages/add-cluster/ui/index.tsx @@ -0,0 +1,8 @@ +import { FC } from 'react'; +import ClusterForm from '@widgets/cluster-form'; + +const AddCluster: FC = () => { + return ; +}; + +export default AddCluster; diff --git a/console/ui/src/pages/clusters/index.ts b/console/ui/src/pages/clusters/index.ts new file mode 100644 index 000000000..f3e8d8643 --- /dev/null +++ b/console/ui/src/pages/clusters/index.ts @@ -0,0 +1,3 @@ +import Clusters from '@pages/clusters/ui'; + +export default Clusters; diff --git a/console/ui/src/pages/clusters/ui/index.tsx b/console/ui/src/pages/clusters/ui/index.tsx new file mode 100644 index 000000000..06daa706b --- /dev/null +++ b/console/ui/src/pages/clusters/ui/index.tsx @@ -0,0 +1,13 @@ +import { FC } from 'react'; +import { Box } from '@mui/material'; +import ClustersTable from '@widgets/clusters-table'; + +const Clusters: FC = () => { + return ( + + + + ); +}; + +export default Clusters; diff --git a/console/ui/src/pages/login/index.ts b/console/ui/src/pages/login/index.ts new file mode 100644 index 000000000..6895e2a10 --- /dev/null +++ b/console/ui/src/pages/login/index.ts @@ -0,0 +1,3 @@ +import Login from '@pages/login/ui'; + +export default Login; diff --git a/console/ui/src/pages/login/model/constants.ts b/console/ui/src/pages/login/model/constants.ts new file mode 100644 index 000000000..cfeaebc7c --- /dev/null +++ b/console/ui/src/pages/login/model/constants.ts @@ -0,0 +1,3 @@ +export const LOGIN_FORM_FIELD_NAMES = Object.freeze({ + TOKEN: 'token', +}); diff --git a/console/ui/src/pages/login/model/types.ts b/console/ui/src/pages/login/model/types.ts new file mode 100644 index 000000000..50729a4d5 --- /dev/null +++ b/console/ui/src/pages/login/model/types.ts @@ -0,0 +1,5 @@ +import { LOGIN_FORM_FIELD_NAMES } from '@pages/login/model/constants.ts'; + +export interface LoginFormValues { + [LOGIN_FORM_FIELD_NAMES.TOKEN]: string; +} diff --git a/console/ui/src/pages/login/ui/index.tsx b/console/ui/src/pages/login/ui/index.tsx new file mode 100644 index 000000000..333931496 --- /dev/null +++ b/console/ui/src/pages/login/ui/index.tsx @@ -0,0 +1,75 @@ +import { FC } from 'react'; +import { Box, Button, Link, Paper, Stack, TextField, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; +import { Controller, useForm } from 'react-hook-form'; +import { LoginFormValues } from '@pages/login/model/types.ts'; +import { LOGIN_FORM_FIELD_NAMES } from '@pages/login/model/constants.ts'; +import { version } from '../../../../package.json'; +import Logo from '@shared/assets/AutobaseLogo.svg?react'; + +const Login: FC = () => { + const { t } 
= useTranslation('shared'); + const navigate = useNavigate(); + + const { handleSubmit, control } = useForm(); + + const onSubmit = (values: LoginFormValues) => { + localStorage.setItem('token', values[LOGIN_FORM_FIELD_NAMES.TOKEN]); + navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + }; + + return ( + + +
+ + + Autobase for PostgreSQL® + ( + + )} + /> + + + v.{version} + + +
+ + + Powered by  + + GS Labs + + + +
+
+ ); +}; + +export default Login; diff --git a/console/ui/src/pages/operation-log/index.ts b/console/ui/src/pages/operation-log/index.ts new file mode 100644 index 000000000..3922eaa48 --- /dev/null +++ b/console/ui/src/pages/operation-log/index.ts @@ -0,0 +1,3 @@ +import OperationLog from '@pages/operation-log/ui'; + +export default OperationLog; diff --git a/console/ui/src/pages/operation-log/ui/index.tsx b/console/ui/src/pages/operation-log/ui/index.tsx new file mode 100644 index 000000000..965f928ee --- /dev/null +++ b/console/ui/src/pages/operation-log/ui/index.tsx @@ -0,0 +1,39 @@ +import { FC, useEffect, useState } from 'react'; +import { Box } from '@mui/material'; +import { useGetOperationsByIdLogQuery } from '@shared/api/api/operations.ts'; +import { useParams } from 'react-router-dom'; +import { LazyLog } from 'react-lazylog'; +import { useQueryPolling } from '@shared/lib/hooks.tsx'; +import { OPERATION_LOGS_POLLING_INTERVAL } from '@shared/config/constants.ts'; + +const OperationLog: FC = () => { + const { operationId } = useParams(); + const [isStopRequest, setIsStopRequest] = useState(false); + + const log = useQueryPolling( + () => useGetOperationsByIdLogQuery({ id: operationId }), + OPERATION_LOGS_POLLING_INTERVAL, + { stop: isStopRequest }, + ); + + useEffect(() => { + setIsStopRequest(!!log.data?.isComplete); + }, [log.data?.isComplete]); + + return ( + + + + ); +}; + +export default OperationLog; diff --git a/console/ui/src/pages/operations/index.ts b/console/ui/src/pages/operations/index.ts new file mode 100644 index 000000000..7f7303f70 --- /dev/null +++ b/console/ui/src/pages/operations/index.ts @@ -0,0 +1,3 @@ +import Operations from '@pages/operations/ui'; + +export default Operations; diff --git a/console/ui/src/pages/operations/ui/index.tsx b/console/ui/src/pages/operations/ui/index.tsx new file mode 100644 index 000000000..20a608281 --- /dev/null +++ b/console/ui/src/pages/operations/ui/index.tsx @@ -0,0 +1,13 @@ +import { FC } from 'react'; +import OperationsTable from '@widgets/operations-table'; +import { Box } from '@mui/material'; + +const Operations: FC = () => { + return ( + + + + ); +}; + +export default Operations; diff --git a/console/ui/src/pages/overview-cluster/index.ts b/console/ui/src/pages/overview-cluster/index.ts new file mode 100644 index 000000000..00ec7425c --- /dev/null +++ b/console/ui/src/pages/overview-cluster/index.ts @@ -0,0 +1,3 @@ +import OverviewCluster from '@pages/overview-cluster/ui'; + +export default OverviewCluster; diff --git a/console/ui/src/pages/overview-cluster/ui/index.tsx b/console/ui/src/pages/overview-cluster/ui/index.tsx new file mode 100644 index 000000000..aa49b2669 --- /dev/null +++ b/console/ui/src/pages/overview-cluster/ui/index.tsx @@ -0,0 +1,49 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useParams } from 'react-router-dom'; +import { useGetClustersByIdQuery } from '@shared/api/api/clusters.ts'; +import { Grid } from '@mui/material'; +import ClusterOverviewTable from '@widgets/cluster-overview-table'; +import ConnectionInfo from '@entities/connection-info'; +import ClusterInfo from '@entities/cluster-info'; +import { useQueryPolling } from '@shared/lib/hooks.tsx'; +import { CLUSTER_OVERVIEW_POLLING_INTERVAL } from '@shared/config/constants.ts'; +import Spinner from '@shared/ui/spinner'; + +const OverviewCluster: FC = () => { + const { t } = useTranslation('clusters'); + const { clusterId } = useParams(); + + const cluster = useQueryPolling(() => 
useGetClustersByIdQuery({ id: clusterId }), CLUSTER_OVERVIEW_POLLING_INTERVAL); + + const connectionInfo = cluster.data?.connection_info; + + return cluster.isLoading ? ( + + ) : ( + + + + + + + + + + + + ); +}; + +export default OverviewCluster; diff --git a/console/ui/src/pages/settings/index.ts b/console/ui/src/pages/settings/index.ts new file mode 100644 index 000000000..b6b138be8 --- /dev/null +++ b/console/ui/src/pages/settings/index.ts @@ -0,0 +1,3 @@ +import Settings from '@pages/settings/ui'; + +export default Settings; diff --git a/console/ui/src/pages/settings/model/constants.ts b/console/ui/src/pages/settings/model/constants.ts new file mode 100644 index 000000000..57f3e7d6c --- /dev/null +++ b/console/ui/src/pages/settings/model/constants.ts @@ -0,0 +1,20 @@ +import RouterPaths from '@app/router/routerPathsConfig'; + +export const settingsTabsContent = [ + { + translateKey: 'generalSettings', + path: RouterPaths.settings.general.absolutePath, + }, + { + translateKey: 'secrets', + path: RouterPaths.settings.secrets.absolutePath, + }, + { + translateKey: 'projects', + path: RouterPaths.settings.projects.absolutePath, + }, + { + translateKey: 'environments', + path: RouterPaths.settings.environments.absolutePath, + }, +]; diff --git a/console/ui/src/pages/settings/ui/index.tsx b/console/ui/src/pages/settings/ui/index.tsx new file mode 100644 index 000000000..c9b54b0d4 --- /dev/null +++ b/console/ui/src/pages/settings/ui/index.tsx @@ -0,0 +1,31 @@ +import { FC } from 'react'; +import { Divider, Tab, Tabs } from '@mui/material'; +import { Link, Outlet, useLocation } from 'react-router-dom'; +import { settingsTabsContent } from '@pages/settings/model/constants.ts'; +import { useTranslation } from 'react-i18next'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; + +const Settings: FC = () => { + const { t } = useTranslation('settings'); + const location = useLocation(); + + return ( + <> + + {settingsTabsContent.map((tabContent) => ( + + ))} + + + + + ); +}; + +export default Settings; diff --git a/console/ui/src/shared/api/api/clusters.ts b/console/ui/src/shared/api/api/clusters.ts new file mode 100644 index 000000000..548555128 --- /dev/null +++ b/console/ui/src/shared/api/api/clusters.ts @@ -0,0 +1,268 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + postClusters: build.mutation({ + query: (queryArg) => ({ url: `/clusters`, method: 'POST', body: queryArg.requestClusterCreate }), + invalidatesTags: () => [{ type: 'Clusters', id: 'LIST' }], + }), + getClusters: build.query({ + query: (queryArg) => ({ + url: `/clusters`, + params: { + offset: queryArg.offset, + limit: queryArg.limit, + project_id: queryArg.projectId, + name: queryArg.name, + status: queryArg.status, + location: queryArg.location, + environment: queryArg.environment, + server_count: queryArg.serverCount, + postgres_version: queryArg.postgresVersion, + created_at_from: queryArg.createdAtFrom, + created_at_to: queryArg.createdAtTo, + sort_by: queryArg.sortBy, + }, + }), + providesTags: (result) => + result?.data + ? 
[...result.data.map(({ id }) => ({ type: 'Clusters', id })), { type: 'Clusters', id: 'LIST' }] + : [{ type: 'Clusters', id: 'LIST' }], + }), + getClustersDefaultName: build.query({ + query: () => ({ url: `/clusters/default_name` }), + keepUnusedDataFor: 0, + }), + getClustersById: build.query({ + query: (queryArg) => ({ url: `/clusters/${queryArg.id}` }), + providesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + deleteClustersById: build.mutation({ + query: (queryArg) => ({ url: `/clusters/${queryArg.id}`, method: 'DELETE' }), + invalidatesTags: () => [{ type: 'Clusters', id: 'LIST' }], + }), + postClustersByIdRefresh: build.mutation({ + query: (queryArg) => ({ url: `/clusters/${queryArg.id}/refresh`, method: 'POST' }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdReinit: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/reinit`, + method: 'POST', + body: queryArg.requestClusterReinit, + }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdReload: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/reload`, + method: 'POST', + body: queryArg.requestClusterReload, + }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdRestart: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/restart`, + method: 'POST', + body: queryArg.requestClusterRestart, + }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdStop: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/stop`, + method: 'POST', + body: queryArg.requestClusterStop, + }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdStart: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/start`, + method: 'POST', + body: queryArg.requestClusterStart, + }), + invalidatesTags: (result, error, { id }) => [{ type: 'Clusters', id }], + }), + postClustersByIdRemove: build.mutation({ + query: (queryArg) => ({ + url: `/clusters/${queryArg.id}/remove`, + method: 'POST', + body: queryArg.requestClusterRemove, + }), + invalidatesTags: () => [{ type: 'Clusters', id: 'LIST' }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as clustersApi }; +export type PostClustersApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersApiArg = { + requestClusterCreate: RequestClusterCreate; +}; +export type GetClustersApiResponse = /** status 200 OK */ ResponseClustersInfo; +export type GetClustersApiArg = { + offset?: number; + limit?: number; + projectId: number; + /** Filter by name */ + name?: string; + /** Filter by status */ + status?: string; + /** Filter by location */ + location?: string; + /** Filter by environment */ + environment?: string; + /** Filter by server_count */ + serverCount?: number; + /** Filter by postgres_version */ + postgresVersion?: number; + /** Created at after this date */ + createdAtFrom?: string; + /** Created at till this date */ + createdAtTo?: string; + /** Sort by fields. 
Example: sort_by=id,-name,created_at,updated_at + Supported values: + - id + - name + - created_at + - updated_at + - environment + - project + - status + - location + - server_count + - postgres_version + */ + sortBy?: string; +}; +export type GetClustersDefaultNameApiResponse = /** status 200 OK */ ResponseClusterDefaultName; +export type GetClustersDefaultNameApiArg = void; +export type GetClustersByIdApiResponse = /** status 200 OK */ ClusterInfo; +export type GetClustersByIdApiArg = { + id: number; +}; +export type DeleteClustersByIdApiResponse = /** status 204 OK */ void; +export type DeleteClustersByIdApiArg = { + id: number; +}; +export type PostClustersByIdRefreshApiResponse = /** status 200 OK */ ClusterInfo; +export type PostClustersByIdRefreshApiArg = { + id: number; +}; +export type PostClustersByIdReinitApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersByIdReinitApiArg = { + id: number; + requestClusterReinit: RequestClusterReinit; +}; +export type PostClustersByIdReloadApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersByIdReloadApiArg = { + id: number; + requestClusterReload: RequestClusterReload; +}; +export type PostClustersByIdRestartApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersByIdRestartApiArg = { + id: number; + requestClusterRestart: RequestClusterRestart; +}; +export type PostClustersByIdStopApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersByIdStopApiArg = { + id: number; + requestClusterStop: RequestClusterStop; +}; +export type PostClustersByIdStartApiResponse = /** status 200 OK */ ResponseClusterCreate; +export type PostClustersByIdStartApiArg = { + id: number; + requestClusterStart: RequestClusterStart; +}; +export type PostClustersByIdRemoveApiResponse = /** status 204 OK */ void; +export type PostClustersByIdRemoveApiArg = { + id: number; + requestClusterRemove: RequestClusterRemove; +}; +export type ResponseClusterCreate = { + /** unique code for cluster */ + cluster_id?: number; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export type RequestClusterCreate = { + name?: string; + /** Info about cluster */ + description?: string; + /** Info for deployment system authorization */ + auth_info?: { + secret_id?: number; + }; + /** Project for new cluster */ + project_id?: number; + /** Project environment */ + environment_id?: number; + envs?: string[]; + extra_vars?: string[]; +}; +export type ClusterInfoInstance = { + id?: number; + name?: string; + ip?: string; + status?: string; + role?: string; + timeline?: number | null; + lag?: number | null; + tags?: object; + pending_restart?: boolean | null; +}; +export type ClusterInfo = { + id?: number; + name?: string; + description?: string; + status?: string; + creation_time?: string; + environment?: string; + servers?: ClusterInfoInstance[]; + postgres_version?: number; + /** Code of location */ + cluster_location?: string; + /** Project for cluster */ + project_name?: string; + connection_info?: object; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseClustersInfo = { + data?: ClusterInfo[]; + meta?: PaginationInfoForListRequests; +}; +export type ResponseClusterDefaultName = { + name?: string; +}; +export type RequestClusterReinit = object; +export type RequestClusterReload = object; +export type RequestClusterRestart = 
object; +export type RequestClusterStop = object; +export type RequestClusterStart = object; +export type RequestClusterRemove = object; +export const { + usePostClustersMutation, + useGetClustersQuery, + useLazyGetClustersQuery, + useGetClustersDefaultNameQuery, + useLazyGetClustersDefaultNameQuery, + useGetClustersByIdQuery, + useLazyGetClustersByIdQuery, + useDeleteClustersByIdMutation, + usePostClustersByIdRefreshMutation, + usePostClustersByIdReinitMutation, + usePostClustersByIdReloadMutation, + usePostClustersByIdRestartMutation, + usePostClustersByIdStopMutation, + usePostClustersByIdStartMutation, + usePostClustersByIdRemoveMutation, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/deployments.ts b/console/ui/src/shared/api/api/deployments.ts new file mode 100644 index 000000000..87fdca634 --- /dev/null +++ b/console/ui/src/shared/api/api/deployments.ts @@ -0,0 +1,94 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + getExternalDeployments: build.query({ + query: (queryArg) => ({ + url: `/external/deployments`, + params: { offset: queryArg.offset, limit: queryArg.limit }, + }), + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as deploymentsApi }; +export type GetExternalDeploymentsApiResponse = /** status 200 OK */ DeploymentsInfo; +export type GetExternalDeploymentsApiArg = { + offset?: number; + limit?: number; +}; +export type DeploymentCloudImage = { + image?: object; + arch?: string; + os_name?: string; + os_version?: string; + updated_at?: string; +}; +export type DeploymentInfoCloudRegion = { + /** unique parameter for DB */ + code?: string; + /** Field for web */ + name?: string; + /** List of datacenters for this region */ + datacenters?: { + code?: string; + location?: string; + cloud_image?: DeploymentCloudImage; + }[]; +}; +export type DeploymentInstanceType = { + code?: string; + cpu?: number; + ram?: number; + /** Price for 1 instance by hour */ + price_hourly?: number; + /** Price for 1 instance by month */ + price_monthly?: number; + /** Price currency */ + currency?: string; +}; +export type ResponseDeploymentInfo = { + code?: string; + description?: string; + avatar_url?: string; + /** List of available regions for current deployment */ + cloud_regions?: DeploymentInfoCloudRegion[]; + /** Lists of available instance types */ + instance_types?: { + small?: DeploymentInstanceType[] | null; + medium?: DeploymentInstanceType[]; + large?: DeploymentInstanceType[]; + }; + /** Hardware disks info */ + volumes?: { + /** Volume type */ + volume_type?: string; + /** Volume description */ + volume_description?: string; + /** Sets in GB */ + min_size?: number; + /** Sets in GB */ + max_size?: number; + /** Price for disk by months */ + price_monthly?: number; + /** Price currency */ + currency?: string; + /** Default volume */ + is_default?: boolean | null; + }[]; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type DeploymentsInfo = { + data?: ResponseDeploymentInfo[]; + meta?: PaginationInfoForListRequests; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export const { useGetExternalDeploymentsQuery, useLazyGetExternalDeploymentsQuery } = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/environments.ts b/console/ui/src/shared/api/api/environments.ts new file mode 100644 index 000000000..5fedef2e5 --- /dev/null +++ 
b/console/ui/src/shared/api/api/environments.ts @@ -0,0 +1,70 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + getEnvironments: build.query({ + query: (queryArg) => ({ url: `/environments`, params: { limit: queryArg.limit, offset: queryArg.offset } }), + providesTags: (result) => + result?.data + ? [ + ...result.data.map(({ id }) => ({ type: 'Environments', id }) as const), + { type: 'Environments', id: 'LIST' }, + ] + : [{ type: 'Environments', id: 'LIST' }], + }), + postEnvironments: build.mutation({ + query: (queryArg) => ({ url: `/environments`, method: 'POST', body: queryArg.requestEnvironment }), + invalidatesTags: () => [{ type: 'Environments', id: 'LIST' }], + }), + deleteEnvironmentsById: build.mutation({ + query: (queryArg) => ({ url: `/environments/${queryArg.id}`, method: 'DELETE' }), + invalidatesTags: () => [{ type: 'Environments', id: 'LIST' }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as environmentsApi }; +export type GetEnvironmentsApiResponse = /** status 200 OK */ ResponseEnvironmentsList; +export type GetEnvironmentsApiArg = { + limit?: number; + offset?: number; +}; +export type PostEnvironmentsApiResponse = /** status 200 OK */ ResponseEnvironment; +export type PostEnvironmentsApiArg = { + requestEnvironment: RequestEnvironment; +}; +export type DeleteEnvironmentsByIdApiResponse = /** status 204 OK */ void; +export type DeleteEnvironmentsByIdApiArg = { + id: number; +}; +export type ResponseEnvironment = { + id?: number; + name?: string; + description?: string | null; + created_at?: string; + updated_at?: string | null; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseEnvironmentsList = { + data?: ResponseEnvironment[]; + meta?: PaginationInfoForListRequests; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export type RequestEnvironment = { + name?: string; + description?: string; +}; +export const { + useGetEnvironmentsQuery, + useLazyGetEnvironmentsQuery, + usePostEnvironmentsMutation, + useDeleteEnvironmentsByIdMutation, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/operations.ts b/console/ui/src/shared/api/api/operations.ts new file mode 100644 index 000000000..b8517ebb1 --- /dev/null +++ b/console/ui/src/shared/api/api/operations.ts @@ -0,0 +1,89 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + getOperations: build.query({ + query: (queryArg) => ({ + url: `/operations`, + params: { + project_id: queryArg.projectId, + start_date: queryArg.startDate, + end_date: queryArg.endDate, + cluster_name: queryArg.clusterName, + type: queryArg['type'], + status: queryArg.status, + sort_by: queryArg.sortBy, + limit: queryArg.limit, + offset: queryArg.offset, + }, + }), + providesTags: (result) => + result?.data + ? 
[...result.data.map(({ id }) => ({ type: 'Operations', id }) as const), { type: 'Operations', id: 'LIST' }] + : [{ type: 'Operations', id: 'LIST' }], + }), + getOperationsByIdLog: build.query({ + query: (queryArg) => ({ url: `/operations/${queryArg.id}/log` }), + transformResponse: (response, meta) => ({ + log: response, + isComplete: meta.response.headers.get('x-log-completed')?.toString() === 'true', + }), + providesTags: (result, error, { id }) => [{ type: 'Operations', id }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as operationsApi }; +export type GetOperationsApiResponse = /** status 200 OK */ ResponseOperationsList; +export type GetOperationsApiArg = { + /** Required parameter for filter */ + projectId: number; + /** Operations started after this date */ + startDate: string; + /** Operations started till this date */ + endDate: string; + /** Filter by cluster_name */ + clusterName?: string; + /** Filter by type */ + type?: string; + /** Filter by status */ + status?: string; + /** Sort by fields. Example: sort_by=cluster_name,-type,status,id */ + sortBy?: string; + limit?: number; + offset?: number; +}; +export type GetOperationsByIdLogApiResponse = /** status 200 OK */ string; +export type GetOperationsByIdLogApiArg = { + /** Operation id */ + id: number; +}; +export type ResponseOperation = { + id?: number; + cluster_name?: string; + started?: string; + finished?: string | null; + type?: string; + status?: string; + environment?: string; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseOperationsList = { + data?: ResponseOperation[]; + meta?: PaginationInfoForListRequests; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export const { + useGetOperationsQuery, + useLazyGetOperationsQuery, + useGetOperationsByIdLogQuery, + useLazyGetOperationsByIdLogQuery, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/other.ts b/console/ui/src/shared/api/api/other.ts new file mode 100644 index 000000000..dcc529c88 --- /dev/null +++ b/console/ui/src/shared/api/api/other.ts @@ -0,0 +1,86 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + getVersion: build.query({ + query: () => ({ url: `/version` }), + }), + getDatabaseExtensions: build.query({ + query: (queryArg) => ({ + url: `/database/extensions`, + params: { + offset: queryArg.offset, + limit: queryArg.limit, + extension_type: queryArg.extensionType, + postgres_version: queryArg.postgresVersion, + }, + }), + }), + getPostgresVersions: build.query({ + query: () => ({ url: `/postgres_versions` }), + }), + deleteServersById: build.mutation({ + query: (queryArg) => ({ url: `/servers/${queryArg.id}`, method: 'DELETE' }), + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as otherApi }; +export type GetVersionApiResponse = /** status 200 OK */ VersionResponse; +export type GetVersionApiArg = void; +export type GetDatabaseExtensionsApiResponse = /** status 200 OK */ ResponseDatabaseExtensions; +export type GetDatabaseExtensionsApiArg = { + offset?: number; + limit?: number; + extensionType?: 'all' | 'contrib' | 'third_party'; + postgresVersion?: string; +}; +export type GetPostgresVersionsApiResponse = /** status 200 OK */ ResponsePostgresVersions; +export type GetPostgresVersionsApiArg = void; +export type DeleteServersByIdApiResponse = /** status 204 OK */ void; 
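+// Usage sketch (illustrative only, assuming the standard RTK Query hook API;
+// `serverId` is a hypothetical variable). The hooks exported at the bottom of
+// this file are derived from the endpoint definitions above, so a component can
+// call, for example:
+//   const { data: versions } = useGetPostgresVersionsQuery();
+//   const [removeServer] = useDeleteServersByIdMutation();
+//   await removeServer({ id: serverId }).unwrap(); // unwrap() rejects on HTTP errors
+// The clusters-overview row action above uses this pattern with
+// row.original[CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ID] as the id.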
+export type DeleteServersByIdApiArg = { + id: number; +}; +export type VersionResponse = { + version?: string; +}; +export type ResponseDatabaseExtension = { + name?: string; + description?: string | null; + url?: string | null; + image?: string | null; + postgres_min_version?: string | null; + postgres_max_version?: string | null; + contrib?: boolean; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseDatabaseExtensions = { + data?: ResponseDatabaseExtension[]; + meta?: PaginationInfoForListRequests; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export type ResponsePostgresVersion = { + major_version?: number; + release_date?: string; + end_of_life?: string; +}; +export type ResponsePostgresVersions = { + data?: ResponsePostgresVersion[]; +}; +export const { + useGetVersionQuery, + useLazyGetVersionQuery, + useGetDatabaseExtensionsQuery, + useLazyGetDatabaseExtensionsQuery, + useGetPostgresVersionsQuery, + useLazyGetPostgresVersionsQuery, + useDeleteServersByIdMutation, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/projects.ts b/console/ui/src/shared/api/api/projects.ts new file mode 100644 index 000000000..08642f759 --- /dev/null +++ b/console/ui/src/shared/api/api/projects.ts @@ -0,0 +1,81 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + postProjects: build.mutation({ + query: (queryArg) => ({ url: `/projects`, method: 'POST', body: queryArg.requestProjectCreate }), + invalidatesTags: () => [{ type: 'Projects', id: 'LIST' }], + }), + getProjects: build.query({ + query: (queryArg) => ({ url: `/projects`, params: { limit: queryArg.limit, offset: queryArg.offset } }), + providesTags: (result) => + result?.data + ? 
[...result.data.map(({ id }) => ({ type: 'Projects', id }) as const), { type: 'Projects', id: 'LIST' }] + : [{ type: 'Projects', id: 'LIST' }], + }), + patchProjectsById: build.mutation({ + query: (queryArg) => ({ url: `/projects/${queryArg.id}`, method: 'PATCH', body: queryArg.requestProjectPatch }), + invalidatesTags: (result, error, { id }) => [{ type: 'Projects', id }], + }), + deleteProjectsById: build.mutation({ + query: (queryArg) => ({ url: `/projects/${queryArg.id}`, method: 'DELETE' }), + invalidatesTags: () => [{ type: 'Projects', id: 'LIST' }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as projectsApi }; +export type PostProjectsApiResponse = /** status 200 OK */ ResponseProject; +export type PostProjectsApiArg = { + requestProjectCreate: RequestProjectCreate; +}; +export type GetProjectsApiResponse = /** status 200 OK */ ResponseProjectsList; +export type GetProjectsApiArg = { + limit?: number; + offset?: number; +}; +export type PatchProjectsByIdApiResponse = /** status 200 OK */ ResponseProject; +export type PatchProjectsByIdApiArg = { + id: number; + requestProjectPatch: RequestProjectPatch; +}; +export type DeleteProjectsByIdApiResponse = /** status 204 OK */ void; +export type DeleteProjectsByIdApiArg = { + id: number; +}; +export type ResponseProject = { + id?: number; + name?: string; + description?: string | null; + created_at?: string; + updated_at?: string | null; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export type RequestProjectCreate = { + name?: string; + description?: string; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseProjectsList = { + data?: ResponseProject[]; + meta?: PaginationInfoForListRequests; +}; +export type RequestProjectPatch = { + name?: string | null; + description?: string | null; +}; +export const { + usePostProjectsMutation, + useGetProjectsQuery, + useLazyGetProjectsQuery, + usePatchProjectsByIdMutation, + useDeleteProjectsByIdMutation, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/secrets.ts b/console/ui/src/shared/api/api/secrets.ts new file mode 100644 index 000000000..b9b385499 --- /dev/null +++ b/console/ui/src/shared/api/api/secrets.ts @@ -0,0 +1,164 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + postSecrets: build.mutation({ + query: (queryArg) => ({ url: `/secrets`, method: 'POST', body: queryArg.requestSecretCreate }), + invalidatesTags: () => [{ type: 'Secrets', id: 'LIST' }], + }), + getSecrets: build.query({ + query: (queryArg) => ({ + url: `/secrets`, + params: { + limit: queryArg.limit, + offset: queryArg.offset, + project_id: queryArg.projectId, + name: queryArg.name, + type: queryArg.type, + sort_by: queryArg.sortBy, + }, + }), + providesTags: (result) => + result?.data + ? 
[...result.data.map(({ id }) => ({ type: 'Secrets', id }) as const), { type: 'Secrets', id: 'LIST' }] + : [{ type: 'Secrets', id: 'LIST' }], + }), + patchSecretsById: build.mutation({ + query: (queryArg) => ({ url: `/secrets/${queryArg.id}`, method: 'PATCH', body: queryArg.requestSecretPatch }), + invalidatesTags: (result, error, { id }) => [{ type: 'Secrets', id }], + }), + deleteSecretsById: build.mutation({ + query: (queryArg) => ({ url: `/secrets/${queryArg.id}`, method: 'DELETE' }), + invalidatesTags: () => [{ type: 'Secrets', id: 'LIST' }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as secretsApi }; +export type PostSecretsApiResponse = /** status 200 OK */ ResponseSecretInfo; + +export interface PostSecretsApiArg { + requestSecretCreate: RequestSecretCreate; +} + +export type GetSecretsApiResponse = /** status 200 OK */ ResponseSecretInfoList; + +export interface GetSecretsApiArg { + limit?: number; + offset?: number; + projectId: number; + /** Filter by name */ + name?: string; + /** Filter by type */ + type?: string; + /** Sort by fields. Example: sort_by=id,name,-type */ + sortBy?: string; +} + +export type PatchSecretsByIdApiResponse = /** status 200 OK */ ResponseSecretInfo; + +export interface PatchSecretsByIdApiArg { + id: number; + requestSecretPatch: RequestSecretPatch; +} + +export type DeleteSecretsByIdApiResponse = /** status 204 OK */ void; + +export interface DeleteSecretsByIdApiArg { + id: number; +} + +export type SecretType = 'aws' | 'gcp' | 'hetzner' | 'ssh_key' | 'digitalocean' | 'password' | 'azure'; + +export interface ResponseSecretInfo { + id?: number; + project_id?: number; + name?: string; + type?: SecretType; + created_at?: string; + updated_at?: string | null; + is_used?: boolean; + used_by_clusters?: string | null; +} + +export interface ErrorObject { + code?: number; + title?: string; + description?: string; +} + +export interface RequestSecretValueAws { + AWS_ACCESS_KEY_ID?: string; + AWS_SECRET_ACCESS_KEY?: string; +} + +export interface RequestSecretValueGcp { + GCP_SERVICE_ACCOUNT_CONTENTS?: string; +} + +export interface RequestSecretValueHetzner { + HCLOUD_API_TOKEN?: string; +} + +export interface RequestSecretValueSshKey { + SSH_PRIVATE_KEY?: string; +} + +export interface RequestSecretValueDigitalOcean { + DO_API_TOKEN?: string; +} + +export interface RequestSecretValuePassword { + USERNAME?: string; + PASSWORD?: string; +} + +export interface RequestSecretValueAzure { + AZURE_SUBSCRIPTION_ID?: string; + AZURE_CLIENT_ID?: string; + AZURE_SECRET?: string; + AZURE_TENANT?: string; +} + +export interface RequestSecretValue { + aws?: RequestSecretValueAws; + gcp?: RequestSecretValueGcp; + hetzner?: RequestSecretValueHetzner; + ssh_key?: RequestSecretValueSshKey; + digitalocean?: RequestSecretValueDigitalOcean; + password?: RequestSecretValuePassword; + azure?: RequestSecretValueAzure; +} + +export interface RequestSecretCreate { + project_id?: number; + name?: string; + type?: SecretType; + value?: RequestSecretValue; +} + +export interface PaginationInfoForListRequests { + offset?: number | null; + limit?: number | null; + count?: number | null; +} + +export interface ResponseSecretInfoList { + data?: ResponseSecretInfo[]; + meta?: PaginationInfoForListRequests; +} + +export interface RequestSecretPatch { + name?: string | null; + type?: string | null; + /** Secret value in base64 */ + value?: string | null; +} + +export const { + usePostSecretsMutation, + useGetSecretsQuery, + useLazyGetSecretsQuery, + 
usePatchSecretsByIdMutation, + useDeleteSecretsByIdMutation, +} = injectedRtkApi; diff --git a/console/ui/src/shared/api/api/settings.ts b/console/ui/src/shared/api/api/settings.ts new file mode 100644 index 000000000..e40d90f83 --- /dev/null +++ b/console/ui/src/shared/api/api/settings.ts @@ -0,0 +1,76 @@ +import { baseApi as api } from '../baseApi.ts'; + +const injectedRtkApi = api.injectEndpoints({ + endpoints: (build) => ({ + postSettings: build.mutation({ + query: (queryArg) => ({ url: `/settings`, method: 'POST', body: queryArg.requestCreateSetting }), + invalidatesTags: () => [{ type: 'Settings', id: 'LIST' }], + }), + getSettings: build.query({ + query: (queryArg) => ({ + url: `/settings`, + params: { name: queryArg.name, offset: queryArg.offset, limit: queryArg.limit }, + }), + providesTags: (result) => + result?.data + ? [...result.data.map(({ id }) => ({ type: 'Settings', id }) as const), { type: 'Settings', id: 'LIST' }] + : [{ type: 'Settings', id: 'LIST' }], + }), + patchSettingsByName: build.mutation({ + query: (queryArg) => ({ + url: `/settings/${queryArg.name}`, + method: 'PATCH', + body: queryArg.requestChangeSetting, + }), + // The query arg carries `name` but no `id`, so invalidate using the patched row + // returned in the response, falling back to the LIST tag so the list refetches. + invalidatesTags: (result) => [{ type: 'Settings', id: result?.id ?? 'LIST' }], + }), + }), + overrideExisting: false, +}); +export { injectedRtkApi as settingsApi }; +export type PostSettingsApiResponse = /** status 200 OK */ ResponseSetting; +export type PostSettingsApiArg = { + requestCreateSetting: RequestCreateSetting; +}; +export type GetSettingsApiResponse = /** status 200 OK */ ResponseSettings; +export type GetSettingsApiArg = { + /** Filter by name */ + name?: string; + offset?: number; + limit?: number; +}; +export type PatchSettingsByNameApiResponse = /** status 200 OK */ ResponseSetting; +export type PatchSettingsByNameApiArg = { + name: string; + requestChangeSetting: RequestChangeSetting; +}; +export type ResponseSetting = { + id?: number; + name?: string; + value?: object; + created_at?: string; + updated_at?: string | null; +}; +export type ErrorObject = { + code?: number; + title?: string; + description?: string; +}; +export type RequestCreateSetting = { + name?: string; + value?: object; +}; +export type PaginationInfoForListRequests = { + offset?: number | null; + limit?: number | null; + count?: number | null; +}; +export type ResponseSettings = { + data?: ResponseSetting[]; + meta?: PaginationInfoForListRequests; +}; +export type RequestChangeSetting = { + value?: object | null; +}; +export const { usePostSettingsMutation, useGetSettingsQuery, useLazyGetSettingsQuery, usePatchSettingsByNameMutation } = + injectedRtkApi; diff --git a/console/ui/src/shared/api/apiConfig.ts b/console/ui/src/shared/api/apiConfig.ts new file mode 100644 index 000000000..bb3d13457 --- /dev/null +++ b/console/ui/src/shared/api/apiConfig.ts @@ -0,0 +1,45 @@ +import type { ConfigFile } from '@rtk-query/codegen-openapi'; + +const config: ConfigFile = { + schemaFile: '../../../../service/api/swagger.yaml', + apiFile: './baseApi.ts', + apiImport: 'baseApi', + outputFiles: { + './generatedApi/clusters.ts': { + filterEndpoints: [/cluster/i], + exportName: 'clustersApi', + }, + './generatedApi/environments.ts': { + filterEndpoints: [/environment/i], + exportName: 'environmentsApi', + }, + './generatedApi/projects.ts': { + filterEndpoints: [/project/i], + exportName: 'projectsApi', + }, + './generatedApi/secrets.ts': { + filterEndpoints: [/secret/i], + exportName: 'secretsApi', + }, + './generatedApi/operations.ts': { + filterEndpoints: [/operation/i], + exportName: 
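+ // Editor's note (illustrative): this file configures @rtk-query/codegen-openapi. Each outputFiles entry filters endpoints out of swagger.yaml by regex and emits one typed API slice per domain; the final `other.ts` regex catches everything unmatched. The slices are typically regenerated with: npx @rtk-query/codegen-openapi apiConfig.ts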
'operationsApi', + }, + './generatedApi/deployments.ts': { + filterEndpoints: [/deployment/i], + exportName: 'deploymentsApi', + }, + './generatedApi/settings.ts': { + filterEndpoints: [/settings/i], + exportName: 'settingsApi', + }, + './generatedApi/other.ts': { + filterEndpoints: [/^((?!(cluster|environment|project|secret|operation|deployment|settings)).)*$/i], + exportName: 'otherApi', + }, + }, + exportName: 'postgresClusterConsoleApi', + hooks: { queries: true, lazyQueries: true, mutations: true }, +}; + +export default config; diff --git a/console/ui/src/shared/api/baseApi.ts b/console/ui/src/shared/api/baseApi.ts new file mode 100644 index 000000000..efdc6d703 --- /dev/null +++ b/console/ui/src/shared/api/baseApi.ts @@ -0,0 +1,16 @@ +import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react'; +import { API_URL } from '@shared/config/constants.ts'; +import i18n from 'i18next'; + +export const baseApi = createApi({ + baseQuery: fetchBaseQuery({ + baseUrl: API_URL as string, + prepareHeaders: (headers, { endpoint }) => { + headers.set('Accept-Language', i18n.language); + if (endpoint !== 'login') headers.set('Authorization', `Bearer ${String(localStorage.getItem('token'))}`); + return headers; + }, + }), + tagTypes: ['Clusters', 'Operations', 'Secrets', 'Projects', 'Environments', 'Settings'], + endpoints: () => ({}), +}); diff --git a/console/ui/src/shared/api/enhancedSecretsApi.ts b/console/ui/src/shared/api/enhancedSecretsApi.ts new file mode 100644 index 000000000..a51cbdaa2 --- /dev/null +++ b/console/ui/src/shared/api/enhancedSecretsApi.ts @@ -0,0 +1,15 @@ +import { secretsApi } from '@shared/api/api/secrets.ts'; + +const enhancedSecretsApi = secretsApi.enhanceEndpoints({ + addTagTypes: ['Secrets'], + endpoints: { + getSecrets: { + providesTags: ['Secrets'], + }, + postSecrets: { + invalidatesTags: ['Secrets'], + }, + }, +}); + +export default enhancedSecretsApi; diff --git a/console/ui/src/shared/assets/AutobaseLogo.svg b/console/ui/src/shared/assets/AutobaseLogo.svg new file mode 100644 index 000000000..1ae0493c0 --- /dev/null +++ b/console/ui/src/shared/assets/AutobaseLogo.svg @@ -0,0 +1,3 @@ + + + diff --git a/console/ui/src/shared/assets/calendarClockICon.svg b/console/ui/src/shared/assets/calendarClockICon.svg new file mode 100644 index 000000000..d7f110ca2 --- /dev/null +++ b/console/ui/src/shared/assets/calendarClockICon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/checkIcon.svg b/console/ui/src/shared/assets/checkIcon.svg new file mode 100644 index 000000000..cb4912567 --- /dev/null +++ b/console/ui/src/shared/assets/checkIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/clustersIcon.svg b/console/ui/src/shared/assets/clustersIcon.svg new file mode 100644 index 000000000..07dadf4fb --- /dev/null +++ b/console/ui/src/shared/assets/clustersIcon.svg @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/shared/assets/collapseIcon.svg b/console/ui/src/shared/assets/collapseIcon.svg new file mode 100644 index 000000000..028af5dec --- /dev/null +++ b/console/ui/src/shared/assets/collapseIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/cpuIcon.svg b/console/ui/src/shared/assets/cpuIcon.svg new file mode 100644 index 000000000..a1584a84f --- /dev/null +++ b/console/ui/src/shared/assets/cpuIcon.svg @@ -0,0 +1,2 @@ + + diff --git 
a/console/ui/src/shared/assets/databaseIcon.svg b/console/ui/src/shared/assets/databaseIcon.svg new file mode 100644 index 000000000..4050d1013 --- /dev/null +++ b/console/ui/src/shared/assets/databaseIcon.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/shared/assets/docsIcon.svg b/console/ui/src/shared/assets/docsIcon.svg new file mode 100644 index 000000000..30baf2753 --- /dev/null +++ b/console/ui/src/shared/assets/docsIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/flagIcon.svg b/console/ui/src/shared/assets/flagIcon.svg new file mode 100644 index 000000000..f0bd3555f --- /dev/null +++ b/console/ui/src/shared/assets/flagIcon.svg @@ -0,0 +1,4 @@ + + + + diff --git a/console/ui/src/shared/assets/githubIcon.svg b/console/ui/src/shared/assets/githubIcon.svg new file mode 100644 index 000000000..d9a232c77 --- /dev/null +++ b/console/ui/src/shared/assets/githubIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/instanceIcon.svg b/console/ui/src/shared/assets/instanceIcon.svg new file mode 100644 index 000000000..cc86f34a8 --- /dev/null +++ b/console/ui/src/shared/assets/instanceIcon.svg @@ -0,0 +1,11 @@ + + + + + + + + + diff --git a/console/ui/src/shared/assets/lanIcon.svg b/console/ui/src/shared/assets/lanIcon.svg new file mode 100644 index 000000000..d720f1904 --- /dev/null +++ b/console/ui/src/shared/assets/lanIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/logoutIcon.svg b/console/ui/src/shared/assets/logoutIcon.svg new file mode 100644 index 000000000..ecd6beb3d --- /dev/null +++ b/console/ui/src/shared/assets/logoutIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/memoryIcon.svg b/console/ui/src/shared/assets/memoryIcon.svg new file mode 100644 index 000000000..c17bf6a46 --- /dev/null +++ b/console/ui/src/shared/assets/memoryIcon.svg @@ -0,0 +1,2 @@ + + diff --git a/console/ui/src/shared/assets/operationsIcon.svg b/console/ui/src/shared/assets/operationsIcon.svg new file mode 100644 index 000000000..106e3b29a --- /dev/null +++ b/console/ui/src/shared/assets/operationsIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/ramIcon.svg b/console/ui/src/shared/assets/ramIcon.svg new file mode 100644 index 000000000..ce5a9e135 --- /dev/null +++ b/console/ui/src/shared/assets/ramIcon.svg @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/shared/assets/serversIcon.svg b/console/ui/src/shared/assets/serversIcon.svg new file mode 100644 index 000000000..e0f3b08e6 --- /dev/null +++ b/console/ui/src/shared/assets/serversIcon.svg @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/shared/assets/settingsIcon.svg b/console/ui/src/shared/assets/settingsIcon.svg new file mode 100644 index 000000000..59ba4ea07 --- /dev/null +++ b/console/ui/src/shared/assets/settingsIcon.svg @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/shared/assets/sponsorIcon.svg b/console/ui/src/shared/assets/sponsorIcon.svg new file mode 100644 index 000000000..53e1ee1a5 --- /dev/null +++ 
b/console/ui/src/shared/assets/sponsorIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/assets/storageIcon.svg b/console/ui/src/shared/assets/storageIcon.svg new file mode 100644 index 000000000..ba2cfa33d --- /dev/null +++ b/console/ui/src/shared/assets/storageIcon.svg @@ -0,0 +1,11 @@ + + + + + + + + + diff --git a/console/ui/src/shared/assets/supportIcon.svg b/console/ui/src/shared/assets/supportIcon.svg new file mode 100644 index 000000000..805593207 --- /dev/null +++ b/console/ui/src/shared/assets/supportIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/shared/config/constants.ts b/console/ui/src/shared/config/constants.ts new file mode 100644 index 000000000..a58153d65 --- /dev/null +++ b/console/ui/src/shared/config/constants.ts @@ -0,0 +1,32 @@ +import { getEnvVariable } from '@shared/lib/functions.ts'; + +export const LOCALES = Object.freeze({ + EN_US: 'en', +}); + +export const API_URL = getEnvVariable('VITE_API_URL'); +export const AUTH_TOKEN = getEnvVariable('VITE_AUTH_TOKEN'); +export const CLUSTERS_POLLING_INTERVAL = getEnvVariable('VITE_CLUSTERS_POLLING_INTERVAL'); +export const CLUSTER_OVERVIEW_POLLING_INTERVAL = getEnvVariable('VITE_CLUSTER_OVERVIEW_POLLING_INTERVAL'); +export const OPERATIONS_POLLING_INTERVAL = getEnvVariable('VITE_OPERATIONS_POLLING_INTERVAL'); +export const OPERATION_LOGS_POLLING_INTERVAL = getEnvVariable('VITE_OPERATION_LOGS_POLLING_INTERVAL'); + +export const PAGINATION_LIMIT_OPTIONS = Object.freeze([ + { value: 5, label: 5 }, + { value: 10, label: 10 }, + { + value: 25, + label: 25, + }, + { value: 50, label: 50 }, + { value: 100, label: 100 }, +]); + +export const PROVIDERS = Object.freeze({ + AWS: 'aws', + GCP: 'gcp', + AZURE: 'azure', + DIGITAL_OCEAN: 'digitalocean', + HETZNER: 'hetzner', + LOCAL: 'local', +}); diff --git a/console/ui/src/shared/i18n/i18n.ts b/console/ui/src/shared/i18n/i18n.ts new file mode 100644 index 000000000..1e931c145 --- /dev/null +++ b/console/ui/src/shared/i18n/i18n.ts @@ -0,0 +1,36 @@ +import i18n from 'i18next'; +import { initReactI18next } from 'react-i18next'; +import shared from './locales/en/shared.json'; +import operations from './locales/en/operations.json'; +import settings from './locales/en/settings.json'; +import clusters from './locales/en/clusters.json'; +import validation from './locales/en/validation.json'; +import toasts from './locales/en/toasts.json'; +import { LOCALES } from '../config/constants'; + +import LanguageDetector from 'i18next-browser-languagedetector'; + +const resources = { + en: { + shared, + clusters, + operations, + settings, + validation, + toasts, + }, +}; + +i18n + .use(LanguageDetector) + .use(initReactI18next) + .init({ + resources, + ns: ['shared', 'clusters', 'operations', 'settings', 'validation', 'toasts'], + fallbackLng: LOCALES.EN_US, + supportedLngs: Object.values(LOCALES), + returnNull: false, + debug: false, + }); + +export default i18n; diff --git a/console/ui/src/shared/i18n/locales/en/clusters.json b/console/ui/src/shared/i18n/locales/en/clusters.json new file mode 100644 index 000000000..9a9324537 --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/clusters.json @@ -0,0 +1,70 @@ +{ + "clusters": "Clusters", + "cluster": "Cluster", + "createCluster": "Create cluster", + "createPostgresCluster": "Create Postgres Cluster", + "clusterName": "Cluster name", + "creationTime": "Creation time", + "servers": "Servers", + "server": "Server", + "postgresVersion": "Postgres version", + "location": 
"Location", + "noPostgresClustersTitle": "No Postgres Clusters", + "noPostgresClustersLine1": "Deploy Postgres to supported cloud providers: AWS, GCP, Azure, DigitalOcean and Hetzner Cloud. All components are installed within your cloud account.\nOr Install on your existing resources, whether it's any other cloud or your own data center.\n\nTo get started, just click “{{createCluster}}” button.", + "selectDeploymentDestination": "Select deployment destination", + "clustersSearchPlaceholder": "Enter property name or value", + "selectCloudRegion": "Select cloud region", + "selectInstanceType": "Select instance type", + "numberOfInstances": "Number of instances", + "dataDiskStorage": "Data disk storage", + "sshPublicKey": "SSH public key", + "sshKey": "SSH key", + "sshKeyAuthDescription": "Connect to your services with an SSH key pair", + "passwordAuthDescription": "Connect to your services with an SSH user and password", + "description": "Description", + "sshKeyLocalMachinePlaceholder": "Paste your private SSH key here to access to the servers during deployment.\nEnsure that the corresponding public SSH key has been added to the ~/.ssh/authorized_keys file on all specified servers.", + "sshKeyCloudProviderPlaceholder": "Paste the SSH public keys here (one per line). These keys will be added to the database server's ~/.ssh/authorized_keys file.\nAt least one public key must be provided to ensure access to the server after deployment.", + "descriptionPlaceholder": "Optional. Here you can specify any additional information about the cluster or leave notes.", + "summary": "Summary", + "name": "Name", + "cloud": "Cloud", + "region": "Region", + "instanceType": "Instance type", + "estimatedMonthlyPrice": "Estimated monthly price", + "estimatedCostAdditionalInfo": "* Payment is made directly to the cloud provider.\nAn estimated cost is provided here. Please refer to the <0>official pricing page to confirm the actual costs.", + "yourOwn": "Your Own", + "machines": "Machines", + "yourOwnMachinesTooltip": "Use \"Your Own Machines\" to deploy the cluster on existing servers in another cloud provider or your own data center.", + "perServer": "per server", + "perDisk": "per disk", + "hostname": "Hostname", + "locationPlaceholder": "region/datacenter (optional)", + "databaseServers": "Database servers", + "authenticationMethod": "Authentication method", + "saveToConsole": "Save to console", + "clusterVipAddress": "Cluster VIP address", + "clusterVipAddressPlaceholder": "Optional. Specify the (unused) IP address here to provide a single entry point for client access to databases in the cluster. Not for cloud environments.", + "loadBalancers": "Load balancers", + "loadBalancing": "Load balancing", + "haproxyLoadBalancer": "HAProxy load balancer", + "haproxyLoadBalancerTooltip": "Deploy a HAProxy Load Balancer. 
This feature supports load balancing for read operations, facilitating effective scale-out strategies through the use of read-only replicas.", + "numberOfServers": "Number of servers", + "highAvailability": "High availability", + "highAvailabilityInfo": "*A minimum of 3 servers is required to ensure high availability.", + "host": "Host", + "role": "Role", + "state": "State", + "timeline": "Timeline", + "lagInMb": "Lag in MB", + "pendingRestart": "Pending restart", + "scheduledRestart": "Scheduled restart", + "tags": "Tags", + "connectionInfo": "Connection info", + "clusterInfo": "Cluster info", + "ipAddress": "IP Address", + "port": "Port", + "backups": "Backups", + "useDefinedSecret": "Use defined secret?", + "deleteClusterModalHeader": "Delete {{clusterName}}?", + "deleteClusterModalBody": "Are you sure you want to delete cluster \"{{clusterName}}\"?" +} diff --git a/console/ui/src/shared/i18n/locales/en/operations.json b/console/ui/src/shared/i18n/locales/en/operations.json new file mode 100644 index 000000000..6b63bc625 --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/operations.json @@ -0,0 +1,16 @@ +{ + "operations": "Operations", + "operation": "Operation", + "started": "Started", + "finished": "Finished", + "creationTime": "Creation time", + "type": "Type", + "showDetails": "Show details", + "lastDay": "Last day", + "lastWeek": "Last week", + "lastMonth": "Last month", + "lastThreeMonths": "Last three months", + "lastSixMonths": "Last six months", + "lastYear": "Last year", + "log": "Log" +} \ No newline at end of file diff --git a/console/ui/src/shared/i18n/locales/en/settings.json b/console/ui/src/shared/i18n/locales/en/settings.json new file mode 100644 index 000000000..3158a47a8 --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/settings.json @@ -0,0 +1,27 @@ +{ + "settings": "Settings", + "setting": "Setting", + "generalSettings": "General settings", + "environments": "Environments", + "environmentName": "Environment name", + "addEnvironment": "Add environment", + "secret": "Secret", + "secrets": "Secrets", + "proxyServer": "Proxy server", + "proxyServerInfo": "Specify your corporate HTTP and HTTPS proxy server settings below (optional).\nThis enables package downloads during cluster deployment in environments without direct internet access.", + "addSecret": "Add secret", + "secretType": "Secret type", + "secretName": "Secret name", + "addProject": "Add project", + "projectName": "Project name", + "settingsAwsSecretInfo": "Enter the access key to deploy the cluster to your AWS cloud provider account below. See the <0>official documentation for instructions on creating an access key.", + "settingsGcpSecretInfo": "Enter the service account content (in JSON or base64 format) to deploy the cluster to your Google Cloud provider account below. See the <0>official documentation for instructions on creating a service account key.", + "settingsDoSecretInfo": "Enter the API token to deploy the cluster to your DigitalOcean Cloud provider account below. See the <0>official documentation for instructions on creating an access token.", + "settingsAzureSecretInfo": "Enter the necessary details to deploy the cluster to your Azure Cloud provider account below. See the <0>official documentation for instructions on creating a service principal.", + "settingsHetznerSecretInfo": "Enter the API token to deploy the cluster to your Hetzner Cloud provider account below. 
See the <0>official documentation for instructions on creating an access token.", + "settingsSshKeySecretInfo": "Enter the contents of your private SSH key below to access the cluster servers via SSH. It is assumed that the corresponding public key has already been added to the servers.", + "settingsPasswordSecretInfo": "Enter the SSH username and password below to access the cluster servers. It is assumed that the user account, such as root or one with sudo privileges, has already been created on the servers.", + "settingsConfidentialDataStore": "All confidential data entered in these fields is stored in encrypted form.", + "sshPrivateKey": "SSH private key", + "month": "Month" +} \ No newline at end of file diff --git a/console/ui/src/shared/i18n/locales/en/shared.json b/console/ui/src/shared/i18n/locales/en/shared.json new file mode 100644 index 000000000..084c0a52b --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/shared.json @@ -0,0 +1,45 @@ +{ + "logout": "Logout", + "github": "GitHub repository", + "documentation": "Documentation", + "support": "Support", + "sponsor": "Sponsor", + "404Title": "Nothing to see here", + "404Text": "The page you are trying to open does not exist. You may have mistyped the address, or the page may have been moved to another URL.\nIf you think this is an error, contact support.", + "404ButtonText": "Take me back to home page", + "removeFromList": "Remove from list", + "refresh": "Refresh", + "overview": "Overview", + "cancel": "Cancel", + "save": "Save", + "status": "Status", + "id": "ID", + "environment": "Environment", + "actions": "Actions", + "user": "User", + "username": "Username", + "password": "Password", + "project": "Project", + "on": "On", + "off": "Off", + "login": "Login", + "token": "Token", + "enterTokenPlaceholder": "Enter token", + "amount": "Amount", + "name": "Name", + "type": "Type", + "created": "Created", + "updated": "Updated", + "used": "Used", + "delete": "Delete", + "selectSecret": "Select secret", + "addNewProject": "Add new project", + "projectName": "Project name", + "enterNewProjectName": "Enter new project name", + "description": "Description", + "yes": "Yes", + "no": "No", + "add": "Add", + "defaultTableSearchPlaceholder": "Enter property name or value", + "address": "Address" +} \ No newline at end of file diff --git a/console/ui/src/shared/i18n/locales/en/toasts.json b/console/ui/src/shared/i18n/locales/en/toasts.json new file mode 100644 index 000000000..18114a15c --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/toasts.json @@ -0,0 +1,18 @@ +{ + "clusterSuccessfullyCreated": "Cluster {{clusterName}} creation initiated. 
Please wait until deployment is complete", + "clusterSuccessfullyRemoved": "Cluster {{clusterName}} successfully removed", + "secretSuccessfullyCreated": "Secret {{secretName}} successfully created", + "secretSuccessfullyRemoved": "Secret {{secretName}} successfully removed", + "settingsSuccessfullyChanged": "Settings successfully changed", + "secretsSecretIsUsed_one": "The secret cannot be deleted because it is currently being used by the following cluster: {{clusterNames}}", + "secretsSecretIsUsed_other": "The secret cannot be deleted because it is currently being used by the following clusters: {{clusterNames}}", + "projectSuccessfullyCreated": "Project {{projectName}} successfully created", + "projectSuccessfullyRemoved": "Project {{projectName}} successfully removed", + "cannotRemoveActiveProject": "Cannot remove active project", + "environmentSuccessfullyCreated": "Environment {{environmentName}} successfully created", + "environmentSuccessfullyRemoved": "Environment {{environmentName}} successfully removed", + "serverSuccessfullyRemoved": "Server {{serverName}} successfully removed", + "invalidToken": "Token is not valid", + "valueCopiedToClipboard": "Value copied to clipboard", + "failedToCopyToClipboard": "Failed to copy to clipboard" +} \ No newline at end of file diff --git a/console/ui/src/shared/i18n/locales/en/validation.json b/console/ui/src/shared/i18n/locales/en/validation.json new file mode 100644 index 000000000..48e06eb72 --- /dev/null +++ b/console/ui/src/shared/i18n/locales/en/validation.json @@ -0,0 +1,5 @@ +{ + "requiredField": "Required field", + "clusterShouldHaveProperNaming": "Cluster name may contain only lowercase letters, numbers, and hyphens, and must be no longer than 24 characters", + "shouldBeACorrectV4Ip": "The value should be a valid IPv4 address" +} diff --git a/console/ui/src/shared/lib/functions.ts b/console/ui/src/shared/lib/functions.ts new file mode 100644 index 000000000..804698608 --- /dev/null +++ b/console/ui/src/shared/lib/functions.ts @@ -0,0 +1,43 @@ +import { generatePath, resolvePath } from 'react-router-dom'; +import { toast } from 'react-toastify'; +import { format } from 'date-fns/format'; +import { isValid } from 'date-fns/isValid'; + +declare const window: any; + +/** + * Function generates absolute path that can be used for react-router "navigate" function. + * @param path - Absolute URN path. + * @param params - Additional params that will be substituted in URN dynamic values. + */ +export const generateAbsoluteRouterPath = (path: string, params?: Record<string, string>) => + resolvePath(generatePath(path, params)); + +/** + * Function returns env variable passed to container or variable from .env* files. + * @param variableName - Name of a variable. + */ +export const getEnvVariable = (variableName: string) => window?._env_?.[variableName] ?? import.meta.env[variableName]; + +/** + * Function manages error event when performing request. + * @param e - Error event. + */ +export const handleRequestErrorCatch = (e) => { + console.error(e); + toast.error(e); +}; + +/** + * Function converts timestamp to easily readable string. + * @param timestamp - Timestamp to be converted. + */ +export const convertTimestampToReadableTime = (timestamp?: string) => + timestamp && isValid(new Date(timestamp)) ? format(timestamp, 'MMM dd, yyyy, HH:mm:ss') : '-'; + +/** + * Function converts a table sorting descriptor to the API "sort_by" format, + * where a "-" prefix marks descending order (e.g. sort_by=id,name,-type). + * @param sorting - Sorting descriptor with a column id and direction. + */ +export const manageSortingOrder = ( + sorting?: { + desc?: boolean; + id?: string; + }, +) => (sorting?.desc ? 
`-${sorting?.id}` : sorting?.id); diff --git a/console/ui/src/shared/lib/hooks.tsx b/console/ui/src/shared/lib/hooks.tsx new file mode 100644 index 000000000..d25c0cb12 --- /dev/null +++ b/console/ui/src/shared/lib/hooks.tsx @@ -0,0 +1,60 @@ +import { useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { toast } from 'react-toastify'; + +/** + * Hook polls RTK query request at the given interval. + * @param request - RTK request to poll. + * @param pollingInterval - Number in milliseconds that represents polling interval. + * @param options - Different config options. + */ +export const useQueryPolling = (request: any, pollingInterval: number, options?: { stop?: boolean }) => { + const result = request(); + + useEffect(() => { + const polling = setInterval(() => result.refetch(), pollingInterval); + if (options?.stop) clearInterval(polling); + return () => { + clearInterval(polling); + }; + }, [options]); + + return result; +}; + +/** + * Custom hook for copying value to clipboard. Returns copied value and function to copy value. + */ +export const useCopyToClipboard = (): [copiedText: string | null, copyFunction: (text: string) => Promise<boolean>] => { + const { t } = useTranslation('toasts'); + const [copiedText, setCopiedText] = useState<string | null>(null); + + const copyFunction = async (text: string) => { + try { + if (navigator.clipboard && window.isSecureContext) { + await navigator.clipboard.writeText(text); + } else { + // Fallback for non-secure contexts: copy through a hidden, off-screen textarea. + const textArea = document.createElement('textarea'); + textArea.value = text; + textArea.style.position = 'fixed'; + textArea.style.left = '-999999px'; + textArea.style.top = '-999999px'; + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + document.execCommand('copy'); + textArea.remove(); + } + setCopiedText(text); + toast.success(t('valueCopiedToClipboard')); + return true; + } catch (error) { + console.warn('Copy failed', error); + toast.error(t('failedToCopyToClipboard')); + setCopiedText(null); + return false; + } + }; + + return [copiedText, copyFunction]; +}; diff --git a/console/ui/src/shared/model/constants.ts b/console/ui/src/shared/model/constants.ts new file mode 100644 index 000000000..09db1a02b --- /dev/null +++ b/console/ui/src/shared/model/constants.ts @@ -0,0 +1,5 @@ +export const AUTHENTICATION_METHODS = Object.freeze({ + // changing names might break secrets POST request + SSH: 'ssh_key', + PASSWORD: 'password', +}); diff --git a/console/ui/src/shared/model/types.ts b/console/ui/src/shared/model/types.ts new file mode 100644 index 000000000..9dbbf4501 --- /dev/null +++ b/console/ui/src/shared/model/types.ts @@ -0,0 +1,6 @@ +import { MRT_Row, MRT_RowData } from 'material-react-table'; + +export interface TableRowActionsProps<TData extends MRT_RowData> { + closeMenu: () => void; + row: MRT_Row<TData>; +} diff --git a/console/ui/src/shared/theme/theme.ts b/console/ui/src/shared/theme/theme.ts new file mode 100644 index 000000000..4f591a075 --- /dev/null +++ b/console/ui/src/shared/theme/theme.ts @@ -0,0 +1,36 @@ +import { createTheme } from '@mui/material'; +import { enUS } from '@mui/material/locale'; + +declare module '@mui/material/styles' { + interface PaletteColor { + lighter10?: string; + } + + interface SimplePaletteColorOptions { + lighter10?: string; + } +} + +const theme = createTheme( + { + palette: { + primary: { + main: '#3367D6', + lighter10: '#0D8CE91A', + }, + }, + components: { + MuiAppBar: { + styleOverrides: { + colorPrimary: { + backgroundColor: '#F6F8FA', + 
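+ // Editor's note (illustrative): the shared hooks above are consumed roughly like this, assuming a component scope and any generated query hook (useGetOperationsQuery is a stand-in): + // const operations = useQueryPolling(() => useGetOperationsQuery({ limit: 25 }), 15_000, { stop: isFinished }); + // const [copied, copy] = useCopyToClipboard(); // copy('postgres://...') resolves to true on success and falls back to the hidden-textarea path outside secure contexts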
}, + }, + }, + }, + enUS, +); + +export default theme; diff --git a/console/ui/src/shared/ui/copy-icon/assets/copyIcon.svg b/console/ui/src/shared/ui/copy-icon/assets/copyIcon.svg new file mode 100644 index 000000000..a45972213 --- /dev/null +++ b/console/ui/src/shared/ui/copy-icon/assets/copyIcon.svg @@ -0,0 +1,3 @@ + + + diff --git a/console/ui/src/shared/ui/copy-icon/index.ts b/console/ui/src/shared/ui/copy-icon/index.ts new file mode 100644 index 000000000..d870f7c87 --- /dev/null +++ b/console/ui/src/shared/ui/copy-icon/index.ts @@ -0,0 +1,3 @@ +import CopyIcon from '@shared/ui/copy-icon/ui'; + +export default CopyIcon; diff --git a/console/ui/src/shared/ui/copy-icon/model/types.ts b/console/ui/src/shared/ui/copy-icon/model/types.ts new file mode 100644 index 000000000..1cf47b13f --- /dev/null +++ b/console/ui/src/shared/ui/copy-icon/model/types.ts @@ -0,0 +1,3 @@ +export interface CopyIconProps { + valueToCopy?: string; +} diff --git a/console/ui/src/shared/ui/copy-icon/ui/index.tsx b/console/ui/src/shared/ui/copy-icon/ui/index.tsx new file mode 100644 index 000000000..6eb304990 --- /dev/null +++ b/console/ui/src/shared/ui/copy-icon/ui/index.tsx @@ -0,0 +1,17 @@ +import { FC } from 'react'; +import { CopyIconProps } from '@shared/ui/copy-icon/model/types.ts'; +import { useCopyToClipboard } from '@shared/lib/hooks.tsx'; +import CopyValueIcon from '@shared/ui/copy-icon/assets/copyIcon.svg?react'; +import { Box } from '@mui/material'; + +const CopyIcon: FC<CopyIconProps> = ({ valueToCopy }) => { + const [_, copyFunction] = useCopyToClipboard(); + + return ( + <Box onClick={() => copyFunction(valueToCopy)} sx={{ cursor: 'pointer' }}> + <CopyValueIcon /> + </Box> + ); +}; + +export default CopyIcon; diff --git a/console/ui/src/shared/ui/default-form-buttons/index.ts b/console/ui/src/shared/ui/default-form-buttons/index.ts new file mode 100644 index 000000000..3d7cfb8e4 --- /dev/null +++ b/console/ui/src/shared/ui/default-form-buttons/index.ts @@ -0,0 +1,3 @@ +import DefaultFormButtons from '@shared/ui/default-form-buttons/ui'; + +export default DefaultFormButtons; diff --git a/console/ui/src/shared/ui/default-form-buttons/model/types.ts b/console/ui/src/shared/ui/default-form-buttons/model/types.ts new file mode 100644 index 000000000..fa08836ff --- /dev/null +++ b/console/ui/src/shared/ui/default-form-buttons/model/types.ts @@ -0,0 +1,10 @@ +import { ReactElement } from 'react'; + +export interface DefaultFormButtonsProps { + isDisabled?: boolean; + isSubmitting?: boolean; + submitButtonLabel?: string; + cancelButtonLabel?: string; + cancelHandler: () => void; + children?: ReactElement; +} diff --git a/console/ui/src/shared/ui/default-form-buttons/ui/index.tsx b/console/ui/src/shared/ui/default-form-buttons/ui/index.tsx new file mode 100644 index 000000000..93dbe5f77 --- /dev/null +++ b/console/ui/src/shared/ui/default-form-buttons/ui/index.tsx @@ -0,0 +1,51 @@ +import { FC } from 'react'; +import { useTranslation } from 'react-i18next'; +import styled from '@emotion/styled'; +import { LoadingButton } from '@mui/lab'; +import { DefaultFormButtonsProps } from '@shared/ui/default-form-buttons/model/types.ts'; +import { CircularProgress } from '@mui/material'; + +const StyledDefaultFormButtons = styled.div` + display: flex; + align-items: center; + justify-content: space-between; +`; + +const StyledStandardButtons = styled.div` + display: grid; + grid-template: 1fr / repeat(2, auto); + grid-column-gap: 16px; + width: fit-content; +`; + +const DefaultFormButtons: FC<DefaultFormButtonsProps> = ({ + isDisabled = false, + isSubmitting = false, + cancelHandler, 
submitButtonLabel, + cancelButtonLabel, + children, +}) => { + const { t } = useTranslation('shared'); + + return ( + <StyledDefaultFormButtons> + <StyledStandardButtons> + <LoadingButton + variant="contained" + disabled={isDisabled} + loading={isSubmitting} + loadingIndicator={<CircularProgress size={16} />} + type="submit"> + {submitButtonLabel ?? t('save')} + </LoadingButton> + <LoadingButton variant="outlined" onClick={cancelHandler}> + {cancelButtonLabel ?? t('cancel')} + </LoadingButton> + </StyledStandardButtons> + {children} + </StyledDefaultFormButtons> + ); +}; + +export default DefaultFormButtons; diff --git a/console/ui/src/shared/ui/default-table/index.ts b/console/ui/src/shared/ui/default-table/index.ts new file mode 100644 index 000000000..a7eeb4844 --- /dev/null +++ b/console/ui/src/shared/ui/default-table/index.ts @@ -0,0 +1,3 @@ +import DefaultTable from '@shared/ui/default-table/ui'; + +export default DefaultTable; diff --git a/console/ui/src/shared/ui/default-table/ui/index.tsx b/console/ui/src/shared/ui/default-table/ui/index.tsx new file mode 100644 index 000000000..96de73e08 --- /dev/null +++ b/console/ui/src/shared/ui/default-table/ui/index.tsx @@ -0,0 +1,44 @@ +import { FC } from 'react'; +import { MaterialReactTable, MRT_RowData, MRT_TableOptions, useMaterialReactTable } from 'material-react-table'; +import { PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts'; +import { useTranslation } from 'react-i18next'; + +/** + * Common table with default styles. + * @param tableConfig - Object with additional table configuration. + * @constructor + */ +const DefaultTable: FC<{ tableConfig: MRT_TableOptions<MRT_RowData> }> = ({ tableConfig }) => { + const { t } = useTranslation('shared'); + + const table = useMaterialReactTable({ + muiPaginationProps: { + rowsPerPageOptions: PAGINATION_LIMIT_OPTIONS, + }, + muiSearchTextFieldProps: { + placeholder: t('defaultTableSearchPlaceholder'), + sx: { minWidth: '300px' }, + }, + muiTableHeadCellProps: { + sx: { + backgroundColor: '#F6F8FA', + }, + }, + displayColumnDefOptions: { + 'mrt-row-select': { + visibleInShowHideMenu: false, + }, + 'mrt-row-actions': { + visibleInShowHideMenu: false, + }, + }, + layoutMode: 'grid', + positionActionsColumn: 'last', + positionGlobalFilter: 'left', + ...tableConfig, + }); + + return <MaterialReactTable table={table} />; +}; + +export default DefaultTable; diff --git a/console/ui/src/shared/ui/info-card-body/index.ts b/console/ui/src/shared/ui/info-card-body/index.ts new file mode 100644 index 000000000..7a4ccea81 --- /dev/null +++ b/console/ui/src/shared/ui/info-card-body/index.ts @@ -0,0 +1,3 @@ +import InfoCardBody from '@shared/ui/info-card-body/ui'; + +export default InfoCardBody; diff --git a/console/ui/src/shared/ui/info-card-body/model/types.ts b/console/ui/src/shared/ui/info-card-body/model/types.ts new file mode 100644 index 000000000..6b9017cf5 --- /dev/null +++ b/console/ui/src/shared/ui/info-card-body/model/types.ts @@ -0,0 +1,8 @@ +import { ReactNode } from 'react'; + +export interface InfoCardBodyProps { + config: { + title: string; + children: ReactNode; + }[]; +} diff --git a/console/ui/src/shared/ui/info-card-body/ui/index.tsx b/console/ui/src/shared/ui/info-card-body/ui/index.tsx new file mode 100644 index 000000000..b1b485437 --- /dev/null +++ b/console/ui/src/shared/ui/info-card-body/ui/index.tsx @@ -0,0 +1,27 @@ +import { FC } from 'react'; +import { Divider, Stack, Typography } from '@mui/material'; +import { InfoCardBodyProps } from '@shared/ui/info-card-body/model/types.ts'; + +/** + * Component renders the body of various summary and overview cards. + * Recommended for use inside all similar-looking card bodies. + * @param config - Config with data to render. 
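+ * @example + * // Illustrative usage (values assumed): each config entry renders a titled block, separated by dividers. + * // <InfoCardBody config={[{ title: 'Port', children: <Typography>5432</Typography> }]} />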
+ * @constructor + */ +const InfoCardBody: FC<InfoCardBodyProps> = ({ config }) => { + return ( + <Stack> + {config.map(({ title, children }, index) => ( + <Stack key={title}> + <Typography>{title}</Typography> + {children} + {index < config.length - 1 ? <Divider /> : null} + </Stack> + ))} + </Stack> + ); +}; + +export default InfoCardBody; diff --git a/console/ui/src/shared/ui/selectable-box/index.ts b/console/ui/src/shared/ui/selectable-box/index.ts new file mode 100644 index 000000000..8c3b67b74 --- /dev/null +++ b/console/ui/src/shared/ui/selectable-box/index.ts @@ -0,0 +1,3 @@ +import SelectableBox from '@shared/ui/selectable-box/ui'; + +export default SelectableBox; diff --git a/console/ui/src/shared/ui/selectable-box/model/types.ts b/console/ui/src/shared/ui/selectable-box/model/types.ts new file mode 100644 index 000000000..fd5eb3171 --- /dev/null +++ b/console/ui/src/shared/ui/selectable-box/model/types.ts @@ -0,0 +1,8 @@ +import { SxProps } from '@mui/material'; +import { ReactNode } from 'react'; + +export interface ClusterFormSelectableBoxProps { + children?: ReactNode; + isActive?: boolean; + sx?: SxProps; +} diff --git a/console/ui/src/shared/ui/selectable-box/ui/index.tsx b/console/ui/src/shared/ui/selectable-box/ui/index.tsx new file mode 100644 index 000000000..cd18565b6 --- /dev/null +++ b/console/ui/src/shared/ui/selectable-box/ui/index.tsx @@ -0,0 +1,21 @@ +import { FC } from 'react'; +import { ClusterFormSelectableBoxProps } from '@shared/ui/selectable-box/model/types.ts'; +import theme from '@shared/theme/theme.ts'; +import { Box } from '@mui/material'; + +const SelectableBox: FC<ClusterFormSelectableBoxProps> = ({ isActive, children, sx, ...props }) => { + return ( + <Box + sx={{ + cursor: 'pointer', + border: '1px solid', + borderColor: isActive ? theme.palette.primary.main : 'divider', + backgroundColor: isActive ? theme.palette.primary.lighter10 : 'transparent', + ...sx, + }} + {...props}> + {children} + </Box> + ); +}; + +export default SelectableBox; diff --git a/console/ui/src/shared/ui/settings-add-entity/model/constants.ts b/console/ui/src/shared/ui/settings-add-entity/model/constants.ts new file mode 100644 index 000000000..d95c1f9cc --- /dev/null +++ b/console/ui/src/shared/ui/settings-add-entity/model/constants.ts @@ -0,0 +1,4 @@ +export const ADD_ENTITY_FORM_NAMES = Object.freeze({ + NAME: 'name', + DESCRIPTION: 'description', +}); diff --git a/console/ui/src/shared/ui/settings-add-entity/model/types.ts b/console/ui/src/shared/ui/settings-add-entity/model/types.ts new file mode 100644 index 000000000..3eaf490cf --- /dev/null +++ b/console/ui/src/shared/ui/settings-add-entity/model/types.ts @@ -0,0 +1,13 @@ +import { ADD_ENTITY_FORM_NAMES } from '@shared/ui/settings-add-entity/model/constants.ts'; + +export interface AddEntityFormValues { + [ADD_ENTITY_FORM_NAMES.NAME]: string; + [ADD_ENTITY_FORM_NAMES.DESCRIPTION]?: string; +} + +export interface SettingsAddEntityProps { + buttonLabel: string; + headerLabel?: string; + nameLabel?: string; + submitButtonLabel: string; + isLoading?: boolean; + submitTrigger: (values: AddEntityFormValues) => Promise<unknown>; +} diff --git a/console/ui/src/shared/ui/settings-add-entity/model/validation.ts b/console/ui/src/shared/ui/settings-add-entity/model/validation.ts new file mode 100644 index 000000000..f78a86fc7 --- /dev/null +++ b/console/ui/src/shared/ui/settings-add-entity/model/validation.ts @@ -0,0 +1,9 @@ +import { TFunction } from 'i18next'; +import * as yup from 'yup'; +import { ADD_ENTITY_FORM_NAMES } from '@shared/ui/settings-add-entity/model/constants.ts'; + +export const AddEntityFormSchema = (t: TFunction) => + yup.object({ + [ADD_ENTITY_FORM_NAMES.NAME]: yup.string().required(t('requiredField', { ns: 'validation' })), + [ADD_ENTITY_FORM_NAMES.DESCRIPTION]: yup.string(), + }); diff --git a/console/ui/src/shared/ui/settings-add-entity/ui/index.tsx b/console/ui/src/shared/ui/settings-add-entity/ui/index.tsx new 
file mode 100644 index 000000000..0a3c12713 --- /dev/null +++ b/console/ui/src/shared/ui/settings-add-entity/ui/index.tsx @@ -0,0 +1,120 @@ +import React, { FC, useState } from 'react'; +import { Button, Card, CircularProgress, Modal, Stack, TextField, Typography } from '@mui/material'; +import { Controller, useForm } from 'react-hook-form'; +import { LoadingButton } from '@mui/lab'; +import AddBoxOutlinedIcon from '@mui/icons-material/AddBoxOutlined'; +import { yupResolver } from '@hookform/resolvers/yup'; +import { AddEntityFormValues, SettingsAddEntityProps } from '@shared/ui/settings-add-entity/model/types.ts'; +import { AddEntityFormSchema } from '@shared/ui/settings-add-entity/model/validation.ts'; +import { ADD_ENTITY_FORM_NAMES } from '@shared/ui/settings-add-entity/model/constants.ts'; +import { handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import { useTranslation } from 'react-i18next'; + +const SettingsAddEntity: FC = ({ + buttonLabel, + headerLabel, + submitButtonLabel, + nameLabel, + isLoading, + submitTrigger, +}) => { + const { t } = useTranslation(['settings', 'shared']); + const [isModalOpen, setIsModalOpen] = useState(false); + + const handleModalOpenState = (isOpen: boolean) => () => setIsModalOpen(isOpen); + + const { + control, + handleSubmit, + formState: { errors, isValid, isSubmitting }, + } = useForm({ + mode: 'all', + resolver: yupResolver(AddEntityFormSchema(t)), + }); + + const onSubmit = async (values: AddEntityFormValues) => { + try { + await submitTrigger(values); + setIsModalOpen(false); + } catch (e) { + handleRequestErrorCatch(e); + } + }; + + return ( + <> + + +
+ + + + {headerLabel ?? t('add', { ns: 'shared' })} + + + ( + + )} + /> + + + ( + + )} + /> + + } + loading={isSubmitting || isLoading}> + {submitButtonLabel ?? t('add')} + + + +
+
+ + ); +}; + +export default SettingsAddEntity; diff --git a/console/ui/src/shared/ui/slider-box/index.ts b/console/ui/src/shared/ui/slider-box/index.ts new file mode 100644 index 000000000..245befdf6 --- /dev/null +++ b/console/ui/src/shared/ui/slider-box/index.ts @@ -0,0 +1,3 @@ +import ClusterSliderBox from '@shared/ui/slider-box/ui'; + +export default ClusterSliderBox; diff --git a/console/ui/src/shared/ui/slider-box/lib/functions.ts b/console/ui/src/shared/ui/slider-box/lib/functions.ts new file mode 100644 index 000000000..3f9bb095e --- /dev/null +++ b/console/ui/src/shared/ui/slider-box/lib/functions.ts @@ -0,0 +1,17 @@ +import { GenerateMarkType, GenerateSliderMarksType } from '@shared/ui/slider-box/model/types.ts'; + +const generateMark: GenerateMarkType = (value, marksAdditionalLabel) => ({ + value, + label: `${value} ${marksAdditionalLabel}`, +}); + +export const generateSliderMarks: GenerateSliderMarksType = (min, max, amount, marksAdditionalLabel) => { + const step = (max - min) / (amount - 1); + const marksArray: ReturnType<GenerateMarkType>[] = []; + + for (let i = min; i < max + step; i += step) { + marksArray.push(generateMark(Math.trunc(i), marksAdditionalLabel)); + } + + return marksArray; +}; diff --git a/console/ui/src/shared/ui/slider-box/model/types.ts b/console/ui/src/shared/ui/slider-box/model/types.ts new file mode 100644 index 000000000..d8af2950e --- /dev/null +++ b/console/ui/src/shared/ui/slider-box/model/types.ts @@ -0,0 +1,29 @@ +import { ReactElement } from 'react'; + +export interface SliderBoxProps { + amount: number; + changeAmount: (...event: any[]) => void; + icon?: ReactElement; + unit?: string; + min: number; + max: number; + marks?: { label: string; value: number }[]; + marksAmount?: number; + marksAdditionalLabel?: string; + step?: number | null; + error?: object; + limitMin?: boolean; + limitMax?: boolean; +} + +export type GenerateMarkType = (value: number, marksAdditionalLabel: string) => { label: string; value: number }; + +export type GenerateSliderMarksType = ( + min: number, + max: number, + amount: number, + marksAdditionalLabel: string, +) => { + label: string; + value: number; +}[]; diff --git a/console/ui/src/shared/ui/slider-box/ui/index.tsx b/console/ui/src/shared/ui/slider-box/ui/index.tsx new file mode 100644 index 000000000..15bbb190b --- /dev/null +++ b/console/ui/src/shared/ui/slider-box/ui/index.tsx @@ -0,0 +1,67 @@ +import { FC } from 'react'; +import { Box, Slider, TextField, Typography } from '@mui/material'; +import { SliderBoxProps } from '@shared/ui/slider-box/model/types.ts'; + +import { generateSliderMarks } from '@shared/ui/slider-box/lib/functions.ts'; + +const ClusterSliderBox: FC<SliderBoxProps> = ({ + amount, + changeAmount, + unit, + icon, + min = 1, + max, + marks, + marksAmount, + marksAdditionalLabel = '', + step, + error, + limitMin = true, + limitMax, +}) => { + const onChange = (e) => { + const { value } = e.target; + + if (/^\d*$/.test(value)) changeAmount(value < min && limitMin ? min : value > max && limitMax ? 
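+ // Editor's note (illustrative): generateSliderMarks above spaces `amount` marks evenly between min and max, truncating each to an integer; e.g. generateSliderMarks(1, 32, 5, 'servers') yields marks at 1, 8, 16, 24 and 32, labelled "1 servers" through "32 servers".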
max : value); + }; + + return ( + + + {icon} + + {unit} + + + + + + ); +}; + +export default ClusterSliderBox; diff --git a/console/ui/src/shared/ui/spinner/index.ts b/console/ui/src/shared/ui/spinner/index.ts new file mode 100644 index 000000000..14e55f442 --- /dev/null +++ b/console/ui/src/shared/ui/spinner/index.ts @@ -0,0 +1,3 @@ +import Spinner from '@shared/ui/spinner/ui'; + +export default Spinner; diff --git a/console/ui/src/shared/ui/spinner/ui/index.tsx b/console/ui/src/shared/ui/spinner/ui/index.tsx new file mode 100644 index 000000000..2e61ac1b4 --- /dev/null +++ b/console/ui/src/shared/ui/spinner/ui/index.tsx @@ -0,0 +1,12 @@ +import { FC } from 'react'; +import { CircularProgress, Stack } from '@mui/material'; + +const Spinner: FC = () => { + return ( + + + + ); +}; + +export default Spinner; diff --git a/console/ui/src/widgets/cluster-form/index.ts b/console/ui/src/widgets/cluster-form/index.ts new file mode 100644 index 000000000..24ad7a10d --- /dev/null +++ b/console/ui/src/widgets/cluster-form/index.ts @@ -0,0 +1,3 @@ +import ClusterForm from '@widgets/cluster-form/ui'; + +export default ClusterForm; diff --git a/console/ui/src/widgets/cluster-form/model/constants.ts b/console/ui/src/widgets/cluster-form/model/constants.ts new file mode 100644 index 000000000..3aeb2d1f0 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/model/constants.ts @@ -0,0 +1,36 @@ +export const numberOfInstances = [1, 3, 7, 15, 32]; +export const dataDiskStorage = [10, 100, 500, 1000, 2000, 16000]; + +const CLUSTER_CLOUD_PROVIDER_FIELD_NAMES = Object.freeze({ + REGION: 'region', + REGION_CONFIG: 'regionConfig', + INSTANCE_TYPE: 'instanceType', + INSTANCE_CONFIG: 'instanceConfig', + INSTANCES_AMOUNT: 'instancesAmount', + STORAGE_AMOUNT: 'storageAmount', + SSH_PUBLIC_KEY: 'sshPublicKey', +}); + +const CLUSTER_LOCAL_MACHINE_FIELD_NAMES = Object.freeze({ + DATABASE_SERVERS: 'databaseServers', + HOSTNAME: 'hostname', + IP_ADDRESS: 'ipAddress', + LOCATION: 'location', + AUTHENTICATION_METHOD: 'authenticationMethod', + SECRET_KEY_NAME: 'secretKeyName', + AUTHENTICATION_IS_SAVE_TO_CONSOLE: 'authenticationSaveToConsole', + CLUSTER_VIP_ADDRESS: 'clusterVIPAddress', + IS_HAPROXY_LOAD_BALANCER: 'isHaproxyLoadBalancer', + IS_USE_DEFINED_SECRET: 'isUseDefinedSecret', +}); + +export const CLUSTER_FORM_FIELD_NAMES = Object.freeze({ + PROVIDER: 'provider', + ENVIRONMENT_ID: 'environment', + CLUSTER_NAME: 'clusterName', + DESCRIPTION: 'description', + POSTGRES_VERSION: 'postgresVersion', + SECRET_ID: 'secretId', + ...CLUSTER_CLOUD_PROVIDER_FIELD_NAMES, + ...CLUSTER_LOCAL_MACHINE_FIELD_NAMES, +}); diff --git a/console/ui/src/widgets/cluster-form/model/types.ts b/console/ui/src/widgets/cluster-form/model/types.ts new file mode 100644 index 000000000..f0f9afaa3 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/model/types.ts @@ -0,0 +1,13 @@ +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +export interface ClusterFormRegionConfigBoxProps { + name: string; + place: string; + isActive: boolean; +} + +export interface ClusterDatabaseServer { + [CLUSTER_FORM_FIELD_NAMES.HOSTNAME]: string; + [CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]: string; + [CLUSTER_FORM_FIELD_NAMES.LOCATION]: string; +} diff --git a/console/ui/src/widgets/cluster-form/model/validation.ts b/console/ui/src/widgets/cluster-form/model/validation.ts new file mode 100644 index 000000000..4c7782d5e --- /dev/null +++ b/console/ui/src/widgets/cluster-form/model/validation.ts @@ -0,0 +1,203 @@ +import { TFunction } 
from 'i18next'; +import * as yup from 'yup'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import ipRegex from 'ip-regex'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; + +const cloudFormSchema = (t: TFunction) => + yup.object({ + [CLUSTER_FORM_FIELD_NAMES.REGION]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL ? yup.string().required() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL + ? yup + .object({ + code: yup.string(), + location: yup.string(), + }) + .required() + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_TYPE]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL ? yup.string().required() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL + ? yup + .object({ + code: yup.string(), + cpu: yup.number(), + currency: yup.string(), + price_hourly: yup.number(), + price_monthly: yup.number(), + ram: yup.number(), + }) + .required() + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL ? yup.number().required() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL ? yup.number().required() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.SSH_PUBLIC_KEY]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code !== PROVIDERS.LOCAL + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ), + }); + +export const localFormSchema = (t: TFunction) => + yup.object({ + [CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code === PROVIDERS.LOCAL + ? yup.array( + yup.object({ + [CLUSTER_FORM_FIELD_NAMES.HOSTNAME]: yup.string().required(t('requiredField', { ns: 'validation' })), + [CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]: yup + .string() + .required(t('requiredField', { ns: 'validation' })) + .test('should be a correct IP', t('shouldBeACorrectV4Ip', { ns: 'validation' }), (value) => + ipRegex.v4({ exact: true }).test(value), + ), + [CLUSTER_FORM_FIELD_NAMES.LOCATION]: yup.string(), + }), + ) + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code === PROVIDERS.LOCAL ? yup.string().required() : schema.notRequired(), + ), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SSH_PRIVATE_KEY]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD], + ([provider, authenticationMethod], schema) => + provider?.code === PROVIDERS.LOCAL && authenticationMethod === AUTHENTICATION_METHODS.SSH + ? 
yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET, ([isUseDefinedSecret], schema) => + !isUseDefinedSecret + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ) + : schema.notRequired(), + ), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.USERNAME]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD], + ([provider, authenticationMethod], schema) => + provider?.code === PROVIDERS.LOCAL && authenticationMethod === AUTHENTICATION_METHODS.PASSWORD + ? yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET, ([isUseDefinedSecret], schema) => + !isUseDefinedSecret + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ) + : schema.notRequired(), + ), + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.PASSWORD]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD], + ([provider, authenticationMethod], schema) => + provider?.code === PROVIDERS.LOCAL && authenticationMethod === AUTHENTICATION_METHODS.PASSWORD + ? yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET, ([isUseDefinedSecret], schema) => + !isUseDefinedSecret + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ) + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET], + ([provider, isUseDefinedSecret], schema) => + provider?.code === PROVIDERS.LOCAL && !isUseDefinedSecret ? yup.boolean() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.SECRET_KEY_NAME]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE], + ([provider, isSaveToConsole], schema) => + provider?.code === PROVIDERS.LOCAL && isSaveToConsole + ? yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.CLUSTER_VIP_ADDRESS]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code === PROVIDERS.LOCAL + ? yup + .string() + .test( + 'should be a correct VIP address', + t('shouldBeACorrectV4Ip', { ns: 'validation' }), + (value) => !value || ipRegex.v4({ exact: true }).test(value), + ) + : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER]: yup + .mixed() + .when(CLUSTER_FORM_FIELD_NAMES.PROVIDER, ([provider], schema) => + provider?.code === PROVIDERS.LOCAL ? yup.boolean() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET]: yup + .mixed() + .when([CLUSTER_FORM_FIELD_NAMES.PROVIDER], ([provider], schema) => + provider?.code === PROVIDERS.LOCAL ? yup.boolean().optional() : schema.notRequired(), + ), + [CLUSTER_FORM_FIELD_NAMES.SECRET_ID]: yup + .mixed() + .when( + [CLUSTER_FORM_FIELD_NAMES.PROVIDER, CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET], + ([provider, isUseDefinedSecret], schema) => + provider?.code === PROVIDERS.LOCAL && isUseDefinedSecret + ? 
yup.string().required(t('requiredField', { ns: 'validation' })) + : schema.notRequired(), + ), + }); + +export const ClusterFormSchema = (t: TFunction) => + yup + .object({ + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: yup.object().required(), + [CLUSTER_FORM_FIELD_NAMES.ENVIRONMENT_ID]: yup.number(), + [CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]: yup + .string() + .test('clusters should have proper naming', t('clusterShouldHaveProperNaming', { ns: 'validation' }), (value) => + !!value && /^[a-z0-9][a-z0-9-]{0,23}$/.test(value), + ) + .required(t('requiredField', { ns: 'validation' })), + [CLUSTER_FORM_FIELD_NAMES.DESCRIPTION]: yup.string(), + [CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]: yup.number().required(t('requiredField', { ns: 'validation' })), + }) + .concat(cloudFormSchema(t)) + .concat(localFormSchema(t)); diff --git a/console/ui/src/widgets/cluster-form/ui/ClusterFormCloudProviderFormPart.tsx b/console/ui/src/widgets/cluster-form/ui/ClusterFormCloudProviderFormPart.tsx new file mode 100644 index 000000000..32fe3e026 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/ui/ClusterFormCloudProviderFormPart.tsx @@ -0,0 +1,18 @@ +import { FC } from 'react'; +import ClusterFormRegionBlock from '@entities/cluster-form-cloud-region-block'; +import ClusterFormInstancesBlock from '@entities/cluster-form-instances-block'; +import InstancesAmountBlock from '@entities/cluster-form-instances-amount-block'; +import StorageBlock from '@entities/storage-block'; +import ClusterFormSshKeyBlock from '@entities/ssh-key-block'; + +const ClusterFormCloudProviderFormPart: FC = () => ( + <> + <ClusterFormRegionBlock /> + <ClusterFormInstancesBlock /> + <InstancesAmountBlock /> + <StorageBlock /> + <ClusterFormSshKeyBlock /> + </> +); + +export default ClusterFormCloudProviderFormPart; diff --git a/console/ui/src/widgets/cluster-form/ui/ClusterFormLocalMachineFormPart.tsx b/console/ui/src/widgets/cluster-form/ui/ClusterFormLocalMachineFormPart.tsx new file mode 100644 index 000000000..5ae5dcc61 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/ui/ClusterFormLocalMachineFormPart.tsx @@ -0,0 +1,16 @@ +import { FC } from 'react'; +import DatabaseServersBlock from '@entities/database-servers-block'; +import AuthenticationMethodFormBlock from '@entities/authentification-method-form-block'; +import VipAddressBlock from '@entities/vip-address-block'; +import LoadBalancersBlock from '@entities/load-balancers-block'; + +const ClusterFormLocalMachineFormPart: FC = () => ( + <> + <DatabaseServersBlock /> + <AuthenticationMethodFormBlock /> + <VipAddressBlock /> + <LoadBalancersBlock /> + </> +); + +export default ClusterFormLocalMachineFormPart; diff --git a/console/ui/src/widgets/cluster-form/ui/ClusterFormRegionConfigBox.tsx b/console/ui/src/widgets/cluster-form/ui/ClusterFormRegionConfigBox.tsx new file mode 100644 index 000000000..2dd7cdfd5 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/ui/ClusterFormRegionConfigBox.tsx @@ -0,0 +1,22 @@ +import { FC } from 'react'; +import SelectableBox from '@shared/ui/selectable-box'; +import { Box, Typography } from '@mui/material'; +import FlagsIcon from '@assets/flagIcon.svg?react'; +import { ClusterFormRegionConfigBoxProps } from '@widgets/cluster-form/model/types.ts'; + +const ClusterFormRegionConfigBox: FC<ClusterFormRegionConfigBoxProps> = ({ name, place, isActive, ...props }) => { + return ( + <SelectableBox isActive={isActive} {...props}> + <Typography>{name}</Typography> + <Box display="flex" alignItems="center"> + <FlagsIcon /> + &nbsp; + <Typography>{place}</Typography> + </Box> + </SelectableBox> + ); +}; + +export default ClusterFormRegionConfigBox; diff --git a/console/ui/src/widgets/cluster-form/ui/index.tsx b/console/ui/src/widgets/cluster-form/ui/index.tsx new file mode 100644 index 000000000..ee8cdb662 --- /dev/null +++ b/console/ui/src/widgets/cluster-form/ui/index.tsx @@ -0,0 +1,210 @@ +import React, { useLayoutEffect, useRef, useState } from 'react'; +import ProvidersBlock from 
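+ // Editor's note (illustrative): the cluster-form schemas above all share one yup v1 pattern, shown condensed here with a hypothetical field; a field is required only when the selected provider branch makes it relevant: + // region: yup.mixed().when('provider', ([provider], schema) => + //   provider?.code !== PROVIDERS.LOCAL ? yup.string().required() : schema.notRequired()),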
'@entities/providers-block'; +import ClusterFormEnvironmentBlock from '@entities/cluster-form-environment-block'; +import ClusterNameBox from '@entities/cluster-form-cluster-name-block'; +import ClusterDescriptionBlock from '@entities/cluster-description-block'; +import PostgresVersionBox from '@entities/postgres-version-block'; +import DefaultFormButtons from '@shared/ui/default-form-buttons'; +import { FormProvider, useForm } from 'react-hook-form'; +import { generateAbsoluteRouterPath, handleRequestErrorCatch } from '@shared/lib/functions.ts'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import { useGetExternalDeploymentsQuery } from '@shared/api/api/deployments.ts'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import ClusterFormCloudProviderFormPart from '@widgets/cluster-form/ui/ClusterFormCloudProviderFormPart.tsx'; +import ClusterFormLocalMachineFormPart from '@widgets/cluster-form/ui/ClusterFormLocalMachineFormPart.tsx'; +import { useGetClustersDefaultNameQuery, usePostClustersMutation } from '@shared/api/api/clusters.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { Stack } from '@mui/material'; +import { yupResolver } from '@hookform/resolvers/yup'; +import { ClusterFormSchema } from '@widgets/cluster-form/model/validation.ts'; +import ClusterSummary from '@widgets/cluster-summary'; +import ClusterSecretModal from '@features/cluster-secret-modal'; +import { useGetPostgresVersionsQuery } from '@shared/api/api/other.ts'; +import { useGetEnvironmentsQuery } from '@shared/api/api/environments.ts'; +import { mapFormValuesToRequestFields } from '@features/cluster-secret-modal/lib/functions.ts'; +import { toast } from 'react-toastify'; +import { AUTHENTICATION_METHODS } from '@shared/model/constants.ts'; +import { ClusterFormValues } from '@features/cluster-secret-modal/model/types.ts'; +import { useGetSecretsQuery, usePostSecretsMutation } from '@shared/api/api/secrets.ts'; +import { getSecretBodyFromValues } from '@entities/secret-form-block/lib/functions.ts'; +import { SECRET_MODAL_CONTENT_FORM_FIELD_NAMES } from '@entities/secret-form-block/model/constants.ts'; +import Spinner from '@shared/ui/spinner'; + +const ClusterForm: React.FC = () => { + const { t } = useTranslation(['clusters', 'validation', 'toasts']); + const navigate = useNavigate(); + const createSecretResultRef = useRef(null); // ref is used for case when user saves secret and uses its ID to create cluster + + const [isResetting, setIsResetting] = useState(false); + + const currentProject = useAppSelector(selectCurrentProject); + + const [addSecretTrigger, addSecretTriggerState] = usePostSecretsMutation(); + const [addClusterTrigger, addClusterTriggerState] = usePostClustersMutation(); + + const deployments = useGetExternalDeploymentsQuery({ offset: 0, limit: 999_999_999 }); + const environments = useGetEnvironmentsQuery({ offset: 0, limit: 999_999_999 }); + const postgresVersions = useGetPostgresVersionsQuery(); + const clusterName = useGetClustersDefaultNameQuery(); + + const methods = useForm({ + mode: 'all', + resolver: yupResolver(ClusterFormSchema(t)), + defaultValues: { + [CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]: 3, + [CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD]: 
AUTHENTICATION_METHODS.SSH, + [CLUSTER_FORM_FIELD_NAMES.IS_USE_DEFINED_SECRET]: false, + [CLUSTER_FORM_FIELD_NAMES.SECRET_ID]: '', + [CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]: Array(3) + .fill(0) + .map(() => ({ + [CLUSTER_FORM_FIELD_NAMES.HOSTNAME]: '', + [CLUSTER_FORM_FIELD_NAMES.IP_ADDRESS]: '', + [CLUSTER_FORM_FIELD_NAMES.LOCATION]: '', + })), + }, + }); + + const watchProvider = methods.watch(CLUSTER_FORM_FIELD_NAMES.PROVIDER); + + const secrets = useGetSecretsQuery({ type: watchProvider?.code, projectId: currentProject }); + + useLayoutEffect(() => { + if (deployments.isFetching || postgresVersions.isFetching || environments.isFetching || clusterName.isFetching) + setIsResetting(true); + if (deployments.data?.data && postgresVersions.data?.data && environments.data?.data && clusterName.data) { + // eslint-disable-next-line @typescript-eslint/require-await + const resetForm = async () => { + // sync function will result in form values setting error + const providers = deployments.data.data; + methods.reset((values) => ({ + ...values, + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: providers?.[0], + [CLUSTER_FORM_FIELD_NAMES.REGION]: providers?.[0]?.cloud_regions[0]?.code, + [CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]: providers?.[0]?.cloud_regions[0]?.datacenters?.[0], + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_TYPE]: 'small', + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]: providers?.[0]?.instance_types?.small?.[0], + [CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]: 100, + [CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]: postgresVersions.data?.data?.at(-1)?.major_version, + [CLUSTER_FORM_FIELD_NAMES.ENVIRONMENT_ID]: environments.data?.data?.[0]?.id, + [CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]: clusterName.data?.name ?? 'postgres-cluster', + })); + }; + void resetForm().then(() => setIsResetting(false)); + } + }, [deployments.data?.data, postgresVersions.data?.data, environments.data?.data, clusterName.data, methods]); + + const submitLocalCluster = async (values: ClusterFormValues) => { + if (values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_IS_SAVE_TO_CONSOLE] && !createSecretResultRef?.current) { + createSecretResultRef.current = await addSecretTrigger({ + requestSecretCreate: { + project_id: Number(currentProject), + type: values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD], + name: values[CLUSTER_FORM_FIELD_NAMES.SECRET_KEY_NAME], + value: getSecretBodyFromValues({ + ...values, + [SECRET_MODAL_CONTENT_FORM_FIELD_NAMES.SECRET_TYPE]: values[CLUSTER_FORM_FIELD_NAMES.AUTHENTICATION_METHOD], + }), + }, + }).unwrap(); + toast.success( + t('secretSuccessfullyCreated', { + ns: 'toasts', + secretName: values[CLUSTER_FORM_FIELD_NAMES.SECRET_KEY_NAME], + }), + ); + } + await addClusterTrigger({ + requestClusterCreate: mapFormValuesToRequestFields({ + values, + secretId: createSecretResultRef.current?.id ?? 
Number(values[CLUSTER_FORM_FIELD_NAMES.SECRET_ID]), + projectId: Number(currentProject), + }), + }).unwrap(); + toast.info( + t('clusterSuccessfullyCreated', { + ns: 'toasts', + clusterName: values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME], + }), + ); + }; + + const submitCloudCluster = async (values: ClusterFormValues) => { + await addClusterTrigger({ + requestClusterCreate: mapFormValuesToRequestFields({ + values, + secretId: secrets?.data?.data?.[0]?.id, + projectId: Number(currentProject), + }), + }).unwrap(); + toast.info( + t('clusterSuccessfullyCreated', { + ns: 'toasts', + clusterName: values[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME], + }), + ); + }; + + const onSubmit = async (values: ClusterFormValues) => { + try { + values[CLUSTER_FORM_FIELD_NAMES.PROVIDER].code === PROVIDERS.LOCAL + ? await submitLocalCluster(values) + : await submitCloudCluster(values); + navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + } catch (e) { + handleRequestErrorCatch(e); + } + }; + + const cancelHandler = () => navigate(generateAbsoluteRouterPath(RouterPaths.clusters.absolutePath)); + + const { isValid, isSubmitting } = methods.formState; // spreading is required by React Hook Form to ensure correct form state + + return isResetting || deployments.isFetching || postgresVersions.isFetching || environments.isFetching ? ( + + ) : ( + + + +
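A note on the submit flow just above: both helpers call the RTK Query mutation trigger and then .unwrap(), which turns the result union into a resolved value or a thrown error, so a single try/catch in onSubmit covers failures from either the secret or the cluster request. A minimal sketch of the pattern (toast text simplified; the RequestClusterCreate type name is assumed from the requestClusterCreate key):

    const [addClusterTrigger] = usePostClustersMutation();

    const submit = async (body: RequestClusterCreate) => {
      try {
        // unwrap() re-throws request errors instead of returning { error }.
        await addClusterTrigger({ requestClusterCreate: body }).unwrap();
        toast.info('Cluster created'); // the diff uses a translated, parameterized message here
      } catch (e) {
        handleRequestErrorCatch(e); // shared error-to-toast helper from @shared/lib
      }
    };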
+ + + {watchProvider?.code === PROVIDERS.LOCAL ? ( + + ) : ( + + )} + + + + + {watchProvider?.code !== PROVIDERS.LOCAL && secrets?.data?.data?.length !== 1 ? ( + + ) : ( + + )} + +
+ +
+
+
+ ); +}; + +export default ClusterForm; diff --git a/console/ui/src/widgets/cluster-overview-table/index.ts b/console/ui/src/widgets/cluster-overview-table/index.ts new file mode 100644 index 000000000..80eaaa422 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/index.ts @@ -0,0 +1,3 @@ +import ClusterOverviewTable from '@widgets/cluster-overview-table/ui'; + +export default ClusterOverviewTable; diff --git a/console/ui/src/widgets/cluster-overview-table/lib/hooks.tsx b/console/ui/src/widgets/cluster-overview-table/lib/hooks.tsx new file mode 100644 index 000000000..55557b8d8 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/lib/hooks.tsx @@ -0,0 +1,28 @@ +import { useMemo } from 'react'; +import { CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES } from '@widgets/cluster-overview-table/model/constants.ts'; +import { ClusterInfoInstance } from '@shared/api/api/clusters.ts'; +import { Box, Chip } from '@mui/material'; + +export const useGetOverviewClusterTableData = (data: ClusterInfoInstance[]) => { + return useMemo( + () => + data?.map((item) => ({ + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.NAME]: item?.name, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.HOST]: item?.ip, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ROLE]: item?.role, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.STATE]: item?.status, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TIMELINE]: item?.timeline, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.LAG_IN_MB]: item?.lag, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TAGS]: item?.tags && ( + + {Object.entries(item.tags).map(([key, value]) => ( + + ))} + + ), + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.PENDING_RESTART]: String(item?.pending_restart), + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ID]: item?.id, + })) ?? [], + [data], + ); +}; diff --git a/console/ui/src/widgets/cluster-overview-table/model/constants.ts b/console/ui/src/widgets/cluster-overview-table/model/constants.ts new file mode 100644 index 000000000..ae6fa0719 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/model/constants.ts @@ -0,0 +1,49 @@ +import { createMRTColumnHelper } from 'material-react-table'; +import { TFunction } from 'i18next'; +import { ClusterOverviewTableValues } from '@widgets/cluster-overview-table/model/types.ts'; + +export const CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES = Object.freeze({ + NAME: 'name', + HOST: 'host', + ROLE: 'role', + STATE: 'state', + TIMELINE: 'timeline', + LAG_IN_MB: 'lagInMb', + PENDING_RESTART: 'pendingRestart', + TAGS: 'tags', + ID: 'id', +}); + +const columnHelper = createMRTColumnHelper(); + +export const clusterOverviewTableColumns = (t: TFunction) => [ + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.NAME, { + header: t('name', { ns: 'shared' }), + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.HOST, { + header: t('host', { ns: 'clusters' }), + size: 70, + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ROLE, { + header: t('role', { ns: 'clusters' }), + size: 120, + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.STATE, { + header: t('state', { ns: 'clusters' }), + size: 110, + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TIMELINE, { + header: t('timeline', { ns: 'clusters' }), + size: 80, + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.LAG_IN_MB, { + header: t('lagInMb', { ns: 'clusters' }), + size: 140, + }), + columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.PENDING_RESTART, { + header: t('pendingRestart', { ns: 'clusters' }), + }), + 
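In useGetOverviewClusterTableData above, the tags column renders the row's tags object as MUI chips laid out in a Box. As a self-contained sketch of that cell (the label format and sx values are assumptions):

    import { Box, Chip } from '@mui/material';

    const TagsCell = ({ tags }: { tags?: Record<string, string> }) =>
      tags ? (
        <Box sx={{ display: 'flex', flexWrap: 'wrap', gap: 0.5 }}>
          {Object.entries(tags).map(([key, value]) => (
            <Chip key={key} label={`${key}: ${value}`} size="small" />
          ))}
        </Box>
      ) : null;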
columnHelper.accessor(CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TAGS, { + header: t('tags', { ns: 'clusters' }), + }), +]; diff --git a/console/ui/src/widgets/cluster-overview-table/model/types.ts b/console/ui/src/widgets/cluster-overview-table/model/types.ts new file mode 100644 index 000000000..4e080da08 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/model/types.ts @@ -0,0 +1,19 @@ +import { CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES } from '@widgets/cluster-overview-table/model/constants.ts'; +import { ClusterInfoInstance } from '@shared/api/api/clusters.ts'; + +export interface ClusterOverviewTableProps { + clusterName?: string; + items?: ClusterInfoInstance[]; + isLoading?: boolean; +} + +export interface ClusterOverviewTableValues { + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.NAME]: string; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.HOST]: string; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.ROLE]: string; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.STATE]: string; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TIMELINE]: number; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.LAG_IN_MB]: number; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.PENDING_RESTART]: string; + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TAGS]: string; +} diff --git a/console/ui/src/widgets/cluster-overview-table/ui/ClustersOverviewTableButtons.tsx b/console/ui/src/widgets/cluster-overview-table/ui/ClustersOverviewTableButtons.tsx new file mode 100644 index 000000000..ea404c556 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/ui/ClustersOverviewTableButtons.tsx @@ -0,0 +1,27 @@ +import { FC } from 'react'; +import { Button, Stack } from '@mui/material'; +import RefreshIcon from '@mui/icons-material/Refresh'; +import { useTranslation } from 'react-i18next'; +import { usePostClustersByIdRefreshMutation } from '@shared/api/api/clusters.ts'; +import { useParams } from 'react-router-dom'; + +const ClustersOverviewTableButtons: FC = () => { + const { t } = useTranslation('shared'); + const { clusterId } = useParams(); + + const [refreshClusterTrigger] = usePostClustersByIdRefreshMutation(); + + const handleRefresh = async () => { + await refreshClusterTrigger({ id: Number(clusterId) }); + }; + + return ( + + + + ); +}; + +export default ClustersOverviewTableButtons; diff --git a/console/ui/src/widgets/cluster-overview-table/ui/index.tsx b/console/ui/src/widgets/cluster-overview-table/ui/index.tsx new file mode 100644 index 000000000..1af8b8489 --- /dev/null +++ b/console/ui/src/widgets/cluster-overview-table/ui/index.tsx @@ -0,0 +1,56 @@ +import { FC, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table'; +import { ClusterOverviewTableProps } from '@widgets/cluster-overview-table/model/types.ts'; +import { + CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES, + clusterOverviewTableColumns, +} from '@widgets/cluster-overview-table/model/constants.ts'; +import ClustersOverviewTableButtons from '@widgets/cluster-overview-table/ui/ClustersOverviewTableButtons.tsx'; +import { useGetOverviewClusterTableData } from '@widgets/cluster-overview-table/lib/hooks.tsx'; +import { ClusterInfo } from '@shared/api/api/clusters.ts'; +import DefaultTable from '@shared/ui/default-table'; +import { Stack, Typography } from '@mui/material'; +import ClustersOverviewTableRowActions from '@features/clusters-overview-table-row-actions'; + +const ClusterOverviewTable: FC = ({ clusterName = '', items, isLoading }) => { + const { t, i18n } = useTranslation('clusters'); + + const 
columns = useMemo[]>(() => clusterOverviewTableColumns(t), [i18n.language]); + + const data = useGetOverviewClusterTableData(items); + + const tableConfig: MRT_TableOptions = { + columns, + data, + enablePagination: false, + enableRowActions: true, + showGlobalFilter: false, + state: { + isLoading, + }, + initialState: { + columnVisibility: { + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.PENDING_RESTART]: false, + [CLUSTER_OVERVIEW_TABLE_COLUMN_NAMES.TAGS]: false, + }, + }, + renderRowActionMenuItems: ({ row, closeMenu }) => ( + + ), + }; + + return ( + <> + + + {t('cluster')}: {clusterName} + + + + + + ); +}; + +export default ClusterOverviewTable; diff --git a/console/ui/src/widgets/cluster-summary/assets/awsIcon.svg b/console/ui/src/widgets/cluster-summary/assets/awsIcon.svg new file mode 100644 index 000000000..3dec8e73f --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/awsIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/cluster-summary/assets/azureIcon.svg b/console/ui/src/widgets/cluster-summary/assets/azureIcon.svg new file mode 100644 index 000000000..e24e2189c --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/azureIcon.svg @@ -0,0 +1,27 @@ + + + Azure + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/console/ui/src/widgets/cluster-summary/assets/digitaloceanIcon.svg b/console/ui/src/widgets/cluster-summary/assets/digitaloceanIcon.svg new file mode 100644 index 000000000..5a81f2481 --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/digitaloceanIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/cluster-summary/assets/gcpIcon.svg b/console/ui/src/widgets/cluster-summary/assets/gcpIcon.svg new file mode 100644 index 000000000..92da9d7cc --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/gcpIcon.svg @@ -0,0 +1,23 @@ + + + + + + + + + + + diff --git a/console/ui/src/widgets/cluster-summary/assets/hetznerIcon.svg b/console/ui/src/widgets/cluster-summary/assets/hetznerIcon.svg new file mode 100644 index 000000000..73b15fff7 --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/hetznerIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/cluster-summary/assets/hetznerIcon2.svg b/console/ui/src/widgets/cluster-summary/assets/hetznerIcon2.svg new file mode 100644 index 000000000..c8c8ad682 --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/assets/hetznerIcon2.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/cluster-summary/index.ts b/console/ui/src/widgets/cluster-summary/index.ts new file mode 100644 index 000000000..1c5f9406f --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/index.ts @@ -0,0 +1,3 @@ +import ClusterSummary from '@widgets/cluster-summary/ui'; + +export default ClusterSummary; diff --git a/console/ui/src/widgets/cluster-summary/lib/hooks.tsx b/console/ui/src/widgets/cluster-summary/lib/hooks.tsx new file mode 100644 index 000000000..9ecacc2c8 --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/lib/hooks.tsx @@ -0,0 +1,205 @@ +import { Icon, Link, Stack, Typography } from '@mui/material'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { Trans, useTranslation } from 'react-i18next'; +import { providerNamePricingListMap } from '@widgets/cluster-summary/model/constants.ts'; +import RamIcon from '@shared/assets/ramIcon.svg?react'; +import InstanceIcon from 
'@shared/assets/instanceIcon.svg?react'; +import StorageIcon from '@shared/assets/storageIcon.svg?react'; +import LanIcon from '@shared/assets/lanIcon.svg?react'; +import FlagIcon from '@shared/assets/flagIcon.svg?react'; +import CheckIcon from '@shared/assets/checkIcon.svg?react'; +import CpuIcon from '@shared/assets/cpuIcon.svg?react'; +import WarningAmberOutlinedIcon from '@mui/icons-material/WarningAmberOutlined'; +import { + CloudProviderClustersSummary, + LocalClustersSummary, + UseGetSummaryConfigProps, +} from '@widgets/cluster-summary/model/types.ts'; + +const useGetCloudProviderConfig = () => { + const { t } = useTranslation(['clusters', 'shared']); + + return (data: CloudProviderClustersSummary) => { + const defaultVolume = data[CLUSTER_FORM_FIELD_NAMES.PROVIDER]?.volumes?.find((volume) => volume?.is_default) ?? {}; + + return [ + { + title: t('name'), + children: {data[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]}, + }, + { + title: t('postgresVersion'), + children: {data[CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]}, + }, + { + title: t('cloud'), + children: ( + + + {data[CLUSTER_FORM_FIELD_NAMES.PROVIDER]?.description?.[0]} + + {data[CLUSTER_FORM_FIELD_NAMES.PROVIDER]?.description} + + ), + }, + { + title: t('region'), + children: ( + + {data[CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]?.code} + + + {data[CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]?.location} + + + ), + }, + { + title: t('instanceType'), + children: ( + + {data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.code} + + + + {data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.cpu} CPU + + + + {data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.ram} RAM + + + + ), + }, + { + title: t('numberOfInstances'), + children: ( + + + {data[CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]} + + ), + }, + { + title: t('dataDiskStorage'), + children: ( + + + {data[CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]} GB + + ), + }, + { + title: `${t('estimatedMonthlyPrice')}*`, + children: ( + <> + + ~ + {`${data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.currency}${( + data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.price_monthly * + data[CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT] + + defaultVolume?.price_monthly * + data[CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT] * + data[CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT] + )?.toFixed(2)}/${t('month', { ns: 'shared' })}`} + + + + ~ + {`${data[CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]?.currency}${data[ + CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG + ]?.price_monthly.toFixed(2)}/${t('perServer', { ns: 'clusters' })}`} + , ~ + {`${defaultVolume?.currency}${( + defaultVolume?.price_monthly * data[CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT] + )?.toFixed(2)}/${t('perDisk', { ns: 'clusters' })}`} + + + + + + + + + ), + }, + ]; + }; +}; + +const useGetLocalMachineConfig = () => { + const { t } = useTranslation(['clusters', 'shared']); + + return (data: LocalClustersSummary) => [ + { + title: t('name'), + children: {data[CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]}, + }, + { + title: t('postgresVersion'), + children: {data[CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]}, + }, + { + title: t('numberOfServers'), + children: ( + + + {data[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]?.length} + + ), + }, + { + title: t('loadBalancing'), + children: ( + + + + {data[CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER] + ? t('on', { ns: 'shared' }) + : t('off', { ns: 'shared' })} + + + ), + }, + { + title: t('highAvailability'), + children: ( + + + {data[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]?.length >= 3 ? 
( + + ) : ( + + )} + + {data[CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]?.length >= 3 + ? t('on', { ns: 'shared' }) + : t('off', { ns: 'shared' })} + + + + {t('highAvailabilityInfo')} + + + ), + }, + ]; +}; + +export const useGetSummaryConfig = ({ isCloudProvider, data }: UseGetSummaryConfigProps) => { + const cloudProviderConfig = useGetCloudProviderConfig(); + const localProviderConfig = useGetLocalMachineConfig(); + + return isCloudProvider + ? cloudProviderConfig(data as CloudProviderClustersSummary) + : localProviderConfig(data as LocalClustersSummary); +}; diff --git a/console/ui/src/widgets/cluster-summary/model/constants.ts b/console/ui/src/widgets/cluster-summary/model/constants.ts new file mode 100644 index 000000000..e69114190 --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/model/constants.ts @@ -0,0 +1,22 @@ +import { PROVIDERS } from '@shared/config/constants.ts'; +import AWSIcon from '@widgets/cluster-summary/assets/awsIcon.svg'; +import GCPIcon from '@widgets/cluster-summary/assets/gcpIcon.svg'; +import AzureIcon from '@widgets/cluster-summary/assets/azureIcon.svg'; +import DigitalOceanIcon from '@widgets/cluster-summary/assets/digitaloceanIcon.svg'; +import HetznerIcon from '@widgets/cluster-summary/assets/hetznerIcon2.svg'; + +export const providerNamePricingListMap = Object.freeze({ + [PROVIDERS.AWS]: 'https://aws.amazon.com/ec2/pricing/on-demand/', + [PROVIDERS.GCP]: 'https://cloud.google.com/compute/vm-instance-pricing/#general-purpose_machine_type_family', + [PROVIDERS.AZURE]: 'https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/', + [PROVIDERS.DIGITAL_OCEAN]: 'https://www.digitalocean.com/pricing/droplets/', + [PROVIDERS.HETZNER]: 'https://www.hetzner.com/cloud/', +}); + +export const clusterSummaryNameIconProvidersMap = Object.freeze({ + [PROVIDERS.AWS]: AWSIcon, + [PROVIDERS.GCP]: GCPIcon, + [PROVIDERS.AZURE]: AzureIcon, + [PROVIDERS.DIGITAL_OCEAN]: DigitalOceanIcon, + [PROVIDERS.HETZNER]: HetznerIcon, +}); diff --git a/console/ui/src/widgets/cluster-summary/model/types.ts b/console/ui/src/widgets/cluster-summary/model/types.ts new file mode 100644 index 000000000..dfb8a898f --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/model/types.ts @@ -0,0 +1,37 @@ +import { ReactElement } from 'react'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; + +export interface SharedClusterSummaryProps { + [CLUSTER_FORM_FIELD_NAMES.CLUSTER_NAME]: string; + [CLUSTER_FORM_FIELD_NAMES.POSTGRES_VERSION]: number; +} + +export interface CloudProviderClustersSummary extends SharedClusterSummaryProps { + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: { + icon: ReactElement; + name: string; + }; + [CLUSTER_FORM_FIELD_NAMES.REGION_CONFIG]: { + name: string; + place: string; + }; + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]: { + name: string; + cpu: number; + ram: number; + }; + [CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]: number; + [CLUSTER_FORM_FIELD_NAMES.STORAGE_AMOUNT]: number; + [CLUSTER_FORM_FIELD_NAMES.INSTANCE_CONFIG]: number; + [CLUSTER_FORM_FIELD_NAMES.INSTANCES_AMOUNT]: number; +} + +export interface LocalClustersSummary extends SharedClusterSummaryProps { + [CLUSTER_FORM_FIELD_NAMES.DATABASE_SERVERS]: number; + [CLUSTER_FORM_FIELD_NAMES.IS_HAPROXY_LOAD_BALANCER]: boolean; +} + +export interface UseGetSummaryConfigProps { + isCloudProvider: boolean; + data: CloudProviderClustersSummary | LocalClustersSummary; +} diff --git 
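The estimated monthly price in the cloud summary above multiplies the instance's monthly price by the server count, then adds the default volume's monthly price times storage size times server count. A worked sketch (assuming, as the formula implies, that the volume's price_monthly is per GB):

    // total ≈ instancePrice * servers + volumePricePerGb * storageGb * servers
    const estimateMonthlyPrice = (
      instancePriceMonthly: number, // per server
      volumePricePerGbMonthly: number, // per GB, from the provider's default volume
      storageGb: number,
      servers: number,
    ): number => instancePriceMonthly * servers + volumePricePerGbMonthly * storageGb * servers;

    // e.g. $50/server, $0.10/GB, 100 GB, 3 servers:
    // 50 * 3 + 0.10 * 100 * 3 = 180  ->  displayed as "~$180.00/month"
    estimateMonthlyPrice(50, 0.1, 100, 3).toFixed(2); // "180.00"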
a/console/ui/src/widgets/cluster-summary/ui/index.tsx b/console/ui/src/widgets/cluster-summary/ui/index.tsx new file mode 100644 index 000000000..06d83508b --- /dev/null +++ b/console/ui/src/widgets/cluster-summary/ui/index.tsx @@ -0,0 +1,50 @@ +import { FC } from 'react'; +import { Divider, Paper, Typography } from '@mui/material'; +import { useTranslation } from 'react-i18next'; +import { CLUSTER_FORM_FIELD_NAMES } from '@widgets/cluster-form/model/constants.ts'; +import { useFormContext, useWatch } from 'react-hook-form'; +import { useGetSummaryConfig } from '@widgets/cluster-summary/lib/hooks.tsx'; +import { PROVIDERS } from '@shared/config/constants.ts'; +import InfoCardBody from '@shared/ui/info-card-body'; +import { clusterSummaryNameIconProvidersMap } from '@widgets/cluster-summary/model/constants.ts'; + +const ClusterSummary: FC = () => { + const { t } = useTranslation(['clusters', 'shared']); + const { control } = useFormContext(); + + const watchValues = useWatch({ + control, + }); + + const config = useGetSummaryConfig({ + isCloudProvider: watchValues[CLUSTER_FORM_FIELD_NAMES.PROVIDER]?.code !== PROVIDERS.LOCAL, + data: { + ...watchValues, + [CLUSTER_FORM_FIELD_NAMES.PROVIDER]: { + ...watchValues[CLUSTER_FORM_FIELD_NAMES.PROVIDER], + icon: clusterSummaryNameIconProvidersMap[watchValues[CLUSTER_FORM_FIELD_NAMES.PROVIDER]?.code], + }, + }, + }); + + return ( + + + {t('summary')} + + + + + ); +}; + +export default ClusterSummary; diff --git a/console/ui/src/widgets/clusters-table/assets/correctIcon.svg b/console/ui/src/widgets/clusters-table/assets/correctIcon.svg new file mode 100644 index 000000000..17486953f --- /dev/null +++ b/console/ui/src/widgets/clusters-table/assets/correctIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/clusters-table/assets/errorIcon.svg b/console/ui/src/widgets/clusters-table/assets/errorIcon.svg new file mode 100644 index 000000000..74c4d4d62 --- /dev/null +++ b/console/ui/src/widgets/clusters-table/assets/errorIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/clusters-table/assets/noClustersIcon.svg b/console/ui/src/widgets/clusters-table/assets/noClustersIcon.svg new file mode 100644 index 000000000..9eb95a45e --- /dev/null +++ b/console/ui/src/widgets/clusters-table/assets/noClustersIcon.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/console/ui/src/widgets/clusters-table/assets/warningIcon.svg b/console/ui/src/widgets/clusters-table/assets/warningIcon.svg new file mode 100644 index 000000000..19c88a48c --- /dev/null +++ b/console/ui/src/widgets/clusters-table/assets/warningIcon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/console/ui/src/widgets/clusters-table/index.ts b/console/ui/src/widgets/clusters-table/index.ts new file mode 100644 index 000000000..bd1d451f8 --- /dev/null +++ b/console/ui/src/widgets/clusters-table/index.ts @@ -0,0 +1,3 @@ +import ClustersTable from '@widgets/clusters-table/ui'; + +export default ClustersTable; diff --git a/console/ui/src/widgets/clusters-table/lib/functions.ts b/console/ui/src/widgets/clusters-table/lib/functions.ts new file mode 100644 index 000000000..07e2db023 --- /dev/null +++ b/console/ui/src/widgets/clusters-table/lib/functions.ts @@ -0,0 +1,82 @@ +import Index from '@app/router/routerPathsConfig'; +import { convertTimestampToReadableTime, generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; +import { NavigateFunction } from 'react-router-dom'; +import { createMRTColumnHelper } from 
'material-react-table'; +import { ClustersTableValues } from '@widgets/clusters-table/model/types.ts'; +import { RankingInfo, rankings, rankItem } from '@tanstack/match-sorter-utils'; +import { CLUSTER_STATUSES, CLUSTER_TABLE_COLUMN_NAMES } from '@widgets/clusters-table/model/constants.ts'; + +export const createClusterButtonHandler = (navigate: NavigateFunction) => () => + navigate(generateAbsoluteRouterPath(Index.clusters.add.absolutePath)); + +const columnHelper = createMRTColumnHelper(); + +export const getClusterTableColumns = ({ t, environmentOptions, postgresVersionOptions }) => [ + // note: changing table cell items content might need new custom filter function + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.NAME, { + header: t('clusterName', { ns: 'clusters' }), + filterFn: (row: Row, id: string, filterValue: string | number, addMeta: (item: RankingInfo) => void) => { + // custom filter callback because of ReactNode as values + const itemRank = rankItem(row.getValue(CLUSTER_TABLE_COLUMN_NAMES.NAME).props.children, filterValue as string, { + threshold: rankings.MATCHES, + }); + addMeta(itemRank); + return itemRank.passed; + }, + size: 150, + enableHiding: false, + grow: true, + visibleInShowHideMenu: false, + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.STATUS, { + header: t('status', { ns: 'shared' }), + size: 120, + filterVariant: 'select', + filterSelectOptions: Object.values(CLUSTER_STATUSES), + filterFn: (row: Row, id: string, filterValue: string | number, addMeta: (item: RankingInfo) => void) => { + const itemRank = rankItem( + row.getValue(CLUSTER_TABLE_COLUMN_NAMES.STATUS).props.children[1].props.children, // custom filter callback because of ReactNode as values + filterValue as string, + { + threshold: rankings.MATCHES, + }, + ); + addMeta(itemRank); + return itemRank.passed; + }, + grow: true, + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.CREATION_TIME, { + accessorFn: (originalRow) => new Date(originalRow[CLUSTER_TABLE_COLUMN_NAMES.CREATION_TIME]), //convert to date for sorting and filtering + header: t('creationTime', { ns: 'clusters' }), + size: 260, + filterVariant: 'date-range', + grow: true, + muiFilterTextFieldProps: { sx: { display: 'flex', flexDirection: 'column' } }, + muiFilterDatePickerProps: { sx: { display: 'flex', flexDirection: 'column' }, size: 'small' }, + Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // convert back to string to display + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.ENVIRONMENT, { + header: t('environment', { ns: 'shared' }), + size: 140, + filterVariant: 'select', + filterSelectOptions: environmentOptions, + grow: true, + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.SERVERS, { + header: t('servers', { ns: 'clusters' }), + size: 120, + grow: true, + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.POSTGRES_VERSION, { + header: t('postgresVersion', { ns: 'clusters' }), + size: 150, + filterVariant: 'select', + filterSelectOptions: postgresVersionOptions, + grow: true, + }), + columnHelper.accessor(CLUSTER_TABLE_COLUMN_NAMES.LOCATION, { + header: t('location', { ns: 'clusters' }), + grow: true, + }), +]; diff --git a/console/ui/src/widgets/clusters-table/lib/hooks.tsx b/console/ui/src/widgets/clusters-table/lib/hooks.tsx new file mode 100644 index 000000000..fe389f060 --- /dev/null +++ b/console/ui/src/widgets/clusters-table/lib/hooks.tsx @@ -0,0 +1,48 @@ +import { useMemo } from 'react'; +import { + CLUSTER_STATUSES, + CLUSTER_TABLE_COLUMN_NAMES, + 
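Because the name and status cells above contain ReactNodes rather than plain strings, getClusterTableColumns supplies custom filter callbacks that rank the cell's underlying text with @tanstack/match-sorter-utils. The shape of such a callback, generalized (row typing simplified):

    import { rankItem, rankings, type RankingInfo } from '@tanstack/match-sorter-utils';

    // Assumes the cell value is a ReactNode whose visible text sits at props.children.
    const fuzzyNodeFilter = (
      row: { getValue: (id: string) => { props: { children: string } } },
      id: string,
      filterValue: string,
      addMeta: (meta: RankingInfo) => void,
    ): boolean => {
      const itemRank = rankItem(row.getValue(id).props.children, filterValue, {
        threshold: rankings.MATCHES,
      });
      addMeta(itemRank); // lets the table rank rows by match quality
      return itemRank.passed;
    };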
clusterStatusColorNamesMap, +} from '@widgets/clusters-table/model/constants.ts'; +import { CircularProgress, Link, Stack, Typography } from '@mui/material'; +import { generateAbsoluteRouterPath } from '@shared/lib/functions.ts'; +import RouterPaths from '@app/router/routerPathsConfig'; +import { ClusterInfo } from '@shared/api/api/clusters.ts'; + +export const useGetClustersTableData = (data: ClusterInfo[]) => + useMemo( + () => + data?.map((cluster) => ({ + [CLUSTER_TABLE_COLUMN_NAMES.NAME]: [CLUSTER_STATUSES.DEPLOYING, CLUSTER_STATUSES.FAILED].some( + (status) => status === cluster.status, + ) ? ( + {cluster.name} + ) : ( + + {cluster.name} + + ), + [CLUSTER_TABLE_COLUMN_NAMES.STATUS]: ( + + {cluster.status === CLUSTER_STATUSES.DEPLOYING ? ( + + ) : clusterStatusColorNamesMap[cluster.status] ? ( + {cluster.status} + ) : null} + {cluster.status} + + ), + [CLUSTER_TABLE_COLUMN_NAMES.CREATION_TIME]: cluster.creation_time, + [CLUSTER_TABLE_COLUMN_NAMES.ENVIRONMENT]: cluster.environment, + [CLUSTER_TABLE_COLUMN_NAMES.SERVERS]: cluster.servers?.length ?? '-', + [CLUSTER_TABLE_COLUMN_NAMES.POSTGRES_VERSION]: cluster?.postgres_version ?? '-', + [CLUSTER_TABLE_COLUMN_NAMES.LOCATION]: cluster?.cluster_location ?? '-', + [CLUSTER_TABLE_COLUMN_NAMES.ID]: cluster.id, // not displayed, required only for correct cluster removal + })) ?? [], + [data], + ); diff --git a/console/ui/src/widgets/clusters-table/model/constants.ts b/console/ui/src/widgets/clusters-table/model/constants.ts new file mode 100644 index 000000000..d741dc5a5 --- /dev/null +++ b/console/ui/src/widgets/clusters-table/model/constants.ts @@ -0,0 +1,33 @@ +import CorrectIcon from '../assets/correctIcon.svg'; +import WarningIcon from '../assets/warningIcon.svg'; +import ErrorIcon from '../assets/errorIcon.svg'; + +export const CLUSTER_TABLE_COLUMN_NAMES = Object.freeze({ + // names are used as sorting params, changes will break sorting + NAME: 'name', + STATUS: 'status', + CREATION_TIME: 'created_at', + ENVIRONMENT: 'environment', + SERVERS: 'server_count', + POSTGRES_VERSION: 'postgres_version', + LOCATION: 'location', + ACTIONS: 'actions', + ID: 'id', +}); + +export const CLUSTER_STATUSES = Object.freeze({ + DEPLOYING: 'deploying', + READY: 'ready', + FAILED: 'failed', + HEALTHY: 'healthy', + UNHEALTHY: 'unhealthy', + DEGRADED: 'degraded', + UNAVAILABLE: 'unavailable', +}); + +export const clusterStatusColorNamesMap = Object.freeze({ + [CLUSTER_STATUSES.HEALTHY]: CorrectIcon, + [CLUSTER_STATUSES.UNHEALTHY]: WarningIcon, + [CLUSTER_STATUSES.DEGRADED]: ErrorIcon, + [CLUSTER_STATUSES.UNAVAILABLE]: ErrorIcon, +}); diff --git a/console/ui/src/widgets/clusters-table/model/types.ts b/console/ui/src/widgets/clusters-table/model/types.ts new file mode 100644 index 000000000..0c95caffd --- /dev/null +++ b/console/ui/src/widgets/clusters-table/model/types.ts @@ -0,0 +1,11 @@ +import { CLUSTER_TABLE_COLUMN_NAMES } from '@widgets/clusters-table/model/constants.ts'; + +export interface ClustersTableValues { + [CLUSTER_TABLE_COLUMN_NAMES.NAME]: string; + [CLUSTER_TABLE_COLUMN_NAMES.STATUS]: Element; + [CLUSTER_TABLE_COLUMN_NAMES.CREATION_TIME]: string; + [CLUSTER_TABLE_COLUMN_NAMES.ENVIRONMENT]: string; + [CLUSTER_TABLE_COLUMN_NAMES.SERVERS]: number; + [CLUSTER_TABLE_COLUMN_NAMES.POSTGRES_VERSION]: number; + [CLUSTER_TABLE_COLUMN_NAMES.LOCATION]: string; +} diff --git a/console/ui/src/widgets/clusters-table/ui/ClustersEmptyRowsFallback.tsx b/console/ui/src/widgets/clusters-table/ui/ClustersEmptyRowsFallback.tsx new file mode 100644 index 
000000000..23b0b668c --- /dev/null +++ b/console/ui/src/widgets/clusters-table/ui/ClustersEmptyRowsFallback.tsx @@ -0,0 +1,32 @@ +import React from 'react'; +import { useTranslation } from 'react-i18next'; +import { useNavigate } from 'react-router-dom'; +import { Box, Button, Stack, Typography } from '@mui/material'; +import { createClusterButtonHandler } from '@widgets/clusters-table/lib/functions.ts'; +import DatabaseIcon from '@assets/databaseIcon.svg?react'; + +const ClustersEmptyRowsFallback: React.FC = () => { + const { t } = useTranslation('clusters'); + const navigate = useNavigate(); + + return ( + + + + + + {t('noPostgresClustersTitle')} + + + {t('noPostgresClustersLine1', { createCluster: t('createCluster') })} + + + + + + ); +}; + +export default ClustersEmptyRowsFallback; diff --git a/console/ui/src/widgets/clusters-table/ui/index.tsx b/console/ui/src/widgets/clusters-table/ui/index.tsx new file mode 100644 index 000000000..292de4c6b --- /dev/null +++ b/console/ui/src/widgets/clusters-table/ui/index.tsx @@ -0,0 +1,99 @@ +import { FC, useMemo, useState } from 'react'; +import { CLUSTER_TABLE_COLUMN_NAMES } from '@widgets/clusters-table/model/constants.ts'; +import { useTranslation } from 'react-i18next'; +import { ClustersTableValues } from '@widgets/clusters-table/model/types.ts'; +import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { CLUSTERS_POLLING_INTERVAL, PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts'; +import ClustersTableButtons from '@features/clusters-table-buttons'; +import { useGetClustersQuery } from '@shared/api/api/clusters.ts'; +import { useGetClustersTableData } from '@widgets/clusters-table/lib/hooks.tsx'; +import { useGetEnvironmentsQuery } from '@shared/api/api/environments.ts'; +import { useGetPostgresVersionsQuery } from '@shared/api/api/other.ts'; +import ClustersTableRowActions from '@features/clusters-table-row-actions'; +import ClustersEmptyRowsFallback from '@widgets/clusters-table/ui/ClustersEmptyRowsFallback.tsx'; +import { getClusterTableColumns } from '@widgets/clusters-table/lib/functions.ts'; +import { manageSortingOrder } from '@shared/lib/functions.ts'; +import { useQueryPolling } from '@shared/lib/hooks.tsx'; +import DefaultTable from '@shared/ui/default-table'; + +const ClustersTable: FC = () => { + const { t, i18n } = useTranslation('clusters'); + + const currentProject = useAppSelector(selectCurrentProject); + + const [sorting, setSorting] = useState([ + { + id: CLUSTER_TABLE_COLUMN_NAMES.CREATION_TIME, + desc: true, + }, + ]); + + const [pagination, setPagination] = useState({ + pageIndex: 0, + pageSize: PAGINATION_LIMIT_OPTIONS[1].value, + }); + + const environments = useGetEnvironmentsQuery({ offset: 0, limit: 999_999_999 }); + const postgresVersions = useGetPostgresVersionsQuery(); + + const clustersList = useQueryPolling( + () => + useGetClustersQuery({ + projectId: Number(currentProject), // TODO: projectId, projectCode + offset: pagination.pageIndex * pagination.pageSize, + limit: pagination.pageSize, + ...(sorting?.[0] ? { sortBy: manageSortingOrder(sorting[0]) } : {}), + }), + CLUSTERS_POLLING_INTERVAL, + ); + + const columns = useMemo[]>( + () => + getClusterTableColumns({ + t, + environmentOptions: environments.data?.data?.map((environment) => environment.name) ?? 
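ClustersTable above wraps its query in the repo's useQueryPolling helper so the list refetches on an interval. For reference, RTK Query's generated hooks also accept a built-in pollingInterval option; this sketch shows the built-in route, not the wrapper's actual implementation:

    // Refetch every CLUSTERS_POLLING_INTERVAL ms while the component stays subscribed.
    // projectId, offset, and limit are derived exactly as in the surrounding code.
    const clustersList = useGetClustersQuery(
      { projectId, offset, limit },
      { pollingInterval: CLUSTERS_POLLING_INTERVAL },
    );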
[], + postgresVersionOptions: postgresVersions.data?.data?.map((version) => version.major_version) ?? [], + }), + [i18n.language, environments.data?.data, postgresVersions.data?.data], + ); + + const data = useGetClustersTableData(clustersList.data?.data); + + const tableConfig: MRT_TableOptions = { + columns, + data, + enablePagination: true, + enableRowSelection: true, + showGlobalFilter: true, + manualPagination: true, + enableRowActions: true, + enableStickyHeader: true, + enableMultiSort: false, + onPaginationChange: setPagination, + onSortingChange: setSorting, + rowCount: clustersList.data?.meta?.count ?? 0, + state: { + isLoading: clustersList.isFetching || environments.isFetching || postgresVersions.isFetching, + pagination, + sorting, + }, + initialState: { + columnVisibility: { + [CLUSTER_TABLE_COLUMN_NAMES.LOCATION]: false, + }, + }, + renderRowActionMenuItems: ({ closeMenu, row }) => , + renderEmptyRowsFallback: () => , + }; + + return ( + <> + + + + ); +}; + +export default ClustersTable; diff --git a/console/ui/src/widgets/environments-table/index.ts b/console/ui/src/widgets/environments-table/index.ts new file mode 100644 index 000000000..32d4c9056 --- /dev/null +++ b/console/ui/src/widgets/environments-table/index.ts @@ -0,0 +1,3 @@ +import EnvironmentsTable from '@widgets/environments-table/ui'; + +export default EnvironmentsTable; diff --git a/console/ui/src/widgets/environments-table/lib/hooks.tsx b/console/ui/src/widgets/environments-table/lib/hooks.tsx new file mode 100644 index 000000000..9f7293f36 --- /dev/null +++ b/console/ui/src/widgets/environments-table/lib/hooks.tsx @@ -0,0 +1,16 @@ +import { useMemo } from 'react'; +import { ENVIRONMENTS_TABLE_COLUMN_NAMES } from '@widgets/environments-table/model/constants.ts'; +import { ResponseEnvironment } from '@shared/api/api/environments.ts'; + +export const useGetEnvironmentsTableData = (data: ResponseEnvironment[]) => + useMemo( + () => + data?.map((secret) => ({ + [ENVIRONMENTS_TABLE_COLUMN_NAMES.ID]: secret.id, + [ENVIRONMENTS_TABLE_COLUMN_NAMES.NAME]: secret.name, + [ENVIRONMENTS_TABLE_COLUMN_NAMES.CREATED]: secret.created_at, + [ENVIRONMENTS_TABLE_COLUMN_NAMES.UPDATED]: secret.updated_at, + [ENVIRONMENTS_TABLE_COLUMN_NAMES.DESCRIPTION]: secret.description ?? '-', + })) ?? 
[], + [data], + ); diff --git a/console/ui/src/widgets/environments-table/model/constants.ts b/console/ui/src/widgets/environments-table/model/constants.ts new file mode 100644 index 000000000..f63231108 --- /dev/null +++ b/console/ui/src/widgets/environments-table/model/constants.ts @@ -0,0 +1,44 @@ +import { createMRTColumnHelper } from 'material-react-table'; +import { TFunction } from 'i18next'; +import { convertTimestampToReadableTime } from '@shared/lib/functions.ts'; +import { EnvironmentTableValues } from '@widgets/environments-table/model/types.ts'; + +export const ENVIRONMENTS_TABLE_COLUMN_NAMES = Object.freeze({ + ID: 'id', + NAME: 'name', + DESCRIPTION: 'description', + CREATED: 'created_at', + UPDATED: 'updated_at', +}); + +const columnHelper = createMRTColumnHelper(); + +export const environmentTableColumns = (t: TFunction) => [ + columnHelper.accessor(ENVIRONMENTS_TABLE_COLUMN_NAMES.ID, { + header: t('id', { ns: 'shared' }), + size: 80, + grow: true, + }), + columnHelper.accessor(ENVIRONMENTS_TABLE_COLUMN_NAMES.NAME, { + header: t('name', { ns: 'shared' }), + size: 80, + grow: true, + }), + columnHelper.accessor(ENVIRONMENTS_TABLE_COLUMN_NAMES.CREATED, { + header: t('created', { ns: 'shared' }), + size: 150, + grow: true, + Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // convert back to string for display + }), + columnHelper.accessor(ENVIRONMENTS_TABLE_COLUMN_NAMES.UPDATED, { + header: t('updated', { ns: 'shared' }), + size: 150, + grow: true, + Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // convert back to string for display + }), + columnHelper.accessor(ENVIRONMENTS_TABLE_COLUMN_NAMES.DESCRIPTION, { + header: t('description', { ns: 'shared' }), + size: 150, + grow: true, + }), +]; diff --git a/console/ui/src/widgets/environments-table/model/types.ts b/console/ui/src/widgets/environments-table/model/types.ts new file mode 100644 index 000000000..3c1719038 --- /dev/null +++ b/console/ui/src/widgets/environments-table/model/types.ts @@ -0,0 +1,9 @@ +import { PROJECTS_TABLE_COLUMN_NAMES } from '@widgets/projects-table/model/constants.ts'; + +export interface EnvironmentTableValues { + [PROJECTS_TABLE_COLUMN_NAMES.ID]: string; + [PROJECTS_TABLE_COLUMN_NAMES.NAME]: string; + [PROJECTS_TABLE_COLUMN_NAMES.DESCRIPTION]: 'description'; + [PROJECTS_TABLE_COLUMN_NAMES.CREATED]: 'created_at'; + [PROJECTS_TABLE_COLUMN_NAMES.UPDATED]: 'updated_at'; +} diff --git a/console/ui/src/widgets/environments-table/ui/EnvironmentsTableButtons.tsx b/console/ui/src/widgets/environments-table/ui/EnvironmentsTableButtons.tsx new file mode 100644 index 000000000..71a41b78e --- /dev/null +++ b/console/ui/src/widgets/environments-table/ui/EnvironmentsTableButtons.tsx @@ -0,0 +1,13 @@ +import { FC } from 'react'; +import { Stack } from '@mui/material'; +import AddEnvironment from '@features/add-environment'; + +const EnvironmentsTableButtons: FC = () => { + return ( + + + + ); +}; + +export default EnvironmentsTableButtons; diff --git a/console/ui/src/widgets/environments-table/ui/index.tsx b/console/ui/src/widgets/environments-table/ui/index.tsx new file mode 100644 index 000000000..9484f049b --- /dev/null +++ b/console/ui/src/widgets/environments-table/ui/index.tsx @@ -0,0 +1,58 @@ +import React, { FC, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts'; +import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table'; +import { 
EnvironmentTableValues } from '@widgets/environments-table/model/types.ts'; +import { environmentTableColumns } from '@widgets/environments-table/model/constants.ts'; +import { useGetEnvironmentsTableData } from '@widgets/environments-table/lib/hooks.tsx'; +import { useGetEnvironmentsQuery } from '@shared/api/api/environments.ts'; +import EnvironmentsTableButtons from '@widgets/environments-table/ui/EnvironmentsTableButtons.tsx'; +import EnvironmentsTableRowActions from '@features/environments-table-row-actions/ui'; +import DefaultTable from '@shared/ui/default-table'; + +const EnvironmentsTable: FC = () => { + const { t, i18n } = useTranslation(['settings', 'shared']); + + const [pagination, setPagination] = useState({ + pageIndex: 0, + pageSize: PAGINATION_LIMIT_OPTIONS[1].value, + }); + + const environmentsList = useGetEnvironmentsQuery({ + offset: pagination.pageIndex * pagination.pageSize, + limit: pagination.pageSize, + }); + + const columns = useMemo[]>(() => environmentTableColumns(t), [i18n.language]); + + const data = useGetEnvironmentsTableData(environmentsList.data?.data); + + const tableConfig: MRT_TableOptions = { + columns, + data, + enablePagination: true, + enableRowSelection: true, + showGlobalFilter: true, + enableRowActions: true, + enableStickyHeader: true, + enableMultiSort: false, + enableSorting: false, + onPaginationChange: setPagination, + manualPagination: true, + rowCount: environmentsList.data?.meta?.count ?? 0, + state: { + isLoading: environmentsList.isFetching, + pagination, + }, + renderRowActionMenuItems: ({ closeMenu, row }) => , + }; + + return ( + <> + + + + ); +}; + +export default EnvironmentsTable; diff --git a/console/ui/src/widgets/header/index.ts b/console/ui/src/widgets/header/index.ts new file mode 100644 index 000000000..eab9de354 --- /dev/null +++ b/console/ui/src/widgets/header/index.ts @@ -0,0 +1,3 @@ +import Header from '@widgets/header/ui'; + +export default Header; diff --git a/console/ui/src/widgets/header/ui/index.tsx b/console/ui/src/widgets/header/ui/index.tsx new file mode 100644 index 000000000..d012b026d --- /dev/null +++ b/console/ui/src/widgets/header/ui/index.tsx @@ -0,0 +1,64 @@ +import { FC, useEffect } from 'react'; +import { AppBar, Box, MenuItem, SelectChangeEvent, Stack, TextField, Toolbar, Typography } from '@mui/material'; +import Logo from '@shared/assets/AutobaseLogo.svg?react'; +import { grey } from '@mui/material/colors'; +import LogoutButton from '@features/logout-button'; +import { useGetProjectsQuery } from '@shared/api/api/projects.ts'; +import { setProject } from '@app/redux/slices/projectSlice/projectSlice.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { useAppDispatch, useAppSelector } from '@app/redux/store/hooks.ts'; +import { useTranslation } from 'react-i18next'; + +const Header: FC = () => { + const { t } = useTranslation('shared'); + const dispatch = useAppDispatch(); + const currentProject = useAppSelector(selectCurrentProject); + + const projects = useGetProjectsQuery({ limit: 999_999_999 }); + + useEffect(() => { + if (!currentProject && projects.data?.data) dispatch(setProject(String(projects.data?.data?.[0]?.id))); + }, [projects.data?.data, dispatch, currentProject]); + + const handleProjectChange = (e: SelectChangeEvent) => { + dispatch(setProject(e.target.value)); + }; + + return ( + theme.zIndex.drawer + 1 }} elevation={0} variant="outlined"> + + + + + + + + autobase + + + for PostgreSQL® + + + + + {projects.data?.data?.map((project) => ( 
+ + {project.name} + + )) ?? []} + + + + + + + ); +}; + +export default Header; diff --git a/console/ui/src/widgets/main/index.ts b/console/ui/src/widgets/main/index.ts new file mode 100644 index 000000000..cf887abaa --- /dev/null +++ b/console/ui/src/widgets/main/index.ts @@ -0,0 +1,3 @@ +import Main from '@widgets/main/ui'; + +export default Main; diff --git a/console/ui/src/widgets/main/ui/index.tsx b/console/ui/src/widgets/main/ui/index.tsx new file mode 100644 index 000000000..5bf620c68 --- /dev/null +++ b/console/ui/src/widgets/main/ui/index.tsx @@ -0,0 +1,20 @@ +import { FC, Suspense } from 'react'; +import { Divider, Stack, Toolbar } from '@mui/material'; +import { Outlet } from 'react-router-dom'; +import Breadcrumbs from '@features/bradcrumbs'; +import Spinner from '@shared/ui/spinner'; + +const Main: FC = () => ( +
+  <Stack>
+    <Toolbar />
+    <Breadcrumbs />
+    <Divider />
+    <Suspense fallback={<Spinner />}>
+      <Outlet />
+    </Suspense>
+  </Stack>
+); + +export default Main; diff --git a/console/ui/src/widgets/operations-table/index.ts b/console/ui/src/widgets/operations-table/index.ts new file mode 100644 index 000000000..e6de8fafd --- /dev/null +++ b/console/ui/src/widgets/operations-table/index.ts @@ -0,0 +1,3 @@ +import OperationsTable from '@widgets/operations-table/ui'; + +export default OperationsTable; diff --git a/console/ui/src/widgets/operations-table/lib/hooks.tsx b/console/ui/src/widgets/operations-table/lib/hooks.tsx new file mode 100644 index 000000000..e1223a7c3 --- /dev/null +++ b/console/ui/src/widgets/operations-table/lib/hooks.tsx @@ -0,0 +1,18 @@ +import { useMemo } from 'react'; +import { OPERATIONS_TABLE_COLUMN_NAMES } from '@widgets/operations-table/model/constants.ts'; +import { ResponseOperation } from '@shared/api/api/operations.ts'; + +export const useGetOperationsTableData = (data: ResponseOperation[]) => + useMemo( + () => + data?.map((operation) => ({ + [OPERATIONS_TABLE_COLUMN_NAMES.ID]: operation.id!, + [OPERATIONS_TABLE_COLUMN_NAMES.CLUSTER]: operation.cluster_name!, + [OPERATIONS_TABLE_COLUMN_NAMES.STARTED]: operation.started, + [OPERATIONS_TABLE_COLUMN_NAMES.FINISHED]: operation.status === 'in_progress' ? '-' : operation?.finished ?? '-', + [OPERATIONS_TABLE_COLUMN_NAMES.TYPE]: operation.type!, + [OPERATIONS_TABLE_COLUMN_NAMES.STATUS]: operation.status!, + [OPERATIONS_TABLE_COLUMN_NAMES.ENVIRONMENT]: operation.environment!, + })) ?? [], + [data], + ); diff --git a/console/ui/src/widgets/operations-table/model/constants.ts b/console/ui/src/widgets/operations-table/model/constants.ts new file mode 100644 index 000000000..bddc32254 --- /dev/null +++ b/console/ui/src/widgets/operations-table/model/constants.ts @@ -0,0 +1,59 @@ +import { TFunction } from 'i18next'; +import { createMRTColumnHelper } from 'material-react-table'; +import { OperationsTableValues } from '@widgets/operations-table/model/types.ts'; +import { convertTimestampToReadableTime } from '@shared/lib/functions.ts'; + +export const OPERATIONS_TABLE_COLUMN_NAMES = Object.freeze({ + // names are used as sorting params, changes will break sorting + ID: 'id', + STARTED: 'created_at', + FINISHED: 'updated_at', + TYPE: 'type', + STATUS: 'status', + CLUSTER: 'cluster_name', + ENVIRONMENT: 'environment', + ACTIONS: 'actions', +}); + +const columnHelper = createMRTColumnHelper(); + +export const operationTableColumns = (t: TFunction) => [ + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.ID, { + header: t('id', { ns: 'shared' }), + size: 80, + grow: true, + visibleInShowHideMenu: false, + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.STARTED, { + header: t('started', { ns: 'operations' }), + grow: true, + size: 120, + Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.FINISHED, { + header: t('finished', { ns: 'operations' }), + grow: true, + size: 120, + Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.TYPE, { + header: t('type', { ns: 'operations' }), + grow: true, + size: 60, + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.STATUS, { + header: t('status', { ns: 'shared' }), + grow: true, + size: 80, + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.CLUSTER, { + header: t('cluster', { ns: 'clusters' }), + grow: true, + size: 140, + }), + columnHelper.accessor(OPERATIONS_TABLE_COLUMN_NAMES.ENVIRONMENT, { + header: t('environment', { ns: 'shared' }), + 
grow: true, + size: 140, + }), +]; diff --git a/console/ui/src/widgets/operations-table/model/types.ts b/console/ui/src/widgets/operations-table/model/types.ts new file mode 100644 index 000000000..46304e31f --- /dev/null +++ b/console/ui/src/widgets/operations-table/model/types.ts @@ -0,0 +1,11 @@ +import { OPERATIONS_TABLE_COLUMN_NAMES } from '@widgets/operations-table/model/constants.ts'; + +export interface OperationsTableValues { + [OPERATIONS_TABLE_COLUMN_NAMES.ID]: number; + [OPERATIONS_TABLE_COLUMN_NAMES.STARTED]: string; + [OPERATIONS_TABLE_COLUMN_NAMES.FINISHED]: string; + [OPERATIONS_TABLE_COLUMN_NAMES.TYPE]: string; + [OPERATIONS_TABLE_COLUMN_NAMES.STATUS]: string; + [OPERATIONS_TABLE_COLUMN_NAMES.CLUSTER]: string; + [OPERATIONS_TABLE_COLUMN_NAMES.ENVIRONMENT]: string; +} diff --git a/console/ui/src/widgets/operations-table/ui/index.tsx b/console/ui/src/widgets/operations-table/ui/index.tsx new file mode 100644 index 000000000..9c5dc1e22 --- /dev/null +++ b/console/ui/src/widgets/operations-table/ui/index.tsx @@ -0,0 +1,90 @@ +import { FC, useMemo, useState } from 'react'; +import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table'; +import { OPERATIONS_TABLE_COLUMN_NAMES, operationTableColumns } from '@widgets/operations-table/model/constants.ts'; +import { useTranslation } from 'react-i18next'; +import { OperationsTableValues } from '@widgets/operations-table/model/types.ts'; +import OperationsTableButtons from '@features/operations-table-buttons'; +import OperationsTableRowActions from '@features/operations-table-row-actions'; +import { useGetOperationsQuery } from '@shared/api/api/operations.ts'; +import { useAppSelector } from '@app/redux/store/hooks.ts'; +import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts'; +import { subDays } from 'date-fns/subDays'; +import { + formatOperationsDate, + getOperationsDateRangeVariants, +} from '@features/operations-table-buttons/lib/functions.ts'; +import { OPERATIONS_POLLING_INTERVAL, PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts'; +import { useGetOperationsTableData } from '@widgets/operations-table/lib/hooks.tsx'; +import { manageSortingOrder } from '@shared/lib/functions.ts'; +import { useQueryPolling } from '@shared/lib/hooks.tsx'; +import DefaultTable from '@shared/ui/default-table'; + +const OperationsTable: FC = () => { + const { t, i18n } = useTranslation(['operations', 'shared']); + + const currentProject = useAppSelector(selectCurrentProject); + + const [sorting, setSorting] = useState([ + { + id: OPERATIONS_TABLE_COLUMN_NAMES.ID, + desc: true, + }, + ]); + const [pagination, setPagination] = useState({ + pageIndex: 0, + pageSize: PAGINATION_LIMIT_OPTIONS[1].value, + }); + + const [endDate] = useState(new Date().toISOString()); + const [startDate, setStartDate] = useState({ + name: getOperationsDateRangeVariants(t)[0].value, + value: formatOperationsDate(subDays(new Date(), 1)), + }); + + const operationsList = useQueryPolling( + () => + useGetOperationsQuery({ + projectId: Number(currentProject), + startDate: startDate.value, + endDate, + offset: pagination.pageIndex * pagination.pageSize, + limit: pagination.pageSize, + ...(sorting?.[0] ? 
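The operations, environments, and projects tables all share one server-side paging recipe: manualPagination makes material-react-table hold only the page state, offset/limit for the query are derived from that state, and rowCount from the response meta lets the table compute page numbers. The derivation, isolated as a hypothetical helper (the diff inlines it in each table):

    import { useState } from 'react';

    const useOffsetPagination = (initialPageSize = 20) => {
      const [pagination, setPagination] = useState({ pageIndex: 0, pageSize: initialPageSize });
      return {
        pagination, // feed into table state; wire onPaginationChange to setPagination
        setPagination,
        offset: pagination.pageIndex * pagination.pageSize, // rows for the server to skip
        limit: pagination.pageSize, // page size requested from the API
      };
    };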
{ sortBy: manageSortingOrder(sorting[0]) } : {}), + }), + OPERATIONS_POLLING_INTERVAL, + ); + + const columns = useMemo[]>(() => operationTableColumns(t), [i18n.language]); + + const data = useGetOperationsTableData(operationsList.data?.data); + + const tableConfig: MRT_TableOptions = { + columns, + data, + enablePagination: true, + showGlobalFilter: true, + manualSorting: true, + manualPagination: true, + enableRowActions: true, + enableStickyHeader: true, + enableMultiSort: false, + onPaginationChange: setPagination, + onSortingChange: setSorting, + rowCount: operationsList.data?.meta?.count ?? 0, + state: { + isLoading: operationsList.isFetching, + pagination, + sorting, + }, + renderRowActionMenuItems: ({ closeMenu, row }) => , + }; + + return ( + <> + + + + ); +}; + +export default OperationsTable; diff --git a/console/ui/src/widgets/projects-table/index.tsx b/console/ui/src/widgets/projects-table/index.tsx new file mode 100644 index 000000000..398bdd3ef --- /dev/null +++ b/console/ui/src/widgets/projects-table/index.tsx @@ -0,0 +1,3 @@ +import ProjectsTable from '@widgets/projects-table/ui'; + +export default ProjectsTable; diff --git a/console/ui/src/widgets/projects-table/lib/hooks.tsx b/console/ui/src/widgets/projects-table/lib/hooks.tsx new file mode 100644 index 000000000..456940f7f --- /dev/null +++ b/console/ui/src/widgets/projects-table/lib/hooks.tsx @@ -0,0 +1,16 @@ +import { useMemo } from 'react'; +import { ResponseProject } from '@shared/api/api/projects.ts'; +import { PROJECTS_TABLE_COLUMN_NAMES } from '@widgets/projects-table/model/constants.ts'; + +export const useGetProjectsTableData = (data: ResponseProject[]) => + useMemo( + () => + data?.map((secret) => ({ + [PROJECTS_TABLE_COLUMN_NAMES.ID]: secret.id, + [PROJECTS_TABLE_COLUMN_NAMES.NAME]: secret.name, + [PROJECTS_TABLE_COLUMN_NAMES.CREATED]: secret.created_at, + [PROJECTS_TABLE_COLUMN_NAMES.UPDATED]: secret.updated_at, + [PROJECTS_TABLE_COLUMN_NAMES.DESCRIPTION]: secret.description ?? '-', + })) ?? 
diff --git a/console/ui/src/widgets/projects-table/index.tsx b/console/ui/src/widgets/projects-table/index.tsx
new file mode 100644
index 000000000..398bdd3ef
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/index.tsx
@@ -0,0 +1,3 @@
+import ProjectsTable from '@widgets/projects-table/ui';
+
+export default ProjectsTable;
diff --git a/console/ui/src/widgets/projects-table/lib/hooks.tsx b/console/ui/src/widgets/projects-table/lib/hooks.tsx
new file mode 100644
index 000000000..456940f7f
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/lib/hooks.tsx
@@ -0,0 +1,16 @@
+import { useMemo } from 'react';
+import { ResponseProject } from '@shared/api/api/projects.ts';
+import { PROJECTS_TABLE_COLUMN_NAMES } from '@widgets/projects-table/model/constants.ts';
+
+export const useGetProjectsTableData = (data: ResponseProject[]) =>
+  useMemo(
+    () =>
+      data?.map((project) => ({
+        [PROJECTS_TABLE_COLUMN_NAMES.ID]: project.id,
+        [PROJECTS_TABLE_COLUMN_NAMES.NAME]: project.name,
+        [PROJECTS_TABLE_COLUMN_NAMES.CREATED]: project.created_at,
+        [PROJECTS_TABLE_COLUMN_NAMES.UPDATED]: project.updated_at,
+        [PROJECTS_TABLE_COLUMN_NAMES.DESCRIPTION]: project.description ?? '-',
+      })) ?? [],
+    [data],
+  );
diff --git a/console/ui/src/widgets/projects-table/model/constants.ts b/console/ui/src/widgets/projects-table/model/constants.ts
new file mode 100644
index 000000000..a9e758097
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/model/constants.ts
@@ -0,0 +1,44 @@
+import { createMRTColumnHelper } from 'material-react-table';
+import { TFunction } from 'i18next';
+import { convertTimestampToReadableTime } from '@shared/lib/functions.ts';
+import { ProjectsTableValues } from '@widgets/projects-table/model/types.ts';
+
+export const PROJECTS_TABLE_COLUMN_NAMES = Object.freeze({
+  ID: 'id',
+  NAME: 'name',
+  DESCRIPTION: 'description',
+  CREATED: 'created_at',
+  UPDATED: 'updated_at',
+});
+
+const columnHelper = createMRTColumnHelper<ProjectsTableValues>();
+
+export const projectsTableColumns = (t: TFunction) => [
+  columnHelper.accessor(PROJECTS_TABLE_COLUMN_NAMES.ID, {
+    header: t('id', { ns: 'shared' }),
+    size: 80,
+    grow: true,
+  }),
+  columnHelper.accessor(PROJECTS_TABLE_COLUMN_NAMES.NAME, {
+    header: t('name', { ns: 'shared' }),
+    size: 80,
+    grow: true,
+  }),
+  columnHelper.accessor(PROJECTS_TABLE_COLUMN_NAMES.CREATED, {
+    header: t('created', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+    Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // render the timestamp as a readable string
+  }),
+  columnHelper.accessor(PROJECTS_TABLE_COLUMN_NAMES.UPDATED, {
+    header: t('updated', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+    Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // render the timestamp as a readable string
+  }),
+  columnHelper.accessor(PROJECTS_TABLE_COLUMN_NAMES.DESCRIPTION, {
+    header: t('description', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+  }),
+];
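The Cell renderers above delegate to convertTimestampToReadableTime from @shared/lib/functions.ts, which this diff does not touch. A minimal sketch of what that helper is assumed to do, built on the date-fns dependency the UI already uses (the real implementation may choose a different format or locale handling):

  import { format } from 'date-fns';

  // Assumed contract: ISO timestamp in, short human-readable string out.
  export const convertTimestampToReadableTime = (timestamp?: string): string =>
    timestamp ? format(new Date(timestamp), 'yyyy-MM-dd HH:mm') : '-';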
diff --git a/console/ui/src/widgets/projects-table/model/types.ts b/console/ui/src/widgets/projects-table/model/types.ts
new file mode 100644
index 000000000..accbdd8d5
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/model/types.ts
@@ -0,0 +1,9 @@
+import { PROJECTS_TABLE_COLUMN_NAMES } from '@widgets/projects-table/model/constants.ts';
+
+export interface ProjectsTableValues {
+  [PROJECTS_TABLE_COLUMN_NAMES.ID]: string;
+  [PROJECTS_TABLE_COLUMN_NAMES.NAME]: string;
+  [PROJECTS_TABLE_COLUMN_NAMES.DESCRIPTION]: string;
+  [PROJECTS_TABLE_COLUMN_NAMES.CREATED]: string;
+  [PROJECTS_TABLE_COLUMN_NAMES.UPDATED]: string;
+}
diff --git a/console/ui/src/widgets/projects-table/ui/ProjectsTableButtons.tsx b/console/ui/src/widgets/projects-table/ui/ProjectsTableButtons.tsx
new file mode 100644
index 000000000..3875747a2
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/ui/ProjectsTableButtons.tsx
@@ -0,0 +1,13 @@
+import { FC } from 'react';
+import AddProject from '@features/add-project';
+import { Stack } from '@mui/material';
+
+const ProjectsTableButtons: FC = () => {
+  return (
+    <Stack>
+      <AddProject />
+    </Stack>
+  );
+};
+
+export default ProjectsTableButtons;
diff --git a/console/ui/src/widgets/projects-table/ui/index.tsx b/console/ui/src/widgets/projects-table/ui/index.tsx
new file mode 100644
index 000000000..1820a5a19
--- /dev/null
+++ b/console/ui/src/widgets/projects-table/ui/index.tsx
@@ -0,0 +1,58 @@
+import React, { FC, useMemo, useState } from 'react';
+import { PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts';
+import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table';
+import { ProjectsTableValues } from '@widgets/projects-table/model/types.ts';
+import { projectsTableColumns } from '@widgets/projects-table/model/constants.ts';
+import { useTranslation } from 'react-i18next';
+import { useGetProjectsTableData } from '@widgets/projects-table/lib/hooks.tsx';
+import { useGetProjectsQuery } from '@shared/api/api/projects.ts';
+import ProjectsTableButtons from '@widgets/projects-table/ui/ProjectsTableButtons.tsx';
+import ProjectsTableRowActions from '@features/pojects-table-row-actions';
+import DefaultTable from '@shared/ui/default-table';
+
+const ProjectsTable: FC = () => {
+  const { t, i18n } = useTranslation(['settings', 'shared']);
+
+  const [pagination, setPagination] = useState({
+    pageIndex: 0,
+    pageSize: PAGINATION_LIMIT_OPTIONS[1].value,
+  });
+
+  const projectsList = useGetProjectsQuery({
+    offset: pagination.pageIndex * pagination.pageSize,
+    limit: pagination.pageSize,
+  });
+
+  const columns = useMemo<MRT_ColumnDef<ProjectsTableValues>[]>(() => projectsTableColumns(t), [i18n.language]);
+
+  const data = useGetProjectsTableData(projectsList.data?.data);
+
+  const tableConfig: MRT_TableOptions<MRT_RowData> = {
+    columns,
+    data,
+    enablePagination: true,
+    enableRowSelection: true,
+    showGlobalFilter: true,
+    enableRowActions: true,
+    enableStickyHeader: true,
+    enableMultiSort: false,
+    enableSorting: false,
+    onPaginationChange: setPagination,
+    manualPagination: true,
+    rowCount: projectsList.data?.meta?.count ?? 0,
+    state: {
+      isLoading: projectsList.isFetching,
+      pagination,
+    },
+    renderRowActionMenuItems: ({ closeMenu, row }) => <ProjectsTableRowActions closeMenu={closeMenu} row={row} />,
+  };
+
+  return (
+    <>
+      <ProjectsTableButtons />
+      <DefaultTable tableConfig={tableConfig} />
+    </>
+  );
+};
+
+export default ProjectsTable;
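All three table widgets paginate server-side the same way: material-react-table owns pageIndex/pageSize, and the REST query derives offset/limit from them. The mapping the widgets inline, shown as a standalone helper purely for clarity (not part of the diff):

  interface PaginationState {
    pageIndex: number; // zero-based page number
    pageSize: number;
  }

  // Page 2 with pageSize 20 -> { offset: 40, limit: 20 }.
  const toQueryRange = ({ pageIndex, pageSize }: PaginationState) => ({
    offset: pageIndex * pageSize,
    limit: pageSize,
  });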
diff --git a/console/ui/src/widgets/secrets-table/index.ts b/console/ui/src/widgets/secrets-table/index.ts
new file mode 100644
index 000000000..cc40093da
--- /dev/null
+++ b/console/ui/src/widgets/secrets-table/index.ts
@@ -0,0 +1,3 @@
+import SecretsTable from '@widgets/secrets-table/ui';
+
+export default SecretsTable;
diff --git a/console/ui/src/widgets/secrets-table/lib/hooks.tsx b/console/ui/src/widgets/secrets-table/lib/hooks.tsx
new file mode 100644
index 000000000..6fb0035a0
--- /dev/null
+++ b/console/ui/src/widgets/secrets-table/lib/hooks.tsx
@@ -0,0 +1,18 @@
+import { ResponseSecretInfo } from '@shared/api/api/secrets.ts';
+import { useMemo } from 'react';
+import { SECRETS_TABLE_COLUMN_NAMES } from '@widgets/secrets-table/model/constants.ts';
+
+export const useGetSecretsTableData = (data: ResponseSecretInfo[]) =>
+  useMemo(
+    () =>
+      data?.map((secret) => ({
+        [SECRETS_TABLE_COLUMN_NAMES.NAME]: secret.name!,
+        [SECRETS_TABLE_COLUMN_NAMES.TYPE]: secret.type!,
+        [SECRETS_TABLE_COLUMN_NAMES.CREATED]: secret.created_at,
+        [SECRETS_TABLE_COLUMN_NAMES.UPDATED]: secret.updated_at,
+        [SECRETS_TABLE_COLUMN_NAMES.USED]: String(!!secret.is_used),
+        [SECRETS_TABLE_COLUMN_NAMES.ID]: secret.id!,
+        [SECRETS_TABLE_COLUMN_NAMES.USED_BY]: secret.used_by_clusters, // not displayed, required only for logic purposes
+      })) ?? [],
+    [data],
+  );
diff --git a/console/ui/src/widgets/secrets-table/model/constants.ts b/console/ui/src/widgets/secrets-table/model/constants.ts
new file mode 100644
index 000000000..be74a900a
--- /dev/null
+++ b/console/ui/src/widgets/secrets-table/model/constants.ts
@@ -0,0 +1,51 @@
+import { TFunction } from 'i18next';
+import { createMRTColumnHelper } from 'material-react-table';
+import { SecretsTableValues } from '@widgets/secrets-table/model/types.ts';
+import { convertTimestampToReadableTime } from '@shared/lib/functions.ts';
+
+export const SECRETS_TABLE_COLUMN_NAMES = Object.freeze({
+  NAME: 'name',
+  TYPE: 'type',
+  CREATED: 'created',
+  UPDATED: 'updated',
+  USED: 'used',
+  ID: 'id',
+  USED_BY: 'usedBy',
+});
+
+const columnHelper = createMRTColumnHelper<SecretsTableValues>();
+
+export const secretsTableColumns = (t: TFunction) => [
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.NAME, {
+    header: t('name', { ns: 'shared' }),
+    size: 80,
+    grow: true,
+  }),
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.TYPE, {
+    header: t('type', { ns: 'shared' }),
+    size: 80,
+    grow: true,
+  }),
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.CREATED, {
+    header: t('created', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+    Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // render the timestamp as a readable string
+  }),
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.UPDATED, {
+    header: t('updated', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+    Cell: ({ cell }) => convertTimestampToReadableTime(cell.getValue()), // render the timestamp as a readable string
+  }),
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.USED, {
+    header: t('used', { ns: 'shared' }),
+    size: 150,
+    grow: true,
+  }),
+  columnHelper.accessor(SECRETS_TABLE_COLUMN_NAMES.ID, {
+    header: t('id', { ns: 'shared' }),
+    size: 80,
+    grow: true,
+  }),
+];
diff --git a/console/ui/src/widgets/secrets-table/model/types.ts b/console/ui/src/widgets/secrets-table/model/types.ts
new file mode 100644
index 000000000..4a0a545f5
--- /dev/null
+++ b/console/ui/src/widgets/secrets-table/model/types.ts
@@ -0,0 +1,10 @@
+import { SECRETS_TABLE_COLUMN_NAMES } from '@widgets/secrets-table/model/constants.ts';
+
+export interface SecretsTableValues {
+  [SECRETS_TABLE_COLUMN_NAMES.NAME]: string;
+  [SECRETS_TABLE_COLUMN_NAMES.TYPE]: string;
+  [SECRETS_TABLE_COLUMN_NAMES.CREATED]: string;
+  [SECRETS_TABLE_COLUMN_NAMES.UPDATED]: string;
+  [SECRETS_TABLE_COLUMN_NAMES.USED]: string;
+  [SECRETS_TABLE_COLUMN_NAMES.ID]: number;
+}
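A note on the pattern shared by all three model folders: the frozen column-name map is the single source of both the accessor keys and the row-type keys. This type-checks because TypeScript's Object.freeze overload preserves string-literal property types, so each name can be used as a computed interface key. Distilled illustration (not repo code):

  const NAMES = Object.freeze({ NAME: 'name', ID: 'id' });

  // NAMES.NAME has the literal type 'name', so it is a valid computed key.
  interface RowValues {
    [NAMES.NAME]: string;
    [NAMES.ID]: number;
  }

  const row: RowValues = { name: 'postgres-backup', id: 1 };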
diff --git a/console/ui/src/widgets/secrets-table/ui/index.tsx b/console/ui/src/widgets/secrets-table/ui/index.tsx
new file mode 100644
index 000000000..b08ff5c4a
--- /dev/null
+++ b/console/ui/src/widgets/secrets-table/ui/index.tsx
@@ -0,0 +1,64 @@
+import React, { useMemo, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+import { MRT_ColumnDef, MRT_RowData, MRT_TableOptions } from 'material-react-table';
+import { SecretsTableValues } from '@widgets/secrets-table/model/types.ts';
+import SettingsTableRowActions from '@features/settings-table-row-actions';
+import SettingsTableButtons from '@features/settings-table-buttons';
+import { useGetSecretsQuery } from '@shared/api/api/secrets.ts';
+import { PAGINATION_LIMIT_OPTIONS } from '@shared/config/constants.ts';
+import { useAppSelector } from '@app/redux/store/hooks.ts';
+import { selectCurrentProject } from '@app/redux/slices/projectSlice/projectSelectors.ts';
+import { secretsTableColumns } from '@widgets/secrets-table/model/constants.ts';
+
+import { useGetSecretsTableData } from '@widgets/secrets-table/lib/hooks.tsx';
+import DefaultTable from '@shared/ui/default-table';
+
+const SecretsTable: React.FC = () => {
+  const { t, i18n } = useTranslation(['settings', 'shared']);
+
+  const currentProject = useAppSelector(selectCurrentProject);
+
+  const [pagination, setPagination] = useState({
+    pageIndex: 0,
+    pageSize: PAGINATION_LIMIT_OPTIONS[1].value,
+  });
+
+  const secretsList = useGetSecretsQuery({
+    projectId: Number(currentProject),
+    offset: pagination.pageIndex * pagination.pageSize,
+    limit: pagination.pageSize,
+  });
+
+  const columns = useMemo<MRT_ColumnDef<SecretsTableValues>[]>(() => secretsTableColumns(t), [i18n.language]);
+
+  const data = useGetSecretsTableData(secretsList.data?.data);
+
+  const tableConfig: MRT_TableOptions<MRT_RowData> = {
+    columns,
+    data,
+    enablePagination: true,
+    enableRowSelection: true,
+    showGlobalFilter: true,
+    enableRowActions: true,
+    enableStickyHeader: true,
+    enableMultiSort: false,
+    enableSorting: false,
+    onPaginationChange: setPagination,
+    manualPagination: true,
+    rowCount: secretsList.data?.meta?.count ?? 0,
+    state: {
+      isLoading: secretsList.isFetching,
+      pagination,
+    },
+    renderRowActionMenuItems: ({ closeMenu, row }) => <SettingsTableRowActions closeMenu={closeMenu} row={row} />,
+  };
+
+  return (
+    <>
+      <SettingsTableButtons />
+      <DefaultTable tableConfig={tableConfig} />
+    </>
+  );
+};
+
+export default SecretsTable;
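Each table hands row actions to a feature component via renderRowActionMenuItems, forwarding MRT's closeMenu callback and the clicked row. The expected prop contract, sketched as a stand-alone example (prop names follow the call sites above; the real @features/*-row-actions components may accept more):

  import { FC } from 'react';
  import { MenuItem } from '@mui/material';
  import { MRT_Row, MRT_RowData } from 'material-react-table';

  interface RowActionsProps {
    closeMenu: () => void;
    row: MRT_Row<MRT_RowData>;
  }

  // Example action: act on the clicked row, then close the menu.
  const ExampleRowActions: FC<RowActionsProps> = ({ closeMenu, row }) => (
    <MenuItem
      onClick={() => {
        console.log('row action for', row.original.id);
        closeMenu();
      }}>
      Example action
    </MenuItem>
  );

  export default ExampleRowActions;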
diff --git a/console/ui/src/widgets/settings-form/index.ts b/console/ui/src/widgets/settings-form/index.ts
new file mode 100644
index 000000000..cefc899ea
--- /dev/null
+++ b/console/ui/src/widgets/settings-form/index.ts
@@ -0,0 +1,3 @@
+import SettingsForm from '@widgets/settings-form/ui';
+
+export default SettingsForm;
diff --git a/console/ui/src/widgets/settings-form/ui/index.tsx b/console/ui/src/widgets/settings-form/ui/index.tsx
new file mode 100644
index 000000000..0d4148e7e
--- /dev/null
+++ b/console/ui/src/widgets/settings-form/ui/index.tsx
@@ -0,0 +1,98 @@
+import { FC, useEffect, useState } from 'react';
+import { FormProvider, useForm } from 'react-hook-form';
+import { SettingsFormValues } from '@entities/settings-proxy-block/model/types.ts';
+import { Box, Stack } from '@mui/material';
+import SettingsProxyBlock from '@entities/settings-proxy-block';
+import { useTranslation } from 'react-i18next';
+import { SETTINGS_FORM_FIELDS_NAMES } from '@entities/settings-proxy-block/model/constants.ts';
+import {
+  useGetSettingsQuery,
+  usePatchSettingsByNameMutation,
+  usePostSettingsMutation,
+} from '@shared/api/api/settings.ts';
+import { LoadingButton } from '@mui/lab';
+import { toast } from 'react-toastify';
+import { handleRequestErrorCatch } from '@shared/lib/functions.ts';
+import Spinner from '@shared/ui/spinner';
+
+const SettingsForm: FC = () => {
+  const { t } = useTranslation(['shared', 'toasts']);
+
+  const [isResetting, setIsResetting] = useState(false);
+
+  const methods = useForm<SettingsFormValues>({
+    mode: 'all',
+    defaultValues: {
+      [SETTINGS_FORM_FIELDS_NAMES.HTTP_PROXY]: '',
+      [SETTINGS_FORM_FIELDS_NAMES.HTTPS_PROXY]: '',
+    },
+  });
+
+  const settings = useGetSettingsQuery({ offset: 0, limit: 999_999_999 });
+  const [postSettingsTrigger, postSettingsTriggerState] = usePostSettingsMutation();
+  const [patchSettingsTrigger, patchSettingsTriggerState] = usePatchSettingsByNameMutation();
+
+  const { isValid, isDirty } = methods.formState;
+
+  useEffect(() => {
+    if (settings.isFetching) setIsResetting(true);
+    if (settings.data?.data) {
+      // eslint-disable-next-line @typescript-eslint/require-await
+      const resetForm = async () => {
+        // resetting from a sync function triggers form value setting errors
+        const settingsData = settings.data.data?.find((value) => value.name === 'proxy_env')?.value;
+        methods.reset((values) => ({
+          ...values,
+          ...settingsData,
+        }));
+      };
+      void resetForm().then(() => setIsResetting(false));
+    }
+  }, [settings.data?.data, methods]);
+
+  const onSubmit = async (values: SettingsFormValues) => {
+    try {
+      const filledFormValues = Object.fromEntries(Object.entries(values).filter(([_, value]) => value !== ''));
+      if (settings.data?.data?.find((value) => value?.name === 'proxy_env')?.value && isDirty) {
+        await patchSettingsTrigger({
+          name: 'proxy_env',
+          requestChangeSetting: { value: { ...filledFormValues } },
+        }).unwrap();
+      } else {
+        await postSettingsTrigger({
+          requestCreateSetting: {
+            name: 'proxy_env',
+            value: { ...filledFormValues },
+          },
+        }).unwrap();
+      }
+      toast.success(t('settingsSuccessfullyChanged', { ns: 'toasts' }));
+      methods.reset(values);
+    } catch (e) {
+      handleRequestErrorCatch(e);
+    }
+  };
+
+  return (
+    <Box>
+      {isResetting || settings.isFetching ? (
+        <Spinner />
+      ) : (
+        <FormProvider {...methods}>
+          <Stack>
+            <SettingsProxyBlock />
+            <LoadingButton
+              variant="contained"
+              disabled={!isValid || !isDirty}
+              loading={postSettingsTriggerState.isLoading || patchSettingsTriggerState.isLoading}
+              onClick={methods.handleSubmit(onSubmit)}>
+              {t('save')}
+            </LoadingButton>
+          </Stack>
+        </FormProvider>
+      )}
+    </Box>
+  );
+};
+
+export default SettingsForm;
diff --git a/console/ui/src/widgets/sidebar/index.ts b/console/ui/src/widgets/sidebar/index.ts
new file mode 100644
index 000000000..0ee5979a3
--- /dev/null
+++ b/console/ui/src/widgets/sidebar/index.ts
@@ -0,0 +1,3 @@
+import Sidebar from './ui';
+
+export default Sidebar;
diff --git a/console/ui/src/widgets/sidebar/model/constants.ts b/console/ui/src/widgets/sidebar/model/constants.ts
new file mode 100644
index 000000000..55cfc8bb6
--- /dev/null
+++ b/console/ui/src/widgets/sidebar/model/constants.ts
@@ -0,0 +1,54 @@
+import { TFunction } from 'i18next';
+import RouterPaths from '@app/router/routerPathsConfig';
+import ClustersIcon from '@assets/clustersIcon.svg?react';
+import OperationsIcon from '@assets/operationsIcon.svg?react';
+import SettingsIcon from '@assets/settingsIcon.svg?react';
+import GithubIcon from '@assets/githubIcon.svg?react';
+import DocumentationIcon from '@assets/docsIcon.svg?react';
+import SupportIcon from '@assets/supportIcon.svg?react';
+import SponsorIcon from '@assets/sponsorIcon.svg?react';
+
+export const sidebarData = (t: TFunction) => [
+  {
+    icon: ClustersIcon,
+    label: t('clusters', { ns: 'clusters' }),
+    path: RouterPaths.clusters.absolutePath,
+  },
+  {
+    icon: OperationsIcon,
+    label: t('operations', { ns: 'operations' }),
+    path: RouterPaths.operations.absolutePath,
+  },
+  {
+    icon: SettingsIcon,
+    label: t('settings', { ns: 'settings' }),
+    path: RouterPaths.settings.absolutePath,
+  },
+];
+
+export const sidebarLowData = (t: TFunction) => [
+  {
+    icon: GithubIcon,
+    label: t('github', { ns: 'shared' }),
+    path: 'https://github.com/vitabaks/autobase',
+  },
+  {
+    icon: DocumentationIcon,
+    label: t('documentation', { ns: 'shared' }),
+    path: 'https://autobase.tech/docs',
+  },
+  {
+    icon: SupportIcon,
+    label: t('support', { ns: 'shared' }),
+    path: 'https://autobase.tech/docs/support',
+  },
+  {
+    icon: SponsorIcon,
+    label: t('sponsor', { ns: 'shared' }),
+    path: 'https://autobase.tech/docs/sponsor',
+  },
+];
+
+export const OPEN_SIDEBAR_WIDTH = '240px';
+
+export const COLLAPSED_SIDEBAR_WIDTH = '61px';
diff --git a/console/ui/src/widgets/sidebar/ui/index.tsx b/console/ui/src/widgets/sidebar/ui/index.tsx
new file mode 100644
index 000000000..79144bd95
--- /dev/null
+++ b/console/ui/src/widgets/sidebar/ui/index.tsx
@@ -0,0 +1,86 @@
+import { COLLAPSED_SIDEBAR_WIDTH, OPEN_SIDEBAR_WIDTH, sidebarData, sidebarLowData } from '../model/constants.ts';
+import SidebarItem from '@entities/sidebar-item';
+import { useTranslation } from 'react-i18next';
+import { useLocation } from 'react-router-dom';
+import { Box, Divider, Drawer, IconButton, List, Stack, Toolbar, useMediaQuery } from '@mui/material';
+import { useEffect, useState } from 'react';
+import CollapseIcon from '@shared/assets/collapseIcon.svg?react';
+
+const Sidebar = () => {
+  const { t } = useTranslation('shared');
+  const location = useLocation();
+
+  const [isCollapsed, setIsCollapsed] = useState(localStorage.getItem('isSidebarCollapsed') === 'true');
+
+  const isLesserThan1600 = useMediaQuery('(max-width: 1600px)');
+
+  const toggleSidebarCollapse = () => {
+    setIsCollapsed((prev) => {
+      const newValue = !prev;
+      localStorage.setItem('isSidebarCollapsed', String(newValue));
+      return newValue;
+    });
+  };
+
+  const isActive = (path: string) => {
+    return location.pathname?.includes(path);
+  };
+
+  useEffect(() => {
+    if ((!isCollapsed && isLesserThan1600) || (isCollapsed && !isLesserThan1600)) toggleSidebarCollapse();
+  }, [isLesserThan1600]);
+
+  const sidebarItems = sidebarData(t).map((item) => (
+    <SidebarItem key={item.label} {...item} isActive={isActive(item.path)} isCollapsed={isCollapsed} />
+  ));
+
+  const sidebarLowIcons = sidebarLowData(t).map((item) => (
+    <SidebarItem key={item.label} {...item} isCollapsed={isCollapsed} />
+  ));
+
+  return (
+    <Drawer
+      variant="permanent"
+      sx={{
+        width: isCollapsed ? COLLAPSED_SIDEBAR_WIDTH : OPEN_SIDEBAR_WIDTH,
+      }}>
+      <Toolbar />
+      <Stack justifyContent="space-between" height="100%">
+        <List>{sidebarItems}</List>
+        <Box>
+          <Divider />
+          <List>{sidebarLowIcons}</List>
+          <Divider />
+          <IconButton onClick={toggleSidebarCollapse}>
+            <CollapseIcon />
+          </IconButton>
+        </Box>
+      </Stack>
+    </Drawer>
+  );
+};
+
+export default Sidebar;
diff --git a/console/ui/tsconfig.json b/console/ui/tsconfig.json
new file mode 100644
index 000000000..0604aa23a
--- /dev/null
+++ b/console/ui/tsconfig.json
@@ -0,0 +1,61 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "useDefineForClassFields": true,
+    "lib": [
+      "ES2020",
+      "DOM",
+      "DOM.Iterable"
+    ],
+    "module": "ESNext",
+    "skipLibCheck": true,
+    /* Bundler mode */
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "noEmit": true,
+    "strictNullChecks": true,
+    "jsx": "react-jsx",
+    "baseUrl": "./",
+    /* Linting */
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noFallthroughCasesInSwitch": true,
+    "paths": {
+      "@/*": [
+        "./src/*"
+      ],
+      "@app/*": [
+        "./src/app/*"
+      ],
+      "@assets/*": [
+        "./src/shared/assets/*"
+      ],
+      "@entities/*": [
+        "./src/entities/*"
+      ],
+      "@features/*": [
+        "./src/features/*"
+      ],
+      "@pages/*": [
+        "./src/pages/*"
+      ],
+      "@widgets/*": [
+        "./src/widgets/*"
+      ],
+      "@shared/*": [
+        "./src/shared/*"
+      ]
+    }
+  },
+  "include": [
+    "src"
+  ],
+  "references": [
+    {
+      "path": "./tsconfig.node.json"
+    }
+  ]
+}
diff --git a/console/ui/tsconfig.node.json b/console/ui/tsconfig.node.json
new file mode 100644
index 000000000..97ede7ee6
--- /dev/null
+++ b/console/ui/tsconfig.node.json
@@ -0,0 +1,11 @@
+{
+  "compilerOptions": {
+    "composite": true,
+    "skipLibCheck": true,
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "allowSyntheticDefaultImports": true,
+    "strict": true
+  },
+  "include": ["vite.config.ts"]
+}
diff --git a/console/ui/vite.config.mts b/console/ui/vite.config.mts
new file mode 100644
index 000000000..c1c2bebd5
--- /dev/null
+++ b/console/ui/vite.config.mts
@@ -0,0 +1,28 @@
+import { defineConfig } from 'vite';
+import react from '@vitejs/plugin-react-swc';
+import svgr from 'vite-plugin-svgr';
+import { resolve } from 'path';
+import fixReactVirtualized from 'esbuild-plugin-react-virtualized';
+
+// https://vitejs.dev/config/
+export default defineConfig({
+  plugins: [svgr(), react()],
+  optimizeDeps: {
+    exclude: ['js-big-decimal'],
+    esbuildOptions: {
+      plugins: [fixReactVirtualized],
+    },
+  },
+  resolve: {
+    alias: {
+      '@': resolve(__dirname, './src'),
+      '@app': resolve(__dirname, './src/app'),
+      '@assets': resolve(__dirname, './src/shared/assets'),
+      '@entities': resolve(__dirname, './src/entities'),
+      '@features': resolve(__dirname, './src/features'),
+      '@pages': resolve(__dirname, './src/pages'),
+      '@widgets': resolve(__dirname, './src/widgets'),
+      '@shared': resolve(__dirname, './src/shared'),
+    },
+  },
+});
diff --git a/console/ui/yarn.lock b/console/ui/yarn.lock
new file mode 100644
index 000000000..c8741cc42
--- /dev/null
+++ b/console/ui/yarn.lock
@@ -0,0 +1,6296 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1 + + +"@aashutoshrathi/word-wrap@^1.2.3": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf" + integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA== + +"@adobe/css-tools@^4.4.0": + version "4.4.1" + resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.4.1.tgz#2447a230bfe072c1659e6815129c03cf170710e3" + integrity sha512-12WGKBQzjUAI4ayyF4IAtfw2QR/IDoqk6jTddXDhtYTJF9ASmoE1zst7cVtP0aL/F1jUJL5r+JxKXKEgHNbEUQ== + +"@ampproject/remapping@^2.2.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" + integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== + dependencies: + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.24" + +"@apidevtools/json-schema-ref-parser@9.0.6": + version "9.0.6" + resolved "https://registry.yarnpkg.com/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-9.0.6.tgz#5d9000a3ac1fd25404da886da6b266adcd99cf1c" + integrity sha512-M3YgsLjI0lZxvrpeGVk9Ap032W6TPQkH6pRAZz81Ac3WUNF79VQooAFnp8umjvVzUmD93NkogxEwbSce7qMsUg== + dependencies: + "@jsdevtools/ono" "^7.1.3" + call-me-maybe "^1.0.1" + js-yaml "^3.13.1" + +"@apidevtools/openapi-schemas@^2.1.0": + version "2.1.0" + resolved "https://registry.yarnpkg.com/@apidevtools/openapi-schemas/-/openapi-schemas-2.1.0.tgz#9fa08017fb59d80538812f03fc7cac5992caaa17" + integrity sha512-Zc1AlqrJlX3SlpupFGpiLi2EbteyP7fXmUOGup6/DnkRgjP9bgMM/ag+n91rsv0U1Gpz0H3VILA/o3bW7Ua6BQ== + +"@apidevtools/swagger-methods@^3.0.2": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@apidevtools/swagger-methods/-/swagger-methods-3.0.2.tgz#b789a362e055b0340d04712eafe7027ddc1ac267" + integrity sha512-QAkD5kK2b1WfjDS/UQn/qQkbwF31uqRjPTrsCs5ZG9BQGAkjwvqGFjjPqAuzac/IYzpPtRzjCP1WrTuAIjMrXg== + +"@apidevtools/swagger-parser@^10.0.2", "@apidevtools/swagger-parser@^10.1.0": + version "10.1.0" + resolved "https://registry.yarnpkg.com/@apidevtools/swagger-parser/-/swagger-parser-10.1.0.tgz#a987d71e5be61feb623203be0c96e5985b192ab6" + integrity sha512-9Kt7EuS/7WbMAUv2gSziqjvxwDbFSg3Xeyfuj5laUODX8o/k/CpsAKiQ8W7/R88eXFTMbJYg6+7uAmOWNKmwnw== + dependencies: + "@apidevtools/json-schema-ref-parser" "9.0.6" + "@apidevtools/openapi-schemas" "^2.1.0" + "@apidevtools/swagger-methods" "^3.0.2" + "@jsdevtools/ono" "^7.1.3" + ajv "^8.6.3" + ajv-draft-04 "^1.0.0" + call-me-maybe "^1.0.1" + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.23.5", "@babel/code-frame@^7.24.2": + version "7.24.2" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.2.tgz#718b4b19841809a58b29b68cde80bc5e1aa6d9ae" + integrity sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ== + dependencies: + "@babel/highlight" "^7.24.2" + picocolors "^1.0.0" + +"@babel/code-frame@^7.25.9", "@babel/code-frame@^7.26.0", "@babel/code-frame@^7.26.2": + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" + integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== + dependencies: + "@babel/helper-validator-identifier" "^7.25.9" + js-tokens "^4.0.0" + 
picocolors "^1.0.0" + +"@babel/compat-data@^7.23.5": + version "7.24.4" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.24.4.tgz#6f102372e9094f25d908ca0d34fc74c74606059a" + integrity sha512-vg8Gih2MLK+kOkHJp4gBEIkyaIi00jgWot2D9QOmmfLC8jINSOzmCLta6Bvz/JSBCqnegV0L80jhxkol5GWNfQ== + +"@babel/compat-data@^7.25.9": + version "7.26.3" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.3.tgz#99488264a56b2aded63983abd6a417f03b92ed02" + integrity sha512-nHIxvKPniQXpmQLb0vhY3VaFb3S0YrTAwpOWJZh1wn3oJPjJk9Asva204PsBdmAE8vpzfHudT8DB0scYvy9q0g== + +"@babel/core@^7.21.3": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.24.5.tgz#15ab5b98e101972d171aeef92ac70d8d6718f06a" + integrity sha512-tVQRucExLQ02Boi4vdPp49svNGcfL2GhdTCT9aldhXgCJVAI21EtRfBettiuLUwce/7r6bFdgs6JFkcdTiFttA== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.24.2" + "@babel/generator" "^7.24.5" + "@babel/helper-compilation-targets" "^7.23.6" + "@babel/helper-module-transforms" "^7.24.5" + "@babel/helpers" "^7.24.5" + "@babel/parser" "^7.24.5" + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.5" + "@babel/types" "^7.24.5" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/core@^7.26.0": + version "7.26.0" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.0.tgz#d78b6023cc8f3114ccf049eb219613f74a747b40" + integrity sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.26.0" + "@babel/generator" "^7.26.0" + "@babel/helper-compilation-targets" "^7.25.9" + "@babel/helper-module-transforms" "^7.26.0" + "@babel/helpers" "^7.26.0" + "@babel/parser" "^7.26.0" + "@babel/template" "^7.25.9" + "@babel/traverse" "^7.25.9" + "@babel/types" "^7.26.0" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/generator@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.24.5.tgz#e5afc068f932f05616b66713e28d0f04e99daeb3" + integrity sha512-x32i4hEXvr+iI0NEoEfDKzlemF8AmtOP8CcrRaEcpzysWuoEb1KknpcvMsHKPONoKZiDuItklgWhB18xEhr9PA== + dependencies: + "@babel/types" "^7.24.5" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^2.5.1" + +"@babel/generator@^7.26.0", "@babel/generator@^7.26.3": + version "7.26.3" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.3.tgz#ab8d4360544a425c90c248df7059881f4b2ce019" + integrity sha512-6FF/urZvD0sTeO7k6/B15pMLC4CHUv1426lzr3N01aHJTl046uCAh9LXW/fzeXXjPNCJ6iABW5XaWOsIZB93aQ== + dependencies: + "@babel/parser" "^7.26.3" + "@babel/types" "^7.26.3" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^3.0.2" + +"@babel/helper-compilation-targets@^7.23.6": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991" + integrity sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ== + dependencies: + "@babel/compat-data" "^7.23.5" + "@babel/helper-validator-option" "^7.23.5" + browserslist "^4.22.2" + lru-cache "^5.1.1" + semver "^6.3.1" + +"@babel/helper-compilation-targets@^7.25.9": + version 
"7.25.9" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz#55af025ce365be3cdc0c1c1e56c6af617ce88875" + integrity sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ== + dependencies: + "@babel/compat-data" "^7.25.9" + "@babel/helper-validator-option" "^7.25.9" + browserslist "^4.24.0" + lru-cache "^5.1.1" + semver "^6.3.1" + +"@babel/helper-environment-visitor@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" + integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== + +"@babel/helper-function-name@^7.23.0": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" + integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw== + dependencies: + "@babel/template" "^7.22.15" + "@babel/types" "^7.23.0" + +"@babel/helper-hoist-variables@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" + integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.24.3": + version "7.24.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz#6ac476e6d168c7c23ff3ba3cf4f7841d46ac8128" + integrity sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg== + dependencies: + "@babel/types" "^7.24.0" + +"@babel/helper-module-imports@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" + integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== + dependencies: + "@babel/traverse" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/helper-module-transforms@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.24.5.tgz#ea6c5e33f7b262a0ae762fd5986355c45f54a545" + integrity sha512-9GxeY8c2d2mdQUP1Dye0ks3VDyIMS98kt/llQ2nUId8IsWqTF0l1LkSX0/uP7l7MCDrzXS009Hyhe2gzTiGW8A== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-module-imports" "^7.24.3" + "@babel/helper-simple-access" "^7.24.5" + "@babel/helper-split-export-declaration" "^7.24.5" + "@babel/helper-validator-identifier" "^7.24.5" + +"@babel/helper-module-transforms@^7.26.0": + version "7.26.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" + integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== + dependencies: + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/helper-plugin-utils@^7.25.9": + version "7.25.9" + resolved 
"https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz#9cbdd63a9443a2c92a725cca7ebca12cc8dd9f46" + integrity sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw== + +"@babel/helper-simple-access@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.24.5.tgz#50da5b72f58c16b07fbd992810be6049478e85ba" + integrity sha512-uH3Hmf5q5n7n8mz7arjUlDOCbttY/DW4DYhE6FUsjKJ/oYC1kQQUvwEQWxRwUpX9qQKRXeqLwWxrqilMrf32sQ== + dependencies: + "@babel/types" "^7.24.5" + +"@babel/helper-split-export-declaration@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.5.tgz#b9a67f06a46b0b339323617c8c6213b9055a78b6" + integrity sha512-5CHncttXohrHk8GWOFCcCl4oRD9fKosWlIRgWm4ql9VYioKm52Mk2xsmoohvm7f3JoiLSM5ZgJuRaf5QZZYd3Q== + dependencies: + "@babel/types" "^7.24.5" + +"@babel/helper-string-parser@^7.23.4", "@babel/helper-string-parser@^7.24.1": + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz#f99c36d3593db9540705d0739a1f10b5e20c696e" + integrity sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ== + +"@babel/helper-string-parser@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" + integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== + +"@babel/helper-validator-identifier@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" + integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== + +"@babel/helper-validator-identifier@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.5.tgz#918b1a7fa23056603506370089bd990d8720db62" + integrity sha512-3q93SSKX2TWCG30M2G2kwaKeTYgEUp5Snjuj8qm729SObL6nbtUldAi37qbxkD5gg3xnBio+f9nqpSepGZMvxA== + +"@babel/helper-validator-identifier@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" + integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== + +"@babel/helper-validator-option@^7.23.5": + version "7.23.5" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307" + integrity sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw== + +"@babel/helper-validator-option@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72" + integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw== + +"@babel/helpers@^7.24.5": + version "7.24.5" + resolved 
"https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.24.5.tgz#fedeb87eeafa62b621160402181ad8585a22a40a" + integrity sha512-CiQmBMMpMQHwM5m01YnrM6imUG1ebgYJ+fAIW4FZe6m4qHTPaRHti+R8cggAwkdz4oXhtO4/K9JWlh+8hIfR2Q== + dependencies: + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.5" + "@babel/types" "^7.24.5" + +"@babel/helpers@^7.26.0": + version "7.26.0" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.0.tgz#30e621f1eba5aa45fe6f4868d2e9154d884119a4" + integrity sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw== + dependencies: + "@babel/template" "^7.25.9" + "@babel/types" "^7.26.0" + +"@babel/highlight@^7.24.2": + version "7.24.2" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.2.tgz#3f539503efc83d3c59080a10e6634306e0370d26" + integrity sha512-Yac1ao4flkTxTteCDZLEvdxg2fZfz1v8M4QpaGypq/WPDqg3ijHYbDfs+LG5hvzSoqaSZ9/Z9lKSP3CjZjv+pA== + dependencies: + "@babel/helper-validator-identifier" "^7.22.20" + chalk "^2.4.2" + js-tokens "^4.0.0" + picocolors "^1.0.0" + +"@babel/parser@^7.1.0", "@babel/parser@^7.20.7", "@babel/parser@^7.24.0": + version "7.24.4" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.4.tgz#234487a110d89ad5a3ed4a8a566c36b9453e8c88" + integrity sha512-zTvEBcghmeBma9QIGunWevvBAp4/Qu9Bdq+2k0Ot4fVMD6v3dsC9WOcRSKk7tRRyBM/53yKMJko9xOatGQAwSg== + +"@babel/parser@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.5.tgz#4a4d5ab4315579e5398a82dcf636ca80c3392790" + integrity sha512-EOv5IK8arwh3LI47dz1b0tKUb/1uhHAnHJOrjgtQMIpu1uXd9mlFrJg9IUgGUgZ41Ch0K8REPTYpO7B76b4vJg== + +"@babel/parser@^7.25.9", "@babel/parser@^7.26.0", "@babel/parser@^7.26.3": + version "7.26.3" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.3.tgz#8c51c5db6ddf08134af1ddbacf16aaab48bac234" + integrity sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA== + dependencies: + "@babel/types" "^7.26.3" + +"@babel/plugin-transform-react-jsx-self@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.25.9.tgz#c0b6cae9c1b73967f7f9eb2fca9536ba2fad2858" + integrity sha512-y8quW6p0WHkEhmErnfe58r7x0A70uKphQm8Sp8cV7tjNQwK56sNVK0M73LK3WuYmsuyrftut4xAkjjgU0twaMg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-react-jsx-source@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.25.9.tgz#4c6b8daa520b5f155b5fb55547d7c9fa91417503" + integrity sha512-+iqjT8xmXhhYv4/uiYd8FNQsraMFZIfxVSqxxVSZP0WbbSAWvBXAul0m/zu+7Vv4O/3WtApy9pmaTMiumEZgfg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.23.9", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.5.tgz#230946857c053a36ccc66e1dd03b17dd0c4ed02c" + integrity sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/runtime@^7.25.7", "@babel/runtime@^7.26.0": + version "7.26.0" + resolved 
"https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.0.tgz#8600c2f595f277c60815256418b85356a65173c1" + integrity sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/runtime@^7.7.2": + version "7.24.8" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.8.tgz#5d958c3827b13cc6d05e038c07fb2e5e3420d82e" + integrity sha512-5F7SDGs1T72ZczbRwbGO9lQi0NLjQxzl6i4lJxLxfW9U5UluCSyEJeniWvnhl3/euNiqQVbo8zruhsDfid0esA== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/template@^7.22.15", "@babel/template@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.0.tgz#c6a524aa93a4a05d66aaf31654258fae69d87d50" + integrity sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA== + dependencies: + "@babel/code-frame" "^7.23.5" + "@babel/parser" "^7.24.0" + "@babel/types" "^7.24.0" + +"@babel/template@^7.25.9": + version "7.25.9" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.25.9.tgz#ecb62d81a8a6f5dc5fe8abfc3901fc52ddf15016" + integrity sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg== + dependencies: + "@babel/code-frame" "^7.25.9" + "@babel/parser" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/traverse@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.5.tgz#972aa0bc45f16983bf64aa1f877b2dd0eea7e6f8" + integrity sha512-7aaBLeDQ4zYcUFDUD41lJc1fG8+5IU9DaNSJAgal866FGvmD5EbWQgnEC6kO1gGLsX0esNkfnJSndbTXA3r7UA== + dependencies: + "@babel/code-frame" "^7.24.2" + "@babel/generator" "^7.24.5" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-function-name" "^7.23.0" + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.24.5" + "@babel/parser" "^7.24.5" + "@babel/types" "^7.24.5" + debug "^4.3.1" + globals "^11.1.0" + +"@babel/traverse@^7.25.9": + version "7.26.4" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.4.tgz#ac3a2a84b908dde6d463c3bfa2c5fdc1653574bd" + integrity sha512-fH+b7Y4p3yqvApJALCPJcwb0/XaOSgtK4pzV6WVjPR5GLFQBRI7pfoX2V2iM48NXvX07NUxxm1Vw98YjqTcU5w== + dependencies: + "@babel/code-frame" "^7.26.2" + "@babel/generator" "^7.26.3" + "@babel/parser" "^7.26.3" + "@babel/template" "^7.25.9" + "@babel/types" "^7.26.3" + debug "^4.3.1" + globals "^11.1.0" + +"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.0.tgz#3b951f435a92e7333eba05b7566fd297960ea1bf" + integrity sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w== + dependencies: + "@babel/helper-string-parser" "^7.23.4" + "@babel/helper-validator-identifier" "^7.22.20" + to-fast-properties "^2.0.0" + +"@babel/types@^7.21.3", "@babel/types@^7.24.5": + version "7.24.5" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.5.tgz#7661930afc638a5383eb0c4aee59b74f38db84d7" + integrity sha512-6mQNsaLeXTw0nxYUYu+NSa4Hx4BlF1x1x8/PMFbiR+GBSr+2DkECc69b8hgy2frEodNcvPffeH8YfWd3LI6jhQ== + dependencies: + "@babel/helper-string-parser" "^7.24.1" + "@babel/helper-validator-identifier" "^7.24.5" + to-fast-properties "^2.0.0" + +"@babel/types@^7.25.9", "@babel/types@^7.26.0", 
"@babel/types@^7.26.3": + version "7.26.3" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.3.tgz#37e79830f04c2b5687acc77db97fbc75fb81f3c0" + integrity sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA== + dependencies: + "@babel/helper-string-parser" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + +"@emotion/babel-plugin@^11.13.5": + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz#eab8d65dbded74e0ecfd28dc218e75607c4e7bc0" + integrity sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ== + dependencies: + "@babel/helper-module-imports" "^7.16.7" + "@babel/runtime" "^7.18.3" + "@emotion/hash" "^0.9.2" + "@emotion/memoize" "^0.9.0" + "@emotion/serialize" "^1.3.3" + babel-plugin-macros "^3.1.0" + convert-source-map "^1.5.0" + escape-string-regexp "^4.0.0" + find-root "^1.1.0" + source-map "^0.5.7" + stylis "4.2.0" + +"@emotion/cache@^11.13.5", "@emotion/cache@^11.14.0": + version "11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.14.0.tgz#ee44b26986eeb93c8be82bb92f1f7a9b21b2ed76" + integrity sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA== + dependencies: + "@emotion/memoize" "^0.9.0" + "@emotion/sheet" "^1.4.0" + "@emotion/utils" "^1.4.2" + "@emotion/weak-memoize" "^0.4.0" + stylis "4.2.0" + +"@emotion/hash@^0.9.2": + version "0.9.2" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.2.tgz#ff9221b9f58b4dfe61e619a7788734bd63f6898b" + integrity sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g== + +"@emotion/is-prop-valid@^1.3.0": + version "1.3.1" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-1.3.1.tgz#8d5cf1132f836d7adbe42cf0b49df7816fc88240" + integrity sha512-/ACwoqx7XQi9knQs/G0qKvv5teDMhD7bXYns9N/wM8ah8iNb8jZ2uNO0YOgiq2o2poIvVtJS2YALasQuMSQ7Kw== + dependencies: + "@emotion/memoize" "^0.9.0" + +"@emotion/memoize@^0.9.0": + version "0.9.0" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.9.0.tgz#745969d649977776b43fc7648c556aaa462b4102" + integrity sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ== + +"@emotion/react@^11.14.0": + version "11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.14.0.tgz#cfaae35ebc67dd9ef4ea2e9acc6cd29e157dd05d" + integrity sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA== + dependencies: + "@babel/runtime" "^7.18.3" + "@emotion/babel-plugin" "^11.13.5" + "@emotion/cache" "^11.14.0" + "@emotion/serialize" "^1.3.3" + "@emotion/use-insertion-effect-with-fallbacks" "^1.2.0" + "@emotion/utils" "^1.4.2" + "@emotion/weak-memoize" "^0.4.0" + hoist-non-react-statics "^3.3.1" + +"@emotion/serialize@^1.3.3": + version "1.3.3" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" + integrity sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA== + dependencies: + "@emotion/hash" "^0.9.2" + "@emotion/memoize" "^0.9.0" + "@emotion/unitless" "^0.10.0" + "@emotion/utils" "^1.4.2" + csstype "^3.0.2" + +"@emotion/sheet@^1.4.0": + version "1.4.0" + resolved 
"https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.4.0.tgz#c9299c34d248bc26e82563735f78953d2efca83c" + integrity sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg== + +"@emotion/styled@^11.14.0": + version "11.14.0" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.14.0.tgz#f47ca7219b1a295186d7661583376fcea95f0ff3" + integrity sha512-XxfOnXFffatap2IyCeJyNov3kiDQWoR08gPUQxvbL7fxKryGBKUZUkG6Hz48DZwVrJSVh9sJboyV1Ds4OW6SgA== + dependencies: + "@babel/runtime" "^7.18.3" + "@emotion/babel-plugin" "^11.13.5" + "@emotion/is-prop-valid" "^1.3.0" + "@emotion/serialize" "^1.3.3" + "@emotion/use-insertion-effect-with-fallbacks" "^1.2.0" + "@emotion/utils" "^1.4.2" + +"@emotion/unitless@^0.10.0": + version "0.10.0" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.10.0.tgz#2af2f7c7e5150f497bdabd848ce7b218a27cf745" + integrity sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg== + +"@emotion/use-insertion-effect-with-fallbacks@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz#8a8cb77b590e09affb960f4ff1e9a89e532738bf" + integrity sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg== + +"@emotion/utils@^1.4.2": + version "1.4.2" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.4.2.tgz#6df6c45881fcb1c412d6688a311a98b7f59c1b52" + integrity sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA== + +"@emotion/weak-memoize@^0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz#5e13fac887f08c44f76b0ccaf3370eb00fec9bb6" + integrity sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg== + +"@esbuild/aix-ppc64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz#a70f4ac11c6a1dfc18b8bbb13284155d933b9537" + integrity sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g== + +"@esbuild/aix-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz#c7184a326533fcdf1b8ee0733e21c713b975575f" + integrity sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ== + +"@esbuild/android-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz#db1c9202a5bc92ea04c7b6840f1bbe09ebf9e6b9" + integrity sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg== + +"@esbuild/android-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz#09d9b4357780da9ea3a7dfb833a1f1ff439b4052" + integrity sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A== + +"@esbuild/android-arm@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.20.2.tgz#3b488c49aee9d491c2c8f98a909b785870d6e995" + integrity sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w== + +"@esbuild/android-arm@0.21.5": + version "0.21.5" + resolved 
"https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz#9b04384fb771926dfa6d7ad04324ecb2ab9b2e28" + integrity sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg== + +"@esbuild/android-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.20.2.tgz#3b1628029e5576249d2b2d766696e50768449f98" + integrity sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg== + +"@esbuild/android-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz#29918ec2db754cedcb6c1b04de8cd6547af6461e" + integrity sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA== + +"@esbuild/darwin-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz#6e8517a045ddd86ae30c6608c8475ebc0c4000bb" + integrity sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA== + +"@esbuild/darwin-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz#e495b539660e51690f3928af50a76fb0a6ccff2a" + integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== + +"@esbuild/darwin-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz#90ed098e1f9dd8a9381695b207e1cff45540a0d0" + integrity sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA== + +"@esbuild/darwin-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz#c13838fa57372839abdddc91d71542ceea2e1e22" + integrity sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw== + +"@esbuild/freebsd-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz#d71502d1ee89a1130327e890364666c760a2a911" + integrity sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw== + +"@esbuild/freebsd-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz#646b989aa20bf89fd071dd5dbfad69a3542e550e" + integrity sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g== + +"@esbuild/freebsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz#aa5ea58d9c1dd9af688b8b6f63ef0d3d60cea53c" + integrity sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw== + +"@esbuild/freebsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz#aa615cfc80af954d3458906e38ca22c18cf5c261" + integrity sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ== + +"@esbuild/linux-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz#055b63725df678379b0f6db9d0fa85463755b2e5" + integrity sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A== + 
+"@esbuild/linux-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz#70ac6fa14f5cb7e1f7f887bcffb680ad09922b5b" + integrity sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q== + +"@esbuild/linux-arm@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz#76b3b98cb1f87936fbc37f073efabad49dcd889c" + integrity sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg== + +"@esbuild/linux-arm@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz#fc6fd11a8aca56c1f6f3894f2bea0479f8f626b9" + integrity sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA== + +"@esbuild/linux-ia32@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz#c0e5e787c285264e5dfc7a79f04b8b4eefdad7fa" + integrity sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig== + +"@esbuild/linux-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz#3271f53b3f93e3d093d518d1649d6d68d346ede2" + integrity sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg== + +"@esbuild/linux-loong64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz#a6184e62bd7cdc63e0c0448b83801001653219c5" + integrity sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ== + +"@esbuild/linux-loong64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz#ed62e04238c57026aea831c5a130b73c0f9f26df" + integrity sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg== + +"@esbuild/linux-mips64el@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz#d08e39ce86f45ef8fc88549d29c62b8acf5649aa" + integrity sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA== + +"@esbuild/linux-mips64el@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz#e79b8eb48bf3b106fadec1ac8240fb97b4e64cbe" + integrity sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg== + +"@esbuild/linux-ppc64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz#8d252f0b7756ffd6d1cbde5ea67ff8fd20437f20" + integrity sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg== + +"@esbuild/linux-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz#5f2203860a143b9919d383ef7573521fb154c3e4" + integrity sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w== + +"@esbuild/linux-riscv64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz#19f6dcdb14409dae607f66ca1181dd4e9db81300" + integrity 
sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg== + +"@esbuild/linux-riscv64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz#07bcafd99322d5af62f618cb9e6a9b7f4bb825dc" + integrity sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA== + +"@esbuild/linux-s390x@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz#3c830c90f1a5d7dd1473d5595ea4ebb920988685" + integrity sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ== + +"@esbuild/linux-s390x@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz#b7ccf686751d6a3e44b8627ababc8be3ef62d8de" + integrity sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A== + +"@esbuild/linux-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz#86eca35203afc0d9de0694c64ec0ab0a378f6fff" + integrity sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw== + +"@esbuild/linux-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz#6d8f0c768e070e64309af8004bb94e68ab2bb3b0" + integrity sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ== + +"@esbuild/netbsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz#e771c8eb0e0f6e1877ffd4220036b98aed5915e6" + integrity sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ== + +"@esbuild/netbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz#bbe430f60d378ecb88decb219c602667387a6047" + integrity sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg== + +"@esbuild/openbsd-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz#9a795ae4b4e37e674f0f4d716f3e226dd7c39baf" + integrity sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ== + +"@esbuild/openbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz#99d1cf2937279560d2104821f5ccce220cb2af70" + integrity sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow== + +"@esbuild/sunos-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz#7df23b61a497b8ac189def6e25a95673caedb03f" + integrity sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w== + +"@esbuild/sunos-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz#08741512c10d529566baba837b4fe052c8f3487b" + integrity sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg== + +"@esbuild/win32-arm64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz#f1ae5abf9ca052ae11c1bc806fb4c0f519bacf90" + 
integrity sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ== + +"@esbuild/win32-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz#675b7385398411240735016144ab2e99a60fc75d" + integrity sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A== + +"@esbuild/win32-ia32@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz#241fe62c34d8e8461cd708277813e1d0ba55ce23" + integrity sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ== + +"@esbuild/win32-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz#1bfc3ce98aa6ca9a0969e4d2af72144c59c1193b" + integrity sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA== + +"@esbuild/win32-x64@0.20.2": + version "0.20.2" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz#9c907b21e30a52db959ba4f80bb01a0cc403d5cc" + integrity sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ== + +"@esbuild/win32-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" + integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== + +"@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": + version "4.4.0" + resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59" + integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA== + dependencies: + eslint-visitor-keys "^3.3.0" + +"@eslint-community/regexpp@^4.10.0", "@eslint-community/regexpp@^4.6.1": + version "4.10.0" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.10.0.tgz#548f6de556857c8bb73bbee70c35dc82a2e74d63" + integrity sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA== + +"@eslint/eslintrc@^2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.4.tgz#388a269f0f25c1b6adc317b5a2c55714894c70ad" + integrity sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ== + dependencies: + ajv "^6.12.4" + debug "^4.3.2" + espree "^9.6.0" + globals "^13.19.0" + ignore "^5.2.0" + import-fresh "^3.2.1" + js-yaml "^4.1.0" + minimatch "^3.1.2" + strip-json-comments "^3.1.1" + +"@eslint/js@8.57.1": + version "8.57.1" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.1.tgz#de633db3ec2ef6a3c89e2f19038063e8a122e2c2" + integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q== + +"@exodus/schemasafe@^1.0.0-rc.2": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@exodus/schemasafe/-/schemasafe-1.3.0.tgz#731656abe21e8e769a7f70a4d833e6312fe59b7f" + integrity sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw== + +"@faker-js/faker@^8.4.1": + version "8.4.1" + resolved 
"https://registry.yarnpkg.com/@faker-js/faker/-/faker-8.4.1.tgz#5d5e8aee8fce48f5e189bf730ebd1f758f491451" + integrity sha512-XQ3cU+Q8Uqmrbf2e0cIC/QN43sTBSC8KF12u29Mb47tWrt2hAgBXSgpZMj4Ao8Uk0iJcU99QsOCaIL8934obCg== + +"@floating-ui/core@^1.0.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.6.0.tgz#fa41b87812a16bf123122bf945946bae3fdf7fc1" + integrity sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g== + dependencies: + "@floating-ui/utils" "^0.2.1" + +"@floating-ui/dom@^1.0.0": + version "1.6.5" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.6.5.tgz#323f065c003f1d3ecf0ff16d2c2c4d38979f4cb9" + integrity sha512-Nsdud2X65Dz+1RHjAIP0t8z5e2ff/IRbei6BqFrl1urT8sDVzM1HMQ+R0XcU5ceRfyO3I6ayeqIfh+6Wb8LGTw== + dependencies: + "@floating-ui/core" "^1.0.0" + "@floating-ui/utils" "^0.2.0" + +"@floating-ui/react-dom@^2.0.8": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.0.9.tgz#264ba8b061000baa132b5910f0427a6acf7ad7ce" + integrity sha512-q0umO0+LQK4+p6aGyvzASqKbKOJcAHJ7ycE9CuUvfx3s9zTHWmGJTPOIlM/hmSBfUfg/XfY5YhLBLR/LHwShQQ== + dependencies: + "@floating-ui/dom" "^1.0.0" + +"@floating-ui/utils@^0.2.0", "@floating-ui/utils@^0.2.1": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.1.tgz#16308cea045f0fc777b6ff20a9f25474dd8293d2" + integrity sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q== + +"@fontsource/roboto@^5.2.5": + version "5.2.5" + resolved "https://registry.yarnpkg.com/@fontsource/roboto/-/roboto-5.2.5.tgz#b2d869075277e2cba31694951a2d355a8965d763" + integrity sha512-70r2UZ0raqLn5W+sPeKhqlf8wGvUXFWlofaDlcbt/S3d06+17gXKr3VNqDODB0I1ASme3dGT5OJj9NABt7OTZQ== + +"@hookform/resolvers@^3.10.0": + version "3.10.0" + resolved "https://registry.yarnpkg.com/@hookform/resolvers/-/resolvers-3.10.0.tgz#7bfd18113daca4e57e27e1205b7d5a2d371aa59a" + integrity sha512-79Dv+3mDF7i+2ajj7SkypSKHhl1cbln1OGavqrsF7p6mbUv11xpqpacPsGDCTRvCSjEEIez2ef1NveSVL3b0Ag== + +"@humanwhocodes/config-array@^0.13.0": + version "0.13.0" + resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.13.0.tgz#fb907624df3256d04b9aa2df50d7aa97ec648748" + integrity sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw== + dependencies: + "@humanwhocodes/object-schema" "^2.0.3" + debug "^4.3.1" + minimatch "^3.0.5" + +"@humanwhocodes/module-importer@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c" + integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== + +"@humanwhocodes/object-schema@^2.0.3": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz#4a2868d75d6d6963e423bcf90b7fd1be343409d3" + integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA== + +"@jest/schemas@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03" + integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA== + dependencies: + "@sinclair/typebox" "^0.27.8" + 
+"@jridgewell/gen-mapping@^0.3.5": + version "0.3.5" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36" + integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg== + dependencies: + "@jridgewell/set-array" "^1.2.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.24" + +"@jridgewell/resolve-uri@^3.1.0": + version "3.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== + +"@jridgewell/set-array@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" + integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== + +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.4.15": + version "1.4.15" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" + integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== + +"@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": + version "0.3.25" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" + integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== + dependencies: + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" + +"@jsdevtools/ono@^7.1.3": + version "7.1.3" + resolved "https://registry.yarnpkg.com/@jsdevtools/ono/-/ono-7.1.3.tgz#9df03bbd7c696a5c58885c34aa06da41c8543796" + integrity sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg== + +"@mattiasbuelens/web-streams-polyfill@^0.2.0": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@mattiasbuelens/web-streams-polyfill/-/web-streams-polyfill-0.2.1.tgz#d7c4aa94f98084ec0787be084d47167d62ea5f67" + integrity sha512-oKuFCQFa3W7Hj7zKn0+4ypI8JFm4ZKIoncwAC6wd5WwFW2sL7O1hpPoJdSWpynQ4DJ4lQ6MvFoVDmCLilonDFg== + dependencies: + "@types/whatwg-streams" "^0.0.7" + +"@monaco-editor/loader@^1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@monaco-editor/loader/-/loader-1.5.0.tgz#dcdbc7fe7e905690fb449bed1c251769f325c55d" + integrity sha512-hKoGSM+7aAc7eRTRjpqAZucPmoNOC4UUbknb/VNoTkEIkCPhqV8LfbsgM1webRM7S/z21eHEx9Fkwx8Z/C/+Xw== + dependencies: + state-local "^1.0.6" + +"@monaco-editor/react@^4.7.0": + version "4.7.0" + resolved "https://registry.yarnpkg.com/@monaco-editor/react/-/react-4.7.0.tgz#35a1ec01bfe729f38bfc025df7b7bac145602a60" + integrity sha512-cyzXQCtO47ydzxpQtCGSQGOC8Gk3ZUeBXFAxD+CWXYFo5OqZyZUonFl0DwUlTyAfRHntBfw2p3w4s9R6oe1eCA== + dependencies: + "@monaco-editor/loader" "^1.5.0" + +"@mui/base@5.0.0-beta.40-1": + version "5.0.0-beta.40-1" + resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.40-1.tgz#6da6229e5e675e811f319149f6e29d7a77522851" + integrity sha512-agKXuNNy0bHUmeU7pNmoZwNFr7Hiyhojkb9+2PVyDG5+6RafYuyMgbrav8CndsB7KUc/U51JAw9vKNDLYBzaUA== + dependencies: + "@babel/runtime" 
"^7.23.9" + "@floating-ui/react-dom" "^2.0.8" + "@mui/types" "~7.2.15" + "@mui/utils" "^5.17.1" + "@popperjs/core" "^2.11.8" + clsx "^2.1.0" + prop-types "^15.8.1" + +"@mui/core-downloads-tracker@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.17.1.tgz#49b88ecb68b800431b5c2f2bfb71372d1f1478fa" + integrity sha512-OcZj+cs6EfUD39IoPBOgN61zf1XFVY+imsGoBDwXeSq2UHJZE3N59zzBOVjclck91Ne3e9gudONOeILvHCIhUA== + +"@mui/icons-material@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-5.17.1.tgz#2b14832473d4d3738d8194665af359377eb91752" + integrity sha512-CN86LocjkunFGG0yPlO4bgqHkNGgaEOEc3X/jG5Bzm401qYw79/SaLrofA7yAKCCXAGdIGnLoMHohc3+ubs95A== + dependencies: + "@babel/runtime" "^7.23.9" + +"@mui/lab@^5.0.0-alpha.176": + version "5.0.0-alpha.176" + resolved "https://registry.yarnpkg.com/@mui/lab/-/lab-5.0.0-alpha.176.tgz#4e6101c8224d896d66588b08b9b7883408a0ecc3" + integrity sha512-DcZt1BAz4CDMUFGUvKqRh6W0sehmPj5luVHPx4vzSNnXj8xFvOdHwvNZ0bzNXy/Ol+81bkxcHQoIG2VOJuLnbw== + dependencies: + "@babel/runtime" "^7.23.9" + "@mui/base" "5.0.0-beta.40-1" + "@mui/system" "^5.17.1" + "@mui/types" "~7.2.15" + "@mui/utils" "^5.17.1" + clsx "^2.1.0" + prop-types "^15.8.1" + +"@mui/material@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.17.1.tgz#596f542a51fc74db75da2df66565b4874ce4049d" + integrity sha512-2B33kQf+GmPnrvXXweWAx+crbiUEsxCdCN979QDYnlH9ox4pd+0/IBriWLV+l6ORoBF60w39cWjFnJYGFdzXcw== + dependencies: + "@babel/runtime" "^7.23.9" + "@mui/core-downloads-tracker" "^5.17.1" + "@mui/system" "^5.17.1" + "@mui/types" "~7.2.15" + "@mui/utils" "^5.17.1" + "@popperjs/core" "^2.11.8" + "@types/react-transition-group" "^4.4.10" + clsx "^2.1.0" + csstype "^3.1.3" + prop-types "^15.8.1" + react-is "^19.0.0" + react-transition-group "^4.4.5" + +"@mui/private-theming@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.17.1.tgz#b4b6fbece27830754ef78186e3f1307dca42f295" + integrity sha512-XMxU0NTYcKqdsG8LRmSoxERPXwMbp16sIXPcLVgLGII/bVNagX0xaheWAwFv8+zDK7tI3ajllkuD3GZZE++ICQ== + dependencies: + "@babel/runtime" "^7.23.9" + "@mui/utils" "^5.17.1" + prop-types "^15.8.1" + +"@mui/styled-engine@^5.16.14": + version "5.16.14" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.16.14.tgz#f90fef5b4f8ebf11d48e1b1df8854a45bb31a9f5" + integrity sha512-UAiMPZABZ7p8mUW4akDV6O7N3+4DatStpXMZwPlt+H/dA0lt67qawN021MNND+4QTpjaiMYxbhKZeQcyWCbuKw== + dependencies: + "@babel/runtime" "^7.23.9" + "@emotion/cache" "^11.13.5" + csstype "^3.1.3" + prop-types "^15.8.1" + +"@mui/system@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.17.1.tgz#1f987cce91bf738545a8cf5f99152cd2728e6077" + integrity sha512-aJrmGfQpyF0U4D4xYwA6ueVtQcEMebET43CUmKMP7e7iFh3sMIF3sBR0l8Urb4pqx1CBjHAaWgB0ojpND4Q3Jg== + dependencies: + "@babel/runtime" "^7.23.9" + "@mui/private-theming" "^5.17.1" + "@mui/styled-engine" "^5.16.14" + "@mui/types" "~7.2.15" + "@mui/utils" "^5.17.1" + clsx "^2.1.0" + csstype "^3.1.3" + prop-types "^15.8.1" + +"@mui/types@~7.2.15", "@mui/types@~7.2.24": + version "7.2.24" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.24.tgz#5eff63129d9c29d80bbf2d2e561bd0690314dec2" + integrity 
sha512-3c8tRt/CbWZ+pEg7QpSwbdxOk36EfmhbKf6AGZsD1EcLDLTSZoxxJ86FVtcjxvjuhdyBiWKSTGZFaXCnidO2kw== + +"@mui/utils@^5.16.6 || ^6.0.0 || ^7.0.0 || ^7.0.0-beta": + version "6.4.8" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-6.4.8.tgz#f80ee0c0ac47f1cd47c2031a5fb87243322b6bf3" + integrity sha512-C86gfiZ5BfZ51KqzqoHi1WuuM2QdSKoFhbkZeAfQRB+jCc4YNhhj11UXFVMMsqBgZ+Zy8IHNJW3M9Wj/LOwRXQ== + dependencies: + "@babel/runtime" "^7.26.0" + "@mui/types" "~7.2.24" + "@types/prop-types" "^15.7.14" + clsx "^2.1.1" + prop-types "^15.8.1" + react-is "^19.0.0" + +"@mui/utils@^5.17.1": + version "5.17.1" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.17.1.tgz#72ba4ffa79f7bdf69d67458139390f18484b6e6b" + integrity sha512-jEZ8FTqInt2WzxDV8bhImWBqeQRD99c/id/fq83H0ER9tFl+sfZlaAoCdznGvbSQQ9ividMxqSV2c7cC1vBcQg== + dependencies: + "@babel/runtime" "^7.23.9" + "@mui/types" "~7.2.15" + "@types/prop-types" "^15.7.12" + clsx "^2.1.1" + prop-types "^15.8.1" + react-is "^19.0.0" + +"@mui/x-data-grid@^7.28.1": + version "7.28.1" + resolved "https://registry.yarnpkg.com/@mui/x-data-grid/-/x-data-grid-7.28.1.tgz#683eac24dd932b1800637639d5e0e6c1c4477bfc" + integrity sha512-uDJcjRB7zfRoquZb4G8iw0NWbhziVVPsHisi/EIzvOPHP+a1ZUnG0bLEnY+cy6eEwDrO1dNzYpwGFCcjl8ZKfA== + dependencies: + "@babel/runtime" "^7.25.7" + "@mui/utils" "^5.16.6 || ^6.0.0 || ^7.0.0 || ^7.0.0-beta" + "@mui/x-internals" "7.28.0" + clsx "^2.1.1" + prop-types "^15.8.1" + reselect "^5.1.1" + use-sync-external-store "^1.0.0" + +"@mui/x-date-pickers@^7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@mui/x-date-pickers/-/x-date-pickers-7.28.0.tgz#1daa089722b7b3b7458ad9af1ef39ae5ec9a9918" + integrity sha512-m1bfkZLOw3cMogeh6q92SjykVmLzfptnz3ZTgAlFKV7UBnVFuGUITvmwbgTZ1Mz3FmLVnGUQYUpZWw0ZnoghNA== + dependencies: + "@babel/runtime" "^7.25.7" + "@mui/utils" "^5.16.6 || ^6.0.0 || ^7.0.0 || ^7.0.0-beta" + "@mui/x-internals" "7.28.0" + "@types/react-transition-group" "^4.4.11" + clsx "^2.1.1" + prop-types "^15.8.1" + react-transition-group "^4.4.5" + +"@mui/x-internals@7.28.0": + version "7.28.0" + resolved "https://registry.yarnpkg.com/@mui/x-internals/-/x-internals-7.28.0.tgz#b0a04f4c0f53f2f91d13a46f357f731b77c832c5" + integrity sha512-p4GEp/09bLDumktdIMiw+OF4p+pJOOjTG0VUvzNxjbHB9GxbBKoMcHrmyrURqoBnQpWIeFnN/QAoLMFSpfwQbw== + dependencies: + "@babel/runtime" "^7.25.7" + "@mui/utils" "^5.16.6 || ^6.0.0 || ^7.0.0 || ^7.0.0-beta" + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + +"@nodelib/fs.walk@^1.2.3", "@nodelib/fs.walk@^1.2.8": + version "1.2.8" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== + dependencies: + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" + 
+"@parcel/watcher-android-arm64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz#e32d3dda6647791ee930556aee206fcd5ea0fb7a" + integrity sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ== + +"@parcel/watcher-darwin-arm64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz#0d9e680b7e9ec1c8f54944f1b945aa8755afb12f" + integrity sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw== + +"@parcel/watcher-darwin-x64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz#f9f1d5ce9d5878d344f14ef1856b7a830c59d1bb" + integrity sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA== + +"@parcel/watcher-freebsd-x64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz#2b77f0c82d19e84ff4c21de6da7f7d096b1a7e82" + integrity sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw== + +"@parcel/watcher-linux-arm-glibc@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz#92ed322c56dbafa3d2545dcf2803334aee131e42" + integrity sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA== + +"@parcel/watcher-linux-arm-musl@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz#cd48e9bfde0cdbbd2ecd9accfc52967e22f849a4" + integrity sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA== + +"@parcel/watcher-linux-arm64-glibc@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz#7b81f6d5a442bb89fbabaf6c13573e94a46feb03" + integrity sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA== + +"@parcel/watcher-linux-arm64-musl@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz#dcb8ff01077cdf59a18d9e0a4dff7a0cfe5fd732" + integrity sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q== + +"@parcel/watcher-linux-x64-glibc@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz#2e254600fda4e32d83942384d1106e1eed84494d" + integrity sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw== + +"@parcel/watcher-linux-x64-musl@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz#01fcea60fedbb3225af808d3f0a7b11229792eef" + integrity sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA== + +"@parcel/watcher-win32-arm64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz#87cdb16e0783e770197e52fb1dc027bb0c847154" + integrity 
sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig== + +"@parcel/watcher-win32-ia32@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz#778c39b56da33e045ba21c678c31a9f9d7c6b220" + integrity sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA== + +"@parcel/watcher-win32-x64@2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz#33873876d0bbc588aacce38e90d1d7480ce81cb7" + integrity sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw== + +"@parcel/watcher@^2.4.1": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@parcel/watcher/-/watcher-2.5.0.tgz#5c88818b12b8de4307a9d3e6dc3e28eba0dfbd10" + integrity sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ== + dependencies: + detect-libc "^1.0.3" + is-glob "^4.0.3" + micromatch "^4.0.5" + node-addon-api "^7.0.0" + optionalDependencies: + "@parcel/watcher-android-arm64" "2.5.0" + "@parcel/watcher-darwin-arm64" "2.5.0" + "@parcel/watcher-darwin-x64" "2.5.0" + "@parcel/watcher-freebsd-x64" "2.5.0" + "@parcel/watcher-linux-arm-glibc" "2.5.0" + "@parcel/watcher-linux-arm-musl" "2.5.0" + "@parcel/watcher-linux-arm64-glibc" "2.5.0" + "@parcel/watcher-linux-arm64-musl" "2.5.0" + "@parcel/watcher-linux-x64-glibc" "2.5.0" + "@parcel/watcher-linux-x64-musl" "2.5.0" + "@parcel/watcher-win32-arm64" "2.5.0" + "@parcel/watcher-win32-ia32" "2.5.0" + "@parcel/watcher-win32-x64" "2.5.0" + +"@popperjs/core@^2.11.8": + version "2.11.8" + resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" + integrity sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A== + +"@reduxjs/toolkit@^2.6.1": + version "2.6.1" + resolved "https://registry.yarnpkg.com/@reduxjs/toolkit/-/toolkit-2.6.1.tgz#532ef3d3f1656461b421f0ba0a4fa1628163a0c5" + integrity sha512-SSlIqZNYhqm/oMkXbtofwZSt9lrncblzo6YcZ9zoX+zLngRBrCOjK4lNLdkNucJF58RHOWrD9txT3bT3piH7Zw== + dependencies: + immer "^10.0.3" + redux "^5.0.1" + redux-thunk "^3.1.0" + reselect "^5.1.0" + +"@remix-run/router@1.23.0": + version "1.23.0" + resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.23.0.tgz#35390d0e7779626c026b11376da6789eb8389242" + integrity sha512-O3rHJzAQKamUz1fvE0Qaw0xSFqsA/yafi2iqeE0pvdFtCO1viYx8QL6f3Ln/aCCTLxs68SLf0KPM9eSeM8yBnA== + +"@rollup/pluginutils@^5.1.3": + version "5.1.4" + resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-5.1.4.tgz#bb94f1f9eaaac944da237767cdfee6c5b2262d4a" + integrity sha512-USm05zrsFxYLPdWWq+K3STlWiT/3ELn3RcV5hJMghpeAIhxfsUIg6mt12CBJBInWMV4VneoV7SfGv8xIwo2qNQ== + dependencies: + "@types/estree" "^1.0.0" + estree-walker "^2.0.2" + picomatch "^4.0.2" + +"@rollup/rollup-android-arm-eabi@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.16.4.tgz#5e8930291f1e5ead7fb1171d53ba5c87718de062" + integrity sha512-GkhjAaQ8oUTOKE4g4gsZ0u8K/IHU1+2WQSgS1TwTcYvL+sjbaQjNHFXbOJ6kgqGHIO1DfUhI/Sphi9GkRT9K+Q== + +"@rollup/rollup-android-arm-eabi@4.28.1": + version "4.28.1" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.28.1.tgz#7f4c4d8cd5ccab6e95d6750dbe00321c1f30791e" + integrity sha512-2aZp8AES04KI2dy3Ss6/MDjXbwBzj+i0GqKtWXgw2/Ma6E4jJvujryO6gJAghIRVz7Vwr9Gtl/8na3nDUKpraQ== + +"@rollup/rollup-android-arm64@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.16.4.tgz#ffb84f1359c04ec8a022a97110e18a5600f5f638" + integrity sha512-Bvm6D+NPbGMQOcxvS1zUl8H7DWlywSXsphAeOnVeiZLQ+0J6Is8T7SrjGTH29KtYkiY9vld8ZnpV3G2EPbom+w== + +"@rollup/rollup-android-arm64@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.28.1.tgz#17ea71695fb1518c2c324badbe431a0bd1879f2d" + integrity sha512-EbkK285O+1YMrg57xVA+Dp0tDBRB93/BZKph9XhMjezf6F4TpYjaUSuPt5J0fZXlSag0LmZAsTmdGGqPp4pQFA== + +"@rollup/rollup-darwin-arm64@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.16.4.tgz#b2fcee8d4806a0b1b9185ac038cc428ddedce9f4" + integrity sha512-i5d64MlnYBO9EkCOGe5vPR/EeDwjnKOGGdd7zKFhU5y8haKhQZTN2DgVtpODDMxUr4t2K90wTUJg7ilgND6bXw== + +"@rollup/rollup-darwin-arm64@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.28.1.tgz#dac0f0d0cfa73e7d5225ae6d303c13c8979e7999" + integrity sha512-prduvrMKU6NzMq6nxzQw445zXgaDBbMQvmKSJaxpaZ5R1QDM8w+eGxo6Y/jhT/cLoCvnZI42oEqf9KQNYz1fqQ== + +"@rollup/rollup-darwin-x64@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.16.4.tgz#fcb25ccbaa3dd33a6490e9d1c64bab2e0e16b932" + integrity sha512-WZupV1+CdUYehaZqjaFTClJI72fjJEgTXdf4NbW69I9XyvdmztUExBtcI2yIIU6hJtYvtwS6pkTkHJz+k08mAQ== + +"@rollup/rollup-darwin-x64@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.28.1.tgz#8f63baa1d31784904a380d2e293fa1ddf53dd4a2" + integrity sha512-WsvbOunsUk0wccO/TV4o7IKgloJ942hVFK1CLatwv6TJspcCZb9umQkPdvB7FihmdxgaKR5JyxDjWpCOp4uZlQ== + +"@rollup/rollup-freebsd-arm64@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.28.1.tgz#30ed247e0df6e8858cdc6ae4090e12dbeb8ce946" + integrity sha512-HTDPdY1caUcU4qK23FeeGxCdJF64cKkqajU0iBnTVxS8F7H/7BewvYoG+va1KPSL63kQ1PGNyiwKOfReavzvNA== + +"@rollup/rollup-freebsd-x64@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.28.1.tgz#57846f382fddbb508412ae07855b8a04c8f56282" + integrity sha512-m/uYasxkUevcFTeRSM9TeLyPe2QDuqtjkeoTpP9SW0XxUWfcYrGDMkO/m2tTw+4NMAF9P2fU3Mw4ahNvo7QmsQ== + +"@rollup/rollup-linux-arm-gnueabihf@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.16.4.tgz#40d46bdfe667e5eca31bf40047460e326d2e26bb" + integrity sha512-ADm/xt86JUnmAfA9mBqFcRp//RVRt1ohGOYF6yL+IFCYqOBNwy5lbEK05xTsEoJq+/tJzg8ICUtS82WinJRuIw== + +"@rollup/rollup-linux-arm-gnueabihf@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.28.1.tgz#378ca666c9dae5e6f94d1d351e7497c176e9b6df" + integrity sha512-QAg11ZIt6mcmzpNE6JZBpKfJaKkqTm1A9+y9O+frdZJEuhQxiugM05gnCWiANHj4RmbgeVJpTdmKRmH/a+0QbA== + +"@rollup/rollup-linux-arm-musleabihf@4.16.4": + 
version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.16.4.tgz#7741df2448c11c56588b50835dbfe91b1a10b375" + integrity sha512-tJfJaXPiFAG+Jn3cutp7mCs1ePltuAgRqdDZrzb1aeE3TktWWJ+g7xK9SNlaSUFw6IU4QgOxAY4rA+wZUT5Wfg== + +"@rollup/rollup-linux-arm-musleabihf@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.28.1.tgz#a692eff3bab330d5c33a5d5813a090c15374cddb" + integrity sha512-dRP9PEBfolq1dmMcFqbEPSd9VlRuVWEGSmbxVEfiq2cs2jlZAl0YNxFzAQS2OrQmsLBLAATDMb3Z6MFv5vOcXg== + +"@rollup/rollup-linux-arm64-gnu@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.16.4.tgz#0a23b02d2933e4c4872ad18d879890b6a4a295df" + integrity sha512-7dy1BzQkgYlUTapDTvK997cgi0Orh5Iu7JlZVBy1MBURk7/HSbHkzRnXZa19ozy+wwD8/SlpJnOOckuNZtJR9w== + +"@rollup/rollup-linux-arm64-gnu@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.28.1.tgz#6b1719b76088da5ac1ae1feccf48c5926b9e3db9" + integrity sha512-uGr8khxO+CKT4XU8ZUH1TTEUtlktK6Kgtv0+6bIFSeiSlnGJHG1tSFSjm41uQ9sAO/5ULx9mWOz70jYLyv1QkA== + +"@rollup/rollup-linux-arm64-musl@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.16.4.tgz#e37ef259358aa886cc07d782220a4fb83c1e6970" + integrity sha512-zsFwdUw5XLD1gQe0aoU2HVceI6NEW7q7m05wA46eUAyrkeNYExObfRFQcvA6zw8lfRc5BHtan3tBpo+kqEOxmg== + +"@rollup/rollup-linux-arm64-musl@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.28.1.tgz#865baf5b6f5ff67acb32e5a359508828e8dc5788" + integrity sha512-QF54q8MYGAqMLrX2t7tNpi01nvq5RI59UBNx+3+37zoKX5KViPo/gk2QLhsuqok05sSCRluj0D00LzCwBikb0A== + +"@rollup/rollup-linux-loongarch64-gnu@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.28.1.tgz#23c6609ba0f7fa7a7f2038b6b6a08555a5055a87" + integrity sha512-vPul4uodvWvLhRco2w0GcyZcdyBfpfDRgNKU+p35AWEbJ/HPs1tOUrkSueVbBS0RQHAf/A+nNtDpvw95PeVKOA== + +"@rollup/rollup-linux-powerpc64le-gnu@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.16.4.tgz#8c69218b6de05ee2ba211664a2d2ac1e54e43f94" + integrity sha512-p8C3NnxXooRdNrdv6dBmRTddEapfESEUflpICDNKXpHvTjRRq1J82CbU5G3XfebIZyI3B0s074JHMWD36qOW6w== + +"@rollup/rollup-linux-powerpc64le-gnu@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.28.1.tgz#652ef0d9334a9f25b9daf85731242801cb0fc41c" + integrity sha512-pTnTdBuC2+pt1Rmm2SV7JWRqzhYpEILML4PKODqLz+C7Ou2apEV52h19CR7es+u04KlqplggmN9sqZlekg3R1A== + +"@rollup/rollup-linux-riscv64-gnu@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.16.4.tgz#d32727dab8f538d9a4a7c03bcf58c436aecd0139" + integrity sha512-Lh/8ckoar4s4Id2foY7jNgitTOUQczwMWNYi+Mjt0eQ9LKhr6sK477REqQkmy8YHY3Ca3A2JJVdXnfb3Rrwkng== + +"@rollup/rollup-linux-riscv64-gnu@4.28.1": + version "4.28.1" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.28.1.tgz#1eb6651839ee6ebca64d6cc64febbd299e95e6bd" + integrity sha512-vWXy1Nfg7TPBSuAncfInmAI/WZDd5vOklyLJDdIRKABcZWojNDY0NJwruY2AcnCLnRJKSaBgf/GiJfauu8cQZA== + +"@rollup/rollup-linux-s390x-gnu@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.16.4.tgz#d46097246a187d99fc9451fe8393b7155b47c5ec" + integrity sha512-1xwwn9ZCQYuqGmulGsTZoKrrn0z2fAur2ujE60QgyDpHmBbXbxLaQiEvzJWDrscRq43c8DnuHx3QorhMTZgisQ== + +"@rollup/rollup-linux-s390x-gnu@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.28.1.tgz#015c52293afb3ff2a293cf0936b1d43975c1e9cd" + integrity sha512-/yqC2Y53oZjb0yz8PVuGOQQNOTwxcizudunl/tFs1aLvObTclTwZ0JhXF2XcPT/zuaymemCDSuuUPXJJyqeDOg== + +"@rollup/rollup-linux-x64-gnu@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.16.4.tgz#6356c5a03a4afb1c3057490fc51b4764e109dbc7" + integrity sha512-LuOGGKAJ7dfRtxVnO1i3qWc6N9sh0Em/8aZ3CezixSTM+E9Oq3OvTsvC4sm6wWjzpsIlOCnZjdluINKESflJLA== + +"@rollup/rollup-linux-x64-gnu@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.28.1.tgz#b83001b5abed2bcb5e2dbeec6a7e69b194235c1e" + integrity sha512-fzgeABz7rrAlKYB0y2kSEiURrI0691CSL0+KXwKwhxvj92VULEDQLpBYLHpF49MSiPG4sq5CK3qHMnb9tlCjBw== + +"@rollup/rollup-linux-x64-musl@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.16.4.tgz#03a5831a9c0d05877b94653b5ddd3020d3c6fb06" + integrity sha512-ch86i7KkJKkLybDP2AtySFTRi5fM3KXp0PnHocHuJMdZwu7BuyIKi35BE9guMlmTpwwBTB3ljHj9IQXnTCD0vA== + +"@rollup/rollup-linux-x64-musl@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.28.1.tgz#6cc7c84cd4563737f8593e66f33b57d8e228805b" + integrity sha512-xQTDVzSGiMlSshpJCtudbWyRfLaNiVPXt1WgdWTwWz9n0U12cI2ZVtWe/Jgwyv/6wjL7b66uu61Vg0POWVfz4g== + +"@rollup/rollup-win32-arm64-msvc@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.16.4.tgz#6cc0db57750376b9303bdb6f5482af8974fcae35" + integrity sha512-Ma4PwyLfOWZWayfEsNQzTDBVW8PZ6TUUN1uFTBQbF2Chv/+sjenE86lpiEwj2FiviSmSZ4Ap4MaAfl1ciF4aSA== + +"@rollup/rollup-win32-arm64-msvc@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.28.1.tgz#631ffeee094d71279fcd1fe8072bdcf25311bc11" + integrity sha512-wSXmDRVupJstFP7elGMgv+2HqXelQhuNf+IS4V+nUpNVi/GUiBgDmfwD0UGN3pcAnWsgKG3I52wMOBnk1VHr/A== + +"@rollup/rollup-win32-ia32-msvc@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.16.4.tgz#aea0b7e492bd9ed46971cb80bc34f1eb14e07789" + integrity sha512-9m/ZDrQsdo/c06uOlP3W9G2ENRVzgzbSXmXHT4hwVaDQhYcRpi9bgBT0FTG9OhESxwK0WjQxYOSfv40cU+T69w== + +"@rollup/rollup-win32-ia32-msvc@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.28.1.tgz#06d1d60d5b9f718e8a6c4a43f82e3f9e3254587f" + integrity sha512-ZkyTJ/9vkgrE/Rk9vhMXhf8l9D+eAhbAVbsGsXKy2ohmJaWg0LPQLnIxRdRp/bKyr8tXuPlXhIoGlEB5XpJnGA== + 
+"@rollup/rollup-win32-x64-msvc@4.16.4": + version "4.16.4" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.16.4.tgz#c09ad9a132ccb5a67c4f211d909323ab1294f95f" + integrity sha512-YunpoOAyGLDseanENHmbFvQSfVL5BxW3k7hhy0eN4rb3gS/ct75dVD0EXOWIqFT/nE8XYW6LP6vz6ctKRi0k9A== + +"@rollup/rollup-win32-x64-msvc@4.28.1": + version "4.28.1" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.28.1.tgz#4dff5c4259ebe6c5b4a8f2c5bc3829b7a8447ff0" + integrity sha512-ZvK2jBafvttJjoIdKm/Q/Bh7IJ1Ose9IBOwpOXcOvW3ikGTQGmKDgxTC6oCAzW6PynbkKP8+um1du81XJHZ0JA== + +"@rtk-query/codegen-openapi@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@rtk-query/codegen-openapi/-/codegen-openapi-1.2.0.tgz#2c63cbbd80382c4ba6c9fab5e9004efb43637de3" + integrity sha512-Sru3aPHyFC0Tb7jeFh/kVMGBdQUcofb9frrHhjNSRLEoJWsG9fjaioUx3nPT5HZVbdAvAFF4xMWFQNfgJBrAGw== + dependencies: + "@apidevtools/swagger-parser" "^10.0.2" + commander "^6.2.0" + oazapfts "^4.8.0" + prettier "^2.2.1" + semver "^7.3.5" + swagger2openapi "^7.0.4" + typescript "^5.0.0" + +"@sinclair/typebox@^0.27.8": + version "0.27.8" + resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" + integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== + +"@svgr/babel-plugin-add-jsx-attribute@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz#4001f5d5dd87fa13303e36ee106e3ff3a7eb8b22" + integrity sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g== + +"@svgr/babel-plugin-remove-jsx-attribute@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz#69177f7937233caca3a1afb051906698f2f59186" + integrity sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA== + +"@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz#c2c48104cfd7dcd557f373b70a56e9e3bdae1d44" + integrity sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA== + +"@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz#8fbb6b2e91fa26ac5d4aa25c6b6e4f20f9c0ae27" + integrity sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ== + +"@svgr/babel-plugin-svg-dynamic-title@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz#1d5ba1d281363fc0f2f29a60d6d936f9bbc657b0" + integrity sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og== + +"@svgr/babel-plugin-svg-em-dimensions@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz#35e08df300ea8b1d41cb8f62309c241b0369e501" + integrity 
sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g== + +"@svgr/babel-plugin-transform-react-native-svg@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz#90a8b63998b688b284f255c6a5248abd5b28d754" + integrity sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q== + +"@svgr/babel-plugin-transform-svg-component@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz#013b4bfca88779711f0ed2739f3f7efcefcf4f7e" + integrity sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw== + +"@svgr/babel-preset@8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-8.1.0.tgz#0e87119aecdf1c424840b9d4565b7137cabf9ece" + integrity sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug== + dependencies: + "@svgr/babel-plugin-add-jsx-attribute" "8.0.0" + "@svgr/babel-plugin-remove-jsx-attribute" "8.0.0" + "@svgr/babel-plugin-remove-jsx-empty-expression" "8.0.0" + "@svgr/babel-plugin-replace-jsx-attribute-value" "8.0.0" + "@svgr/babel-plugin-svg-dynamic-title" "8.0.0" + "@svgr/babel-plugin-svg-em-dimensions" "8.0.0" + "@svgr/babel-plugin-transform-react-native-svg" "8.1.0" + "@svgr/babel-plugin-transform-svg-component" "8.0.0" + +"@svgr/core@^8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/core/-/core-8.1.0.tgz#41146f9b40b1a10beaf5cc4f361a16a3c1885e88" + integrity sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA== + dependencies: + "@babel/core" "^7.21.3" + "@svgr/babel-preset" "8.1.0" + camelcase "^6.2.0" + cosmiconfig "^8.1.3" + snake-case "^3.0.4" + +"@svgr/hast-util-to-babel-ast@8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz#6952fd9ce0f470e1aded293b792a2705faf4ffd4" + integrity sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q== + dependencies: + "@babel/types" "^7.21.3" + entities "^4.4.0" + +"@svgr/plugin-jsx@^8.1.0": + version "8.1.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz#96969f04a24b58b174ee4cd974c60475acbd6928" + integrity sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA== + dependencies: + "@babel/core" "^7.21.3" + "@svgr/babel-preset" "8.1.0" + "@svgr/hast-util-to-babel-ast" "8.0.0" + svg-parser "^2.0.4" + +"@swc/core-darwin-arm64@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-arm64/-/core-darwin-arm64-1.11.11.tgz#e4b5fc99bab657f8f72217fd4976956faf4132b3" + integrity sha512-vJcjGVDB8cZH7zyOkC0AfpFYI/7GHKG0NSsH3tpuKrmoAXJyCYspKPGid7FT53EAlWreN7+Pew+bukYf5j+Fmg== + +"@swc/core-darwin-x64@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-x64/-/core-darwin-x64-1.11.11.tgz#0f4e810a2cd9c2993a7ccc3b38d1f92ef49894d8" + integrity sha512-/N4dGdqEYvD48mCF3QBSycAbbQd3yoZ2YHSzYesQf8usNc2YpIhYqEH3sql02UsxTjEFOJSf1bxZABDdhbSl6A== + +"@swc/core-linux-arm-gnueabihf@1.11.11": + version "1.11.11" + resolved 
"https://registry.yarnpkg.com/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.11.11.tgz#72b4b1e403bca37f051fd194eb0518cda83fad9f" + integrity sha512-hsBhKK+wVXdN3x9MrL5GW0yT8o9GxteE5zHAI2HJjRQel3HtW7m5Nvwaq+q8rwMf4YQRd8ydbvwl4iUOZx7i2Q== + +"@swc/core-linux-arm64-gnu@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.11.11.tgz#ea87e183ec53db9e121cca581cef538e9652193f" + integrity sha512-YOCdxsqbnn/HMPCNM6nrXUpSndLXMUssGTtzT7ffXqr7WuzRg2e170FVDVQFIkb08E7Ku5uOnnUVAChAJQbMOQ== + +"@swc/core-linux-arm64-musl@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.11.11.tgz#33db0f45b2286bbca9baf2ed84d1f2405c657600" + integrity sha512-nR2tfdQRRzwqR2XYw9NnBk9Fdvff/b8IiJzDL28gRR2QiJWLaE8LsRovtWrzCOYq6o5Uu9cJ3WbabWthLo4jLw== + +"@swc/core-linux-x64-gnu@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.11.11.tgz#4a1fe41baa968008bb0fffc7754fd6ee824e76e1" + integrity sha512-b4gBp5HA9xNWNC5gsYbdzGBJWx4vKSGybGMGOVWWuF+ynx10+0sA/o4XJGuNHm8TEDuNh9YLKf6QkIO8+GPJ1g== + +"@swc/core-linux-x64-musl@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.11.11.tgz#972d3530d740b3681191590ee08bb9ab7bb6706d" + integrity sha512-dEvqmQVswjNvMBwXNb8q5uSvhWrJLdttBSef3s6UC5oDSwOr00t3RQPzyS3n5qmGJ8UMTdPRmsopxmqaODISdg== + +"@swc/core-win32-arm64-msvc@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.11.11.tgz#179846f1f9e3e806a4bf6d8f35af97f577c1a0b3" + integrity sha512-aZNZznem9WRnw2FbTqVpnclvl8Q2apOBW2B316gZK+qxbe+ktjOUnYaMhdCG3+BYggyIBDOnaJeQrXbKIMmNdw== + +"@swc/core-win32-ia32-msvc@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.11.11.tgz#b098b72c1b45e237a9598b7b5e83e6c5ecb9ac69" + integrity sha512-DjeJn/IfjgOddmJ8IBbWuDK53Fqw7UvOz7kyI/728CSdDYC3LXigzj3ZYs4VvyeOt+ZcQZUB2HA27edOifomGw== + +"@swc/core-win32-x64-msvc@1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.11.11.tgz#1d5610c585b903b8c1f4a452725d77ac96f27e84" + integrity sha512-Gp/SLoeMtsU4n0uRoKDOlGrRC6wCfifq7bqLwSlAG8u8MyJYJCcwjg7ggm0rhLdC2vbiZ+lLVl3kkETp+JUvKg== + +"@swc/core@^1.11.11": + version "1.11.11" + resolved "https://registry.yarnpkg.com/@swc/core/-/core-1.11.11.tgz#bac3256d7a113f0dd6965206cf428e826981cf0d" + integrity sha512-pCVY2Wn6dV/labNvssk9b3Owi4WOYsapcbWm90XkIj4xH/56Z6gzja9fsU+4MdPuEfC2Smw835nZHcdCFGyX6A== + dependencies: + "@swc/counter" "^0.1.3" + "@swc/types" "^0.1.19" + optionalDependencies: + "@swc/core-darwin-arm64" "1.11.11" + "@swc/core-darwin-x64" "1.11.11" + "@swc/core-linux-arm-gnueabihf" "1.11.11" + "@swc/core-linux-arm64-gnu" "1.11.11" + "@swc/core-linux-arm64-musl" "1.11.11" + "@swc/core-linux-x64-gnu" "1.11.11" + "@swc/core-linux-x64-musl" "1.11.11" + "@swc/core-win32-arm64-msvc" "1.11.11" + "@swc/core-win32-ia32-msvc" "1.11.11" + "@swc/core-win32-x64-msvc" "1.11.11" + +"@swc/counter@^0.1.3": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@swc/counter/-/counter-0.1.3.tgz#cc7463bd02949611c6329596fccd2b0ec782b0e9" + integrity sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ== + 
+"@swc/types@^0.1.19": + version "0.1.19" + resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.19.tgz#65d9fe81e0a1dc7e861ad698dd581abe3703a2d2" + integrity sha512-WkAZaAfj44kh/UFdAQcrMP1I0nwRqpt27u+08LMBYMqmQfwwMofYoMh/48NGkMMRfC4ynpfwRbJuu8ErfNloeA== + dependencies: + "@swc/counter" "^0.1.3" + +"@tanstack/match-sorter-utils@8.19.4": + version "8.19.4" + resolved "https://registry.yarnpkg.com/@tanstack/match-sorter-utils/-/match-sorter-utils-8.19.4.tgz#dacf772b5d94f4684f10dbeb2518cf72dccab8a5" + integrity sha512-Wo1iKt2b9OT7d+YGhvEPD3DXvPv2etTusIMhMUoG7fbhmxcXCtIjJDEygy91Y2JFlwGyjqiBPRozme7UD8hoqg== + dependencies: + remove-accents "0.5.0" + +"@tanstack/react-table@8.20.5": + version "8.20.5" + resolved "https://registry.yarnpkg.com/@tanstack/react-table/-/react-table-8.20.5.tgz#19987d101e1ea25ef5406dce4352cab3932449d8" + integrity sha512-WEHopKw3znbUZ61s9i0+i9g8drmDo6asTWbrQh8Us63DAk/M0FkmIqERew6P71HI75ksZ2Pxyuf4vvKh9rAkiA== + dependencies: + "@tanstack/table-core" "8.20.5" + +"@tanstack/react-virtual@3.10.6": + version "3.10.6" + resolved "https://registry.yarnpkg.com/@tanstack/react-virtual/-/react-virtual-3.10.6.tgz#f90f97d50a8d83dcd3c3a2d425aadbb55d4837db" + integrity sha512-xaSy6uUxB92O8mngHZ6CvbhGuqxQ5lIZWCBy+FjhrbHmOwc6BnOnKkYm2FsB1/BpKw/+FVctlMbEtI+F6I1aJg== + dependencies: + "@tanstack/virtual-core" "3.10.6" + +"@tanstack/table-core@8.20.5": + version "8.20.5" + resolved "https://registry.yarnpkg.com/@tanstack/table-core/-/table-core-8.20.5.tgz#3974f0b090bed11243d4107283824167a395cf1d" + integrity sha512-P9dF7XbibHph2PFRz8gfBKEXEY/HJPOhym8CHmjF8y3q5mWpKx9xtZapXQUWCgkqvsK0R46Azuz+VaxD4Xl+Tg== + +"@tanstack/virtual-core@3.10.6": + version "3.10.6" + resolved "https://registry.yarnpkg.com/@tanstack/virtual-core/-/virtual-core-3.10.6.tgz#babe3989b2344a5f12fc64129f9bbed5d3402999" + integrity sha512-1giLc4dzgEKLMx5pgKjL6HlG5fjZMgCjzlKAlpr7yoUtetVPELgER1NtephAI910nMwfPTHNyWKSFmJdHkz2Cw== + +"@testing-library/dom@^10.0.0": + version "10.1.0" + resolved "https://registry.yarnpkg.com/@testing-library/dom/-/dom-10.1.0.tgz#2d073e49771ad614da999ca48f199919e5176fb6" + integrity sha512-wdsYKy5zupPyLCW2Je5DLHSxSfbIp6h80WoHOQc+RPtmPGA52O9x5MJEkv92Sjonpq+poOAtUKhh1kBGAXBrNA== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/runtime" "^7.12.5" + "@types/aria-query" "^5.0.1" + aria-query "5.3.0" + chalk "^4.1.0" + dom-accessibility-api "^0.5.9" + lz-string "^1.5.0" + pretty-format "^27.0.2" + +"@testing-library/dom@^10.4.0": + version "10.4.0" + resolved "https://registry.yarnpkg.com/@testing-library/dom/-/dom-10.4.0.tgz#82a9d9462f11d240ecadbf406607c6ceeeff43a8" + integrity sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/runtime" "^7.12.5" + "@types/aria-query" "^5.0.1" + aria-query "5.3.0" + chalk "^4.1.0" + dom-accessibility-api "^0.5.9" + lz-string "^1.5.0" + pretty-format "^27.0.2" + +"@testing-library/jest-dom@^6.6.3": + version "6.6.3" + resolved "https://registry.yarnpkg.com/@testing-library/jest-dom/-/jest-dom-6.6.3.tgz#26ba906cf928c0f8172e182c6fe214eb4f9f2bd2" + integrity sha512-IteBhl4XqYNkM54f4ejhLRJiZNqcSCoXUOG2CPK7qbD322KjQozM4kHQOfkG2oln9b9HTYqs+Sae8vBATubxxA== + dependencies: + "@adobe/css-tools" "^4.4.0" + aria-query "^5.0.0" + chalk "^3.0.0" + css.escape "^1.5.1" + dom-accessibility-api "^0.6.3" + lodash "^4.17.21" + redent "^3.0.0" + +"@testing-library/react@^15.0.7": + version 
"15.0.7" + resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-15.0.7.tgz#ff733ce0893c875cb5a47672e8e772897128f4ae" + integrity sha512-cg0RvEdD1TIhhkm1IeYMQxrzy0MtUNfa3minv4MjbgcYzJAZ7yD0i0lwoPOTPr+INtiXFezt2o8xMSnyHhEn2Q== + dependencies: + "@babel/runtime" "^7.12.5" + "@testing-library/dom" "^10.0.0" + "@types/react-dom" "^18.0.0" + +"@testing-library/user-event@^14.6.1": + version "14.6.1" + resolved "https://registry.yarnpkg.com/@testing-library/user-event/-/user-event-14.6.1.tgz#13e09a32d7a8b7060fe38304788ebf4197cd2149" + integrity sha512-vq7fv0rnt+QTXgPxr5Hjc210p6YKq2kmdziLgnsZGgLJ9e6VAShx1pACLuRjd/AS/sr7phAR58OIIpf0LlmQNw== + +"@types/aria-query@^5.0.1": + version "5.0.4" + resolved "https://registry.yarnpkg.com/@types/aria-query/-/aria-query-5.0.4.tgz#1a31c3d378850d2778dabb6374d036dcba4ba708" + integrity sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw== + +"@types/babel__core@^7.20.5": + version "7.20.5" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.5.tgz#3df15f27ba85319caa07ba08d0721889bb39c017" + integrity sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA== + dependencies: + "@babel/parser" "^7.20.7" + "@babel/types" "^7.20.7" + "@types/babel__generator" "*" + "@types/babel__template" "*" + "@types/babel__traverse" "*" + +"@types/babel__generator@*": + version "7.6.8" + resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.8.tgz#f836c61f48b1346e7d2b0d93c6dacc5b9535d3ab" + integrity sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw== + dependencies: + "@babel/types" "^7.0.0" + +"@types/babel__template@*": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.4.tgz#5672513701c1b2199bc6dad636a9d7491586766f" + integrity sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A== + dependencies: + "@babel/parser" "^7.1.0" + "@babel/types" "^7.0.0" + +"@types/babel__traverse@*": + version "7.20.5" + resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.5.tgz#7b7502be0aa80cc4ef22978846b983edaafcd4dd" + integrity sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ== + dependencies: + "@babel/types" "^7.20.7" + +"@types/estree@1.0.5", "@types/estree@^1.0.0": + version "1.0.5" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" + integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== + +"@types/estree@1.0.6": + version "1.0.6" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.6.tgz#628effeeae2064a1b4e79f78e81d87b7e5fc7b50" + integrity sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw== + +"@types/node@^20.17.25": + version "20.17.25" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.17.25.tgz#3135ad0af2b46a7689aa5ffb3ecafe1f50171a29" + integrity sha512-bT+r2haIlplJUYtlZrEanFHdPIZTeiMeh/fSOEbOOfWf9uTn+lg8g0KU6Q3iMgjd9FLuuMAgfCNSkjUbxL6E3Q== + dependencies: + undici-types "~6.19.2" + +"@types/parse-json@^4.0.0": + version "4.0.2" + resolved 
"https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.2.tgz#5950e50960793055845e956c427fc2b0d70c5239" + integrity sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw== + +"@types/prop-types@*": + version "15.7.12" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.12.tgz#12bb1e2be27293c1406acb6af1c3f3a1481d98c6" + integrity sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q== + +"@types/prop-types@^15.7.12", "@types/prop-types@^15.7.14": + version "15.7.14" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.14.tgz#1433419d73b2a7ebfc6918dcefd2ec0d5cd698f2" + integrity sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ== + +"@types/react-dom@^18.0.0": + version "18.3.0" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.3.0.tgz#0cbc818755d87066ab6ca74fbedb2547d74a82b0" + integrity sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg== + dependencies: + "@types/react" "*" + +"@types/react-dom@^18.3.5": + version "18.3.5" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.3.5.tgz#45f9f87398c5dcea085b715c58ddcf1faf65f716" + integrity sha512-P4t6saawp+b/dFrUr2cvkVsfvPguwsxtH6dNIYRllMsefqFzkZk5UIjzyDOv5g1dXIPdG4Sp1yCR4Z6RCUsG/Q== + +"@types/react-lazylog@^4.5.4": + version "4.5.4" + resolved "https://registry.yarnpkg.com/@types/react-lazylog/-/react-lazylog-4.5.4.tgz#dc1a7ad962538ce564f7c5f5aaa01af464bf020d" + integrity sha512-HYP+lVRyE0c+fGT+IGHMqzQS5X9I7oaQ3iZczor2MQyLUXyAZRv2AJoEcYjH1QNPDIc+vMBSteyuSuw8tkGJ5Q== + dependencies: + "@types/react" "*" + immutable ">=3.8.2" + +"@types/react-transition-group@^4.4.10": + version "4.4.10" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.10.tgz#6ee71127bdab1f18f11ad8fb3322c6da27c327ac" + integrity sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q== + dependencies: + "@types/react" "*" + +"@types/react-transition-group@^4.4.11": + version "4.4.12" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.12.tgz#b5d76568485b02a307238270bfe96cb51ee2a044" + integrity sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w== + +"@types/react@*": + version "18.2.79" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.2.79.tgz#c40efb4f255711f554d47b449f796d1c7756d865" + integrity sha512-RwGAGXPl9kSXwdNTafkOEuFrTBD5SA2B3iEB96xi8+xu5ddUa/cpvyVCSNn+asgLCTHkb5ZxN8gbuibYJi4s1w== + dependencies: + "@types/prop-types" "*" + csstype "^3.0.2" + +"@types/react@^18.3.19": + version "18.3.19" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.19.tgz#2b6a01315c9b1b644a8799a7d33efb027150240f" + integrity sha512-fcdJqaHOMDbiAwJnXv6XCzX0jDW77yI3tJqYh1Byn8EL5/S628WRx9b/y3DnNe55zTukUQKrfYxiZls2dHcUMw== + dependencies: + "@types/prop-types" "*" + csstype "^3.0.2" + +"@types/use-sync-external-store@^0.0.6": + version "0.0.6" + resolved "https://registry.yarnpkg.com/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz#60be8d21baab8c305132eb9cb912ed497852aadc" + integrity sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg== + 
+"@types/whatwg-streams@^0.0.7": + version "0.0.7" + resolved "https://registry.yarnpkg.com/@types/whatwg-streams/-/whatwg-streams-0.0.7.tgz#28bfe73dc850562296367249c4b32a50db81e9d3" + integrity sha512-6sDiSEP6DWcY2ZolsJ2s39ZmsoGQ7KVwBDI3sESQsEm9P2dHTcqnDIHRZFRNtLCzWp7hCFGqYbw5GyfpQnJ01A== + +"@typescript-eslint/eslint-plugin@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz#b16d3cf3ee76bf572fdf511e79c248bdec619ea3" + integrity sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw== + dependencies: + "@eslint-community/regexpp" "^4.10.0" + "@typescript-eslint/scope-manager" "7.18.0" + "@typescript-eslint/type-utils" "7.18.0" + "@typescript-eslint/utils" "7.18.0" + "@typescript-eslint/visitor-keys" "7.18.0" + graphemer "^1.4.0" + ignore "^5.3.1" + natural-compare "^1.4.0" + ts-api-utils "^1.3.0" + +"@typescript-eslint/parser@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-7.18.0.tgz#83928d0f1b7f4afa974098c64b5ce6f9051f96a0" + integrity sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg== + dependencies: + "@typescript-eslint/scope-manager" "7.18.0" + "@typescript-eslint/types" "7.18.0" + "@typescript-eslint/typescript-estree" "7.18.0" + "@typescript-eslint/visitor-keys" "7.18.0" + debug "^4.3.4" + +"@typescript-eslint/scope-manager@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz#c928e7a9fc2c0b3ed92ab3112c614d6bd9951c83" + integrity sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA== + dependencies: + "@typescript-eslint/types" "7.18.0" + "@typescript-eslint/visitor-keys" "7.18.0" + +"@typescript-eslint/type-utils@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz#2165ffaee00b1fbbdd2d40aa85232dab6998f53b" + integrity sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA== + dependencies: + "@typescript-eslint/typescript-estree" "7.18.0" + "@typescript-eslint/utils" "7.18.0" + debug "^4.3.4" + ts-api-utils "^1.3.0" + +"@typescript-eslint/types@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-7.18.0.tgz#b90a57ccdea71797ffffa0321e744f379ec838c9" + integrity sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ== + +"@typescript-eslint/typescript-estree@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz#b5868d486c51ce8f312309ba79bdb9f331b37931" + integrity sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA== + dependencies: + "@typescript-eslint/types" "7.18.0" + "@typescript-eslint/visitor-keys" "7.18.0" + debug "^4.3.4" + globby "^11.1.0" + is-glob "^4.0.3" + minimatch "^9.0.4" + semver "^7.6.0" + ts-api-utils "^1.3.0" + +"@typescript-eslint/utils@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-7.18.0.tgz#bca01cde77f95fc6a8d5b0dbcbfb3d6ca4be451f" + integrity sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw== + dependencies: + "@eslint-community/eslint-utils" 
"^4.4.0" + "@typescript-eslint/scope-manager" "7.18.0" + "@typescript-eslint/types" "7.18.0" + "@typescript-eslint/typescript-estree" "7.18.0" + +"@typescript-eslint/visitor-keys@7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz#0564629b6124d67607378d0f0332a0495b25e7d7" + integrity sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg== + dependencies: + "@typescript-eslint/types" "7.18.0" + eslint-visitor-keys "^3.4.3" + +"@ungap/structured-clone@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406" + integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== + +"@vitejs/plugin-react-swc@^3.8.1": + version "3.8.1" + resolved "https://registry.yarnpkg.com/@vitejs/plugin-react-swc/-/plugin-react-swc-3.8.1.tgz#663f14b72b1f42f6e61f412ea320e287b3065c41" + integrity sha512-aEUPCckHDcFyxpwFm0AIkbtv6PpUp3xTb9wYGFjtABynXjCYKkWoxX0AOK9NT9XCrdk6mBBUOeHQS+RKdcNO1A== + dependencies: + "@swc/core" "^1.11.11" + +"@vitejs/plugin-react@^4.3.4": + version "4.3.4" + resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.3.4.tgz#c64be10b54c4640135a5b28a2432330e88ad7c20" + integrity sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug== + dependencies: + "@babel/core" "^7.26.0" + "@babel/plugin-transform-react-jsx-self" "^7.25.9" + "@babel/plugin-transform-react-jsx-source" "^7.25.9" + "@types/babel__core" "^7.20.5" + react-refresh "^0.14.2" + +"@vitest/expect@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.6.1.tgz#b90c213f587514a99ac0bf84f88cff9042b0f14d" + integrity sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog== + dependencies: + "@vitest/spy" "1.6.1" + "@vitest/utils" "1.6.1" + chai "^4.3.10" + +"@vitest/runner@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.6.1.tgz#10f5857c3e376218d58c2bfacfea1161e27e117f" + integrity sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA== + dependencies: + "@vitest/utils" "1.6.1" + p-limit "^5.0.0" + pathe "^1.1.1" + +"@vitest/snapshot@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.6.1.tgz#90414451a634bb36cd539ccb29ae0d048a8c0479" + integrity sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ== + dependencies: + magic-string "^0.30.5" + pathe "^1.1.1" + pretty-format "^29.7.0" + +"@vitest/spy@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.6.1.tgz#33376be38a5ed1ecd829eb986edaecc3e798c95d" + integrity sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw== + dependencies: + tinyspy "^2.2.0" + +"@vitest/utils@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.6.1.tgz#6d2f36cb6d866f2bbf59da854a324d6bf8040f17" + integrity sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g== + dependencies: + diff-sequences "^29.6.3" + estree-walker "^3.0.3" + loupe "^2.3.7" + pretty-format "^29.7.0" + +acorn-jsx@^5.3.2: + version "5.3.2" + resolved 
"https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" + integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== + +acorn-walk@^8.3.2: + version "8.3.2" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.2.tgz#7703af9415f1b6db9315d6895503862e231d34aa" + integrity sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A== + +acorn@^8.11.3, acorn@^8.9.0: + version "8.11.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a" + integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg== + +agent-base@^7.1.0: + version "7.1.1" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-7.1.1.tgz#bdbded7dfb096b751a2a087eeeb9664725b2e317" + integrity sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA== + dependencies: + debug "^4.3.4" + +agent-base@^7.1.2: + version "7.1.3" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-7.1.3.tgz#29435eb821bc4194633a5b89e5bc4703bafc25a1" + integrity sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw== + +ajv-draft-04@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz#3b64761b268ba0b9e668f0b41ba53fce0ad77fc8" + integrity sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw== + +ajv@^6.12.4: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +ajv@^8.6.3: + version "8.14.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.14.0.tgz#f514ddfd4756abb200e1704414963620a625ebbb" + integrity sha512-oYs1UUtO97ZO2lJ4bwnWeQW8/zvOIQLGKcvPTsWmvc2SYgBb+upuNS5NxoLaMU4h8Ju3Nbj6Cq8mD2LQoqVKFA== + dependencies: + fast-deep-equal "^3.1.3" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.4.1" + +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +ansi-styles@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" + integrity 
sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +aria-query@5.3.0, aria-query@^5.0.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-5.3.0.tgz#650c569e41ad90b51b3d7df5e5eed1c7549c103e" + integrity sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A== + dependencies: + dequal "^2.0.3" + +array-buffer-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f" + integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg== + dependencies: + call-bind "^1.0.5" + is-array-buffer "^3.0.4" + +array-buffer-byte-length@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz#384d12a37295aec3769ab022ad323a18a51ccf8b" + integrity sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw== + dependencies: + call-bound "^1.0.3" + is-array-buffer "^3.0.5" + +array-includes@^3.1.6, array-includes@^3.1.8: + version "3.1.8" + resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.8.tgz#5e370cbe172fdd5dd6530c1d4aadda25281ba97d" + integrity sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.4" + is-string "^1.0.7" + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array.prototype.findlast@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz#3e4fbcb30a15a7f5bf64cf2faae22d139c2e4904" + integrity sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + es-shim-unscopables "^1.0.2" + +array.prototype.flat@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz#1476217df8cff17d72ee8f3ba06738db5b387d18" + integrity sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-shim-unscopables "^1.0.0" + +array.prototype.flatmap@^1.3.3: + version "1.3.3" + resolved 
"https://registry.yarnpkg.com/array.prototype.flatmap/-/array.prototype.flatmap-1.3.3.tgz#712cc792ae70370ae40586264629e33aab5dd38b" + integrity sha512-Y7Wt51eKJSyi80hFrJCePGGNo5ktJCslFuboqJsbf57CCPcm5zztluPlc4/aD8sWsKvlwatezpV4U1efk8kpjg== + dependencies: + call-bind "^1.0.8" + define-properties "^1.2.1" + es-abstract "^1.23.5" + es-shim-unscopables "^1.0.2" + +array.prototype.tosorted@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz#fe954678ff53034e717ea3352a03f0b0b86f7ffc" + integrity sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.3" + es-errors "^1.3.0" + es-shim-unscopables "^1.0.2" + +arraybuffer.prototype.slice@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6" + integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A== + dependencies: + array-buffer-byte-length "^1.0.1" + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.2.1" + get-intrinsic "^1.2.3" + is-array-buffer "^3.0.4" + is-shared-array-buffer "^1.0.2" + +arraybuffer.prototype.slice@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz#9d760d84dbdd06d0cbf92c8849615a1a7ab3183c" + integrity sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ== + dependencies: + array-buffer-byte-length "^1.0.1" + call-bind "^1.0.8" + define-properties "^1.2.1" + es-abstract "^1.23.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + is-array-buffer "^3.0.4" + +assertion-error@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.1.0.tgz#e60b6b0e8f301bd97e5375215bda406c85118c0b" + integrity sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== + +autoprefixer@^10.4.21: + version "10.4.21" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.21.tgz#77189468e7a8ad1d9a37fbc08efc9f480cf0a95d" + integrity sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ== + dependencies: + browserslist "^4.24.4" + caniuse-lite "^1.0.30001702" + fraction.js "^4.3.7" + normalize-range "^0.1.2" + picocolors "^1.1.1" + postcss-value-parser "^4.2.0" + +available-typed-arrays@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== + dependencies: + possible-typed-array-names "^1.0.0" + +babel-plugin-macros@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" + integrity 
sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg== + dependencies: + "@babel/runtime" "^7.12.5" + cosmiconfig "^7.0.0" + resolve "^1.19.0" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +brace-expansion@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" + integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + dependencies: + balanced-match "^1.0.0" + +braces@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +braces@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== + dependencies: + fill-range "^7.1.1" + +browserslist@^4.22.2: + version "4.23.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab" + integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== + dependencies: + caniuse-lite "^1.0.30001587" + electron-to-chromium "^1.4.668" + node-releases "^2.0.14" + update-browserslist-db "^1.0.13" + +browserslist@^4.24.0: + version "4.24.3" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.3.tgz#5fc2725ca8fb3c1432e13dac278c7cc103e026d2" + integrity sha512-1CPmv8iobE2fyRMV97dAcMVegvvWKxmq94hkLiAkUGwKVTyDLw33K+ZxiFrREKmmps4rIw6grcCFCnTMSZ/YiA== + dependencies: + caniuse-lite "^1.0.30001688" + electron-to-chromium "^1.5.73" + node-releases "^2.0.19" + update-browserslist-db "^1.1.1" + +browserslist@^4.24.4: + version "4.24.4" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.4.tgz#c6b2865a3f08bcb860a0e827389003b9fe686e4b" + integrity sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A== + dependencies: + caniuse-lite "^1.0.30001688" + electron-to-chromium "^1.5.73" + node-releases "^2.0.19" + update-browserslist-db "^1.1.1" + +buffer-from@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" + integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== + +cac@^6.7.14: + version "6.7.14" + resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959" + integrity 
sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ== + +call-bind-apply-helpers@^1.0.0, call-bind-apply-helpers@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz#32e5892e6361b29b0b545ba6f7763378daca2840" + integrity sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + +call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + +call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" + +call-bind@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.8.tgz#0736a9660f537e3388826f440d5ec45f744eaa4c" + integrity sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww== + dependencies: + call-bind-apply-helpers "^1.0.0" + es-define-property "^1.0.0" + get-intrinsic "^1.2.4" + set-function-length "^1.2.2" + +call-bound@^1.0.2, call-bound@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/call-bound/-/call-bound-1.0.3.tgz#41cfd032b593e39176a71533ab4f384aa04fd681" + integrity sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA== + dependencies: + call-bind-apply-helpers "^1.0.1" + get-intrinsic "^1.2.6" + +call-bound@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/call-bound/-/call-bound-1.0.4.tgz#238de935d2a2a692928c538c7ccfa91067fd062a" + integrity sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg== + dependencies: + call-bind-apply-helpers "^1.0.2" + get-intrinsic "^1.3.0" + +call-me-maybe@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.2.tgz#03f964f19522ba643b1b0693acb9152fe2074baa" + integrity sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ== + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camelcase@^6.2.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" + integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== + +caniuse-lite@^1.0.30001587: + version "1.0.30001612" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001612.tgz#d34248b4ec1f117b70b24ad9ee04c90e0b8a14ae" + integrity 
sha512-lFgnZ07UhaCcsSZgWW0K5j4e69dK1u/ltrL9lTUiFOwNHs12S3UMIEYgBV0Z6C6hRDev7iRnMzzYmKabYdXF9g== + +caniuse-lite@^1.0.30001688: + version "1.0.30001689" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001689.tgz#67ca960dd5f443903e19949aeacc9d28f6e10910" + integrity sha512-CmeR2VBycfa+5/jOfnp/NpWPGd06nf1XYiefUvhXFfZE4GkRc9jv+eGPS4nT558WS/8lYCzV8SlANCIPvbWP1g== + +caniuse-lite@^1.0.30001702: + version "1.0.30001706" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001706.tgz#902c3f896f4b2968031c3a546ab2ef8b465a2c8f" + integrity sha512-3ZczoTApMAZwPKYWmwVbQMFpXBDds3/0VciVoUwPUbldlYyVLmRVuRs/PcUZtHpbLRpzzDvrvnFuREsGt6lUug== + +chai@^4.3.10: + version "4.4.1" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.4.1.tgz#3603fa6eba35425b0f2ac91a009fe924106e50d1" + integrity sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.3" + deep-eql "^4.1.3" + get-func-name "^2.0.2" + loupe "^2.3.6" + pathval "^1.1.1" + type-detect "^4.0.8" + +chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" + integrity sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^4.0.0, chalk@^4.1.0: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +check-error@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.3.tgz#a6502e4312a7ee969f646e83bb3ddd56281bd694" + integrity sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg== + dependencies: + get-func-name "^2.0.2" + +chokidar@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" + integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + dependencies: + readdirp "^4.0.1" + +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + +clsx@^1.0.4: + version "1.2.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" + integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== + +clsx@^2.1.0, clsx@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" + integrity 
sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA== + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +combined-stream@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +commander@^6.2.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" + integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +confbox@^0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/confbox/-/confbox-0.1.7.tgz#ccfc0a2bcae36a84838e83a3b7f770fb17d6c579" + integrity sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA== + +convert-source-map@^1.5.0: + version "1.9.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" + integrity sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A== + +convert-source-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== + +cosmiconfig@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" + integrity sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.2.1" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.10.0" + +cosmiconfig@^8.1.3: + version "8.3.6" + resolved 
"https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-8.3.6.tgz#060a2b871d66dba6c8538ea1118ba1ac16f5fae3" + integrity sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA== + dependencies: + import-fresh "^3.3.0" + js-yaml "^4.1.0" + parse-json "^5.2.0" + path-type "^4.0.0" + +cross-fetch@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-4.0.0.tgz#f037aef1580bb3a1a35164ea2a848ba81b445983" + integrity sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g== + dependencies: + node-fetch "^2.6.12" + +cross-spawn@^7.0.2, cross-spawn@^7.0.3: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +css.escape@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/css.escape/-/css.escape-1.5.1.tgz#42e27d4fa04ae32f931a4b4d4191fa9cddee97cb" + integrity sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg== + +cssstyle@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-4.0.1.tgz#ef29c598a1e90125c870525490ea4f354db0660a" + integrity sha512-8ZYiJ3A/3OkDd093CBT/0UKDWry7ak4BdPTFP2+QEP7cmhouyq/Up709ASSj2cK02BbZiMgk7kYjZNS4QP5qrQ== + dependencies: + rrweb-cssom "^0.6.0" + +csstype@^3.0.2, csstype@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +data-urls@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-5.0.0.tgz#2f76906bce1824429ffecb6920f45a0b30f00dde" + integrity sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg== + dependencies: + whatwg-mimetype "^4.0.0" + whatwg-url "^14.0.0" + +data-view-buffer@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2" + integrity sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA== + dependencies: + call-bind "^1.0.6" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-buffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.2.tgz#211a03ba95ecaf7798a8c7198d79536211f88570" + integrity sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ== + dependencies: + call-bound "^1.0.3" + es-errors "^1.3.0" + is-data-view "^1.0.2" + +data-view-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz#90721ca95ff280677eb793749fce1011347669e2" + integrity sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-byte-length@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz#9e80f7ca52453ce3e93d25a35318767ea7704735" 
+  integrity sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==
+  dependencies:
+    call-bound "^1.0.3"
+    es-errors "^1.3.0"
+    is-data-view "^1.0.2"
+
+data-view-byte-offset@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz#5e0bbfb4828ed2d1b9b400cd8a7d119bca0ff18a"
+  integrity sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==
+  dependencies:
+    call-bind "^1.0.6"
+    es-errors "^1.3.0"
+    is-data-view "^1.0.1"
+
+data-view-byte-offset@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz#068307f9b71ab76dbbe10291389e020856606191"
+  integrity sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==
+  dependencies:
+    call-bound "^1.0.2"
+    es-errors "^1.3.0"
+    is-data-view "^1.0.1"
+
+date-fns@^3.6.0:
+  version "3.6.0"
+  resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-3.6.0.tgz#f20ca4fe94f8b754951b24240676e8618c0206bf"
+  integrity sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==
+
+debug@4, debug@^4.1.0, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4:
+  version "4.3.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
+  integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
+  dependencies:
+    ms "2.1.2"
+
+decimal.js@^10.4.3:
+  version "10.4.3"
+  resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.4.3.tgz#1044092884d245d1b7f65725fa4ad4c6f781cc23"
+  integrity sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==
+
+deep-eql@^4.1.3:
+  version "4.1.3"
+  resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-4.1.3.tgz#7c7775513092f7df98d8df9996dd085eb668cc6d"
+  integrity sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==
+  dependencies:
+    type-detect "^4.0.0"
+
+deep-is@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831"
+  integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
+
+define-data-property@^1.0.1, define-data-property@^1.1.4:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e"
+  integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==
+  dependencies:
+    es-define-property "^1.0.0"
+    es-errors "^1.3.0"
+    gopd "^1.0.1"
+
+define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c"
+  integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==
+  dependencies:
+    define-data-property "^1.0.1"
+    has-property-descriptors "^1.0.0"
+    object-keys "^1.1.1"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+  integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
+
+dequal@^2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/dequal/-/dequal-2.0.3.tgz#2644214f1997d39ed0ee0ece72335490a7ac67be"
+  integrity sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==
+
+detect-libc@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b"
+  integrity sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==
+
+diff-sequences@^29.6.3:
+  version "29.6.3"
+  resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921"
+  integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==
+
+dir-glob@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
+  integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
+  dependencies:
+    path-type "^4.0.0"
+
+doctrine@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d"
+  integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==
+  dependencies:
+    esutils "^2.0.2"
+
+doctrine@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961"
+  integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==
+  dependencies:
+    esutils "^2.0.2"
+
+dom-accessibility-api@^0.5.9:
+  version "0.5.16"
+  resolved "https://registry.yarnpkg.com/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz#5a7429e6066eb3664d911e33fb0e45de8eb08453"
+  integrity sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==
+
+dom-accessibility-api@^0.6.3:
+  version "0.6.3"
+  resolved "https://registry.yarnpkg.com/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz#993e925cc1d73f2c662e7d75dd5a5445259a8fd8"
+  integrity sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==
+
+dom-helpers@^5.0.1, dom-helpers@^5.1.3:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902"
+  integrity sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==
+  dependencies:
+    "@babel/runtime" "^7.8.7"
+    csstype "^3.0.2"
+
+dot-case@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/dot-case/-/dot-case-3.0.4.tgz#9b2b670d00a431667a8a75ba29cd1b98809ce751"
+  integrity sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==
+  dependencies:
+    no-case "^3.0.4"
+    tslib "^2.0.3"
+
+dunder-proto@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.0.tgz#c2fce098b3c8f8899554905f4377b6d85dabaa80"
+  integrity sha512-9+Sj30DIu+4KvHqMfLUGLFYL2PkURSYMVXJyXe92nFRvlYq5hBjLEhblKB+vkd/WVlUYMWigiY07T91Fkk0+4A==
+  dependencies:
+    call-bind-apply-helpers "^1.0.0"
+    es-errors "^1.3.0"
+    gopd "^1.2.0"
+
+dunder-proto@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
+  integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
+  dependencies:
+    call-bind-apply-helpers "^1.0.1"
+    es-errors "^1.3.0"
+    gopd "^1.2.0"
+
+electron-to-chromium@^1.4.668:
+  version "1.4.747"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.747.tgz#e37fa5b7b7e4c22607c5f59b5cf78f947266e77d"
+  integrity sha512-+FnSWZIAvFHbsNVmUxhEqWiaOiPMcfum1GQzlWCg/wLigVtshOsjXHyEFfmt6cFK6+HkS3QOJBv6/3OPumbBfw==
+
+electron-to-chromium@^1.5.73:
+  version "1.5.73"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.73.tgz#f32956ce40947fa3c8606726a96cd8fb5bb5f720"
+  integrity sha512-8wGNxG9tAG5KhGd3eeA0o6ixhiNdgr0DcHWm85XPCphwZgD1lIEoi6t3VERayWao7SF7AAZTw6oARGJeVjH8Kg==
+
+emoji-regex@^8.0.0:
+  version "8.0.0"
+  resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
+  integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
+
+entities@^4.4.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
+  integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
+
+error-ex@^1.3.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf"
+  integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es-abstract@^1.17.5, es-abstract@^1.23.3, es-abstract@^1.23.5:
+  version "1.23.6"
+  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.6.tgz#55f0e1ce7128995cc04ace0a57d7dca348345108"
+  integrity sha512-Ifco6n3yj2tMZDWNLyloZrytt9lqqlwvS83P3HtaETR0NUOYnIULGGHpktqYGObGy+8wc1okO25p8TjemhImvA==
+  dependencies:
+    array-buffer-byte-length "^1.0.1"
+    arraybuffer.prototype.slice "^1.0.4"
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.8"
+    call-bound "^1.0.3"
+    data-view-buffer "^1.0.1"
+    data-view-byte-length "^1.0.1"
+    data-view-byte-offset "^1.0.0"
+    es-define-property "^1.0.1"
+    es-errors "^1.3.0"
+    es-object-atoms "^1.0.0"
+    es-set-tostringtag "^2.0.3"
+    es-to-primitive "^1.3.0"
+    function.prototype.name "^1.1.7"
+    get-intrinsic "^1.2.6"
+    get-symbol-description "^1.0.2"
+    globalthis "^1.0.4"
+    gopd "^1.2.0"
+    has-property-descriptors "^1.0.2"
+    has-proto "^1.2.0"
+    has-symbols "^1.1.0"
+    hasown "^2.0.2"
+    internal-slot "^1.1.0"
+    is-array-buffer "^3.0.4"
+    is-callable "^1.2.7"
+    is-data-view "^1.0.2"
+    is-negative-zero "^2.0.3"
+    is-regex "^1.2.1"
+    is-shared-array-buffer "^1.0.3"
+    is-string "^1.1.1"
+    is-typed-array "^1.1.13"
+    is-weakref "^1.1.0"
+    math-intrinsics "^1.0.0"
+    object-inspect "^1.13.3"
+    object-keys "^1.1.1"
+    object.assign "^4.1.5"
+    regexp.prototype.flags "^1.5.3"
+    safe-array-concat "^1.1.3"
+    safe-regex-test "^1.1.0"
+    string.prototype.trim "^1.2.10"
+    string.prototype.trimend "^1.0.9"
+    string.prototype.trimstart "^1.0.8"
+    typed-array-buffer "^1.0.2"
+    typed-array-byte-length "^1.0.1"
+    typed-array-byte-offset "^1.0.3"
+    typed-array-length "^1.0.7"
+    unbox-primitive "^1.0.2"
+    which-typed-array "^1.1.16"
+
+es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2:
+  version "1.23.3"
+  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.3.tgz#8f0c5a35cd215312573c5a27c87dfd6c881a0aa0"
+  integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==
+  dependencies:
+    array-buffer-byte-length "^1.0.1"
+    arraybuffer.prototype.slice "^1.0.3"
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.7"
+    data-view-buffer "^1.0.1"
+    data-view-byte-length "^1.0.1"
+    data-view-byte-offset "^1.0.0"
+    es-define-property "^1.0.0"
+    es-errors "^1.3.0"
+    es-object-atoms "^1.0.0"
+    es-set-tostringtag "^2.0.3"
+    es-to-primitive "^1.2.1"
+    function.prototype.name "^1.1.6"
+    get-intrinsic "^1.2.4"
+    get-symbol-description "^1.0.2"
+    globalthis "^1.0.3"
+    gopd "^1.0.1"
+    has-property-descriptors "^1.0.2"
+    has-proto "^1.0.3"
+    has-symbols "^1.0.3"
+    hasown "^2.0.2"
+    internal-slot "^1.0.7"
+    is-array-buffer "^3.0.4"
+    is-callable "^1.2.7"
+    is-data-view "^1.0.1"
+    is-negative-zero "^2.0.3"
+    is-regex "^1.1.4"
+    is-shared-array-buffer "^1.0.3"
+    is-string "^1.0.7"
+    is-typed-array "^1.1.13"
+    is-weakref "^1.0.2"
+    object-inspect "^1.13.1"
+    object-keys "^1.1.1"
+    object.assign "^4.1.5"
+    regexp.prototype.flags "^1.5.2"
+    safe-array-concat "^1.1.2"
+    safe-regex-test "^1.0.3"
+    string.prototype.trim "^1.2.9"
+    string.prototype.trimend "^1.0.8"
+    string.prototype.trimstart "^1.0.8"
+    typed-array-buffer "^1.0.2"
+    typed-array-byte-length "^1.0.1"
+    typed-array-byte-offset "^1.0.2"
+    typed-array-length "^1.0.6"
+    unbox-primitive "^1.0.2"
+    which-typed-array "^1.1.15"
+
+es-abstract@^1.23.6, es-abstract@^1.23.9:
+  version "1.23.9"
+  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.9.tgz#5b45994b7de78dada5c1bebf1379646b32b9d606"
+  integrity sha512-py07lI0wjxAC/DcfK1S6G7iANonniZwTISvdPzk9hzeH0IZIshbuuFxLIU96OyF89Yb9hiqWn8M/bY83KY5vzA==
+  dependencies:
+    array-buffer-byte-length "^1.0.2"
+    arraybuffer.prototype.slice "^1.0.4"
+    available-typed-arrays "^1.0.7"
+    call-bind "^1.0.8"
+    call-bound "^1.0.3"
+    data-view-buffer "^1.0.2"
+    data-view-byte-length "^1.0.2"
+    data-view-byte-offset "^1.0.1"
+    es-define-property "^1.0.1"
+    es-errors "^1.3.0"
+    es-object-atoms "^1.0.0"
+    es-set-tostringtag "^2.1.0"
+    es-to-primitive "^1.3.0"
+    function.prototype.name "^1.1.8"
+    get-intrinsic "^1.2.7"
+    get-proto "^1.0.0"
+    get-symbol-description "^1.1.0"
+    globalthis "^1.0.4"
+    gopd "^1.2.0"
+    has-property-descriptors "^1.0.2"
+    has-proto "^1.2.0"
+    has-symbols "^1.1.0"
+    hasown "^2.0.2"
+    internal-slot "^1.1.0"
+    is-array-buffer "^3.0.5"
+    is-callable "^1.2.7"
+    is-data-view "^1.0.2"
+    is-regex "^1.2.1"
+    is-shared-array-buffer "^1.0.4"
+    is-string "^1.1.1"
+    is-typed-array "^1.1.15"
+    is-weakref "^1.1.0"
+    math-intrinsics "^1.1.0"
+    object-inspect "^1.13.3"
+    object-keys "^1.1.1"
+    object.assign "^4.1.7"
+    own-keys "^1.0.1"
+    regexp.prototype.flags "^1.5.3"
+    safe-array-concat "^1.1.3"
+    safe-push-apply "^1.0.0"
+    safe-regex-test "^1.1.0"
+    set-proto "^1.0.0"
+    string.prototype.trim "^1.2.10"
+    string.prototype.trimend "^1.0.9"
+    string.prototype.trimstart "^1.0.8"
+    typed-array-buffer "^1.0.3"
+    typed-array-byte-length "^1.0.3"
+    typed-array-byte-offset "^1.0.4"
+    typed-array-length "^1.0.7"
+    unbox-primitive "^1.1.0"
+    which-typed-array "^1.1.18"
+
+es-define-property@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845"
+  integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==
+  dependencies:
+    get-intrinsic "^1.2.4"
+
+es-define-property@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
+  integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
+
+es-errors@^1.2.1, es-errors@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
+  integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
+
+es-iterator-helpers@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.2.1.tgz#d1dd0f58129054c0ad922e6a9a1e65eef435fe75"
+  integrity sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w==
+  dependencies:
+    call-bind "^1.0.8"
+    call-bound "^1.0.3"
+    define-properties "^1.2.1"
+    es-abstract "^1.23.6"
+    es-errors "^1.3.0"
+    es-set-tostringtag "^2.0.3"
+    function-bind "^1.1.2"
+    get-intrinsic "^1.2.6"
+    globalthis "^1.0.4"
+    gopd "^1.2.0"
+    has-property-descriptors "^1.0.2"
+    has-proto "^1.2.0"
+    has-symbols "^1.1.0"
+    internal-slot "^1.1.0"
+    iterator.prototype "^1.1.4"
+    safe-array-concat "^1.1.3"
+
+es-object-atoms@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.0.0.tgz#ddb55cd47ac2e240701260bc2a8e31ecb643d941"
+  integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==
+  dependencies:
+    es-errors "^1.3.0"
+
+es-object-atoms@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
+  integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
+  dependencies:
+    es-errors "^1.3.0"
+
+es-set-tostringtag@^2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777"
+  integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==
+  dependencies:
+    get-intrinsic "^1.2.4"
+    has-tostringtag "^1.0.2"
+    hasown "^2.0.1"
+
+es-set-tostringtag@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
+  integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
+  dependencies:
+    es-errors "^1.3.0"
+    get-intrinsic "^1.2.6"
+    has-tostringtag "^1.0.2"
+    hasown "^2.0.2"
+
+es-shim-unscopables@^1.0.0, es-shim-unscopables@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763"
+  integrity sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==
+  dependencies:
+    hasown "^2.0.0"
+
+es-to-primitive@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a"
+  integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==
+  dependencies:
+    is-callable "^1.1.4"
+    is-date-object "^1.0.1"
+    is-symbol "^1.0.2"
+
+es-to-primitive@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.3.0.tgz#96c89c82cc49fd8794a24835ba3e1ff87f214e18"
+  integrity sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==
+  dependencies:
+    is-callable "^1.2.7"
+    is-date-object "^1.0.5"
+    is-symbol "^1.0.4"
+
+es6-promise@^3.2.1:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-3.3.1.tgz#a08cdde84ccdbf34d027a1451bc91d4bcd28a613"
+  integrity sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==
+
+esbuild-plugin-react-virtualized@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esbuild-plugin-react-virtualized/-/esbuild-plugin-react-virtualized-1.0.4.tgz#b8911ce8fae4636daa87cfa898752170f5d45609"
+  integrity sha512-/Y+82TBduHox0/uhJlTgUqi3ZWN+qZPF0xy9crkHQE2AOOdm76l6VY2F0Mdfvue9hqXz2FOlKHlHUVXNalHLzA==
+
+esbuild-runner@^2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/esbuild-runner/-/esbuild-runner-2.2.2.tgz#4243089f14c9690bff70beee16da3c41fd1dec50"
+  integrity sha512-fRFVXcmYVmSmtYm2mL8RlUASt2TDkGh3uRcvHFOKNr/T58VrfVeKD9uT9nlgxk96u0LS0ehS/GY7Da/bXWKkhw==
+  dependencies:
+    source-map-support "0.5.21"
+    tslib "2.4.0"
+
+esbuild@^0.20.1:
+  version "0.20.2"
+  resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.20.2.tgz#9d6b2386561766ee6b5a55196c6d766d28c87ea1"
+  integrity sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==
+  optionalDependencies:
+    "@esbuild/aix-ppc64" "0.20.2"
+    "@esbuild/android-arm" "0.20.2"
+    "@esbuild/android-arm64" "0.20.2"
+    "@esbuild/android-x64" "0.20.2"
+    "@esbuild/darwin-arm64" "0.20.2"
+    "@esbuild/darwin-x64" "0.20.2"
+    "@esbuild/freebsd-arm64" "0.20.2"
+    "@esbuild/freebsd-x64" "0.20.2"
+    "@esbuild/linux-arm" "0.20.2"
+    "@esbuild/linux-arm64" "0.20.2"
+    "@esbuild/linux-ia32" "0.20.2"
+    "@esbuild/linux-loong64" "0.20.2"
+    "@esbuild/linux-mips64el" "0.20.2"
+    "@esbuild/linux-ppc64" "0.20.2"
+    "@esbuild/linux-riscv64" "0.20.2"
+    "@esbuild/linux-s390x" "0.20.2"
+    "@esbuild/linux-x64" "0.20.2"
+    "@esbuild/netbsd-x64" "0.20.2"
+    "@esbuild/openbsd-x64" "0.20.2"
+    "@esbuild/sunos-x64" "0.20.2"
+    "@esbuild/win32-arm64" "0.20.2"
+    "@esbuild/win32-ia32" "0.20.2"
+    "@esbuild/win32-x64" "0.20.2"
+
+esbuild@^0.21.3:
+  version "0.21.5"
+  resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.21.5.tgz#9ca301b120922959b766360d8ac830da0d02997d"
+  integrity sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==
+  optionalDependencies:
+    "@esbuild/aix-ppc64" "0.21.5"
+    "@esbuild/android-arm" "0.21.5"
+    "@esbuild/android-arm64" "0.21.5"
+    "@esbuild/android-x64" "0.21.5"
+    "@esbuild/darwin-arm64" "0.21.5"
+    "@esbuild/darwin-x64" "0.21.5"
+    "@esbuild/freebsd-arm64" "0.21.5"
+    "@esbuild/freebsd-x64" "0.21.5"
+    "@esbuild/linux-arm" "0.21.5"
+    "@esbuild/linux-arm64" "0.21.5"
+    "@esbuild/linux-ia32" "0.21.5"
+    "@esbuild/linux-loong64" "0.21.5"
+    "@esbuild/linux-mips64el" "0.21.5"
+    "@esbuild/linux-ppc64" "0.21.5"
+    "@esbuild/linux-riscv64" "0.21.5"
+    "@esbuild/linux-s390x" "0.21.5"
+    "@esbuild/linux-x64" "0.21.5"
+    "@esbuild/netbsd-x64" "0.21.5"
+    "@esbuild/openbsd-x64" "0.21.5"
+    "@esbuild/sunos-x64" "0.21.5"
+    "@esbuild/win32-arm64" "0.21.5"
+    "@esbuild/win32-ia32" "0.21.5"
+    "@esbuild/win32-x64" "0.21.5"
+
+escalade@^3.1.1:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27"
+  integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==
+
+escalade@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5"
+  integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==
+
+escape-string-regexp@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+  integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
+
+escape-string-regexp@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
+  integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
+
+eslint-plugin-react-hooks@^4.6.2:
+  version "4.6.2"
+  resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz#c829eb06c0e6f484b3fbb85a97e57784f328c596"
+  integrity sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==
+
+eslint-plugin-react-refresh@^0.4.19:
+  version "0.4.19"
+  resolved "https://registry.yarnpkg.com/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.19.tgz#f15020c0caa58e33fc4efda27d328281ca74e53d"
+  integrity sha512-eyy8pcr/YxSYjBoqIFSrlbn9i/xvxUFa8CjzAYo9cFjgGXqq1hyjihcpZvxRLalpaWmueWR81xn7vuKmAFijDQ==
+
+eslint-plugin-react@^7.37.4:
+  version "7.37.4"
+  resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.37.4.tgz#1b6c80b6175b6ae4b26055ae4d55d04c414c7181"
+  integrity sha512-BGP0jRmfYyvOyvMoRX/uoUeW+GqNj9y16bPQzqAHf3AYII/tDs+jMN0dBVkl88/OZwNGwrVFxE7riHsXVfy/LQ==
+  dependencies:
+    array-includes "^3.1.8"
+    array.prototype.findlast "^1.2.5"
+    array.prototype.flatmap "^1.3.3"
+    array.prototype.tosorted "^1.1.4"
+    doctrine "^2.1.0"
+    es-iterator-helpers "^1.2.1"
+    estraverse "^5.3.0"
+    hasown "^2.0.2"
+    jsx-ast-utils "^2.4.1 || ^3.0.0"
+    minimatch "^3.1.2"
+    object.entries "^1.1.8"
+    object.fromentries "^2.0.8"
+    object.values "^1.2.1"
+    prop-types "^15.8.1"
+    resolve "^2.0.0-next.5"
+    semver "^6.3.1"
+    string.prototype.matchall "^4.0.12"
+    string.prototype.repeat "^1.0.0"
+
+eslint-scope@^7.2.2:
+  version "7.2.2"
+  resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.2.tgz#deb4f92563390f32006894af62a22dba1c46423f"
+  integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==
+  dependencies:
+    esrecurse "^4.3.0"
+    estraverse "^5.2.0"
+
+eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3:
+  version "3.4.3"
+  resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800"
+  integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
+
+eslint@^8.57.1:
+  version "8.57.1"
+  resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.57.1.tgz#7df109654aba7e3bbe5c8eae533c5e461d3c6ca9"
+  integrity sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==
+  dependencies:
+    "@eslint-community/eslint-utils" "^4.2.0"
+    "@eslint-community/regexpp" "^4.6.1"
+    "@eslint/eslintrc" "^2.1.4"
+    "@eslint/js" "8.57.1"
+    "@humanwhocodes/config-array" "^0.13.0"
+    "@humanwhocodes/module-importer" "^1.0.1"
+    "@nodelib/fs.walk" "^1.2.8"
+    "@ungap/structured-clone" "^1.2.0"
+    ajv "^6.12.4"
+    chalk "^4.0.0"
+    cross-spawn "^7.0.2"
+    debug "^4.3.2"
+    doctrine "^3.0.0"
+    escape-string-regexp "^4.0.0"
+    eslint-scope "^7.2.2"
+    eslint-visitor-keys "^3.4.3"
+    espree "^9.6.1"
+    esquery "^1.4.2"
+    esutils "^2.0.2"
+    fast-deep-equal "^3.1.3"
+    file-entry-cache "^6.0.1"
+    find-up "^5.0.0"
+    glob-parent "^6.0.2"
+    globals "^13.19.0"
+    graphemer "^1.4.0"
+    ignore "^5.2.0"
+    imurmurhash "^0.1.4"
+    is-glob "^4.0.0"
+    is-path-inside "^3.0.3"
+    js-yaml "^4.1.0"
+    json-stable-stringify-without-jsonify "^1.0.1"
+    levn "^0.4.1"
+    lodash.merge "^4.6.2"
+    minimatch "^3.1.2"
+    natural-compare "^1.4.0"
+    optionator "^0.9.3"
+    strip-ansi "^6.0.1"
+    text-table "^0.2.0"
+
+espree@^9.6.0, espree@^9.6.1:
+  version "9.6.1"
+  resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f"
+  integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==
+  dependencies:
+    acorn "^8.9.0"
+    acorn-jsx "^5.3.2"
+    eslint-visitor-keys "^3.4.1"
+
+esprima@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
+  integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
+
+esquery@^1.4.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b"
+  integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==
+  dependencies:
+    estraverse "^5.1.0"
+
+esrecurse@^4.3.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921"
+  integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==
+  dependencies:
+    estraverse "^5.2.0"
+
+estraverse@^5.1.0, estraverse@^5.2.0, estraverse@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123"
+  integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==
+
+estree-walker@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac"
+  integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==
+
+estree-walker@^3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-3.0.3.tgz#67c3e549ec402a487b4fc193d1953a524752340d"
+  integrity sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==
+  dependencies:
+    "@types/estree" "^1.0.0"
+
+esutils@^2.0.2:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64"
+  integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
+
+execa@^8.0.1:
+  version "8.0.1"
+  resolved "https://registry.yarnpkg.com/execa/-/execa-8.0.1.tgz#51f6a5943b580f963c3ca9c6321796db8cc39b8c"
+  integrity sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==
+  dependencies:
+    cross-spawn "^7.0.3"
+    get-stream "^8.0.1"
+    human-signals "^5.0.0"
+    is-stream "^3.0.0"
+    merge-stream "^2.0.0"
+    npm-run-path "^5.1.0"
+    onetime "^6.0.0"
+    signal-exit "^4.1.0"
+    strip-final-newline "^3.0.0"
+
+fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525"
+  integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
+
+fast-glob@^3.2.9:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129"
+  integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==
+  dependencies:
+    "@nodelib/fs.stat" "^2.0.2"
+    "@nodelib/fs.walk" "^1.2.3"
+    glob-parent "^5.1.2"
+    merge2 "^1.3.0"
+    micromatch "^4.0.4"
+
+fast-json-stable-stringify@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633"
+  integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
+
+fast-levenshtein@^2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+  integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
+
+fast-safe-stringify@^2.0.7:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz#c406a83b6e70d9e35ce3b30a81141df30aeba884"
+  integrity sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==
+
+fastq@^1.6.0:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47"
+  integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==
+  dependencies:
+    reusify "^1.0.4"
+
+fetch-readablestream@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/fetch-readablestream/-/fetch-readablestream-0.2.0.tgz#eaa6d1a76b12de2d4731a343393c6ccdcfe2c795"
+  integrity sha512-qu4mXWf4wus4idBIN/kVH+XSer8IZ9CwHP+Pd7DL7TuKNC1hP7ykon4kkBjwJF3EMX2WsFp4hH7gU7CyL7ucXw==
+
+file-entry-cache@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027"
+  integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==
+  dependencies:
+    flat-cache "^3.0.4"
+
+fill-range@^7.0.1:
+  version "7.0.1"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40"
+  integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==
+  dependencies:
+    to-regex-range "^5.0.1"
+
+fill-range@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292"
+  integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==
+  dependencies:
+    to-regex-range "^5.0.1"
+
+find-root@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4"
+  integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==
+
+find-up@^5.0.0:
+  version "5.0.0"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc"
+  integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==
+  dependencies:
+    locate-path "^6.0.0"
+    path-exists "^4.0.0"
+
+flat-cache@^3.0.4:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.2.0.tgz#2c0c2d5040c99b1632771a9d105725c0115363ee"
+  integrity sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==
+  dependencies:
+    flatted "^3.2.9"
+    keyv "^4.5.3"
+    rimraf "^3.0.2"
+
+flatted@^3.2.9:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a"
+  integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==
+
+for-each@^0.3.3:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e"
+  integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==
+  dependencies:
+    is-callable "^1.1.3"
+
+for-each@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.5.tgz#d650688027826920feeb0af747ee7b9421a41d47"
+  integrity sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==
+  dependencies:
+    is-callable "^1.2.7"
+
+form-data@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452"
+  integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.8"
+    mime-types "^2.1.12"
+
+fraction.js@^4.3.7:
+  version "4.3.7"
+  resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.3.7.tgz#06ca0085157e42fda7f9e726e79fefc4068840f7"
+  integrity sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+  integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
+
+fsevents@~2.3.2, fsevents@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
+  integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==
+
+function-bind@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c"
+  integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
+
+function.prototype.name@^1.1.6:
+  version "1.1.6"
+  resolved
"https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" + integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + functions-have-names "^1.2.3" + +function.prototype.name@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.7.tgz#9df48ea5f746bf577d7e15b5da89df8952a98e7b" + integrity sha512-2g4x+HqTJKM9zcJqBSpjoRmdcPFtJM60J3xJisTQSXBWka5XqyBN/2tNUgma1mztTXyDuUsEtYe5qcs7xYzYQA== + dependencies: + call-bind "^1.0.8" + define-properties "^1.2.1" + functions-have-names "^1.2.3" + hasown "^2.0.2" + is-callable "^1.2.7" + +function.prototype.name@^1.1.8: + version "1.1.8" + resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.8.tgz#e68e1df7b259a5c949eeef95cdbde53edffabb78" + integrity sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.3" + define-properties "^1.2.1" + functions-have-names "^1.2.3" + hasown "^2.0.2" + is-callable "^1.2.7" + +functions-have-names@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" + integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-caller-file@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-func-name@^2.0.1, get-func-name@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/get-func-name/-/get-func-name-2.0.2.tgz#0d7cf20cd13fda808669ffa88f4ffc7a3943fc41" + integrity sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ== + +get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" + hasown "^2.0.0" + +get-intrinsic@^1.2.5, get-intrinsic@^1.2.6: + version "1.2.6" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.6.tgz#43dd3dd0e7b49b82b2dfcad10dc824bf7fc265d5" + integrity sha512-qxsEs+9A+u85HhllWJJFicJfPDhRmjzoYdl64aMWW9yRIJmSyxdn8IEkuIM530/7T+lv0TIHd8L6Q/ra0tEoeA== + dependencies: + call-bind-apply-helpers "^1.0.1" + dunder-proto "^1.0.0" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + function-bind "^1.1.2" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.0.0" + +get-intrinsic@^1.2.7, 
get-intrinsic@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + +get-proto@^1.0.0, get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + +get-stream@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" + integrity sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA== + +get-symbol-description@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5" + integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg== + dependencies: + call-bind "^1.0.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + +get-symbol-description@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.1.0.tgz#7bdd54e0befe8ffc9f3b4e203220d9f1e881b6ee" + integrity sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg== + dependencies: + call-bound "^1.0.3" + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + +glob-parent@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-parent@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" + integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== + dependencies: + is-glob "^4.0.3" + +glob@^7.1.3: + version "7.2.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.1.1" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globals@^13.19.0: + version "13.24.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-13.24.0.tgz#8432a19d78ce0c1e833949c36adb345400bb1171" + integrity sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ== + dependencies: + type-fest "^0.20.2" 
+ +globalthis@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.3.tgz#5852882a52b80dc301b0660273e1ed082f0b6ccf" + integrity sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA== + dependencies: + define-properties "^1.1.3" + +globalthis@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" + integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== + dependencies: + define-properties "^1.2.1" + gopd "^1.0.1" + +globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" + +gopd@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" + integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== + dependencies: + get-intrinsic "^1.1.3" + +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== + +graphemer@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" + integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag== + +has-bigints@^1.0.1, has-bigints@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" + integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== + dependencies: + es-define-property "^1.0.0" + +has-proto@^1.0.1, has-proto@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" + integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== + +has-proto@^1.2.0: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/has-proto/-/has-proto-1.2.0.tgz#5de5a6eabd95fdffd9818b43055e8065e39fe9d5" + integrity sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ== + dependencies: + dunder-proto "^1.0.0" + +has-symbols@^1.0.2, has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + +has-tostringtag@^1.0.0, has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + +hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + +highlight-words@1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/highlight-words/-/highlight-words-1.2.2.tgz#9875b75d11814d7356b24f23feeb7d77761fa867" + integrity sha512-Mf4xfPXYm8Ay1wTibCrHpNWeR2nUMynMVFkXCi4mbl+TEgmNOe+I4hV7W3OCZcSvzGL6kupaqpfHOemliMTGxQ== + +hoist-non-react-statics@^3.3.1: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + +html-encoding-sniffer@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448" + integrity sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ== + dependencies: + whatwg-encoding "^3.1.1" + +html-parse-stringify@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/html-parse-stringify/-/html-parse-stringify-3.0.1.tgz#dfc1017347ce9f77c8141a507f233040c59c55d2" + integrity sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg== + dependencies: + void-elements "3.1.0" + +http-proxy-agent@^7.0.2: + version "7.0.2" + resolved "https://registry.yarnpkg.com/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz#9a8b1f246866c028509486585f62b8f2c18c270e" + integrity sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig== + dependencies: + agent-base "^7.1.0" + debug "^4.3.4" + +http2-client@^1.2.5: + version "1.3.5" + resolved "https://registry.yarnpkg.com/http2-client/-/http2-client-1.3.5.tgz#20c9dc909e3cc98284dd20af2432c524086df181" + integrity sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA== + +https-proxy-agent@^7.0.5: + version "7.0.6" + resolved 
"https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz#da8dfeac7da130b05c2ba4b59c9b6cd66611a6b9" + integrity sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw== + dependencies: + agent-base "^7.1.2" + debug "4" + +human-signals@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-5.0.0.tgz#42665a284f9ae0dade3ba41ebc37eb4b852f3a28" + integrity sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ== + +i18next-browser-languagedetector@^7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/i18next-browser-languagedetector/-/i18next-browser-languagedetector-7.2.2.tgz#748e7dc192847613911d8a79d9d9a6c2d266133e" + integrity sha512-6b7r75uIJDWCcCflmbof+sJ94k9UQO4X0YR62oUfqGI/GjCLVzlCwu8TFdRZIqVLzWbzNcmkmhfqKEr4TLz4HQ== + dependencies: + "@babel/runtime" "^7.23.2" + +i18next-fs-backend@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/i18next-fs-backend/-/i18next-fs-backend-2.6.0.tgz#7b6b54c5ffc2a5073e47eda0673c002376fa1a3c" + integrity sha512-3ZlhNoF9yxnM8pa8bWp5120/Ob6t4lVl1l/tbLmkml/ei3ud8IWySCHt2lrY5xWRlSU5D9IV2sm5bEbGuTqwTw== + +i18next-http-backend@^2.7.3: + version "2.7.3" + resolved "https://registry.yarnpkg.com/i18next-http-backend/-/i18next-http-backend-2.7.3.tgz#335e5884b4c5446cdb4817cb35e81ee4ac7be9f4" + integrity sha512-FgZxrXdRA5u44xfYsJlEBL4/KH3f2IluBpgV/7riW0YW2VEyM8FzVt2XHAOi6id0Ppj7vZvCZVpp5LrGXnc8Ig== + dependencies: + cross-fetch "4.0.0" + +i18next@^23.16.8: + version "23.16.8" + resolved "https://registry.yarnpkg.com/i18next/-/i18next-23.16.8.tgz#3ae1373d344c2393f465556f394aba5a9233b93a" + integrity sha512-06r/TitrM88Mg5FdUXAKL96dJMzgqLE5dv3ryBAra4KCwD9mJ4ndOTS95ZuymIGoE+2hzfdaMak2X11/es7ZWg== + dependencies: + "@babel/runtime" "^7.23.2" + +iconv-lite@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +ignore@^5.2.0, ignore@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.1.tgz#5073e554cd42c5b33b394375f538b8593e34d4ef" + integrity sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw== + +immer@^10.0.3: + version "10.1.1" + resolved "https://registry.yarnpkg.com/immer/-/immer-10.1.1.tgz#206f344ea372d8ea176891545ee53ccc062db7bc" + integrity sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw== + +immutable@>=3.8.2: + version "4.3.6" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.3.6.tgz#6a05f7858213238e587fb83586ffa3b4b27f0447" + integrity sha512-Ju0+lEMyzMVZarkTn/gqRpdqd5dOPaz1mCZ0SH3JV6iFw81PldE/PEB1hWVEA288HPt4WXW8O7AWxB10M+03QQ== + +immutable@^3.8.2: + version "3.8.2" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" + integrity sha512-15gZoQ38eYjEjxkorfbcgBKBL6R7T459OuK+CpcWt7O3KF4uPCx2tD0uFETlUDIyo+1789crbMhTvQBSR5yBMg== + +immutable@^5.0.2: + version "5.0.3" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-5.0.3.tgz#aa037e2313ea7b5d400cd9298fa14e404c933db1" + integrity 
sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw== + +import-fresh@^3.2.1, import-fresh@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +internal-slot@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802" + integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g== + dependencies: + es-errors "^1.3.0" + hasown "^2.0.0" + side-channel "^1.0.4" + +internal-slot@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.1.0.tgz#1eac91762947d2f7056bc838d93e13b2e9604961" + integrity sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw== + dependencies: + es-errors "^1.3.0" + hasown "^2.0.2" + side-channel "^1.1.0" + +ip-regex@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-5.0.0.tgz#cd313b2ae9c80c07bd3851e12bf4fa4dc5480632" + integrity sha512-fOCG6lhoKKakwv+C6KdsOnGvgXnmgfmp0myi3bcNwj3qfwPAxRKWEuFhvEFF7ceYIz6+1jRZ+yguLFAmUNPEfw== + +is-array-buffer@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98" + integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.1" + +is-array-buffer@^3.0.5: + version "3.0.5" + resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.5.tgz#65742e1e687bd2cc666253068fd8707fe4d44280" + integrity sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.3" + get-intrinsic "^1.2.6" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity 
sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-async-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-async-function/-/is-async-function-2.0.0.tgz#8e4418efd3e5d3a6ebb0164c05ef5afb69aa9646" + integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA== + dependencies: + has-tostringtag "^1.0.0" + +is-bigint@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" + integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg== + dependencies: + has-bigints "^1.0.1" + +is-bigint@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.1.0.tgz#dda7a3445df57a42583db4228682eba7c4170672" + integrity sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ== + dependencies: + has-bigints "^1.0.2" + +is-boolean-object@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" + integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-boolean-object@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.2.1.tgz#c20d0c654be05da4fbc23c562635c019e93daf89" + integrity sha512-l9qO6eFlUETHtuihLcYOaLKByJ1f+N4kthcU9YjHy3N+B3hWv0y/2Nd0mu/7lTFnRQHTrSdXF50HQ3bl5fEnng== + dependencies: + call-bound "^1.0.2" + has-tostringtag "^1.0.2" + +is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" + integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== + +is-core-module@^2.13.0: + version "2.13.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" + integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== + dependencies: + hasown "^2.0.0" + +is-data-view@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f" + integrity sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w== + dependencies: + is-typed-array "^1.1.13" + +is-data-view@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.2.tgz#bae0a41b9688986c2188dda6657e56b8f9e63b8e" + integrity sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw== + dependencies: + call-bound "^1.0.2" + get-intrinsic "^1.2.6" + is-typed-array "^1.1.13" + +is-date-object@^1.0.1, is-date-object@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" + integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== + dependencies: + has-tostringtag "^1.0.0" + +is-date-object@^1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.1.0.tgz#ad85541996fc7aa8b2729701d27b7319f95d82f7" + integrity sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg== + dependencies: + call-bound "^1.0.2" + has-tostringtag "^1.0.2" + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== + +is-finalizationregistry@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-finalizationregistry/-/is-finalizationregistry-1.1.0.tgz#d74a7d0c5f3578e34a20729e69202e578d495dc2" + integrity sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA== + dependencies: + call-bind "^1.0.7" + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-generator-function@^1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.10.tgz#f1558baf1ac17e0deea7c0415c438351ff2b3c72" + integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A== + dependencies: + has-tostringtag "^1.0.0" + +is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + +is-map@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e" + integrity sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw== + +is-negative-zero@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747" + integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw== + +is-number-object@^1.0.4: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc" + integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ== + dependencies: + has-tostringtag "^1.0.0" + +is-number-object@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.1.1.tgz#144b21e95a1bc148205dcc2814a9134ec41b2541" + integrity sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw== + dependencies: + call-bound "^1.0.3" + has-tostringtag "^1.0.2" + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-path-inside@^3.0.3: + version "3.0.3" + resolved 
"https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" + integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== + +is-potential-custom-element-name@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz#171ed6f19e3ac554394edf78caa05784a45bebb5" + integrity sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ== + +is-regex@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" + integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-regex@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.2.1.tgz#76d70a3ed10ef9be48eb577887d74205bf0cad22" + integrity sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g== + dependencies: + call-bound "^1.0.2" + gopd "^1.2.0" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + +is-set@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d" + integrity sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg== + +is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688" + integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg== + dependencies: + call-bind "^1.0.7" + +is-shared-array-buffer@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz#9b67844bd9b7f246ba0708c3a93e34269c774f6f" + integrity sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A== + dependencies: + call-bound "^1.0.3" + +is-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-3.0.0.tgz#e6bfd7aa6bef69f4f472ce9bb681e3e57b4319ac" + integrity sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA== + +is-string@^1.0.5, is-string@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" + integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + dependencies: + has-tostringtag "^1.0.0" + +is-string@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.1.1.tgz#92ea3f3d5c5b6e039ca8677e5ac8d07ea773cbb9" + integrity sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA== + dependencies: + call-bound "^1.0.3" + has-tostringtag "^1.0.2" + +is-symbol@^1.0.2, is-symbol@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" + integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== + dependencies: + has-symbols "^1.0.2" + +is-symbol@^1.0.4, 
is-symbol@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.1.1.tgz#f47761279f532e2b05a7024a7506dbbedacd0634" + integrity sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w== + dependencies: + call-bound "^1.0.2" + has-symbols "^1.1.0" + safe-regex-test "^1.1.0" + +is-typed-array@^1.1.13: + version "1.1.13" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229" + integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw== + dependencies: + which-typed-array "^1.1.14" + +is-typed-array@^1.1.14, is-typed-array@^1.1.15: + version "1.1.15" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.15.tgz#4bfb4a45b61cee83a5a46fba778e4e8d59c0ce0b" + integrity sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ== + dependencies: + which-typed-array "^1.1.16" + +is-weakmap@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd" + integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w== + +is-weakref@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2" + integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ== + dependencies: + call-bind "^1.0.2" + +is-weakref@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.1.0.tgz#47e3472ae95a63fa9cf25660bcf0c181c39770ef" + integrity sha512-SXM8Nwyys6nT5WP6pltOwKytLV7FqQ4UiibxVmW+EIosHcmCqkkjViTb5SNssDlkCiEYRP1/pdWUKVvZBmsR2Q== + dependencies: + call-bound "^1.0.2" + +is-weakset@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.3.tgz#e801519df8c0c43e12ff2834eead84ec9e624007" + integrity sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== + +iterator.prototype@^1.1.4: + version "1.1.5" + resolved "https://registry.yarnpkg.com/iterator.prototype/-/iterator.prototype-1.1.5.tgz#12c959a29de32de0aa3bbbb801f4d777066dae39" + integrity sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g== + dependencies: + define-data-property "^1.1.4" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.6" + get-proto "^1.0.0" + has-symbols "^1.1.0" + set-function-name "^2.0.2" + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity 
sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-tokens@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-9.0.0.tgz#0f893996d6f3ed46df7f0a3b12a03f5fd84223c1" + integrity sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ== + +js-yaml@^3.13.1: + version "3.14.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + +jsdom@^24.1.3: + version "24.1.3" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-24.1.3.tgz#88e4a07cb9dd21067514a619e9f17b090a394a9f" + integrity sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ== + dependencies: + cssstyle "^4.0.1" + data-urls "^5.0.0" + decimal.js "^10.4.3" + form-data "^4.0.0" + html-encoding-sniffer "^4.0.0" + http-proxy-agent "^7.0.2" + https-proxy-agent "^7.0.5" + is-potential-custom-element-name "^1.0.1" + nwsapi "^2.2.12" + parse5 "^7.1.2" + rrweb-cssom "^0.7.1" + saxes "^6.0.0" + symbol-tree "^3.2.4" + tough-cookie "^4.1.4" + w3c-xmlserializer "^5.0.0" + webidl-conversions "^7.0.0" + whatwg-encoding "^3.1.1" + whatwg-mimetype "^4.0.0" + whatwg-url "^14.0.0" + ws "^8.18.0" + xml-name-validator "^5.0.0" + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +jsesc@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d" + integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA== + +json-buffer@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" + integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== + +json-parse-even-better-errors@^2.3.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + 
+json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== + +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + +"jsx-ast-utils@^2.4.1 || ^3.0.0": + version "3.3.5" + resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz#4766bd05a8e2a11af222becd19e15575e52a853a" + integrity sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ== + dependencies: + array-includes "^3.1.6" + array.prototype.flat "^1.3.1" + object.assign "^4.1.4" + object.values "^1.1.6" + +keyv@^4.5.3: + version "4.5.4" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93" + integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== + dependencies: + json-buffer "3.0.1" + +levn@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade" + integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== + dependencies: + prelude-ls "^1.2.1" + type-check "~0.4.0" + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +local-pkg@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/local-pkg/-/local-pkg-0.5.0.tgz#093d25a346bae59a99f80e75f6e9d36d7e8c925c" + integrity sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg== + dependencies: + mlly "^1.4.2" + pkg-types "^1.0.3" + +locate-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash@^4.17.21, lodash@^4.17.4: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +loupe@^2.3.6, loupe@^2.3.7: + version "2.3.7" + resolved 
"https://registry.yarnpkg.com/loupe/-/loupe-2.3.7.tgz#6e69b7d4db7d3ab436328013d37d1c8c3540c697" + integrity sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA== + dependencies: + get-func-name "^2.0.1" + +lower-case@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-2.0.2.tgz#6fa237c63dbdc4a82ca0fd882e4722dc5e634e28" + integrity sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg== + dependencies: + tslib "^2.0.3" + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +lz-string@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/lz-string/-/lz-string-1.5.0.tgz#c1ab50f77887b712621201ba9fd4e3a6ed099941" + integrity sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ== + +magic-string@^0.30.5: + version "0.30.10" + resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.30.10.tgz#123d9c41a0cb5640c892b041d4cfb3bd0aa4b39e" + integrity sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ== + dependencies: + "@jridgewell/sourcemap-codec" "^1.4.15" + +material-react-table@^2.13.3: + version "2.13.3" + resolved "https://registry.yarnpkg.com/material-react-table/-/material-react-table-2.13.3.tgz#c61de4105efb3eb09697ed5fc2544d174675de31" + integrity sha512-xeyAEG6UYG3qgBIo17epAP5zsWT1pH0uCEkaUxvhki9sGcP35OqfOMSZJNhISvmqEqXKYHdqKbZI6iOwsg1sYA== + dependencies: + "@tanstack/match-sorter-utils" "8.19.4" + "@tanstack/react-table" "8.20.5" + "@tanstack/react-virtual" "3.10.6" + highlight-words "1.2.2" + +math-intrinsics@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.0.0.tgz#4e04bf87c85aa51e90d078dac2252b4eb5260817" + integrity sha512-4MqMiKP90ybymYvsut0CH2g4XWbfLtmlCkXmtmdcDCxNB+mQcu1w/1+L/VD7vi/PSv7X2JYV7SCcR+jiPXnQtA== + +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.3.0, merge2@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +micromatch@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + 
integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + dependencies: + braces "^3.0.2" + picomatch "^2.3.1" + +micromatch@^4.0.5: + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== + dependencies: + braces "^3.0.3" + picomatch "^2.3.1" + +mime-db@1.52.0: + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.12: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +mimic-fn@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc" + integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw== + +min-indent@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/min-indent/-/min-indent-1.0.1.tgz#a63f681673b30571fbe8bc25686ae746eefa9869" + integrity sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg== + +minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimatch@^9.0.4: + version "9.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" + integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== + dependencies: + brace-expansion "^2.0.1" + +minimist@^1.2.8: + version "1.2.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== + +mitt@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mitt/-/mitt-1.2.0.tgz#cb24e6569c806e31bd4e3995787fe38a04fdf90d" + integrity sha512-r6lj77KlwqLhIUku9UWYes7KJtsczvolZkzp8hbaDPPaE24OmWl5s539Mytlj22siEQKosZ26qCBgda2PKwoJw== + +mlly@^1.4.2, mlly@^1.6.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/mlly/-/mlly-1.7.0.tgz#587383ae40dda23cadb11c3c3cc972b277724271" + integrity sha512-U9SDaXGEREBYQgfejV97coK0UL1r+qnF2SyO9A3qcI8MzKnsIFKHNVEkrDyNncQTKQQumsasmeq84eNMdBfsNQ== + dependencies: + acorn "^8.11.3" + pathe "^1.1.2" + pkg-types "^1.1.0" + ufo "^1.5.3" + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +nanoid@^3.3.7: + version "3.3.7" + resolved 
"https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" + integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== + +no-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-3.0.4.tgz#d361fd5c9800f558551a8369fc0dcd4662b6124d" + integrity sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg== + dependencies: + lower-case "^2.0.2" + tslib "^2.0.3" + +node-addon-api@^7.0.0: + version "7.1.1" + resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-7.1.1.tgz#1aba6693b0f255258a049d621329329322aad558" + integrity sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ== + +node-fetch-h2@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz#c6188325f9bd3d834020bf0f2d6dc17ced2241ac" + integrity sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg== + dependencies: + http2-client "^1.2.5" + +node-fetch@^2.6.1, node-fetch@^2.6.12: + version "2.7.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" + integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== + dependencies: + whatwg-url "^5.0.0" + +node-readfiles@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/node-readfiles/-/node-readfiles-0.2.0.tgz#dbbd4af12134e2e635c245ef93ffcf6f60673a5d" + integrity sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA== + dependencies: + es6-promise "^3.2.1" + +node-releases@^2.0.14: + version "2.0.14" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b" + integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw== + +node-releases@^2.0.19: + version "2.0.19" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.19.tgz#9e445a52950951ec4d177d843af370b411caf314" + integrity sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw== + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA== + +normalize.css@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/normalize.css/-/normalize.css-8.0.1.tgz#9b98a208738b9cc2634caacbc42d131c97487bf3" + integrity sha512-qizSNPO93t1YUuUhP22btGOo3chcvDFqFaj2TRybP0DMxkHOCTYwp3n34fel4a31ORXy4m1Xq0Gyqpb5m33qIg== + +npm-run-path@^5.1.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-5.3.0.tgz#e23353d0ebb9317f174e93417e4a4d82d0249e9f" + integrity sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ== + dependencies: + path-key "^4.0.0" + 
+nwsapi@^2.2.12: + version "2.2.16" + resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.16.tgz#177760bba02c351df1d2644e220c31dfec8cdb43" + integrity sha512-F1I/bimDpj3ncaNDhfyMWuFqmQDBwDB0Fogc2qpL3BWvkQteFD/8BzWuIRl83rq0DXfm8SGt/HFhLXZyljTXcQ== + +oas-kit-common@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/oas-kit-common/-/oas-kit-common-1.0.8.tgz#6d8cacf6e9097967a4c7ea8bcbcbd77018e1f535" + integrity sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ== + dependencies: + fast-safe-stringify "^2.0.7" + +oas-linter@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/oas-linter/-/oas-linter-3.2.2.tgz#ab6a33736313490659035ca6802dc4b35d48aa1e" + integrity sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ== + dependencies: + "@exodus/schemasafe" "^1.0.0-rc.2" + should "^13.2.1" + yaml "^1.10.0" + +oas-resolver@^2.5.6: + version "2.5.6" + resolved "https://registry.yarnpkg.com/oas-resolver/-/oas-resolver-2.5.6.tgz#10430569cb7daca56115c915e611ebc5515c561b" + integrity sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ== + dependencies: + node-fetch-h2 "^2.3.0" + oas-kit-common "^1.0.8" + reftools "^1.1.9" + yaml "^1.10.0" + yargs "^17.0.1" + +oas-schema-walker@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/oas-schema-walker/-/oas-schema-walker-1.1.5.tgz#74c3cd47b70ff8e0b19adada14455b5d3ac38a22" + integrity sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ== + +oas-validator@^5.0.8: + version "5.0.8" + resolved "https://registry.yarnpkg.com/oas-validator/-/oas-validator-5.0.8.tgz#387e90df7cafa2d3ffc83b5fb976052b87e73c28" + integrity sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw== + dependencies: + call-me-maybe "^1.0.1" + oas-kit-common "^1.0.8" + oas-linter "^3.2.2" + oas-resolver "^2.5.6" + oas-schema-walker "^1.1.5" + reftools "^1.1.9" + should "^13.2.1" + yaml "^1.10.0" + +oazapfts@^4.8.0: + version "4.12.0" + resolved "https://registry.yarnpkg.com/oazapfts/-/oazapfts-4.12.0.tgz#8a86c5fe5a1237b16b05d06d05815cffa2a2b949" + integrity sha512-hNKRG4eLYceuJuqDDx7Uqsi8p3j5k83gNKSo2qnUOTiiU03sCQOjXxOqCXDbzRcuDFyK94+1PBIpotK4NoxIjw== + dependencies: + "@apidevtools/swagger-parser" "^10.1.0" + lodash "^4.17.21" + minimist "^1.2.8" + swagger2openapi "^7.0.8" + typescript "^5.2.2" + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +object-inspect@^1.13.1: + version "1.13.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" + integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== + +object-inspect@^1.13.3: + version "1.13.3" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.3.tgz#f14c183de51130243d6d18ae149375ff50ea488a" + integrity sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA== + +object-keys@^1.1.1: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object.assign@^4.1.4, object.assign@^4.1.5: + version "4.1.5" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0" + integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + has-symbols "^1.0.3" + object-keys "^1.1.1" + +object.assign@^4.1.7: + version "4.1.7" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.7.tgz#8c14ca1a424c6a561b0bb2a22f66f5049a945d3d" + integrity sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.3" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + has-symbols "^1.1.0" + object-keys "^1.1.1" + +object.entries@^1.1.8: + version "1.1.8" + resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.8.tgz#bffe6f282e01f4d17807204a24f8edd823599c41" + integrity sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +object.fromentries@^2.0.8: + version "2.0.8" + resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.8.tgz#f7195d8a9b97bd95cbc1999ea939ecd1a2b00c65" + integrity sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" + es-object-atoms "^1.0.0" + +object.values@^1.1.6: + version "1.2.0" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.2.0.tgz#65405a9d92cee68ac2d303002e0b8470a4d9ab1b" + integrity sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +object.values@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.2.1.tgz#deed520a50809ff7f75a7cfd4bc64c7a038c6216" + integrity sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.3" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" + +onetime@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-6.0.0.tgz#7c24c18ed1fd2e9bca4bd26806a33613c77d34b4" + integrity sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ== + dependencies: + mimic-fn "^4.0.0" + +optionator@^0.9.3: + version "0.9.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" + integrity sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg== + dependencies: + 
"@aashutoshrathi/word-wrap" "^1.2.3" + deep-is "^0.1.3" + fast-levenshtein "^2.0.6" + levn "^0.4.1" + prelude-ls "^1.2.1" + type-check "^0.4.0" + +own-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/own-keys/-/own-keys-1.0.1.tgz#e4006910a2bf913585289676eebd6f390cf51358" + integrity sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg== + dependencies: + get-intrinsic "^1.2.6" + object-keys "^1.1.1" + safe-push-apply "^1.0.0" + +p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-limit@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-5.0.0.tgz#6946d5b7140b649b7a33a027d89b4c625b3a5985" + integrity sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ== + dependencies: + yocto-queue "^1.0.0" + +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-json@^5.0.0, parse-json@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" + +parse5@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32" + integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw== + dependencies: + entities "^4.4.0" + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== + +path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-key@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-4.0.0.tgz#295588dc3aee64154f877adb9d780b81c554bf18" + integrity 
sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ== + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +pathe@^1.1.1, pathe@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/pathe/-/pathe-1.1.2.tgz#6c4cb47a945692e48a1ddd6e4094d170516437ec" + integrity sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ== + +pathval@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" + integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== + +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + +picocolors@^1.1.0, picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== + +picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +picomatch@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.2.tgz#77c742931e8f3b8820946c76cd0c1f13730d1dab" + integrity sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg== + +pkg-types@^1.0.3, pkg-types@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/pkg-types/-/pkg-types-1.1.0.tgz#3ec1bf33379030fd0a34c227b6c650e8ea7ca271" + integrity sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA== + dependencies: + confbox "^0.1.7" + mlly "^1.6.1" + pathe "^1.1.2" + +possible-typed-array-names@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" + integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + +postcss-value-parser@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" + integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== + +postcss@^8.4.38: + version "8.4.38" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.38.tgz#b387d533baf2054288e337066d81c6bee9db9e0e" + integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A== + dependencies: 
+ nanoid "^3.3.7" + picocolors "^1.0.0" + source-map-js "^1.2.0" + +postcss@^8.4.43: + version "8.4.49" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.49.tgz#4ea479048ab059ab3ae61d082190fabfd994fe19" + integrity sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA== + dependencies: + nanoid "^3.3.7" + picocolors "^1.1.1" + source-map-js "^1.2.1" + +prelude-ls@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== + +prettier@^2.2.1: + version "2.8.8" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da" + integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== + +prettier@^3.5.3: + version "3.5.3" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.5.3.tgz#4fc2ce0d657e7a02e602549f053b239cb7dfe1b5" + integrity sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw== + +pretty-format@^27.0.2: + version "27.5.1" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-27.5.1.tgz#2181879fdea51a7a5851fb39d920faa63f01d88e" + integrity sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ== + dependencies: + ansi-regex "^5.0.1" + ansi-styles "^5.0.0" + react-is "^17.0.1" + +pretty-format@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812" + integrity sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ== + dependencies: + "@jest/schemas" "^29.6.3" + ansi-styles "^5.0.0" + react-is "^18.0.0" + +prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2, prop-types@^15.8.1: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" + +property-expr@^2.0.5: + version "2.0.6" + resolved "https://registry.yarnpkg.com/property-expr/-/property-expr-2.0.6.tgz#f77bc00d5928a6c748414ad12882e83f24aec1e8" + integrity sha512-SVtmxhRE/CGkn3eZY1T6pC8Nln6Fr/lu1mKSgRud0eC73whjGfoAogbn78LkD8aFL0zz3bAFerKSnOl7NlErBA== + +psl@^1.1.33: + version "1.9.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" + integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== + +punycode@^2.1.0, punycode@^2.1.1, punycode@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== + +querystringify@^2.1.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" + integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== + +queue-microtask@^1.2.2: + version 
"1.2.3" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +react-dom@^18.3.1: + version "18.3.1" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-18.3.1.tgz#c2265d79511b57d479b3dd3fdfa51536494c5cb4" + integrity sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw== + dependencies: + loose-envify "^1.1.0" + scheduler "^0.23.2" + +react-error-boundary@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/react-error-boundary/-/react-error-boundary-4.1.2.tgz#bc750ad962edb8b135d6ae922c046051eb58f289" + integrity sha512-GQDxZ5Jd+Aq/qUxbCm1UtzmL/s++V7zKgE8yMktJiCQXCCFZnMZh9ng+6/Ne6PjNSXH0L9CjeOEREfRnq6Duag== + dependencies: + "@babel/runtime" "^7.12.5" + +react-hook-form@^7.54.2: + version "7.54.2" + resolved "https://registry.yarnpkg.com/react-hook-form/-/react-hook-form-7.54.2.tgz#8c26ed54c71628dff57ccd3c074b1dd377cfb211" + integrity sha512-eHpAUgUjWbZocoQYUHposymRb4ZP6d0uwUnooL2uOybA9/3tPUvoAKqEWK1WaSiTxxOfTpffNZP7QwlnM3/gEg== + +react-i18next@^14.1.3: + version "14.1.3" + resolved "https://registry.yarnpkg.com/react-i18next/-/react-i18next-14.1.3.tgz#85525c4294ef870ddd3f5d184e793cae362f47cb" + integrity sha512-wZnpfunU6UIAiJ+bxwOiTmBOAaB14ha97MjOEnLGac2RJ+h/maIYXZuTHlmyqQVX1UVHmU1YDTQ5vxLmwfXTjw== + dependencies: + "@babel/runtime" "^7.23.9" + html-parse-stringify "^3.0.1" + +react-is@^16.13.1, react-is@^16.7.0: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-is@^17.0.1: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== + +react-is@^18.0.0: + version "18.3.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e" + integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg== + +react-is@^19.0.0: + version "19.0.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-19.0.0.tgz#d6669fd389ff022a9684f708cf6fa4962d1fea7a" + integrity sha512-H91OHcwjZsbq3ClIDHMzBShc1rotbfACdWENsmEf0IFvZ3FgGPtdHMcsv45bQ1hAbgdfiA8SnxTKfDS+x/8m2g== + +react-lazylog@^4.5.3: + version "4.5.3" + resolved "https://registry.yarnpkg.com/react-lazylog/-/react-lazylog-4.5.3.tgz#289e24995b5599e75943556ac63f5e2c04d0001e" + integrity sha512-lyov32A/4BqihgXgtNXTHCajXSXkYHPlIEmV8RbYjHIMxCFSnmtdg4kDCI3vATz7dURtiFTvrw5yonHnrS+NNg== + dependencies: + "@mattiasbuelens/web-streams-polyfill" "^0.2.0" + fetch-readablestream "^0.2.0" + immutable "^3.8.2" + mitt "^1.1.2" + prop-types "^15.6.1" + react-string-replace "^0.4.1" + react-virtualized "^9.21.0" + text-encoding-utf-8 "^1.0.1" + whatwg-fetch "^2.0.4" + +react-lifecycles-compat@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" + integrity 
sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== + +react-redux@^9.2.0: + version "9.2.0" + resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-9.2.0.tgz#96c3ab23fb9a3af2cb4654be4b51c989e32366f5" + integrity sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g== + dependencies: + "@types/use-sync-external-store" "^0.0.6" + use-sync-external-store "^1.4.0" + +react-refresh@^0.14.2: + version "0.14.2" + resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.14.2.tgz#3833da01ce32da470f1f936b9d477da5c7028bf9" + integrity sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA== + +react-router-dom@^6.30.0: + version "6.30.0" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.30.0.tgz#a64774104508bff56b1affc2796daa3f7e76b7df" + integrity sha512-x30B78HV5tFk8ex0ITwzC9TTZMua4jGyA9IUlH1JLQYQTFyxr/ZxwOJq7evg1JX1qGVUcvhsmQSKdPncQrjTgA== + dependencies: + "@remix-run/router" "1.23.0" + react-router "6.30.0" + +react-router@6.30.0: + version "6.30.0" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.30.0.tgz#9789d775e63bc0df60f39ced77c8c41f1e01ff90" + integrity sha512-D3X8FyH9nBcTSHGdEKurK7r8OYE1kKFn3d/CF+CoxbSHkxU7o37+Uh7eAHRXr6k2tSExXYO++07PeXJtA/dEhQ== + dependencies: + "@remix-run/router" "1.23.0" + +react-string-replace@^0.4.1: + version "0.4.4" + resolved "https://registry.yarnpkg.com/react-string-replace/-/react-string-replace-0.4.4.tgz#24006fbe0db573d5be583133df38b1a735cb4225" + integrity sha512-FAMkhxmDpCsGTwTZg7p/2v+/GTmxAp73so3fbSvlAcBBX36ujiGRNEaM/1u+jiYQrArhns+7eE92g2pi5E5FUA== + dependencies: + lodash "^4.17.4" + +react-toastify@^10.0.6: + version "10.0.6" + resolved "https://registry.yarnpkg.com/react-toastify/-/react-toastify-10.0.6.tgz#19c364b1150f495522c738d592d1dcc93879ade1" + integrity sha512-yYjp+omCDf9lhZcrZHKbSq7YMuK0zcYkDFTzfRFgTXkTFHZ1ToxwAonzA4JI5CxA91JpjFLmwEsZEgfYfOqI1A== + dependencies: + clsx "^2.1.0" + +react-transition-group@^4.4.5: + version "4.4.5" + resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.5.tgz#e53d4e3f3344da8521489fbef8f2581d42becdd1" + integrity sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g== + dependencies: + "@babel/runtime" "^7.5.5" + dom-helpers "^5.0.1" + loose-envify "^1.4.0" + prop-types "^15.6.2" + +react-virtualized@^9.21.0: + version "9.22.5" + resolved "https://registry.yarnpkg.com/react-virtualized/-/react-virtualized-9.22.5.tgz#bfb96fed519de378b50d8c0064b92994b3b91620" + integrity sha512-YqQMRzlVANBv1L/7r63OHa2b0ZsAaDp1UhVNEdUaXI8A5u6hTpA5NYtUueLH2rFuY/27mTGIBl7ZhqFKzw18YQ== + dependencies: + "@babel/runtime" "^7.7.2" + clsx "^1.0.4" + dom-helpers "^5.1.3" + loose-envify "^1.4.0" + prop-types "^15.7.2" + react-lifecycles-compat "^3.0.4" + +react@^18.3.1: + version "18.3.1" + resolved "https://registry.yarnpkg.com/react/-/react-18.3.1.tgz#49ab892009c53933625bd16b2533fc754cab2891" + integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ== + dependencies: + loose-envify "^1.1.0" + +readdirp@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.2.tgz#388fccb8b75665da3abffe2d8f8ed59fe74c230a" + integrity 
sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA== + +redent@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/redent/-/redent-3.0.0.tgz#e557b7998316bb53c9f1f56fa626352c6963059f" + integrity sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg== + dependencies: + indent-string "^4.0.0" + strip-indent "^3.0.0" + +redux-thunk@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/redux-thunk/-/redux-thunk-3.1.0.tgz#94aa6e04977c30e14e892eae84978c1af6058ff3" + integrity sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw== + +redux@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/redux/-/redux-5.0.1.tgz#97fa26881ce5746500125585d5642c77b6e9447b" + integrity sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w== + +reflect.getprototypeof@^1.0.6: + version "1.0.8" + resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.8.tgz#c58afb17a4007b4d1118c07b92c23fca422c5d82" + integrity sha512-B5dj6usc5dkk8uFliwjwDHM8To5/QwdKz9JcBZ8Ic4G1f0YmeeJTtE/ZTdgRFPAfxZFiUaPhZ1Jcs4qeagItGQ== + dependencies: + call-bind "^1.0.8" + define-properties "^1.2.1" + dunder-proto "^1.0.0" + es-abstract "^1.23.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + gopd "^1.2.0" + which-builtin-type "^1.2.0" + +reflect.getprototypeof@^1.0.9: + version "1.0.10" + resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz#c629219e78a3316d8b604c765ef68996964e7bf9" + integrity sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw== + dependencies: + call-bind "^1.0.8" + define-properties "^1.2.1" + es-abstract "^1.23.9" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.7" + get-proto "^1.0.1" + which-builtin-type "^1.2.1" + +reftools@^1.1.9: + version "1.1.9" + resolved "https://registry.yarnpkg.com/reftools/-/reftools-1.1.9.tgz#e16e19f662ccd4648605312c06d34e5da3a2b77e" + integrity sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w== + +regenerator-runtime@^0.14.0: + version "0.14.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" + integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== + +regexp.prototype.flags@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334" + integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw== + dependencies: + call-bind "^1.0.6" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.1" + +regexp.prototype.flags@^1.5.3: + version "1.5.3" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz#b3ae40b1d2499b8350ab2c3fe6ef3845d3a96f42" + integrity sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.2" + +remove-accents@0.5.0: + version "0.5.0" + resolved 
"https://registry.yarnpkg.com/remove-accents/-/remove-accents-0.5.0.tgz#77991f37ba212afba162e375b627631315bed687" + integrity sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A== + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== + +require-from-string@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== + +reselect@^5.1.0, reselect@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/reselect/-/reselect-5.1.1.tgz#c766b1eb5d558291e5e550298adb0becc24bb72e" + integrity sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve@^1.19.0: + version "1.22.8" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" + integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== + dependencies: + is-core-module "^2.13.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +resolve@^2.0.0-next.5: + version "2.0.0-next.5" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.5.tgz#6b0ec3107e671e52b68cd068ef327173b90dc03c" + integrity sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA== + dependencies: + is-core-module "^2.13.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +rollup@^4.13.0: + version "4.16.4" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.16.4.tgz#fe328eb41293f20c9593a095ec23bdc4b5d93317" + integrity sha512-kuaTJSUbz+Wsb2ATGvEknkI12XV40vIiHmLuFlejoo7HtDok/O5eDDD0UpCVY5bBX5U5RYo8wWP83H7ZsqVEnA== + dependencies: + "@types/estree" "1.0.5" + optionalDependencies: + "@rollup/rollup-android-arm-eabi" "4.16.4" + "@rollup/rollup-android-arm64" "4.16.4" + "@rollup/rollup-darwin-arm64" "4.16.4" + "@rollup/rollup-darwin-x64" "4.16.4" + "@rollup/rollup-linux-arm-gnueabihf" "4.16.4" + 
"@rollup/rollup-linux-arm-musleabihf" "4.16.4" + "@rollup/rollup-linux-arm64-gnu" "4.16.4" + "@rollup/rollup-linux-arm64-musl" "4.16.4" + "@rollup/rollup-linux-powerpc64le-gnu" "4.16.4" + "@rollup/rollup-linux-riscv64-gnu" "4.16.4" + "@rollup/rollup-linux-s390x-gnu" "4.16.4" + "@rollup/rollup-linux-x64-gnu" "4.16.4" + "@rollup/rollup-linux-x64-musl" "4.16.4" + "@rollup/rollup-win32-arm64-msvc" "4.16.4" + "@rollup/rollup-win32-ia32-msvc" "4.16.4" + "@rollup/rollup-win32-x64-msvc" "4.16.4" + fsevents "~2.3.2" + +rollup@^4.20.0: + version "4.28.1" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.28.1.tgz#7718ba34d62b449dfc49adbfd2f312b4fe0df4de" + integrity sha512-61fXYl/qNVinKmGSTHAZ6Yy8I3YIJC/r2m9feHo6SwVAVcLT5MPwOUFe7EuURA/4m0NR8lXG4BBXuo/IZEsjMg== + dependencies: + "@types/estree" "1.0.6" + optionalDependencies: + "@rollup/rollup-android-arm-eabi" "4.28.1" + "@rollup/rollup-android-arm64" "4.28.1" + "@rollup/rollup-darwin-arm64" "4.28.1" + "@rollup/rollup-darwin-x64" "4.28.1" + "@rollup/rollup-freebsd-arm64" "4.28.1" + "@rollup/rollup-freebsd-x64" "4.28.1" + "@rollup/rollup-linux-arm-gnueabihf" "4.28.1" + "@rollup/rollup-linux-arm-musleabihf" "4.28.1" + "@rollup/rollup-linux-arm64-gnu" "4.28.1" + "@rollup/rollup-linux-arm64-musl" "4.28.1" + "@rollup/rollup-linux-loongarch64-gnu" "4.28.1" + "@rollup/rollup-linux-powerpc64le-gnu" "4.28.1" + "@rollup/rollup-linux-riscv64-gnu" "4.28.1" + "@rollup/rollup-linux-s390x-gnu" "4.28.1" + "@rollup/rollup-linux-x64-gnu" "4.28.1" + "@rollup/rollup-linux-x64-musl" "4.28.1" + "@rollup/rollup-win32-arm64-msvc" "4.28.1" + "@rollup/rollup-win32-ia32-msvc" "4.28.1" + "@rollup/rollup-win32-x64-msvc" "4.28.1" + fsevents "~2.3.2" + +rrweb-cssom@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz#ed298055b97cbddcdeb278f904857629dec5e0e1" + integrity sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw== + +rrweb-cssom@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz#c73451a484b86dd7cfb1e0b2898df4b703183e4b" + integrity sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg== + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +safe-array-concat@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb" + integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + has-symbols "^1.0.3" + isarray "^2.0.5" + +safe-array-concat@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.3.tgz#c9e54ec4f603b0bbb8e7e5007a5ee7aecd1538c3" + integrity sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.2" + get-intrinsic "^1.2.6" + has-symbols "^1.1.0" + isarray "^2.0.5" + +safe-push-apply@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/safe-push-apply/-/safe-push-apply-1.0.0.tgz#01850e981c1602d398c85081f360e4e6d03d27f5" + integrity sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA== + dependencies: + es-errors "^1.3.0" + isarray "^2.0.5" + +safe-regex-test@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377" + integrity sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw== + dependencies: + call-bind "^1.0.6" + es-errors "^1.3.0" + is-regex "^1.1.4" + +safe-regex-test@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.1.0.tgz#7f87dfb67a3150782eaaf18583ff5d1711ac10c1" + integrity sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw== + dependencies: + call-bound "^1.0.2" + es-errors "^1.3.0" + is-regex "^1.2.1" + +"safer-buffer@>= 2.1.2 < 3.0.0": + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sass@^1.86.0: + version "1.86.0" + resolved "https://registry.yarnpkg.com/sass/-/sass-1.86.0.tgz#f49464fb6237a903a93f4e8760ef6e37a5030114" + integrity sha512-zV8vGUld/+mP4KbMLJMX7TyGCuUp7hnkOScgCMsWuHtns8CWBoz+vmEhoGMXsaJrbUP8gj+F1dLvVe79sK8UdA== + dependencies: + chokidar "^4.0.0" + immutable "^5.0.2" + source-map-js ">=0.6.2 <2.0.0" + optionalDependencies: + "@parcel/watcher" "^2.4.1" + +saxes@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/saxes/-/saxes-6.0.0.tgz#fe5b4a4768df4f14a201b1ba6a65c1f3d9988cc5" + integrity sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA== + dependencies: + xmlchars "^2.2.0" + +scheduler@^0.23.2: + version "0.23.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.2.tgz#414ba64a3b282892e944cf2108ecc078d115cdc3" + integrity sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ== + dependencies: + loose-envify "^1.1.0" + +semver@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== + +semver@^7.3.5: + version "7.6.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13" + integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w== + +semver@^7.6.0: + version "7.6.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.0.tgz#1a46a4db4bffcccd97b743b5005c8325f23d4e2d" + integrity sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg== + dependencies: + lru-cache "^6.0.0" + +set-function-length@^1.2.1, set-function-length@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + 
function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + +set-function-name@^2.0.1, set-function-name@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + functions-have-names "^1.2.3" + has-property-descriptors "^1.0.2" + +set-proto@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/set-proto/-/set-proto-1.0.0.tgz#0760dbcff30b2d7e801fd6e19983e56da337565e" + integrity sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw== + dependencies: + dunder-proto "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +should-equal@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/should-equal/-/should-equal-2.0.0.tgz#6072cf83047360867e68e98b09d71143d04ee0c3" + integrity sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA== + dependencies: + should-type "^1.4.0" + +should-format@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/should-format/-/should-format-3.0.3.tgz#9bfc8f74fa39205c53d38c34d717303e277124f1" + integrity sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q== + dependencies: + should-type "^1.3.0" + should-type-adaptors "^1.0.1" + +should-type-adaptors@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz#401e7f33b5533033944d5cd8bf2b65027792e27a" + integrity sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA== + dependencies: + should-type "^1.3.0" + should-util "^1.0.0" + +should-type@^1.3.0, should-type@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/should-type/-/should-type-1.4.0.tgz#0756d8ce846dfd09843a6947719dfa0d4cff5cf3" + integrity sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ== + +should-util@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/should-util/-/should-util-1.0.1.tgz#fb0d71338f532a3a149213639e2d32cbea8bcb28" + integrity sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g== + +should@^13.2.1: + version "13.2.3" + resolved "https://registry.yarnpkg.com/should/-/should-13.2.3.tgz#96d8e5acf3e97b49d89b51feaa5ae8d07ef58f10" + integrity sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ== + dependencies: + should-equal "^2.0.0" + should-format "^3.0.3" + should-type "^1.4.0" + should-type-adaptors "^1.0.1" + should-util "^1.0.0" + 
+side-channel-list@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/side-channel-list/-/side-channel-list-1.0.0.tgz#10cb5984263115d3b7a0e336591e290a830af8ad" + integrity sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA== + dependencies: + es-errors "^1.3.0" + object-inspect "^1.13.3" + +side-channel-map@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/side-channel-map/-/side-channel-map-1.0.1.tgz#d6bb6b37902c6fef5174e5f533fab4c732a26f42" + integrity sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA== + dependencies: + call-bound "^1.0.2" + es-errors "^1.3.0" + get-intrinsic "^1.2.5" + object-inspect "^1.13.3" + +side-channel-weakmap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz#11dda19d5368e40ce9ec2bdc1fb0ecbc0790ecea" + integrity sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A== + dependencies: + call-bound "^1.0.2" + es-errors "^1.3.0" + get-intrinsic "^1.2.5" + object-inspect "^1.13.3" + side-channel-map "^1.0.1" + +side-channel@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" + +side-channel@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.1.0.tgz#c3fcff9c4da932784873335ec9765fa94ff66bc9" + integrity sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw== + dependencies: + es-errors "^1.3.0" + object-inspect "^1.13.3" + side-channel-list "^1.0.0" + side-channel-map "^1.0.1" + side-channel-weakmap "^1.0.2" + +siginfo@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/siginfo/-/siginfo-2.0.0.tgz#32e76c70b79724e3bb567cb9d543eb858ccfaf30" + integrity sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g== + +signal-exit@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" + integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +snake-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/snake-case/-/snake-case-3.0.4.tgz#4f2bbd568e9935abdfd593f34c691dadb49c452c" + integrity sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg== + dependencies: + dot-case "^3.0.4" + tslib "^2.0.3" + +"source-map-js@>=0.6.2 <2.0.0", source-map-js@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af" + integrity sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg== + +source-map-js@^1.2.1: + version "1.2.1" + resolved 
"https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== + +source-map-support@0.5.21: + version "0.5.21" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== + +source-map@^0.6.0: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== + +stackback@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/stackback/-/stackback-0.0.2.tgz#1ac8a0d9483848d1695e418b6d031a3c3ce68e3b" + integrity sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw== + +state-local@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/state-local/-/state-local-1.0.7.tgz#da50211d07f05748d53009bee46307a37db386d5" + integrity sha512-HTEHMNieakEnoe33shBYcZ7NX83ACUjCu8c40iOGEZsngj9zRnkqS9j1pqQPXwobB0ZcVTk27REb7COQ0UR59w== + +std-env@^3.5.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.7.0.tgz#c9f7386ced6ecf13360b6c6c55b8aaa4ef7481d2" + integrity sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg== + +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string.prototype.matchall@^4.0.12: + version "4.0.12" + resolved "https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.12.tgz#6c88740e49ad4956b1332a911e949583a275d4c0" + integrity sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.3" + define-properties "^1.2.1" + es-abstract "^1.23.6" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.6" + gopd "^1.2.0" + has-symbols "^1.1.0" + internal-slot "^1.1.0" + regexp.prototype.flags "^1.5.3" + set-function-name "^2.0.2" + side-channel "^1.1.0" + +string.prototype.repeat@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz#e90872ee0308b29435aa26275f6e1b762daee01a" + integrity 
sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w== + dependencies: + define-properties "^1.1.3" + es-abstract "^1.17.5" + +string.prototype.trim@^1.2.10: + version "1.2.10" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz#40b2dd5ee94c959b4dcfb1d65ce72e90da480c81" + integrity sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.2" + define-data-property "^1.1.4" + define-properties "^1.2.1" + es-abstract "^1.23.5" + es-object-atoms "^1.0.0" + has-property-descriptors "^1.0.2" + +string.prototype.trim@^1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz#b6fa326d72d2c78b6df02f7759c73f8f6274faa4" + integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.0" + es-object-atoms "^1.0.0" + +string.prototype.trimend@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz#3651b8513719e8a9f48de7f2f77640b26652b229" + integrity sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +string.prototype.trimend@^1.0.9: + version "1.0.9" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz#62e2731272cd285041b36596054e9f66569b6942" + integrity sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ== + dependencies: + call-bind "^1.0.8" + call-bound "^1.0.2" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +string.prototype.trimstart@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde" + integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-final-newline@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-3.0.0.tgz#52894c313fbff318835280aed60ff71ebf12b8fd" + integrity sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw== + +strip-indent@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-3.0.0.tgz#c32e1cee940b6b3432c771bc2c54bcce73cd3001" + integrity sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ== + dependencies: + min-indent "^1.0.0" + +strip-json-comments@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + integrity 
sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== + +strip-literal@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/strip-literal/-/strip-literal-2.1.0.tgz#6d82ade5e2e74f5c7e8739b6c84692bd65f0bd2a" + integrity sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw== + dependencies: + js-tokens "^9.0.0" + +stylis@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51" + integrity sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw== + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +svg-parser@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5" + integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ== + +swagger2openapi@^7.0.4, swagger2openapi@^7.0.8: + version "7.0.8" + resolved "https://registry.yarnpkg.com/swagger2openapi/-/swagger2openapi-7.0.8.tgz#12c88d5de776cb1cbba758994930f40ad0afac59" + integrity sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g== + dependencies: + call-me-maybe "^1.0.1" + node-fetch "^2.6.1" + node-fetch-h2 "^2.3.0" + node-readfiles "^0.2.0" + oas-kit-common "^1.0.8" + oas-resolver "^2.5.6" + oas-schema-walker "^1.1.5" + oas-validator "^5.0.8" + reftools "^1.1.9" + yaml "^1.10.0" + yargs "^17.0.1" + +symbol-tree@^3.2.4: + version "3.2.4" + resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" + integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== + +text-encoding-utf-8@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/text-encoding-utf-8/-/text-encoding-utf-8-1.0.2.tgz#585b62197b0ae437e3c7b5d0af27ac1021e10d13" + integrity sha512-8bw4MY9WjdsD2aMtO0OzOCY3pXGYNx2d2FfHRVUKkiCPDWjKuOlhLVASS+pD7VkLTVjW268LYJHwsnPFlBpbAg== + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== + +tiny-case@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/tiny-case/-/tiny-case-1.0.3.tgz#d980d66bc72b5d5a9ca86fb7c9ffdb9c898ddd03" + integrity 
sha512-Eet/eeMhkO6TX8mnUteS9zgPbUMQa4I6Kkp5ORiBD5476/m+PIRiumP5tmh5ioJpH7k51Kehawy2UDfsnxxY8Q== + +tinybench@^2.5.1: + version "2.8.0" + resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-2.8.0.tgz#30e19ae3a27508ee18273ffed9ac7018949acd7b" + integrity sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw== + +tinypool@^0.8.3: + version "0.8.4" + resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.4.tgz#e217fe1270d941b39e98c625dcecebb1408c9aa8" + integrity sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ== + +tinyspy@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/tinyspy/-/tinyspy-2.2.1.tgz#117b2342f1f38a0dbdcc73a50a454883adf861d1" + integrity sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A== + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +toposort@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-2.0.2.tgz#ae21768175d1559d48bef35420b2f4962f09c330" + integrity sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg== + +tough-cookie@^4.1.4: + version "4.1.4" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.4.tgz#945f1461b45b5a8c76821c33ea49c3ac192c1b36" + integrity sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag== + dependencies: + psl "^1.1.33" + punycode "^2.1.1" + universalify "^0.2.0" + url-parse "^1.5.3" + +tr46@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-5.0.0.tgz#3b46d583613ec7283020d79019f1335723801cec" + integrity sha512-tk2G5R2KRwBd+ZN0zaEXpmzdKyOYksXwywulIX95MBODjSzMIuQnQ3m8JxgbhnL1LeVo7lqQKsYa1O3Htl7K5g== + dependencies: + punycode "^2.3.1" + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== + +ts-api-utils@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" + integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== + +tslib@2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.0.tgz#7cecaa7f073ce680a05847aa77be941098f36dc3" + integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ== + +tslib@^2.0.3: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== + +type-check@^0.4.0, type-check@~0.4.0: + 
version "0.4.0" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== + dependencies: + prelude-ls "^1.2.1" + +type-detect@^4.0.0, type-detect@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" + integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== + +type-fest@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" + integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ== + +type-fest@^2.19.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-2.19.0.tgz#88068015bb33036a598b952e55e9311a60fd3a9b" + integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== + +typed-array-buffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3" + integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + is-typed-array "^1.1.13" + +typed-array-buffer@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz#a72395450a4869ec033fd549371b47af3a2ee536" + integrity sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw== + dependencies: + call-bound "^1.0.3" + es-errors "^1.3.0" + is-typed-array "^1.1.14" + +typed-array-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67" + integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw== + dependencies: + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + +typed-array-byte-length@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz#8407a04f7d78684f3d252aa1a143d2b77b4160ce" + integrity sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg== + dependencies: + call-bind "^1.0.8" + for-each "^0.3.3" + gopd "^1.2.0" + has-proto "^1.2.0" + is-typed-array "^1.1.14" + +typed-array-byte-offset@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063" + integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + +typed-array-byte-offset@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.3.tgz#3fa9f22567700cc86aaf86a1e7176f74b59600f2" + integrity sha512-GsvTyUHTriq6o/bHcTd0vM7OQ9JEdlvluu9YISaA7+KzDzPaIzEeDFNkTfhdE3MYcNhNi0vq/LlegYgIs5yPAw== + 
dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + reflect.getprototypeof "^1.0.6" + +typed-array-byte-offset@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz#ae3698b8ec91a8ab945016108aef00d5bff12355" + integrity sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.8" + for-each "^0.3.3" + gopd "^1.2.0" + has-proto "^1.2.0" + is-typed-array "^1.1.15" + reflect.getprototypeof "^1.0.9" + +typed-array-length@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3" + integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g== + dependencies: + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + possible-typed-array-names "^1.0.0" + +typed-array-length@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.7.tgz#ee4deff984b64be1e118b0de8c9c877d5ce73d3d" + integrity sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg== + dependencies: + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + is-typed-array "^1.1.13" + possible-typed-array-names "^1.0.0" + reflect.getprototypeof "^1.0.6" + +typescript@^5.0.0, typescript@^5.2.2: + version "5.4.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.5.tgz#42ccef2c571fdbd0f6718b1d1f5e6e5ef006f611" + integrity sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ== + +typescript@^5.8.2: + version "5.8.2" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.8.2.tgz#8170b3702f74b79db2e5a96207c15e65807999e4" + integrity sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ== + +ufo@^1.5.3: + version "1.5.3" + resolved "https://registry.yarnpkg.com/ufo/-/ufo-1.5.3.tgz#3325bd3c977b6c6cd3160bf4ff52989adc9d3344" + integrity sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw== + +unbox-primitive@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e" + integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw== + dependencies: + call-bind "^1.0.2" + has-bigints "^1.0.2" + has-symbols "^1.0.3" + which-boxed-primitive "^1.0.2" + +unbox-primitive@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.1.0.tgz#8d9d2c9edeea8460c7f35033a88867944934d1e2" + integrity sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw== + dependencies: + call-bound "^1.0.3" + has-bigints "^1.0.2" + has-symbols "^1.1.0" + which-boxed-primitive "^1.1.1" + +undici-types@~6.19.2: + version "6.19.8" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" + integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== + +universalify@^0.2.0: 
+ version "0.2.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0" + integrity sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg== + +update-browserslist-db@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4" + integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg== + dependencies: + escalade "^3.1.1" + picocolors "^1.0.0" + +update-browserslist-db@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz#80846fba1d79e82547fb661f8d141e0945755fe5" + integrity sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A== + dependencies: + escalade "^3.2.0" + picocolors "^1.1.0" + +uri-js@^4.2.2, uri-js@^4.4.1: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +url-parse@^1.5.3: + version "1.5.10" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1" + integrity sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +use-sync-external-store@^1.0.0, use-sync-external-store@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.4.0.tgz#adbc795d8eeb47029963016cefdf89dc799fcebc" + integrity sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw== + +vite-node@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.6.1.tgz#fff3ef309296ea03ceaa6ca4bb660922f5416c57" + integrity sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA== + dependencies: + cac "^6.7.14" + debug "^4.3.4" + pathe "^1.1.1" + picocolors "^1.0.0" + vite "^5.0.0" + +vite-plugin-svgr@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/vite-plugin-svgr/-/vite-plugin-svgr-4.3.0.tgz#742f16f11375996306c696ec323e4d23f6005075" + integrity sha512-Jy9qLB2/PyWklpYy0xk0UU3TlU0t2UMpJXZvf+hWII1lAmRHrOUKi11Uw8N3rxoNk7atZNYO3pR3vI1f7oi+6w== + dependencies: + "@rollup/pluginutils" "^5.1.3" + "@svgr/core" "^8.1.0" + "@svgr/plugin-jsx" "^8.1.0" + +vite@^5.0.0: + version "5.2.11" + resolved "https://registry.yarnpkg.com/vite/-/vite-5.2.11.tgz#726ec05555431735853417c3c0bfb36003ca0cbd" + integrity sha512-HndV31LWW05i1BLPMUCE1B9E9GFbOu1MbenhS58FuK6owSO5qHm7GiCotrNY1YE5rMeQSFBGmT5ZaLEjFizgiQ== + dependencies: + esbuild "^0.20.1" + postcss "^8.4.38" + rollup "^4.13.0" + optionalDependencies: + fsevents "~2.3.3" + +vite@^5.4.14: + version "5.4.14" + resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.14.tgz#ff8255edb02134df180dcfca1916c37a6abe8408" + integrity sha512-EK5cY7Q1D8JNhSaPKVK4pwBFvaTmZxEnoKXLG/U9gmdDcihQGNzFlgIvaxezFR4glP1LsuiedwMBqCXH3wZccA== + dependencies: + esbuild "^0.21.3" + postcss "^8.4.43" + rollup "^4.20.0" + optionalDependencies: + fsevents "~2.3.3" + +vitest@^1.6.1: + version "1.6.1" + 
resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.6.1.tgz#b4a3097adf8f79ac18bc2e2e0024c534a7a78d2f" + integrity sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag== + dependencies: + "@vitest/expect" "1.6.1" + "@vitest/runner" "1.6.1" + "@vitest/snapshot" "1.6.1" + "@vitest/spy" "1.6.1" + "@vitest/utils" "1.6.1" + acorn-walk "^8.3.2" + chai "^4.3.10" + debug "^4.3.4" + execa "^8.0.1" + local-pkg "^0.5.0" + magic-string "^0.30.5" + pathe "^1.1.1" + picocolors "^1.0.0" + std-env "^3.5.0" + strip-literal "^2.0.0" + tinybench "^2.5.1" + tinypool "^0.8.3" + vite "^5.0.0" + vite-node "1.6.1" + why-is-node-running "^2.2.2" + +void-elements@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/void-elements/-/void-elements-3.1.0.tgz#614f7fbf8d801f0bb5f0661f5b2f5785750e4f09" + integrity sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w== + +w3c-xmlserializer@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz#f925ba26855158594d907313cedd1476c5967f6c" + integrity sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA== + dependencies: + xml-name-validator "^5.0.0" + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== + +webidl-conversions@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a" + integrity sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g== + +whatwg-encoding@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5" + integrity sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ== + dependencies: + iconv-lite "0.6.3" + +whatwg-fetch@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz#dde6a5df315f9d39991aa17621853d720b85566f" + integrity sha512-dcQ1GWpOD/eEQ97k66aiEVpNnapVj90/+R+SXTPYGHpYBBypfKJEQjLrvMZ7YXbKm21gXd4NcuxUTjiv1YtLng== + +whatwg-mimetype@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a" + integrity sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg== + +whatwg-url@^14.0.0: + version "14.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-14.0.0.tgz#00baaa7fd198744910c4b1ef68378f2200e4ceb6" + integrity sha512-1lfMEm2IEr7RIV+f4lUNPOqfFL+pO+Xw3fJSqmjX9AbXcXcYOkCe1P6+9VBZB6n94af16NfZf+sSk0JCBZC9aw== + dependencies: + tr46 "^5.0.0" + webidl-conversions "^7.0.0" + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +which-boxed-primitive@^1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" + integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg== + dependencies: + is-bigint "^1.0.1" + is-boolean-object "^1.1.0" + is-number-object "^1.0.4" + is-string "^1.0.5" + is-symbol "^1.0.3" + +which-boxed-primitive@^1.1.0, which-boxed-primitive@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz#d76ec27df7fa165f18d5808374a5fe23c29b176e" + integrity sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA== + dependencies: + is-bigint "^1.1.0" + is-boolean-object "^1.2.1" + is-number-object "^1.1.1" + is-string "^1.1.1" + is-symbol "^1.1.1" + +which-builtin-type@^1.2.0, which-builtin-type@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/which-builtin-type/-/which-builtin-type-1.2.1.tgz#89183da1b4907ab089a6b02029cc5d8d6574270e" + integrity sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q== + dependencies: + call-bound "^1.0.2" + function.prototype.name "^1.1.6" + has-tostringtag "^1.0.2" + is-async-function "^2.0.0" + is-date-object "^1.1.0" + is-finalizationregistry "^1.1.0" + is-generator-function "^1.0.10" + is-regex "^1.2.1" + is-weakref "^1.0.2" + isarray "^2.0.5" + which-boxed-primitive "^1.1.0" + which-collection "^1.0.2" + which-typed-array "^1.1.16" + +which-collection@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0" + integrity sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw== + dependencies: + is-map "^2.0.3" + is-set "^2.0.3" + is-weakmap "^2.0.2" + is-weakset "^2.0.3" + +which-typed-array@^1.1.14, which-typed-array@^1.1.15: + version "1.1.15" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d" + integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-tostringtag "^1.0.2" + +which-typed-array@^1.1.16: + version "1.1.16" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.16.tgz#db4db429c4706feca2f01677a144278e4a8c216b" + integrity sha512-g+N+GAWiRj66DngFwHvISJd+ITsyphZvD1vChfVg6cEdnzy53GzB3oy0fUNlvhz7H7+MiqhYr26qxQShCpKTTQ== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-tostringtag "^1.0.2" + +which-typed-array@^1.1.18: + version "1.1.19" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.19.tgz#df03842e870b6b88e117524a4b364b6fc689f956" + integrity sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.8" + call-bound "^1.0.4" + for-each "^0.3.5" + get-proto "^1.0.1" + gopd "^1.2.0" + has-tostringtag "^1.0.2" + +which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity 
sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +why-is-node-running@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/why-is-node-running/-/why-is-node-running-2.2.2.tgz#4185b2b4699117819e7154594271e7e344c9973e" + integrity sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA== + dependencies: + siginfo "^2.0.0" + stackback "0.0.2" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== + +ws@^8.18.0: + version "8.18.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" + integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + +xml-name-validator@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-5.0.0.tgz#82be9b957f7afdacf961e5980f1bf227c0bf7673" + integrity sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg== + +xmlchars@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb" + integrity sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw== + +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + +yallist@^3.0.2: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yaml@^1.10.0: + version "1.10.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== + +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + +yargs@^17.0.1: + version "17.7.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade 
"^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + +yocto-queue@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251" + integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g== + +yup@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/yup/-/yup-1.6.1.tgz#8defcff9daaf9feac178029c0e13b616563ada4b" + integrity sha512-JED8pB50qbA4FOkDol0bYF/p60qSEDQqBD0/qeIrUCG1KbPBIQ776fCUNb9ldbPcSTxA69g/47XTo4TqWiuXOA== + dependencies: + property-expr "^2.0.5" + tiny-case "^1.0.3" + toposort "^2.0.2" + type-fest "^2.19.0" diff --git a/deploy_pgcluster.yml b/deploy_pgcluster.yml deleted file mode 100644 index 5fda0b744..000000000 --- a/deploy_pgcluster.yml +++ /dev/null @@ -1,163 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Deploy PostgreSQL HA Cluster (based on "Patroni" and "{{ dcs_type }}") - hosts: localhost - gather_facts: false - vars_files: - - vars/main.yml - vars: - minimal_ansible_version: 2.7.0 - tasks: - - name: Checking ansible version - fail: - msg: "Ansible version must be {{ minimal_ansible_version }} or higher" - when: ansible_version.full is version(minimal_ansible_version, '<') - -- import_playbook: etcd_cluster.yml - when: not dcs_exists|bool and dcs_type == "etcd" - tags: etcd - -- hosts: postgres_cluster - become: true - become_method: sudo - gather_facts: true - any_errors_fatal: true - vars_files: - - vars/main.yml - - vars/system.yml - - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - - - name: Checking Linux distribution - fail: - msg: "{{ ansible_distribution }} is not supported" - when: ansible_distribution not in os_valid_distributions - - - name: Checking version of OS Linux - fail: - msg: "{{ ansible_distribution_version }} of {{ ansible_distribution }} is not supported" - when: ansible_distribution_version is version_compare(os_minimum_versions[ansible_distribution], '<') - - - name: Update apt cache - apt: - update_cache: true - cache_valid_time: 3600 - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - tags: add_repo, install_packages, install_postgres - - - name: Make sure the gnupg and apt-transport-https packages are present - apt: - pkg: - - gnupg - - apt-transport-https - state: present - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - tags: add_repo, install_packages, install_postgres - - - name: Build a firewall_ports_dynamic_var - set_fact: - firewall_ports_dynamic_var: "{{ firewall_ports_dynamic_var |default([]) }} + {{ firewall_allowed_tcp_ports_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - name: Build a firewall_rules_dynamic_var - set_fact: - firewall_rules_dynamic_var: "{{ firewall_rules_dynamic_var |default([]) }} + {{ firewall_additional_rules_for[item] }}" - loop: "{{ hostvars[inventory_hostname].group_names }}" - when: 
firewall_enabled_at_boot|bool - tags: firewall - - roles: - - role: ansible-role-firewall - environment: "{{ proxy_env | default({}) }}" - vars: - firewall_allowed_tcp_ports: "{{ firewall_ports_dynamic_var|list | unique }}" - firewall_additional_rules: "{{ firewall_rules_dynamic_var|list | unique }}" - when: firewall_enabled_at_boot|bool - tags: firewall - - - role: hostname - - role: resolv_conf - - role: etc_hosts - - role: add-repository - - role: packages - - role: sudo - - role: sysctl - - role: transparent_huge_pages - - role: pam_limits - - role: io-scheduler - - role: locales - - role: timezone - - role: ntp - - role: ssh-keys - -- import_playbook: balancers.yml - when: with_haproxy_load_balancing|bool - tags: load_balancing, haproxy - -- hosts: pgbackrest:postgres_cluster - become: true - become_method: sudo - gather_facts: true - any_errors_fatal: true - vars_files: - - vars/main.yml - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - roles: - - role: pgbackrest - when: pgbackrest_install|bool - -- hosts: postgres_cluster - become: true - become_method: sudo - gather_facts: true - any_errors_fatal: true - vars_files: - - vars/main.yml - - vars/system.yml - - pre_tasks: - - name: Include OS-specific variables - include_vars: "vars/{{ ansible_os_family }}.yml" - - roles: - - role: wal-g - when: wal_g_install|bool - - - role: pgbouncer - when: pgbouncer_install|bool - - - role: patroni - - - role: vip-manager - when: not with_haproxy_load_balancing|bool and - (cluster_vip is defined and cluster_vip | length > 0) - - # optional - - role: postgresql-users - when: is_master == "true" and postgresql_users | length > 0 - - - role: postgresql-databases - when: is_master == "true" and postgresql_databases | length > 0 - - - role: postgresql-extensions - when: is_master == "true" and postgresql_extensions | length > 0 - - - role: pgbouncer/userlist - when: pgbouncer_install|bool and pgbouncer_generate_userlist|bool - - - role: netdata - when: netdata_install is defined and netdata_install|bool - - # finish (info) - - role: deploy-finish - -... 
diff --git a/group_vars/all b/group_vars/all deleted file mode 100644 index 9925eadb6..000000000 --- a/group_vars/all +++ /dev/null @@ -1,18 +0,0 @@ ---- -# "Check system" variables - -os_valid_distributions: - - RedHat - - CentOS - - Rocky - - OracleLinux - - Ubuntu - - Debian - -os_minimum_versions: - RedHat: 7 - CentOS: 7 - Rocky: 8.4 - OracleLinux: 7 - Ubuntu: 18.04 - Debian: 9 diff --git a/group_vars/master b/group_vars/master deleted file mode 100644 index cef8eb6ab..000000000 --- a/group_vars/master +++ /dev/null @@ -1,4 +0,0 @@ ---- - -is_master: 'true' -postgresql_exists: 'false' diff --git a/group_vars/replica b/group_vars/replica deleted file mode 100644 index 635f72564..000000000 --- a/group_vars/replica +++ /dev/null @@ -1,4 +0,0 @@ ---- - -is_master: 'false' -postgresql_exists: 'false' diff --git a/TypeA.png b/images/TypeA.png similarity index 100% rename from TypeA.png rename to images/TypeA.png diff --git a/TypeB.png b/images/TypeB.png similarity index 100% rename from TypeB.png rename to images/TypeB.png diff --git a/images/TypeC.png b/images/TypeC.png new file mode 100644 index 000000000..6a6fb5dd9 Binary files /dev/null and b/images/TypeC.png differ diff --git a/images/autobase_create_cluster_demo.gif b/images/autobase_create_cluster_demo.gif new file mode 100644 index 000000000..c98387fa7 Binary files /dev/null and b/images/autobase_create_cluster_demo.gif differ diff --git a/images/github-autobase.png b/images/github-autobase.png new file mode 100644 index 000000000..3aa0d576c Binary files /dev/null and b/images/github-autobase.png differ diff --git a/load_balancing.jpg b/images/load_balancing.jpg similarity index 100% rename from load_balancing.jpg rename to images/load_balancing.jpg diff --git a/images/pg_cluster_scheme.dark_mode.png b/images/pg_cluster_scheme.dark_mode.png new file mode 100644 index 000000000..3ff5c87a7 Binary files /dev/null and b/images/pg_cluster_scheme.dark_mode.png differ diff --git a/images/pg_cluster_scheme.png b/images/pg_cluster_scheme.png new file mode 100644 index 000000000..4b640380f Binary files /dev/null and b/images/pg_cluster_scheme.png differ diff --git a/inventory b/inventory deleted file mode 100644 index 76faf9f7a..000000000 --- a/inventory +++ /dev/null @@ -1,55 +0,0 @@ -# This is an example inventory file! -# Please specify the IP addresses and connection settings for your environment -# The cluster components will listen on the specified IP addresses. - -# Set "postgresql_exists='true'" if PostgreSQL already exists and is running -# The "hostname=" variable is optional (used to change the server name) - -# if dcs_exists: false and dcs_type: "etcd" (in vars/main.yml) -[etcd_cluster] # recommendation: 3 or 5-7 nodes -10.128.64.140 -10.128.64.142 -10.128.64.143 - - -# if with_haproxy_load_balancing: true (in vars/main.yml) -[balancers] -10.128.64.140 -10.128.64.142 -10.128.64.143 - - -# PostgreSQL nodes -[master] -10.128.64.140 hostname=pgnode01 postgresql_exists='false' - -[replica] -10.128.64.142 hostname=pgnode02 postgresql_exists='false' -10.128.64.143 hostname=pgnode03 postgresql_exists='false' - -[postgres_cluster:children] -master -replica - - -# In this example, all components will be installed on the PostgreSQL nodes -# You can deploy the etcd cluster and the HAProxy balancers on other dedicated servers.
- - # if pgbackrest_install: true and "repo_host" is set (in vars/main.yml) -[pgbackrest] # optional (Dedicated Repository Host) - - -# Connection settings -[all:vars] -ansible_connection='ssh' -ansible_ssh_port='22' -ansible_user='root' -ansible_ssh_pass='secretpassword' # the "sshpass" package is required to use "ansible_ssh_pass" -# ansible_ssh_private_key_file= -# ansible_python_interpreter='/usr/bin/python3' # required to use python3 - -[pgbackrest:vars] -ansible_user='postgres' -ansible_ssh_pass='secretpassword' - diff --git a/meta/main.yml b/meta/main.yml deleted file mode 100644 index cc597ea85..000000000 --- a/meta/main.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -galaxy_info: - role_name: postgresql_cluster - description: PostgreSQL High-Availability Cluster (based on Patroni) - namespace: vitabaks - author: Vitaliy Kukharik (vitabaks@gmail.com) - license: MIT - - min_ansible_version: 2.7 - - platforms: - - name: EL - versions: - - 7 - - 8 - - name: Debian - versions: - - stretch - - buster - - name: Ubuntu - versions: - - bionic - - focal - - galaxy_tags: - - postgresql - - high-availability - - cluster - - patroni - - etcd - -dependencies: [] diff --git a/molecule/default/converge.yml b/molecule/default/converge.yml deleted file mode 100644 index b24aaad4f..000000000 --- a/molecule/default/converge.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Converge - hosts: all - gather_facts: true - - tasks: - - name: Set variables for molecule - set_fact: - firewall_enable_ipv6: false # Added to prevent test failures in CI. - sysctl_set: false # Added to prevent test failures in CI. - nameservers: ["8.8.8.8", "9.9.9.9"] - cacheable: true - - - name: Prepare | Clean yum cache - command: yum clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version == '7' - - - name: Prepare | Clean dnf cache - command: dnf clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version is version('8', '>=') - -- import_playbook: ../../deploy_pgcluster.yml - -...
diff --git a/molecule/default/molecule.yml b/molecule/default/molecule.yml deleted file mode 100644 index 7636b02f1..000000000 --- a/molecule/default/molecule.yml +++ /dev/null @@ -1,162 +0,0 @@ ---- -# yamllint disable rule:comments-indentation - -dependency: - name: galaxy - enabled: false -driver: - name: docker -platforms: - - name: 10.172.0.17 - hostname: etcd01 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - # docker_networks: # TODO github.com/ansible-community/molecule/pull/2696 - # - name: test_docker_network - # ipam_config: - # - subnet: 10.172.0.0/24 - # gateway: 10.172.0.1 - networks: - - name: test_docker_network - ipv4_address: 10.172.0.17 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.18 - hostname: etcd02 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.0.18 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.19 - hostname: etcd03 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.0.19 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.20 - hostname: pgnode01 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.0.20 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - master - - postgres_cluster - - balancers - - - name: 10.172.0.21 - hostname: pgnode02 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.0.21 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - replica - - postgres_cluster - - balancers - -provisioner: - name: ansible - config_options: - defaults: - display_skipped_hosts: false - remote_tmp: "~/.ansible/tmp" - allow_world_readable_tmpfiles: false - timeout: 60 - inventory: - links: - group_vars: ../../group_vars/ - playbooks: - prepare: prepare.yml - -scenario: - create_sequence: - - prepare - - create - converge_sequence: - - prepare - - create - - converge - destroy_sequence: - - cleanup - - destroy - test_sequence: - - cleanup - - destroy - - syntax - - prepare - - create - - converge -# - idempotence # >> role:patroni,task:"data directory check result" - - verify - - cleanup - - destroy - -verifier: - name: ansible - enabled: false # TODO - -... 
diff --git a/molecule/oraclelinux/converge.yml b/molecule/oraclelinux/converge.yml deleted file mode 100644 index 5b456ea99..000000000 --- a/molecule/oraclelinux/converge.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- name: Converge - hosts: all - gather_facts: true - - tasks: - - name: Set variables for molecule - set_fact: - firewall_enable_ipv6: false # Added to prevent test failures in CI. - sysctl_set: false # Added to prevent test failures in CI. - nameservers: ["8.8.8.8", "9.9.9.9"] - cacheable: true - - - name: Prepare | Clean yum cache - command: yum clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version == '7' - - - name: Prepare | Clean dnf cache - command: dnf clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version is version('8', '>=') - - - name: Prepare | Check that the sudo package is installed - package: - name: sudo - -- import_playbook: ../../deploy_pgcluster.yml - -... diff --git a/molecule/oraclelinux/molecule.yml b/molecule/oraclelinux/molecule.yml deleted file mode 100644 index a52e661e0..000000000 --- a/molecule/oraclelinux/molecule.yml +++ /dev/null @@ -1,162 +0,0 @@ ---- -# yamllint disable rule:comments-indentation - -dependency: - name: galaxy - enabled: false -driver: - name: docker -platforms: - - name: 10.172.0.17 - hostname: etcd01 - image: robertdebock/oraclelinux:${MOLECULE_DISTRO_TAG:-7} - # docker_networks: # TODO github.com/ansible-community/molecule/pull/2696 - # - name: test_docker_network - # ipam_config: - # - subnet: 10.172.0.0/24 - # gateway: 10.172.0.1 - networks: - - name: test_docker_network - ipv4_address: 10.172.0.17 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.18 - hostname: etcd02 - image: robertdebock/oraclelinux:${MOLECULE_DISTRO_TAG:-7} - networks: - - name: test_docker_network - ipv4_address: 10.172.0.18 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.19 - hostname: etcd03 - image: robertdebock/oraclelinux:${MOLECULE_DISTRO_TAG:-7} - networks: - - name: test_docker_network - ipv4_address: 10.172.0.19 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.0.20 - hostname: pgnode01 - image: robertdebock/oraclelinux:${MOLECULE_DISTRO_TAG:-7} - networks: - - name: test_docker_network - ipv4_address: 10.172.0.20 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - master - - postgres_cluster - - balancers - - - name: 10.172.0.21 - hostname: pgnode02 - image: robertdebock/oraclelinux:${MOLECULE_DISTRO_TAG:-7} - networks: - - name: test_docker_network - ipv4_address: 10.172.0.21 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: -
replica - - postgres_cluster - - balancers - -provisioner: - name: ansible - config_options: - defaults: - display_skipped_hosts: false - remote_tmp: "~/.ansible/tmp" - allow_world_readable_tmpfiles: false - timeout: 60 - inventory: - links: - group_vars: ../../group_vars/ - playbooks: - prepare: prepare.yml - -scenario: - create_sequence: - - prepare - - create - converge_sequence: - - prepare - - create - - converge - destroy_sequence: - - cleanup - - destroy - test_sequence: - - cleanup - - destroy - - syntax - - prepare - - create - - converge -# - idempotence # >> role:patroni,task:"data directory check result" - - verify - - cleanup - - destroy - -verifier: - name: ansible - enabled: false # TODO - -... diff --git a/molecule/oraclelinux/prepare.yml b/molecule/oraclelinux/prepare.yml deleted file mode 100644 index b7ab340d6..000000000 --- a/molecule/oraclelinux/prepare.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: "Update docker network(s)" - hosts: localhost - gather_facts: false - become: false - tasks: - - name: "Create docker network: test_docker_network" - docker_network: - name: test_docker_network - driver: bridge - enable_ipv6: false - internal: false - ipam_config: - - subnet: 10.172.0.0/24 - gateway: 10.172.0.1 - force: true - state: present - labels: - owner: molecule - -... diff --git a/molecule/postgrespro/converge.yml b/molecule/postgrespro/converge.yml deleted file mode 100644 index baabbc54f..000000000 --- a/molecule/postgrespro/converge.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Converge - hosts: all - gather_facts: true - - tasks: - - name: Set variables for molecule - set_fact: - firewall_enable_ipv6: false # Added to prevent test failures in CI. - sysctl_set: false # Added to prevent test failures in CI. - nameservers: ["8.8.8.8", "9.9.9.9"] - cacheable: true - - - name: Prepare | Clean yum cache - command: yum clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version == '7' - - - name: Prepare | Clean dnf cache - command: dnf clean all - args: - warn: false - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version is version('8', '>=') - - - name: Include postgrespro vars - include_vars: ./postgrespro_vars.yml - -- import_playbook: ../../deploy_pgcluster.yml - -... 
diff --git a/molecule/postgrespro/molecule.yml b/molecule/postgrespro/molecule.yml deleted file mode 100644 index 06ddeecbe..000000000 --- a/molecule/postgrespro/molecule.yml +++ /dev/null @@ -1,157 +0,0 @@ ---- -# yamllint disable rule:comments-indentation - -dependency: - name: galaxy - enabled: false -driver: - name: docker -platforms: - - name: 10.172.1.17 - hostname: etcd01 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.1.17 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.1.18 - hostname: etcd02 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.1.18 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.1.19 - hostname: etcd03 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.1.19 - exposed_ports: - - 2379/tcp - - 2380/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - etcd_cluster - - - name: 10.172.1.20 - hostname: pgnode01 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.1.20 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - master - - postgres_cluster - - balancers - - - name: 10.172.1.21 - hostname: pgnode02 - image: geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest - networks: - - name: test_docker_network - ipv4_address: 10.172.1.21 - exposed_ports: - - 8008/tcp - - 5432/tcp - - 6432/tcp - command: "" - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true - env: - LANG: en_US.UTF-8 - LC_ALL: en_US.UTF-8 - groups: - - replica - - postgres_cluster - - balancers - -provisioner: - name: ansible - config_options: - defaults: - display_skipped_hosts: false - remote_tmp: "~/.ansible/tmp" - allow_world_readable_tmpfiles: false - timeout: 60 - inventory: - links: - group_vars: ../../group_vars/ - playbooks: - prepare: prepare.yml - -scenario: - create_sequence: - - prepare - - create - converge_sequence: - - prepare - - create - - converge - destroy_sequence: - - cleanup - - destroy - test_sequence: - - cleanup - - destroy - - syntax - - prepare - - create - - converge -# - idempotence # >> role:patroni,task:"data directory check result" - - verify - - cleanup - - destroy - -verifier: - name: ansible - enabled: false # TODO - -... 
diff --git a/roles/add-repository/tasks/main.yml b/roles/add-repository/tasks/main.yml deleted file mode 100644 index 353304c30..000000000 --- a/roles/add-repository/tasks/main.yml +++ /dev/null @@ -1,136 +0,0 @@ ---- -# yamllint disable rule:line-length - -- block: # Debian/Ubuntu - - name: Add repository apt-key - apt_key: - url: "{{ item.key }}" - state: present - loop: "{{ apt_repository_keys }}" - when: apt_repository_keys | length > 0 - - - name: Add repository - apt_repository: - repo: "{{ item.repo }}" - state: present - update_cache: true - loop: "{{ apt_repository }}" - when: apt_repository | length > 0 - environment: "{{ proxy_env | default({}) }}" - when: installation_method == "repo" and ansible_os_family == "Debian" - tags: add_repo - -- block: # RedHat/CentOS - - name: Add repository - yum_repository: - name: "{{ item.name }}" - description: "{{ item.description }}" - baseurl: "{{ item.baseurl }}" - gpgkey: "{{ item.gpgkey }}" - gpgcheck: "{{ item.gpgcheck }}" - loop: "{{ yum_repository | flatten(1) }}" - when: yum_repository | length > 0 - - # Install EPEL Repository - - name: Remove epel-release package (if it exists) - package: - name: epel-release - state: absent - when: install_epel_repo|bool - tags: install_epel_repo - - - name: Get epel-release-latest rpm package - get_url: - url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-%7B%7B%20ansible_distribution_major_version%20%7D%7D.noarch.rpm" # noqa 204 - dest: /tmp/ - timeout: 30 - validate_certs: false - when: install_epel_repo|bool - tags: install_epel_repo - - - name: Install EPEL repository - package: - name: "/tmp/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm" - state: present - disable_gpg_check: true - when: install_epel_repo|bool - tags: install_epel_repo - - # Install SCL Repository - - name: Install Software Collections (SCL) repository for CentOS 7 - package: - name: centos-release-scl-rh - state: present - when: install_scl_repo|bool and - (ansible_distribution == 'CentOS' and - ansible_distribution_major_version == '7') - tags: install_scl_repo - - - name: Add Software Collections (SCL) repository for OracleLinux 7 - yum_repository: - name: ol7_software_collections - description: Software Collection Library packages for Oracle Linux 7 (x86_64) - baseurl: https://yum.oracle.com/repo/OracleLinux/OL7/SoftwareCollections/x86_64/ - gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle - gpgcheck: true - enabled: true - when: install_scl_repo|bool and - (ansible_distribution == "OracleLinux" and - ansible_distribution_major_version == '7') - tags: install_scl_repo - - # Development repository (for llvm-toolset-7-clang) - - name: Add Development repository for OracleLinux 7 - yum_repository: - name: ol7_developer - description: Packages for test and development - Oracle Linux 7 (x86_64) - baseurl: https://yum.oracle.com/repo/OracleLinux/OL7/developer/x86_64 - gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle - gpgcheck: true - enabled: true - when: (ansible_distribution == "OracleLinux" and - ansible_distribution_major_version == '7') - - # Optional Development repository (for libedit-devel) - - name: Add Optional Development repository for OracleLinux 7 - yum_repository: - name: ol7_optional_developer - description: Developer preview optional packages for Development on Oracle Linux 7 (x86_64) - baseurl: https://yum.oracle.com/repo/OracleLinux/OL7/optional/developer/x86_64 - gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle - gpgcheck: true - enabled: true - when:
(ansible_distribution == "OracleLinux" and - ansible_distribution_major_version == '7') - - - name: Enable Software Collections (SCL) repository for RedHat 7 - become: true - become_user: root - command: subscription-manager repos --enable rhel-server-rhscl-7-rpms - when: install_scl_repo|bool and - (ansible_distribution == 'RedHat' and - ansible_distribution_major_version == '7') - tags: install_scl_repo - - # Install PostgreSQL Repository - - name: Get pgdg-redhat-repo-latest.noarch.rpm - get_url: - url: "https://download.postgresql.org/pub/repos/yum/reporpms/EL-%7B%7B%20ansible_distribution_major_version%20%7D%7D-x86_64/pgdg-redhat-repo-latest.noarch.rpm" # noqa 204 - dest: /tmp/ - timeout: 30 - validate_certs: false - when: install_postgresql_repo|bool - tags: install_postgresql_repo - - - name: Install PostgreSQL repository - package: - name: /tmp/pgdg-redhat-repo-latest.noarch.rpm - state: present - disable_gpg_check: true - when: install_postgresql_repo|bool - tags: install_postgresql_repo - environment: "{{ proxy_env | default({}) }}" - when: installation_method == "repo" and ansible_os_family == "RedHat" - tags: add_repo - -... diff --git a/roles/ansible-role-firewall/handlers/main.yml b/roles/ansible-role-firewall/handlers/main.yml deleted file mode 100644 index 378095524..000000000 --- a/roles/ansible-role-firewall/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: restart firewall - service: name=firewall state=restarted diff --git a/roles/ansible-role-firewall/meta/main.yml b/roles/ansible-role-firewall/meta/main.yml deleted file mode 100644 index 2587263d9..000000000 --- a/roles/ansible-role-firewall/meta/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -dependencies: [] - -galaxy_info: - author: geerlingguy - description: Simple iptables firewall for most Unix-like systems. - company: "Midwestern Mac, LLC" - license: "license (BSD, MIT)" - min_ansible_version: 2.4 - platforms: - - name: EL - versions: - - all - - name: Debian - versions: - - all - - name: Ubuntu - versions: - - all - galaxy_tags: - - networking - - system - - security - - firewall - - iptables - - tcp diff --git a/roles/ansible-role-firewall/molecule/default/converge.yml b/roles/ansible-role-firewall/molecule/default/converge.yml deleted file mode 100644 index 0c37558ff..000000000 --- a/roles/ansible-role-firewall/molecule/default/converge.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Converge - hosts: all - become: true - - vars: - # Added to prevent test failures in CI. - firewall_enable_ipv6: false - - # Added for a test. - firewall_allowed_tcp_ports: - - "9123" - - pre_tasks: - - name: Update apt cache. - apt: update_cache=true cache_valid_time=1200 - when: ansible_os_family == 'Debian' - changed_when: false - - roles: - - role: geerlingguy.firewall diff --git a/roles/ansible-role-firewall/molecule/default/molecule.yml b/roles/ansible-role-firewall/molecule/default/molecule.yml deleted file mode 100644 index 47c232d37..000000000 --- a/roles/ansible-role-firewall/molecule/default/molecule.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -dependency: - name: galaxy -driver: - name: docker -lint: | - set -e - yamllint . 
- ansible-lint -platforms: - - name: instance - image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest" - command: ${MOLECULE_DOCKER_COMMAND:-""} - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true -provisioner: - name: ansible - playbooks: - converge: ${MOLECULE_PLAYBOOK:-converge.yml} -scenario: - test_sequence: - - lint - - destroy - - dependency - - syntax - - create - - prepare - - converge - - idempotence - - check - - side_effect - - verify - - destroy diff --git a/roles/ansible-role-firewall/molecule/default/playbook.yml b/roles/ansible-role-firewall/molecule/default/playbook.yml deleted file mode 100644 index a7cecd132..000000000 --- a/roles/ansible-role-firewall/molecule/default/playbook.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Converge - hosts: all - become: true - - vars: - firewall_allowed_tcp_ports: - - "9123" - - pre_tasks: - - name: Update apt cache. - apt: update_cache=true cache_valid_time=1200 - when: ansible_os_family == 'Debian' - changed_when: false - - roles: - - role: geerlingguy.firewall diff --git a/roles/ansible-role-firewall/molecule/default/yaml-lint.yml b/roles/ansible-role-firewall/molecule/default/yaml-lint.yml deleted file mode 100644 index a3dbc38ee..000000000 --- a/roles/ansible-role-firewall/molecule/default/yaml-lint.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -extends: default -rules: - line-length: - max: 120 - level: warning diff --git a/roles/confd/tasks/main.yml b/roles/confd/tasks/main.yml deleted file mode 100644 index 0a1426cde..000000000 --- a/roles/confd/tasks/main.yml +++ /dev/null @@ -1,129 +0,0 @@ ---- -# yamllint disable rule:line-length - -# install confd package from repo -- name: Download and copy "confd" binary file to /usr/local/bin/ - get_url: - url: "{{ item }}" - dest: /usr/local/bin/confd - mode: u+x,g+x,o+x - timeout: 60 - validate_certs: false - loop: - - "{{ confd_package_repo }}" - environment: "{{ proxy_env | default({}) }}" - when: installation_method == "repo" and confd_package_repo | length > 0 - tags: get_confd, confd - -# install confd package from file -- name: Copy "confd" binary file to /usr/local/bin/ - copy: - src: "{{ confd_package_file }}" - dest: /usr/local/bin/confd - mode: u+x,g+x,o+x - when: installation_method == "file" and confd_package_file | length > 0 - tags: get_confd, confd - -- name: Create conf directories - file: - path: "{{ item }}" - state: directory - loop: - - /etc/confd/conf.d - - /etc/confd/templates - tags: confd_dir, confd_conf, confd - -- block: - - name: Generate conf file "/etc/confd/confd.toml" - template: - src: templates/confd.toml.j2 - dest: /etc/confd/confd.toml - notify: "restart confd" - tags: confd_toml - - - name: Generate conf file "/etc/confd/conf.d/haproxy.toml" - template: - src: templates/haproxy.toml.j2 - dest: /etc/confd/conf.d/haproxy.toml - notify: "restart confd" - tags: haproxy_toml - - - name: Generate template "/etc/confd/templates/haproxy.tmpl" - template: - src: templates/haproxy.tmpl.j2 - dest: /etc/confd/templates/haproxy.tmpl - notify: "restart confd" - tags: haproxy_tmpl - when: add_balancer is not defined or not add_balancer|bool - tags: confd_conf, confd - -- block: # for add_balancer.yml - - name: Fetch confd.toml, haproxy.toml, haproxy.tmpl conf files from master - run_once: true - fetch: - src: "{{ item }}" - dest: files/ - validate_checksum: true - flat: true - loop: - - /etc/confd/confd.toml - - /etc/confd/conf.d/haproxy.toml - - /etc/confd/templates/haproxy.tmpl - delegate_to: "{{ groups.master[0] 
}}" - - - name: Copy confd.toml, haproxy.toml, haproxy.tmpl conf files to replica - copy: - src: "files/{{ item.conf }}" - dest: "{{ item.dest }}" - loop: - - {conf: 'confd.toml', dest: '/etc/confd/confd.toml'} - - {conf: 'haproxy.toml', dest: '/etc/confd/conf.d/haproxy.toml'} - - {conf: 'haproxy.tmpl', dest: '/etc/confd/templates/haproxy.tmpl'} - loop_control: - label: "{{ item.dest }}" - notify: "restart confd" - - - name: Prepare haproxy.tmpl template file (replace "bind" for stats) - lineinfile: - path: /etc/confd/templates/haproxy.tmpl - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp: '^.*bind.*:{{ haproxy_listen_port.stats }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.master }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_async }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}'} # noqa 204 - loop_control: - label: "{{ item.line }}" - notify: "restart confd" - when: cluster_vip is defined and cluster_vip | length > 0 - - - name: Prepare haproxy.tmpl template file (replace "bind" for stats) - lineinfile: - path: /etc/confd/templates/haproxy.tmpl - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp: '^.*bind.*:{{ haproxy_listen_port.stats }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.master }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.master }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_async }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}'} # noqa 204 - loop_control: - label: "{{ item.line }}" - notify: "restart confd" - when: cluster_vip is not defined or cluster_vip | length < 1 - when: add_balancer is defined and add_balancer|bool - tags: confd_conf, confd - -- name: Copy systemd service file - template: - src: templates/confd.service.j2 - dest: /etc/systemd/system/confd.service - notify: "restart confd" - tags: confd_service, confd - -... 
diff --git a/roles/confd/templates/confd.toml.j2 b/roles/confd/templates/confd.toml.j2 deleted file mode 100644 index a5338d7a2..000000000 --- a/roles/confd/templates/confd.toml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -backend = "etcd" -interval = 10 -watch = true -nodes = [ -{% if not dcs_exists|bool and dcs_type == 'etcd' %} - {% for host in groups['etcd_cluster'] %} - "http://{{ hostvars[host]['inventory_hostname'] }}:2379", - {% endfor %} -{% endif %} -{% if dcs_exists|bool and dcs_type == 'etcd' %} - {% for etcd_hosts in patroni_etcd_hosts %} - {{etcd_hosts.host}}:{{etcd_hosts.port}}, - {% endfor %} -{% endif %} -] diff --git a/roles/confd/templates/haproxy.tmpl.j2 b/roles/confd/templates/haproxy.tmpl.j2 deleted file mode 100644 index c2261fdfa..000000000 --- a/roles/confd/templates/haproxy.tmpl.j2 +++ /dev/null @@ -1,113 +0,0 @@ -global - maxconn {{ haproxy_maxconn.global }} - log /dev/log local0 - log /dev/log local1 notice - chroot /var/lib/haproxy - stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners - stats timeout 30s - user haproxy - group haproxy - daemon - -defaults - mode tcp - log global - retries 2 - timeout queue 5s - timeout connect 5s - timeout client {{ haproxy_timeout.client }} - timeout server {{ haproxy_timeout.server }} - timeout check 15s - -listen stats - mode http - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.stats }} - stats enable - stats uri / - -listen master -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.master }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.master }} -{% endif %} - maxconn {{ haproxy_maxconn.master }} - option tcplog - option httpchk OPTIONS /master - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} - - -listen replicas -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /replica - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} 
-{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} - - -listen replicas_sync -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas_sync }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /sync - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} - - -listen replicas_async -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas_async }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /async - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split (index (split $data.conn_url ":") 1) "/") 2) "@" "/" -1)}}:{% endraw %}{{ pgbouncer_listen_port }}{% raw %} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} -{% raw %}{{range gets "/members/*"}} server {{base .Key}} {{$data := json .Value}}{{base (replace (index (split $data.conn_url "/") 2) "@" "/" -1)}} check port {{index (split (index (split $data.api_url "/") 2) ":") 1}} -{{end}}{% endraw %} -{% endif %} - diff --git a/roles/deploy-finish/tasks/main.yml b/roles/deploy-finish/tasks/main.yml deleted file mode 100644 index 62ea35e2c..000000000 --- a/roles/deploy-finish/tasks/main.yml +++ /dev/null @@ -1,191 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Make sure handlers are flushed immediately - meta: flush_handlers - -- block: - - name: Check postgresql cluster health - run_once: true - become: true - become_user: postgres - command: patronictl -c /etc/patroni/patroni.yml list - register: patronictl_result - environment: - PATH: "{{ ansible_env.PATH }}:/usr/bin:/usr/local/bin" - changed_when: false - - - name: PostgreSQL Cluster health - run_once: true - debug: - var: patronictl_result.stdout_lines - ignore_errors: true - tags: patroni_status, cluster_info, cluster_status, point_in_time_recovery - -- block: - - name: Get postgresql database list - run_once: true - 
become: true - become_user: postgres - command: - "{{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U postgres -c - \" - SELECT - d.datname as name, - pg_get_userbyid(d.datdba) as owner, - pg_encoding_to_char(d.encoding) as encoding, - d.datcollate as collate, - d.datctype as ctype, - CASE - WHEN has_database_privilege(d.datname, 'CONNECT') - THEN pg_size_pretty(pg_database_size(d.datname)) - ELSE 'No Access' - END - size, - t.spcname as tablespace - FROM pg_catalog.pg_database d - JOIN pg_catalog.pg_tablespace t - ON d.dattablespace = t.oid - WHERE NOT datistemplate - ORDER BY 1 - \"" - register: dbs_result - delegate_to: "{{ groups.master[0] }}" - changed_when: false - - - name: PostgreSQL list of databases - run_once: true - debug: - var: dbs_result.stdout_lines - ignore_errors: true - tags: databases, db_list, cluster_info, cluster_status, point_in_time_recovery - -- block: - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address (VIP) {{ cluster_vip }} - - port {{ haproxy_listen_port.master }} (read/write) master - - port {{ haproxy_listen_port.replicas }} (read only) all replicas - - port {{ haproxy_listen_port.replicas_sync }} (read only) synchronous replica only - - port {{ haproxy_listen_port.replicas_async }} (read only) asynchronous replicas only - - +------------------------------------------------+ - when: with_haproxy_load_balancing|bool and - synchronous_mode|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address (VIP) {{ cluster_vip }} - - port {{ haproxy_listen_port.master }} (read/write) master - - port {{ haproxy_listen_port.replicas }} (read only) all replicas - - +------------------------------------------------+ - when: with_haproxy_load_balancing|bool and - not synchronous_mode|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address (VIP) {{ cluster_vip }} - - port {{ pgbouncer_listen_port }} (pgbouncer) - - +------------------------------------------------+ - when: not with_haproxy_load_balancing|bool and - pgbouncer_install|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address (VIP) {{ cluster_vip }} - - port {{ postgresql_port }} - - +------------------------------------------------+ - when: not with_haproxy_load_balancing|bool and - not pgbouncer_install|bool - when: cluster_vip is defined and cluster_vip | length > 0 - ignore_errors: true - tags: conn_info, cluster_info, cluster_status - -- block: - - name: Get vip info - set_fact: - man_ip: "{{ item }}" - loop: "{{ ansible_all_ipv4_addresses }}" - when: item == cluster_vip - - - name: Virtual IP Address (VIP) info - debug: - msg: - "Cluster ip address (VIP) {{ cluster_vip }} - is running on server {{ ansible_hostname }}" - when: man_ip is defined and man_ip == cluster_vip - when: cluster_vip is defined and cluster_vip | length > 0 - ignore_errors: true - tags: vip_owner, vip_status, cluster_info, cluster_status - - -- block: # if cluster_vip is not defined - - name: Create list of nodes - run_once: true - set_fact: - haproxy_nodes: "{% for host in groups['balancers'] %}{{ hostvars[host]['inventory_hostname'] }}{% if not loop.last %},{% endif %}{% endfor %}" # noqa 204 - postgres_cluster_nodes: "{% for host in 
groups['postgres_cluster'] %}{{ hostvars[host]['inventory_hostname'] }}{% if not loop.last %},{% endif %}{% endfor %}" # noqa 204 - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address {{ haproxy_nodes }} - - port {{ haproxy_listen_port.master }} (read/write) master - - port {{ haproxy_listen_port.replicas }} (read only) all replicas - - port {{ haproxy_listen_port.replicas_sync }} (read only) synchronous replica only - - port {{ haproxy_listen_port.replicas_async }} (read only) asynchronous replicas only - - +------------------------------------------------+ - when: with_haproxy_load_balancing|bool and - synchronous_mode|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address {{ haproxy_nodes }} - - port {{ haproxy_listen_port.master }} (read/write) master - - port {{ haproxy_listen_port.replicas }} (read only) all replicas - - +------------------------------------------------+ - when: with_haproxy_load_balancing|bool and - not synchronous_mode|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address {{ postgres_cluster_nodes }} - - port {{ pgbouncer_listen_port }} (pgbouncer) - - +------------------------------------------------+ - when: not with_haproxy_load_balancing|bool and - pgbouncer_install|bool - - - name: PostgreSQL Cluster connection info - run_once: true - debug: - msg: - - +------------------------------------------------+ - - address {{ postgres_cluster_nodes }} - - port {{ postgresql_port }} - - +------------------------------------------------+ - when: not with_haproxy_load_balancing|bool and - not pgbouncer_install|bool - ignore_errors: true - when: cluster_vip is not defined or cluster_vip | length < 1 - tags: conn_info, cluster_info, cluster_status - -... 
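The connection-info tasks above assume haproxy_listen_port is a mapping with one entry per listener. A hedged example of that structure; the port numbers are illustrative, not taken from this diff:

haproxy_listen_port:
  master: 5000
  replicas: 5001
  replicas_sync: 5002
  replicas_async: 5003
  stats: 7000
pgbouncer_listen_port: 6432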
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml deleted file mode 100644 index 2033fef03..000000000 --- a/roles/etcd/tasks/main.yml +++ /dev/null @@ -1,129 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Make sure handlers are flushed immediately - meta: flush_handlers - -- name: Make sure the unzip/tar packages are present - package: - name: - - unzip - - tar - state: present - environment: "{{ proxy_env | default({}) }}" - tags: etcd, etcd_install - -- block: # install etcd package from repo - - name: Download "etcd" package - get_url: - url: "{{ item }}" - dest: /tmp/ - timeout: 60 - validate_certs: false - loop: - - "{{ etcd_package_repo }}" - environment: "{{ proxy_env | default({}) }}" - - - name: Extract "etcd" into /tmp - unarchive: - src: "/tmp/{{ etcd_package_repo | basename }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - remote_src: true - - - name: Copy "etcd" and "etcdctl" binary files to /usr/local/bin/ - copy: - src: "/tmp/{{ etcd_package_repo.split('.tar.gz')[0] | basename }}/{{ item }}" - dest: /usr/local/bin/ - mode: u+x,g+x,o+x - remote_src: true - loop: - - etcd - - etcdctl - when: installation_method == "repo" and etcd_package_repo | length > 0 - tags: etcd, etcd_install - -- block: # install etcd package from file - - name: Extract "etcd" into /tmp - unarchive: - src: "{{ etcd_package_file }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - - - name: Copy "etcd" and "etcdctl" binary files to /usr/local/bin/ - copy: - src: "/tmp/{{ etcd_package_file.split('.tar.gz')[0] | basename }}/{{ item }}" - dest: /usr/local/bin/ - mode: u+x,g+x,o+x - remote_src: true - loop: - - etcd - - etcdctl - when: installation_method == "file" and etcd_package_file | length > 0 - tags: etcd, etcd_install - -- name: Add etcd user - user: - name: etcd - shell: /usr/sbin/nologin - home: "{{ etcd_data_dir }}" - tags: etcd, etcd_conf - -- name: Create etcd conf directory - file: - path: /etc/etcd - state: directory - tags: etcd, etcd_conf - -- name: Generate conf file "/etc/etcd/etcd.conf" - template: - src: templates/etcd.conf.j2 - dest: /etc/etcd/etcd.conf - tags: etcd, etcd_conf - -- name: Copy systemd service file - template: - src: templates/etcd.service.j2 - dest: /etc/systemd/system/etcd.service - tags: etcd, etcd_conf - -- name: Enable and start etcd service - systemd: - daemon_reload: true - name: etcd - enabled: true - state: started - tags: etcd, etcd_start - -- name: Wait for port 2379 to become open on the host - wait_for: - port: 2379 - host: 127.0.0.1 - state: started - timeout: 120 - delay: 10 - ignore_errors: false - tags: etcd, etcd_start - -- block: - - name: Wait until the etcd cluster is healthy - command: /usr/local/bin/etcdctl cluster-health - environment: - ETCDCTL_API: 2 - register: etcd_health_result - run_once: true - changed_when: false - until: "'cluster is healthy' in etcd_health_result.stdout" - retries: 10 - delay: 10 - ignore_errors: false - - - name: cluster health - run_once: true - debug: - var: etcd_health_result.stdout_lines - tags: etcd, etcd_start, etcd_status - -... 
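The etcd role above downloads a release tarball, unpacks it in /tmp, and installs the etcd/etcdctl binaries; the etcd.conf template that follows consumes a few more variables. A sketch of the expected inventory values, where the URL, path, and cluster name are illustrative assumptions:

installation_method: "repo"
etcd_package_repo: "https://github.com/etcd-io/etcd/releases/download/v3.3.27/etcd-v3.3.27-linux-amd64.tar.gz"  # illustrative URL
etcd_data_dir: "/var/lib/etcd"
etcd_cluster_name: "etcd-postgres-cluster"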
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 deleted file mode 100644 index cc29baa25..000000000 --- a/roles/etcd/templates/etcd.conf.j2 +++ /dev/null @@ -1,13 +0,0 @@ -ETCD_NAME="{{ ansible_hostname }}" -ETCD_LISTEN_CLIENT_URLS="http://{{ inventory_hostname }}:2379,http://127.0.0.1:2379" -ETCD_ADVERTISE_CLIENT_URLS="http://{{ inventory_hostname }}:2379" -ETCD_LISTEN_PEER_URLS="http://{{ inventory_hostname }}:2380" -ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ inventory_hostname }}:2380" -ETCD_INITIAL_CLUSTER_TOKEN="{{ etcd_cluster_name }}" -ETCD_INITIAL_CLUSTER="{% for host in groups['etcd_cluster'] %}{{ hostvars[host]['ansible_hostname'] }}=http://{{ hostvars[host]['inventory_hostname'] }}:2380{% if not loop.last %},{% endif %}{% endfor %}" -ETCD_INITIAL_CLUSTER_STATE="new" -ETCD_DATA_DIR="{{ etcd_data_dir }}" -ETCD_ELECTION_TIMEOUT="5000" -ETCD_HEARTBEAT_INTERVAL="1000" -ETCD_INITIAL_ELECTION_TICK_ADVANCE="false" -ETCD_ENABLE_V2="true" diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml deleted file mode 100644 index 215719e32..000000000 --- a/roles/haproxy/tasks/main.yml +++ /dev/null @@ -1,469 +0,0 @@ ---- -# yamllint disable rule:line-length - -# Install HAProxy from rpm/deb packages -# from repo -- block: - # RedHat/CentOS/OracleLinux 7 (SCL) - - name: Install Software Collections (SCL) repository for CentOS 7 - package: - name: centos-release-scl-rh - state: present - when: haproxy_install_repo|bool and - (ansible_distribution == 'CentOS' and - ansible_distribution_major_version == '7') - tags: haproxy_scl_repo - - - name: Install Software Collections (SCL) repository for OracleLinux 7 - yum_repository: - name: ol7_software_collections - description: Software Collection Library packages for Oracle Linux 7 (x86_64) - baseurl: https://yum.oracle.com/repo/OracleLinux/OL7/SoftwareCollections/x86_64/ - gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle - gpgcheck: true - enabled: true - when: haproxy_install_repo|bool and - (ansible_distribution == "OracleLinux" and - ansible_distribution_major_version == '7') - tags: haproxy_scl_repo - - - name: Enable Software Collections (SCL) repository for RedHat 7 - become: true - become_user: root - command: subscription-manager repos --enable rhel-server-rhscl-7-rpms - when: haproxy_install_repo|bool and - (ansible_distribution == 'RedHat' and - ansible_distribution_major_version == '7') - tags: haproxy_scl_repo - - - name: Install HAProxy v1.8 (rh-haproxy18 package) - package: - name: rh-haproxy18 - state: present - when: ansible_distribution_major_version == '7' - - - name: create a symlink "/usr/sbin/haproxy" - file: - src: /opt/rh/rh-haproxy18/root/usr/sbin/haproxy - dest: /usr/sbin/haproxy - owner: root - group: root - state: link - when: ansible_distribution_major_version == '7' - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and - installation_method == "repo" and - haproxy_installation_method == "rpm" - tags: haproxy, load_balancing - -- block: - # RedHat os family version 8 (and higher) - - name: Install HAProxy v1.8 package - package: - name: haproxy - state: present - when: ansible_distribution_major_version is version('8', '>=') - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and - installation_method == "repo" and - haproxy_installation_method == "rpm" - tags: haproxy, load_balancing - -- block: - # Debian - - name: Add haproxy.debian.net repository apt-key - apt_key: - url: 
https://haproxy.debian.net/bernat.debian.org.gpg - state: present - when: haproxy_install_repo|bool and - (ansible_distribution == "Debian" and - ansible_distribution_major_version is version('10', '<=')) - - - name: Add haproxy.debian.net repository - apt_repository: - repo: "deb https://haproxy.debian.net {{ ansible_distribution_release }}-backports-1.8 main" - state: present - update_cache: true - when: haproxy_install_repo|bool and - (ansible_distribution == "Debian" and - ansible_distribution_major_version is version('10', '<=')) - - # Ubuntu - - name: Add ppa:vbernat/haproxy-1.8 repository apt-key - apt_key: # https://github.com/ansible/ansible/issues/31691 - data: | - -----BEGIN PGP PUBLIC KEY BLOCK----- - - xo0EUa70wAEEAMtI29s01PCX0JleVmh1QQr3rfPkfGo/GFKfcXRGE40nQHq+rWUh - 9slUN+kXBckSE0DDrnQH08Uvf12TJiHHFlbXnH5Ep+hgYPZGlVSpvBGO+c/CopU7 - RHMx9bl+pVOhrVeDWqLl2KqJI2wjJBLXA0dbRbCzmXPvrg3mBQ0hZ533ABEBAAHN - IExhdW5jaHBhZCBQUEEgZm9yIFZpbmNlbnQgQmVybmF0wrgEEwECACIFAlGu9MAC - GwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFBdl6QcYbnN8aMD/RM3InMu - bxTF9hzToCPF2EP37Q9WUQNF15f90jTOl8VqqpnUfGd2qlxUW31soCpDVxqX6lXf - qB0bI9EDz2r7w+goxBH+cRArJ2APdC7wE/U9eIxY49mzNsqjsl7zY+eoX4v4fjqk - 33hFyMMJDUtPxSRHWFqP5QNwCN+fbPh5GiyL - =ZiOf - -----END PGP PUBLIC KEY BLOCK----- - state: present - when: haproxy_install_repo|bool and - (ansible_distribution == 'Ubuntu' and - ansible_distribution_major_version is version('18', '<=')) - - - name: Add ppa:vbernat/haproxy-1.8 repository - apt_repository: - repo: "deb http://ppa.launchpad.net/vbernat/haproxy-1.8/ubuntu {{ ansible_distribution_release }} main" - state: present - update_cache: true - when: haproxy_install_repo|bool and - (ansible_distribution == 'Ubuntu' and - ansible_distribution_major_version is version('18', '<=')) - - - name: Install HAProxy v1.8 package - apt: - force_apt_get: true - name: "haproxy=1.8.*" - state: present - when: ansible_distribution_major_version is version('18', '<=') - - - name: Install HAProxy package - apt: - name: haproxy - state: present - when: ansible_distribution_major_version is version('20', '>=') - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and - installation_method == "repo" and - haproxy_installation_method == "deb" - tags: haproxy, load_balancing - -# from file (rpm/deb packages) -- block: - - name: Copy packages into /tmp - copy: - src: "{{ item }}" - dest: /tmp/ - loop: "{{ haproxy_package_file }}" - register: copy_packages_result - - - name: Install packages - apt: - force_apt_get: true - deb: "/tmp/{{ item }}" - state: present - loop: "{{ haproxy_package_file | map('basename') | list }}" - when: ansible_os_family == "Debian" and copy_packages_result.changed - - - name: Install packages - package: - name: "/tmp/{{ item }}" - state: present - loop: "{{ haproxy_package_file | map('basename') | list }}" - when: ansible_os_family == "RedHat" and copy_packages_result.changed - when: haproxy_package_file is defined and haproxy_package_file | length > 0 - tags: haproxy, load_balancing - -# Build and install HAproxy from source -- name: Setting facts - set_fact: - target_linux: "{% if haproxy_major is version('2.0', '>=') %}linux-glibc{% else %}linux2628{% endif %}" - when: haproxy_installation_method == "src" - tags: haproxy, load_balancing - -# from repo -- block: - - name: "Download HAProxy and lua source files" - get_url: - url: "{{ item }}" - dest: /tmp/ - timeout: 120 - validate_certs: false - loop: - - "{{ haproxy_src_repo }}" - - "{{ lua_src_repo }}" - environment: "{{ 
proxy_env | default({}) }}" - - - name: "Extract HAProxy source files into /tmp" - unarchive: - src: "/tmp/{{ haproxy_src_repo | basename }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - remote_src: true - when: haproxy_src_repo | length > 0 - - - name: "Extract lua source files into /tmp" - unarchive: - src: "/tmp/{{ lua_src_repo | basename }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - remote_src: true - when: lua_src_repo | length > 0 - tags: lua - when: installation_method == "repo" and haproxy_installation_method == "src" - tags: haproxy, load_balancing - -# from file -- block: - - name: "Extract HAProxy source files into /tmp" - unarchive: - src: "{{ haproxy_src_file }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - when: haproxy_src_file | length > 0 - - - name: "Extract lua source files into /tmp" - unarchive: - src: "{{ lua_src_file }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - when: lua_src_file | length > 0 - tags: lua - when: installation_method == "file" and haproxy_installation_method == "src" - tags: haproxy, load_balancing - -- name: Install the prerequisites packages to compile HAProxy - package: - name: "{{ haproxy_compile_requirements }}" - state: present - environment: "{{ proxy_env | default({}) }}" - when: haproxy_installation_method == "src" - tags: haproxy, haproxy_requirements, load_balancing - -- block: - - name: Build and install lua (required for haproxy) - become: true - become_user: root - shell: "make INSTALL_TOP=/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }} linux install" # noqa 305 - args: - chdir: "/tmp/{{ lua_src_repo.split('.tar.gz')[0] | basename }}" - tags: lua - - - name: Build HAProxy - become: true - become_user: root - make: - chdir: "/tmp/{{ haproxy_src_repo.split('.tar.gz')[0] | basename }}" - params: - TARGET: "{{ target_linux }}" - USE_GETADDRINFO: 1 - USE_ZLIB: 1 - USE_REGPARM: 1 - USE_OPENSSL: 1 - USE_LIBCRYPT: 1 - USE_SYSTEMD: 1 - USE_PCRE: 1 - USE_NS: 1 - USE_TFO: 1 - USE_LUA: 1 - LUA_INC: "/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }}/include" - LUA_LIB: "/opt/{{ lua_src_repo.split('.tar.gz')[0] | basename }}/lib" - - - name: Install HAProxy - become: true - become_user: root - make: - chdir: "/tmp/{{ haproxy_src_repo.split('.tar.gz')[0] | basename }}" - target: install - when: installation_method == "repo" and haproxy_installation_method == "src" - tags: haproxy, load_balancing - -# installation_method: "file" -- block: - - name: Build and install lua (required for haproxy) - become: true - become_user: root - shell: "make INSTALL_TOP=/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }} linux install" # noqa 305 - args: - chdir: "/tmp/{{ lua_src_file.split('.tar.gz')[0] | basename }}" - tags: lua - - - name: Build HAProxy - become: true - become_user: root - make: - chdir: "/tmp/{{ haproxy_src_file.split('.tar.gz')[0] | basename }}" - params: - TARGET: "{{ target_linux }}" - USE_GETADDRINFO: 1 - USE_ZLIB: 1 - USE_REGPARM: 1 - USE_OPENSSL: 1 - USE_LIBCRYPT: 1 - USE_SYSTEMD: 1 - USE_PCRE: 1 - USE_NS: 1 - USE_TFO: 1 - USE_LUA: 1 - LUA_INC: "/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }}/include" - LUA_LIB: "/opt/{{ lua_src_file.split('.tar.gz')[0] | basename }}/lib" - - - name: Install HAProxy - become: true - become_user: root - make: - chdir: "/tmp/{{ haproxy_src_file.split('.tar.gz')[0] | basename }}" - target: install - when: installation_method == "file" and haproxy_installation_method == "src" - tags: haproxy, load_balancing - -# Configure -- name: Make sure the kernel parameter 
"net.ipv4.ip_nonlocal_bind" are enabled - sysctl: - name: "net.ipv4.ip_nonlocal_bind" - value: "1" - sysctl_set: true - state: present - reload: true - tags: haproxy, load_balancing - -- name: Add haproxy group - group: - name: haproxy - state: present - tags: haproxy, load_balancing - -- name: Add haproxy user - user: - name: haproxy - comment: "HAProxy user" - group: haproxy - shell: /usr/sbin/nologin - tags: haproxy, load_balancing - -- name: Create directories - file: - dest: "{{ item }}" - state: directory - owner: haproxy - group: haproxy - loop: - - /etc/haproxy - - /var/run/haproxy - - /var/lib/haproxy/dev - tags: haproxy, load_balancing - -- name: Generate conf file "/etc/haproxy/haproxy.cfg" - template: - src: templates/haproxy.cfg.j2 - dest: /etc/haproxy/haproxy.cfg - owner: haproxy - group: haproxy - notify: "restart haproxy" - when: add_balancer is not defined or not add_balancer|bool - tags: haproxy, haproxy_conf, load_balancing - -- name: Generate systemd service file "/etc/systemd/system/haproxy.service" - template: - src: templates/haproxy.service.j2 - dest: /etc/systemd/system/haproxy.service - owner: haproxy - group: haproxy - notify: "restart haproxy" - tags: haproxy, haproxy_service, load_balancing - -- block: # for add_balancer.yml - - name: Fetch haproxy.cfg file from master - run_once: true - fetch: - src: /etc/haproxy/haproxy.cfg - dest: files/haproxy.cfg - validate_checksum: true - flat: true - notify: "restart haproxy" - delegate_to: "{{ groups.master[0] }}" - - - name: Copy haproxy.cfg file to replica - copy: - src: files/haproxy.cfg - dest: /etc/haproxy/haproxy.cfg - owner: haproxy - group: haproxy - notify: "restart haproxy" - - - name: Prepare haproxy.cfg conf file (replace "bind") - lineinfile: - path: /etc/haproxy/haproxy.cfg - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp: '^.*bind.*:{{ haproxy_listen_port.stats }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.master }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.master }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_sync }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_sync }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_async }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.replicas_async }}'} # noqa 204 - loop_control: - label: "{{ item.line }}" - notify: "restart haproxy" - when: cluster_vip is not defined or cluster_vip | length < 1 - - - name: Prepare haproxy.cfg conf file (replace "bind" for stats) - lineinfile: - path: /etc/haproxy/haproxy.cfg - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp: '^.*bind.*:{{ haproxy_listen_port.stats }}$', line: ' bind {{ hostvars[inventory_hostname].inventory_hostname }}:{{ haproxy_listen_port.stats }}'} # noqa 204 - - {regexp: '^.*bind.*:{{ haproxy_listen_port.master }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.master }}'} - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }}'} - - {regexp: '^.*bind.*:{{ 
haproxy_listen_port.replicas_sync }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }}'} - - {regexp: '^.*bind.*:{{ haproxy_listen_port.replicas_async }}$', line: ' bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }}'} - loop_control: - label: "{{ item.line }}" - notify: "restart haproxy" - when: cluster_vip is defined and cluster_vip | length > 0 - when: add_balancer is defined and add_balancer|bool - tags: haproxy, haproxy_conf, load_balancing - -- block: - - name: selinux | make sure the libsemanage-python, policycoreutils-python packages is present - package: - name: "{{ packages }}" - state: present - update_cache: true - vars: - packages: - - libsemanage-python - - policycoreutils-python - environment: "{{ proxy_env | default({}) }}" - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version == '7' - - installation_method == "repo" - - haproxy_installation_method == "rpm" - - - name: selinux | make sure the python3-libsemanage, python3-policycoreutils packages is present - package: - name: "{{ packages }}" - state: present - update_cache: true - vars: - packages: - - python3-libsemanage - - python3-policycoreutils - environment: "{{ proxy_env | default({}) }}" - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version is version('8', '>=') - - installation_method == "repo" - - haproxy_installation_method == "rpm" - - - name: selinux | set haproxy_connect_any flag to enable tcp connections - seboolean: - name: haproxy_connect_any - state: true - persistent: true - - - name: selinux | change the haproxy_t domain to permissive - selinux_permissive: - name: haproxy_t - permissive: true - when: ansible_selinux.status is defined and ansible_selinux.status == 'enabled' - ignore_errors: true - tags: haproxy, load_balancing, haproxy_selinux - -... 
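The haproxy role above supports three installation paths: distribution packages (rpm/deb), local package files, or a source build with Lua. A hedged sketch of the variables that select between them; the version numbers and URLs are illustrative:

installation_method: "repo"           # "repo" or "file"
haproxy_installation_method: "rpm"    # "rpm", "deb" or "src"
haproxy_major: "1.8"                  # selects the make TARGET: linux-glibc for >= 2.0, else linux2628
haproxy_src_repo: "https://www.haproxy.org/download/1.8/src/haproxy-1.8.25.tar.gz"  # illustrative URL
lua_src_repo: "https://www.lua.org/ftp/lua-5.3.5.tar.gz"  # illustrative URL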
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 deleted file mode 100644 index d3613a311..000000000 --- a/roles/haproxy/templates/haproxy.cfg.j2 +++ /dev/null @@ -1,118 +0,0 @@ -global - maxconn {{ haproxy_maxconn.global }} - log /dev/log local0 - log /dev/log local1 notice - chroot /var/lib/haproxy - stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners - stats timeout 30s - user haproxy - group haproxy - daemon - -defaults - mode tcp - log global - retries 2 - timeout queue 5s - timeout connect 5s - timeout client {{ haproxy_timeout.client }} - timeout server {{ haproxy_timeout.server }} - timeout check 15s - -listen stats - mode http - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.stats }} - stats enable - stats uri / - -listen master -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.master }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.master }} -{% endif %} - maxconn {{ haproxy_maxconn.master }} - option tcplog - option httpchk OPTIONS /master - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 4 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port 8008 - {% endfor %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port 8008 - {% endfor %} -{% endif %} - -listen replicas -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /replica - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port 8008 - {% endfor %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port 8008 - {% endfor %} -{% endif %} - -listen replicas_sync -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_sync }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas_sync }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /sync - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ 
pgbouncer_listen_port }} check port 8008 - {% endfor %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port 8008 - {% endfor %} -{% endif %} - -listen replicas_async -{% if cluster_vip is defined and cluster_vip | length > 0 %} - bind {{ cluster_vip }}:{{ haproxy_listen_port.replicas_async }} -{% else %} - bind {{ hostvars[inventory_hostname]['inventory_hostname'] }}:{{ haproxy_listen_port.replicas_async }} -{% endif %} - maxconn {{ haproxy_maxconn.replica }} - option tcplog - option httpchk OPTIONS /async - balance roundrobin - http-check expect status 200 - default-server inter 3s fastinter 1s fall 3 rise 2 on-marked-down shutdown-sessions -{% if pgbouncer_install|bool %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ pgbouncer_listen_port }} check port 8008 - {% endfor %} -{% endif %} -{% if ( pgbouncer_install is not defined ) or ( not pgbouncer_install|bool ) %} - {% for host in groups['postgres_cluster'] %} -server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['inventory_hostname'] }}:{{ postgresql_port }} check port 8008 - {% endfor %} -{% endif %} - diff --git a/roles/keepalived/handlers/main.yml b/roles/keepalived/handlers/main.yml deleted file mode 100644 index 50c883784..000000000 --- a/roles/keepalived/handlers/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- - -- name: Restart keepalived service - systemd: - daemon_reload: true - name: keepalived - enabled: true - state: restarted - listen: "restart keepalived" - -- name: Wait until the cluster IP address (VIP) "{{ cluster_vip }}" is running - wait_for: - host: "{{ cluster_vip }}" - port: "{{ ansible_ssh_port | default(22) }}" - state: started - timeout: 60 - delay: 2 - listen: "restart keepalived" - -...
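Both keepalived handlers above subscribe to the same topic via listen, so a single notify triggers the restart and then waits for the VIP to answer. A minimal sketch of a task that would trigger them; the role's task file is not part of this diff, and the dest path is an assumption:

- name: Generate conf file "/etc/keepalived/keepalived.conf"
  template:
    src: templates/keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf   # assumed standard keepalived config path
  notify: "restart keepalived"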
diff --git a/roles/keepalived/templates/keepalived.conf.j2 b/roles/keepalived/templates/keepalived.conf.j2 deleted file mode 100644 index 4deff2c89..000000000 --- a/roles/keepalived/templates/keepalived.conf.j2 +++ /dev/null @@ -1,29 +0,0 @@ -global_defs { - router_id ocp_vrrp - enable_script_security - script_user root -} - -vrrp_script haproxy_check { - script "/usr/libexec/keepalived/haproxy_check.sh" - interval 2 - weight 2 -} - -vrrp_instance VI_1 { - interface {{ vip_interface }} - virtual_router_id {{ cluster_vip.split('.')[3] | int }} - priority 100 - advert_int 2 - state BACKUP - virtual_ipaddress { - {{ cluster_vip }} - } - track_script { - haproxy_check - } - authentication { - auth_type PASS - auth_pass 1ce24b6e - } -} diff --git a/roles/netdata/tasks/main.yml b/roles/netdata/tasks/main.yml deleted file mode 100644 index 4c9cd6db3..000000000 --- a/roles/netdata/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# yamllint disable rule:line-length - -- block: - - - name: Download the installation script "kickstart.sh" - get_url: - url: https://my-netdata.io/kickstart.sh - dest: /tmp/kickstart.sh - mode: +x - - - name: Enable PowerTools repository - command: dnf config-manager --set-enabled "[Pp]ower[Tt]ools" - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version is version('8', '>=') - - - name: Install Netdata - command: /tmp/kickstart.sh {{ netdata_install_options | default('--dont-wait') }} - - - name: Configure Netdata - template: - src: templates/netdata.conf.j2 - dest: /etc/netdata/netdata.conf - owner: root - group: root - mode: u=wrx,g=rx,o=r,+x - - - name: Restart Netdata - service: - name: netdata - state: restarted - - environment: "{{ proxy_env | default({}) }}" - tags: netdata - -... diff --git a/roles/netdata/templates/netdata.conf.j2 b/roles/netdata/templates/netdata.conf.j2 deleted file mode 100644 index a7c41cec0..000000000 --- a/roles/netdata/templates/netdata.conf.j2 +++ /dev/null @@ -1,30 +0,0 @@ -# netdata configuration -# -# You can download the latest version of this file, using: -# -# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf -# or -# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf -# -# You can uncomment and change any of the options below. -# The value shown in the commented settings, is the default value. 
-# - -[global] - run as user = netdata - - memory mode = {{ netdata_conf.memory_mode | default('dbengine') }} - page cache size = {{ netdata_conf.page_cache_size | default('64') }} - dbengine disk space = {{ netdata_conf.dbengine_disk_space | default('512') }} - dbengine multihost disk space = {{ netdata_conf.dbengine_disk_space | default('512') }} - - # some defaults to run netdata with least priority - process scheduling policy = idle - OOM score = 1000 - -[web] - web files owner = netdata - web files group = netdata - - bind to = {{ netdata_conf.web_bind_to | default('localhost') }} - diff --git a/roles/packages/tasks/main.yml b/roles/packages/tasks/main.yml deleted file mode 100644 index ce8f37409..000000000 --- a/roles/packages/tasks/main.yml +++ /dev/null @@ -1,128 +0,0 @@ ---- - -# Install packages from files -- block: - - name: Copy packages into /tmp - copy: - src: "{{ item }}" - dest: /tmp/ - loop: "{{ packages_from_file }}" - register: copy_packages_result - - - name: Install packages - apt: - force_apt_get: true - deb: "/tmp/{{ item }}" - state: present - loop: "{{ packages_from_file | map('basename') | list }}" - when: ansible_os_family == "Debian" and copy_packages_result.changed - - - name: Install packages - package: - name: "/tmp/{{ item }}" - state: present - loop: "{{ packages_from_file | map('basename') | list }}" - when: ansible_os_family == "RedHat" and copy_packages_result.changed - when: packages_from_file is defined and packages_from_file | length > 0 - tags: install_packages_from_file - -- block: # RedHat (update cache) - - name: Clean yum cache - command: yum clean all - args: - warn: false - when: ansible_distribution_major_version == '7' - - - name: Clean dnf cache - command: dnf clean all - args: - warn: false - when: ansible_distribution_major_version is version('8', '>=') - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and installation_method == "repo" - tags: install_packages, install_postgres - -# Install packages from repository -# RedHat -- name: Install system packages - package: - name: "{{ item }}" - state: latest - loop: "{{ system_packages }}" - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and installation_method == "repo" - tags: install_packages - -# Debian -- name: Install system packages - apt: - name: "{{ item }}" - state: latest - loop: "{{ system_packages }}" - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - tags: install_packages - -# PostgreSQL prepare for install (for Debian based only) -- block: - - name: PostgreSQL | ensure postgresql database-cluster manager package - package: - name: postgresql-common - state: present - environment: "{{ proxy_env | default({}) }}" - - - name: PostgreSQL | disable initializing of a default postgresql cluster - replace: - path: /etc/postgresql-common/createcluster.conf - replace: create_main_cluster = false - regexp: ^#?create_main_cluster.*$ - - - name: PostgreSQL | disable log rotation with logrotate for postgresql - file: - dest: /etc/logrotate.d/postgresql-common - state: absent - when: installation_method == "repo" and ansible_os_family == "Debian" - tags: install_postgres - -# PostgreSQL prepare for install (for RHEL 8) -- block: - - name: PostgreSQL | check if appstream module is enabled - command: 'dnf -y -C module list postgresql' - register: postgresql_module_result - changed_when: false - args: - warn: false - - - name: PostgreSQL | disable appstream 
module - command: 'dnf -y -C module disable postgresql' - args: - warn: false - when: "'[x] ' not in postgresql_module_result.stdout" - when: installation_method == "repo" and - ansible_os_family == "RedHat" and - ansible_distribution_major_version >= '8' - ignore_errors: true - tags: install_postgres - -# Install PostgreSQL from repository -# RedHat -- name: Install PostgreSQL packages - package: - name: "{{ item }}" - state: present - loop: "{{ postgresql_packages }}" - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and installation_method == "repo" - tags: install_postgres - -# Debian -- name: Install PostgreSQL packages - apt: - name: "{{ item }}" - state: present - loop: "{{ postgresql_packages }}" - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" and installation_method == "repo" - tags: install_postgres - -... diff --git a/roles/patroni/tasks/custom_wal_dir.yml b/roles/patroni/tasks/custom_wal_dir.yml deleted file mode 100644 index 0ee66a82c..000000000 --- a/roles/patroni/tasks/custom_wal_dir.yml +++ /dev/null @@ -1,91 +0,0 @@ ---- -# yamllint disable rule:line-length -# yamllint disable rule:comments-indentation - -- name: "Make sure {{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }} is symlink" # noqa 204 - stat: - path: "{{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}" - register: sym - -- block: # synchronize WAL`s if wal dir exist (and is not symlink) - - name: Make sure rsync is installed (for synchronize wal dir) - package: - name: - - rsync - - sshpass - state: present - environment: "{{ proxy_env | default({}) }}" - - - name: Stop patroni service (for create symlink) - systemd: - name: patroni - state: stopped - register: patroni_stop_result - - - name: Make sure PostgreSQL is stopped - become: true - become_user: postgres - command: "{{ postgresql_bin_dir }}/pg_ctl status -D {{ postgresql_data_dir }}" - register: stop_result - changed_when: false - failed_when: false - until: stop_result.rc == 3 - retries: 100 - delay: 6 - - - name: "Synchronize {{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }} to {{ postgresql_wal_dir }}" # noqa 204 - become: true - become_user: postgres - synchronize: - src: "{{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}/" - dest: "{{ postgresql_wal_dir }}/" - delegate_to: "{{ inventory_hostname }}" - - - name: "Rename {{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }} to {{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}_old" # noqa 204 - command: mv {{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }} {{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}_old # noqa 204 - register: mv_result - when: sym.stat.exists and not sym.stat.islnk|bool - -- name: "Create symlink {{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }} -> {{ postgresql_wal_dir }}" # noqa 204 - become: true - become_user: postgres - file: - src: "{{ postgresql_wal_dir }}" - dest: "{{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}" - state: link - when: sym.stat.islnk is not defined or not sym.stat.islnk|bool - -- block: # start patroni - - name: Start patroni service - 
systemd: - name: patroni - state: started - - - name: Wait for port 8008 to become open on the host - wait_for: - port: 8008 - host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" - state: started - timeout: 120 - delay: 10 - ignore_errors: false - - - name: Check that the patroni is healthy - uri: - url: "http://{{ hostvars[inventory_hostname]['inventory_hostname'] }}:8008/health" - status_code: 200 - register: replica_result - until: replica_result.status == 200 - retries: 120 - delay: 10 - when: patroni_stop_result is changed - -- name: "Remove {{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}_old directory" - file: - path: "{{ postgresql_data_dir }}/{{ 'pg_wal' if postgresql_version is version('10', '>=') else 'pg_xlog' }}_old" - state: absent - when: - - replica_result.status is defined - - replica_result.status == 200 - -... diff --git a/roles/pgbackrest/tasks/main.yml b/roles/pgbackrest/tasks/main.yml deleted file mode 100644 index c04c614c2..000000000 --- a/roles/pgbackrest/tasks/main.yml +++ /dev/null @@ -1,124 +0,0 @@ ---- -# yamllint disable rule:line-length - -- block: # Debian pgdg repo - - name: Make sure the gnupg and apt-transport-https packages are present - apt: - pkg: - - gnupg - - apt-transport-https - state: present - - - name: Make sure pgdg apt key is installed - apt_key: - id: ACCC4CF8 - url: https://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc - - - name: Make sure pgdg repository is installed - apt_repository: - repo: "deb https://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main" - - - name: Update apt cache - apt: - update_cache: true - environment: "{{ proxy_env | default({}) }}" - when: - - installation_method == "repo" - - ansible_os_family == "Debian" - - pgbackrest_install_from_pgdg_repo|bool - tags: pgbackrest, pgbackrest_repo, pgbackrest_install - -- block: # RedHat pgdg repo - - name: Get pgdg-redhat-repo-latest.noarch.rpm - get_url: - url: "https://download.postgresql.org/pub/repos/yum/reporpms/EL-%7B%7B%20ansible_distribution_major_version%20%7D%7D-x86_64/pgdg-redhat-repo-latest.noarch.rpm" # noqa 204 - dest: /tmp/ - timeout: 30 - validate_certs: false - - - name: Make sure pgdg repository is installed - package: - name: /tmp/pgdg-redhat-repo-latest.noarch.rpm - state: present - - - name: Clean yum cache - command: yum clean all - args: - warn: false - when: ansible_distribution_major_version == '7' - - - name: Clean dnf cache - command: dnf clean all - args: - warn: false - when: ansible_distribution_major_version is version('8', '>=') - environment: "{{ proxy_env | default({}) }}" - when: - - installation_method == "repo" - - ansible_os_family == "RedHat" - - pgbackrest_install_from_pgdg_repo|bool - tags: pgbackrest, pgbackrest_repo, pgbackrest_install - -# (workaround for CentOS 8.0/8.1) -# install libzstd RPM from an archived EPEL 8.1 release -# The problem will be solved when CentOS 8.2 will be released. 
-- block: - - name: Get libzstd rpm package from archived EPEL - get_url: - url: https://dl.fedoraproject.org/pub/archive/epel/8.1/Everything/x86_64/Packages/l/libzstd-1.4.4-1.el8.x86_64.rpm - dest: /tmp/ - timeout: 120 - validate_certs: false - register: get_libzstd_result - - - name: Install libzstd - package: - name: /tmp/libzstd-1.4.4-1.el8.x86_64.rpm - state: present - when: get_libzstd_result is changed - environment: "{{ proxy_env | default({}) }}" - when: - - ansible_distribution == "CentOS" - - ansible_distribution_major_version == '8' - - ansible_distribution_version is version('8.1', '<=') - tags: pgbackrest, pgbackrest_install - -- name: Install pgbackrest - package: - name: pgbackrest - state: latest - environment: "{{ proxy_env | default({}) }}" - tags: pgbackrest, pgbackrest_install - -- block: - - name: Ensure config directory exist - file: - path: "{{ pgbackrest_conf_file | dirname }}" - state: directory - - - name: "Generate conf file {{ pgbackrest_conf_file }}" - template: - src: pgbackrest.conf.j2 - dest: "{{ pgbackrest_conf_file }}" - owner: postgres - group: postgres - mode: 0644 - when: "'postgres_cluster' in group_names" - tags: pgbackrest, pgbackrest_conf - -# if pgbackrest_repo_type: "posix" and pgbackrest_repo_host is set -- import_tasks: ssh_keys.yml - when: - - pgbackrest_repo_type|lower != "s3" - - pgbackrest_repo_host is defined - - pgbackrest_repo_host | length > 0 - tags: pgbackrest, pgbackrest_ssh_keys - -# - import_tasks: bootstrap_script.yml -# when: -# - patroni_cluster_bootstrap_method is defined -# - patroni_cluster_bootstrap_method == "pgbackrest" -# - "'postgres_cluster' in group_names" -# tags: pgbackrest, pgbackrest_bootstrap_script - -... diff --git a/roles/pgbackrest/templates/pgbackrest.conf.j2 b/roles/pgbackrest/templates/pgbackrest.conf.j2 deleted file mode 100644 index bba6c6180..000000000 --- a/roles/pgbackrest/templates/pgbackrest.conf.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[global] -{% for global in pgbackrest_conf.global %} -{{ global.option }}={{ global.value }} -{% endfor %} - -[{{ pgbackrest_stanza }}] -{% for stanza in pgbackrest_conf.stanza %} -{{ stanza.option }}={{ stanza.value }} -{% endfor %} - diff --git a/roles/pgbouncer/handlers/main.yml b/roles/pgbouncer/handlers/main.yml deleted file mode 100644 index eca6b89aa..000000000 --- a/roles/pgbouncer/handlers/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- - -- name: Restart pgbouncer service - systemd: - daemon_reload: true - name: pgbouncer - enabled: true - state: restarted - listen: "restart pgbouncer" - -- name: Wait for port "{{ pgbouncer_listen_port }}" to become open on the host - wait_for: - port: "{{ pgbouncer_listen_port }}" - host: "{{ hostvars[inventory_hostname]['inventory_hostname'] }}" - state: started - timeout: 120 - delay: 5 - ignore_errors: false - listen: "restart pgbouncer" - -... 
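The pgbackrest.conf.j2 template shown above renders option/value pairs from a single dictionary. A hedged example of the pgbackrest_conf structure it expects; the option names are common pgBackRest settings, and the values are illustrative:

pgbackrest_stanza: "postgresql-cluster"
pgbackrest_conf:
  global:
    - {option: 'repo1-path', value: '/var/lib/pgbackrest'}
    - {option: 'log-level-file', value: 'detail'}
  stanza:
    - {option: 'pg1-path', value: '/var/lib/postgresql/data'}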
diff --git a/roles/pgbouncer/tasks/main.yml b/roles/pgbouncer/tasks/main.yml deleted file mode 100644 index 805b092ef..000000000 --- a/roles/pgbouncer/tasks/main.yml +++ /dev/null @@ -1,158 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Install pgbouncer package - package: - name: pgbouncer - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "Debian" or - (ansible_os_family == "RedHat" and - ansible_distribution_major_version == '7') - tags: pgbouncer_install, pgbouncer - -# RHEL 8 -- name: Install pgbouncer package - dnf: - name: pgbouncer - disablerepo: AppStream - environment: "{{ proxy_env | default({}) }}" - when: ansible_os_family == "RedHat" and - ansible_distribution_major_version >= '8' - tags: pgbouncer_install, pgbouncer - -- name: Ensure config directory "{{ pgbouncer_conf_dir }}" exists - file: - path: "{{ pgbouncer_conf_dir }}" - state: directory - owner: postgres - group: postgres - mode: 0750 - tags: pgbouncer_conf, pgbouncer - -- name: Stop and disable standard init script - service: - name: pgbouncer - state: stopped - enabled: false - when: ansible_os_family == "Debian" - tags: pgbouncer_service, pgbouncer - -- name: Copy systemd service file - template: - src: templates/pgbouncer.service.j2 - dest: /etc/systemd/system/pgbouncer.service - owner: postgres - group: postgres - mode: 0644 - notify: "restart pgbouncer" - tags: pgbouncer_service, pgbouncer - -- block: # workaround for pgbouncer from postgrespro repo - - name: Check that /usr/bin/pgbouncer exists - stat: - path: /usr/bin/pgbouncer - register: pgbouncer_bin - - - name: create a symlink to /usr/sbin/pgbouncer - file: - src: /usr/sbin/pgbouncer - dest: /usr/bin/pgbouncer - owner: root - group: root - state: link - when: not pgbouncer_bin.stat.exists - when: ansible_os_family == "RedHat" and - postgresql_packages|join(" ") is search("postgrespro") - tags: pgbouncer_service, pgbouncer - -- name: Enable log rotation with logrotate - copy: - content: | - /var/log/pgbouncer/pgbouncer.log { - daily - rotate 7 - copytruncate - delaycompress - compress - notifempty - missingok - su root root - } - dest: /etc/logrotate.d/pgbouncer - tags: pgbouncer_logrotate, pgbouncer - -- name: Configure pgbouncer.ini - template: - src: templates/pgbouncer.ini.j2 - dest: "{{ pgbouncer_conf_dir }}/pgbouncer.ini" - owner: postgres - group: postgres - mode: 0640 - notify: "restart pgbouncer" - when: existing_pgcluster is not defined or not existing_pgcluster|bool - tags: pgbouncer_conf, pgbouncer - -- name: Create userlist.txt - template: - src: templates/userlist.txt.j2 - dest: "{{ pgbouncer_conf_dir }}/userlist.txt" - owner: postgres - group: postgres - mode: 0640 - when: existing_pgcluster is not defined or not existing_pgcluster|bool - tags: pgbouncer - -- block: # for add_pgnode.yml - - name: Fetch pgbouncer.ini and userlist.txt conf files from master - run_once: true - fetch: - src: "{{ item }}" - dest: files/ - validate_checksum: true - flat: true - loop: - - /etc/pgbouncer/pgbouncer.ini - - /etc/pgbouncer/userlist.txt - delegate_to: "{{ groups.master[0] }}" - - - name: Copy pgbouncer.ini and userlist.txt conf files to replica - copy: - src: "files/{{ item }}" - dest: /etc/pgbouncer/ - owner: postgres - group: postgres - mode: 0640 - loop: - - pgbouncer.ini - - userlist.txt - - - name: Prepare pgbouncer.ini conf file (replace "listen_addr") - lineinfile: - path: /etc/pgbouncer/pgbouncer.ini - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp:
'^listen_addr =', line: 'listen_addr = {{ hostvars[inventory_hostname].inventory_hostname }}'} - loop_control: - label: "{{ item.line }}" - notify: "restart pgbouncer" - when: with_haproxy_load_balancing|bool or - (cluster_vip is not defined or cluster_vip | length < 1) - - - name: Prepare pgbouncer.ini conf file (replace "listen_addr") - lineinfile: - path: /etc/pgbouncer/pgbouncer.ini - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - backrefs: true - loop: - - {regexp: '^listen_addr =', line: 'listen_addr = {{ hostvars[inventory_hostname].inventory_hostname }},{{ cluster_vip }}'} - loop_control: - label: "{{ item.line }}" - notify: "restart pgbouncer" - when: not with_haproxy_load_balancing|bool and (cluster_vip is defined and cluster_vip | length > 0 ) - when: existing_pgcluster is defined and existing_pgcluster|bool - tags: pgbouncer_conf, pgbouncer - -... diff --git a/roles/pgbouncer/templates/pgbouncer.ini.j2 b/roles/pgbouncer/templates/pgbouncer.ini.j2 deleted file mode 100644 index dd807c8ad..000000000 --- a/roles/pgbouncer/templates/pgbouncer.ini.j2 +++ /dev/null @@ -1,36 +0,0 @@ -[databases] -{% for pool in pgbouncer_pools %} -{{ pool.name }} = host=127.0.0.1 port={{ postgresql_port }} dbname={{ pool.dbname }} {{ pool.pool_parameters }} -{% endfor %} - -* = host=127.0.0.1 port={{ postgresql_port }} - -[pgbouncer] -logfile = {{ pgbouncer_log_dir }}/pgbouncer.log -pidfile = /var/run/pgbouncer/pgbouncer.pid -{% if not with_haproxy_load_balancing|bool and cluster_vip is defined and cluster_vip | length > 0 %} -listen_addr = {{ hostvars[inventory_hostname]['inventory_hostname'] }},{{ cluster_vip }} -{% else %} -listen_addr = {{ hostvars[inventory_hostname]['inventory_hostname'] }} -{% endif %} -listen_port = {{ pgbouncer_listen_port | default(6432) }} -unix_socket_dir = /var/run/postgresql -auth_type = md5 -auth_file = /etc/pgbouncer/userlist.txt -admin_users = postgres -ignore_startup_parameters = extra_float_digits,geqo - -pool_mode = {{ pgbouncer_default_pool_mode }} -server_reset_query = DISCARD ALL -max_client_conn = {{ pgbouncer_max_client_conn }} -default_pool_size = {{ pgbouncer_default_pool_size }} -reserve_pool_size = 1 -reserve_pool_timeout = 1 -max_db_connections = {{ pgbouncer_max_db_connections }} -pkt_buf = 8192 -listen_backlog = 4096 - -log_connections = 0 -log_disconnections = 0 - -# Documentation https://pgbouncer.github.io/config.html diff --git a/roles/pgbouncer/userlist/handlers/main.yml b/roles/pgbouncer/userlist/handlers/main.yml deleted file mode 100644 index 88e2d185d..000000000 --- a/roles/pgbouncer/userlist/handlers/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: Reload pgbouncer service - systemd: - name: pgbouncer - state: reloaded - listen: "reload pgbouncer" - ignore_errors: true # Added to prevent test failures in CI. - -... 
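The pgbouncer.ini.j2 template above builds its [databases] section from a pgbouncer_pools list. A hedged sketch of that variable; the pool names and parameters are illustrative:

pgbouncer_pools:
  - {name: 'postgres', dbname: 'postgres', pool_parameters: ''}
  - {name: 'myapp_db', dbname: 'myapp_db', pool_parameters: 'pool_size=20 pool_mode=transaction'}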
diff --git a/roles/pgbouncer/userlist/tasks/main.yml b/roles/pgbouncer/userlist/tasks/main.yml deleted file mode 100644 index e60e3fba8..000000000 --- a/roles/pgbouncer/userlist/tasks/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -# yamllint disable rule:line-length - -- name: Get users and password md5 from pg_shadow - run_once: true - become: true - become_user: postgres - command: > - {{ postgresql_bin_dir }}/psql -p {{ postgresql_port }} -U postgres -Atq - -c "SELECT concat('\"', usename, '\" \"', passwd, '\"') FROM pg_shadow where usename != '{{ patroni_replication_username }}'" - register: pg_shadow_result - changed_when: false - delegate_to: "{{ groups.master[0] }}" - when: pgbouncer_generate_userlist|bool - tags: pgbouncer, pgbouncer_generate_userlist - -- name: Generate /etc/pgbouncer/userlist.txt - become: true - become_user: postgres - copy: - content: | - {{ pg_shadow_result.stdout }} - - dest: /etc/pgbouncer/userlist.txt - notify: "reload pgbouncer" - when: - - pg_shadow_result.rc == 0 - - pg_shadow_result.stdout is defined - - pg_shadow_result.stdout | length > 0 - - pgbouncer_generate_userlist|bool - tags: pgbouncer, pgbouncer_generate_userlist - -... diff --git a/roles/postgresql-extensions/tasks/main.yml b/roles/postgresql-extensions/tasks/main.yml deleted file mode 100644 index c8acd257b..000000000 --- a/roles/postgresql-extensions/tasks/main.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# yamllint disable rule:comments-indentation - -- name: Add extensions to the databases - become: true - become_user: postgres - postgresql_ext: - db: "{{ item.db }}" - name: "{{ item.ext }}" - login_unix_socket: "{{ postgresql_unix_socket_dir }}" # added in 2.8 - port: "{{ postgresql_port }}" - state: present - ignore_errors: true - loop: "{{ postgresql_extensions | flatten(1) }}" - when: - - ansible_version.full is version('2.8.0', '>=') - - postgresql_extensions is defined - - postgresql_extensions | length > 0 - tags: postgresql_extensions - - -# if Ansible version 2.7 (for compatibility) -- name: Add extensions to the databases - become: true - become_user: postgres - postgresql_ext: - db: "{{ item.db }}" - name: "{{ item.ext }}" - login_host: "127.0.0.1" - login_port: "{{ postgresql_port }}" - login_user: "{{ patroni_superuser_username }}" - login_password: "{{ patroni_superuser_password }}" - state: present - ignore_errors: true - loop: "{{ postgresql_extensions | flatten(1) }}" - when: - - ansible_version.full is version('2.8.0', '<') - - postgresql_extensions is defined - - postgresql_extensions | length > 0 - tags: postgresql_extensions - -... diff --git a/roles/postgresql-users/tasks/main.yml b/roles/postgresql-users/tasks/main.yml deleted file mode 100644 index fa9d3bb44..000000000 --- a/roles/postgresql-users/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- - -- name: Make sure the PostgreSQL users are present - become: true - become_user: postgres - postgresql_user: - name: "{{ item.name }}" - password: "{{ item.password }}" - encrypted: true - login_unix_socket: "{{ postgresql_unix_socket_dir }}" - port: "{{ postgresql_port }}" - state: present - ignore_errors: true - loop: "{{ postgresql_users | flatten(1) }}" - loop_control: - label: "{{ item.name }}" - when: postgresql_users is defined and postgresql_users | length > 0 - tags: postgresql_users - -... 
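The pgbouncer userlist, postgresql-extensions, and postgresql-users task files deleted above all iterate over list variables with a fixed item shape. A minimal sketch of the inputs they expect, matching the commented examples in vars/main.yml further below (names and passwords are placeholders):

postgresql_users:
  - {name: "mydb-user", password: "mydb-user-pass"}

postgresql_extensions:
  - {ext: "pg_stat_statements", db: "postgres"}

With pgbouncer_generate_userlist enabled, the userlist task queries pg_shadow once on the master (excluding the replication user) and writes each row as a "username" "md5hash" pair to /etc/pgbouncer/userlist.txt, notifying the "reload pgbouncer" handler.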
diff --git a/roles/timezone/tasks/main.yml b/roles/timezone/tasks/main.yml deleted file mode 100644 index fd70266d0..000000000 --- a/roles/timezone/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: Set timezone to "{{ timezone }}" - become: true - become_method: sudo - timezone: - name: "{{ timezone }}" - when: timezone is defined and timezone | length > 0 - tags: timezone - -... diff --git a/roles/vip-manager/templates/vip-manager.yml.j2 b/roles/vip-manager/templates/vip-manager.yml.j2 deleted file mode 100644 index be4381e23..000000000 --- a/roles/vip-manager/templates/vip-manager.yml.j2 +++ /dev/null @@ -1,53 +0,0 @@ -# config for vip-manager by Cybertec Schönig & Schönig GmbH - -# time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. -interval: {{ vip_manager_interval }} - -# the etcd or consul key which vip-manager will regularly poll. -trigger-key: "/service/{{ patroni_cluster_name }}/leader" -# if the value of the above key matches the trigger-value (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface -trigger-value: "{{ ansible_hostname }}" - -ip: {{ vip_manager_ip }} # the virtual ip address to manage -netmask: {{ vip_manager_mask }} # netmask for the virtual ip -interface: {{ vip_manager_iface }} # interface to which the virtual ip will be added - -# how the virtual ip should be managed. we currently support "ip addr add/remove" through shell commands or the Hetzner api -hosting-type: basic # possible values: basic, or hetzner. - -dcs-type: {{ dcs_type }} # etcd or consul -# a list that contains all DCS endpoints to which vip-manager could talk. -{% if not dcs_exists|bool and dcs_type == 'etcd' %} -dcs-endpoints: -{% for host in groups['etcd_cluster'] %} - - http://{{ hostvars[host]['inventory_hostname'] }}:2379 -{% endfor %} -{% endif %} -{% if dcs_exists|bool and dcs_type == 'etcd' %} -dcs-endpoints: -{% for etcd_hosts in patroni_etcd_hosts %} - - http://{{ etcd_hosts.host }}:{{ etcd_hosts.port }} -{% endfor %} -{% endif %} - - # consul will always only use the first entry from this list. - # For consul, you'll obviously need to change the port to 8500. - -#etcd-user: "patroni" -#etcd-password: "Julian's secret password" - -# when etcd-ca-file is specified, TLS connections to the etcd endpoints will be used. -#etcd-ca-file: "/path/to/etcd/trusted/ca/file" -# when etcd-cert-file and etcd-key-file are specified, we will authenticate at the etcd endpoints using this certificate and key. -#etcd-cert-file: "/path/to/etcd/client/cert/file" -#etcd-key-file: "/path/to/etcd/client/key/file" - -# don't worry about parameter with a prefix that doesn't match the endpoint_type. You can write anything there, I won't even look at it. -#consul-token: "Julian's secret token" - -# how often things should be retried and how long to wait between retries. 
(currently only affects arpClient) -retry-num: 2 -retry-after: 250 #in milliseconds - -# verbose logs (currently only supported for hetzner) -verbose: false diff --git a/roles/wal-g/tasks/main.yml b/roles/wal-g/tasks/main.yml deleted file mode 100644 index b0b03b667..000000000 --- a/roles/wal-g/tasks/main.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- - -- block: # install wal-g package from repo - - name: Download "wal-g" binary - get_url: - url: "{{ item }}" - dest: /tmp/ - timeout: 60 - validate_certs: false - loop: - - "{{ wal_g_package_repo }}" - environment: "{{ proxy_env | default({}) }}" - - - name: Extract "wal-g" into /tmp - unarchive: - src: "/tmp/{{ wal_g_package_repo | basename }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - remote_src: true - - - name: copy "wal-g" binary files to /usr/local/bin/ - copy: - src: "/tmp/wal-g" - dest: /usr/local/bin/ - mode: u+x,g+x,o+x - remote_src: true - - when: installation_method == "repo" and wal_g_package_repo | length > 0 - tags: wal-g, wal_g, wal_g_install - -- block: # install wal-g package from file - - name: Extract "wal-g" into /tmp - unarchive: - src: "{{ wal_g_package_file }}" - dest: /tmp/ - extra_opts: - - --no-same-owner - - - name: Copy "wal-g" binary files to /usr/local/bin/ - copy: - src: "/tmp/wal-g" - dest: /usr/local/bin/ - mode: u+x,g+x,o+x - remote_src: true - - when: installation_method == "file" and wal_g_package_file | length > 0 - tags: wal-g, wal_g, wal_g_install - -- name: "Generate conf file {{ postgresql_home_dir }}/.walg.json" - template: - src: templates/walg.json.j2 - dest: "{{ postgresql_home_dir }}/.walg.json" - owner: postgres - group: postgres - mode: 0644 - tags: wal-g, wal_g, wal_g_conf - -... diff --git a/vars/Debian.yml b/vars/Debian.yml deleted file mode 100644 index 3f53178d5..000000000 --- a/vars/Debian.yml +++ /dev/null @@ -1,190 +0,0 @@ ---- -# yamllint disable rule:line-length - -# PostgreSQL variables -postgresql_cluster_name: "main" -postgresql_data_dir: "/var/lib/postgresql/{{ postgresql_version }}/{{ postgresql_cluster_name }}" # You can specify custom data dir path -postgresql_wal_dir: "" # custom WAL dir path (symlink will be created) [optional] -postgresql_conf_dir: "/etc/postgresql/{{ postgresql_version }}/{{ postgresql_cluster_name }}" -postgresql_bin_dir: "/usr/lib/postgresql/{{ postgresql_version }}/bin" -postgresql_log_dir: "/var/log/postgresql" -postgresql_unix_socket_dir: "/var/run/postgresql" -postgresql_home_dir: "/var/lib/postgresql" - -# stats_temp_directory (mount the statistics directory in tmpfs) -postgresql_stats_temp_directory_path: "/var/lib/pgsql_stats_tmp" # or 'none' -postgresql_stats_temp_directory_size: "1024m" - -postgresql_version_terse: "{{ postgresql_version | replace('.', '') }}" - -# Repository -apt_repository_keys: - - key: "https://www.postgresql.org/media/keys/ACCC4CF8.asc" # postgresql repository apt key -apt_repository: - - repo: "deb https://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main" # postgresql apt repository -# - repo: "deb https://deb.debian.org/debian/ {{ ansible_distribution_release }} main" # debian repo (optional) -# - repo: "deb https://deb.debian.org/debian/ {{ ansible_distribution_release }}-updates main" # debian repo (optional) -# - repo: "deb https://security.debian.org/debian-security/ {{ ansible_distribution_release }}/updates main" # debian repo (optional) - -# Packages (for apt repo) -os_specific_packages: - Debian-9: [python, python-dev, python-psycopg2, python-setuptools] - Debian-10: 
[python, python-dev, python-psycopg2, python-setuptools] - Debian-11: [python3] # python 2 is not required - Ubuntu-18: [python, python-dev, python-psycopg2, python-setuptools] - Ubuntu-20: [python3] # python 2 is not required -system_packages: - - "{{ os_specific_packages[ansible_distribution ~ '-' ~ ansible_distribution_major_version] }}" - - python3 - - python3-dev - - python3-psycopg2 - - python3-setuptools - - python3-pip - - curl - - less - - sudo - - vim - - gcc - - jq - - iptables - - acl -postgresql_packages: - - postgresql-{{ postgresql_version }} - - postgresql-client-{{ postgresql_version }} - - postgresql-server-dev-{{ postgresql_version }} - - postgresql-contrib-{{ postgresql_version }} -# - postgresql-{{ postgresql_version }}-repack -# Extra packages -etcd_package_repo: "https://github.com/etcd-io/etcd/releases/download/%7B%7B%20etcd_ver%20%7D%7D/etcd-%7B%7B%20etcd_ver%20%7D%7D-linux-amd64.tar.gz" -wal_g_package_repo: "https://github.com/wal-g/wal-g/releases/download/%7B%7B%20wal_g_ver%20%7D%7D/wal-g.linux-amd64.tar.gz" -vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D-1_amd64.deb" -# (if with_haproxy_load_balancing: true) -haproxy_installation_method: "deb" # (default)"deb" or "src" -haproxy_install_repo: true # or 'false' -# for Debian 8/9/10 the haproxy version 1.8 (LTS) will be installed from the haproxy.debian.net repository. -# for Ubuntu <=18.04 the haproxy version 1.8 (LTS) will be installed from the ppa:vbernat/haproxy-1.8 repository. -# you can preload the haproxy deb packages to your APT repository (in this case specify "haproxy_install_repo: false"). -confd_package_repo: "https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64" -# (optional) if haproxy_installation_method: 'src' -haproxy_major: "1.8" -haproxy_version: "1.8.25" # version to build from source -lua_src_repo: "https://www.lua.org/ftp/lua-5.3.5.tar.gz" # required to build haproxy -haproxy_src_repo: "https://www.haproxy.org/download/%7B%7B%20haproxy_major%20%7D%7D/src/haproxy-%7B%7B%20haproxy_version%20%7D%7D.tar.gz" -haproxy_compile_requirements: - - unzip - - gzip - - make - - gcc - - build-essential - - libc6-dev - - libpcre3-dev - - liblua5.3-dev - - libreadline-dev - - zlib1g-dev - - libsystemd-dev - - ca-certificates - - libssl-dev -# You can also use your own repository for extra packages. You need to preload all the packages and change these URLs. -installation_method: "repo" # (default)"repo" or "file" -# Patroni package will be installed from pip (by default). -# You also have the option of choosing a Patroni installation method using the deb package. -patroni_installation_method: "pip" # (default)"pip" or "deb" -# (if patroni_installation_method: "pip") -# Packages from your repository will be used to install instead of the pip repository. -pip_package_repo: "https://bootstrap.pypa.io/get-pip.py" # the latest pip3 for python3 (or use "pip-<version>.tar.gz"). 
-patroni_pip_requirements_repo: [] -# - "http://my-repo.url/setuptools-41.2.0.zip" -# - "http://my-repo.url/setuptools_scm-3.3.3.tar.gz" -# - "http://my-repo.url/urllib3-1.24.3.tar.gz" -# - "http://my-repo.url/boto-2.49.0.tar.gz" # (interfaces to Amazon Web Services) -# - "http://my-repo.url/PyYAML-5.1.2.tar.gz" -# - "http://my-repo.url/chardet-3.0.4.tar.gz" -# - "http://my-repo.url/idna-2.8.tar.gz" -# - "http://my-repo.url/certifi-2019.9.11.tar.gz" -# - "http://my-repo.url/requests-2.22.0.tar.gz" -# - "http://my-repo.url/six-1.12.0.tar.gz" -# - "http://my-repo.url/kazoo-2.6.1.tar.gz" -# - "http://my-repo.url/dnspython-1.16.0.zip" -# - "http://my-repo.url/python-etcd-0.4.5.tar.gz" -# - "http://my-repo.url/Click-7.0.tar.gz" -# - "http://my-repo.url/prettytable-0.7.2.tar.gz" -# - "http://my-repo.url/pytz-2019.2.tar.gz" -# - "http://my-repo.url/tzlocal-2.0.0.tar.gz" -# - "http://my-repo.url/wheel-0.33.6.tar.gz" -# - "http://my-repo.url/python-dateutil-2.8.0.tar.gz" -# - "http://my-repo.url/psutil-5.6.3.tar.gz" -# - "http://my-repo.url/cdiff-1.0.tar.gz" -patroni_pip_package_repo: [] -# - "http://my-repo.url/patroni-1.6.0.tar.gz" - -# ( if patroni_installation_type: "deb" ) -# You can preload the patroni deb package to your APT repository, or explicitly specify the path to the package in this variable: -patroni_deb_package_repo: [] -# - "https://apt.postgresql.org/pub/repos/apt/pool/main/p/patroni/patroni_1.6.5-2.pgdg100%2B1_all.deb" # (package for Debian 10) - - -# ================================================================================================= # -# (optional) if installation_method: "file" -# You can also download the necessary packages into postgresql_cluster/files/ directory. -# Packages from this directory will be used for installation. 
-pip_package_file: "pip-19.2.3.tar.gz" # https://pypi.org/project/pip/#files -patroni_pip_requirements_file: - - "setuptools-41.2.0.zip" # https://pypi.org/project/setuptools/#files - - "setuptools_scm-3.3.3.tar.gz" # https://pypi.org/project/setuptools-scm/#files - - "urllib3-1.24.3.tar.gz" # https://pypi.org/project/urllib3/1.24.3/#files - - "boto-2.49.0.tar.gz" # https://pypi.org/project/boto/#files # (interfaces to Amazon Web Services) - - "PyYAML-5.1.2.tar.gz" # https://pypi.org/project/PyYAML/#files - - "chardet-3.0.4.tar.gz" # https://pypi.org/project/chardet/#files # (required for "requests") - - "idna-2.8.tar.gz" # https://pypi.org/project/idna/#files # (required for "requests") - - "certifi-2019.9.11.tar.gz" # https://pypi.org/project/certifi/#files # (required for "requests") - - "requests-2.22.0.tar.gz" # https://pypi.org/project/requests/#files - - "six-1.12.0.tar.gz" # https://pypi.org/project/six/#files - - "kazoo-2.6.1.tar.gz" # https://pypi.org/project/kazoo/#files - - "dnspython-1.16.0.zip" # https://pypi.org/project/dnspython/#files # (required for "python-etcd") - - "python-etcd-0.4.5.tar.gz" # https://pypi.org/project/python-etcd/#files - - "Click-7.0.tar.gz" # https://pypi.org/project/click/#files - - "prettytable-0.7.2.tar.gz" # https://pypi.org/project/PrettyTable/#files - - "pytz-2019.2.tar.gz" # https://pypi.org/project/pytz/#files # (required for "tzlocal") - - "tzlocal-2.0.0.tar.gz" # https://pypi.org/project/tzlocal/#files - - "wheel-0.33.6.tar.gz" # https://pypi.org/project/wheel/#files - - "python-dateutil-2.8.0.tar.gz" # https://pypi.org/project/python-dateutil/#files - - "psutil-5.6.3.tar.gz" # https://pypi.org/project/psutil/#files - - "cdiff-1.0.tar.gz" # https://pypi.org/project/cdiff/#files -patroni_pip_package_file: - - "patroni-1.6.0.tar.gz" # https://pypi.org/project/patroni/#files - -# ( if installation_method: "file" and patroni_installation_type: "deb") -patroni_deb_package_file: "patroni_1.6.0-2.pgdg90+1_all.deb" # (package for Debian 9) https://apt.postgresql.org/pub/repos/apt/pool/main/p/patroni/ - -# additional packages -etcd_package_file: "etcd-v3.3.15-linux-amd64.tar.gz" # https://github.com/etcd-io/etcd/releases -wal_g_package_file: "wal-g.linux-amd64.tar.gz" # https://github.com/wal-g/wal-g/releases -vip_manager_package_file: "vip-manager_0.6-1_amd64.deb" # https://github.com/cybertec-postgresql/vip-manager/releases -# (if with_haproxy_load_balancing: true) -haproxy_package_file: [] -# - "haproxy_1.8.21-1~bpo9+1_amd64.deb" -confd_package_file: "confd-0.16.0-linux-amd64" # https://github.com/kelseyhightower/confd/releases -# (optional) if haproxy_installation_method: 'src' -lua_src_file: "lua-5.3.5.tar.gz" # https://www.lua.org/ftp/lua-5.3.5.tar.gz (required for build haproxy) -haproxy_src_file: "haproxy-1.8.20.tar.gz" # http://www.haproxy.org/download/1.8/src/ - -# ------------------------------------------------------------------------------------------------- # -# (optional) Specify additional deb packages if required (for any installation_method) -# this packages will be installed before all other packages. -packages_from_file: [] -# - "my-package-name_1_amd64.deb" -# - "my-package-name_2_amd64.deb" -# - "" - -# ---------------------------------------------------------------------------------------------------------------------------------- -# Attention! If you want to use the installation method "file". -# You need to independently determine all the necessary the dependencies of deb packages for your version of the Linux distribution. 
-# ---------------------------------------------------------------------------------------------------------------------------------- - -... diff --git a/vars/RedHat.yml b/vars/RedHat.yml deleted file mode 100644 index d20dce8d4..000000000 --- a/vars/RedHat.yml +++ /dev/null @@ -1,208 +0,0 @@ ---- -# yamllint disable rule:line-length - -# PostgreSQL variables -postgresql_data_dir: "/var/lib/pgsql/{{ postgresql_version }}/data" # You can specify custom data dir path -postgresql_wal_dir: "" # custom WAL dir path (symlink will be created) [optional] -postgresql_conf_dir: "{{ postgresql_data_dir }}" -postgresql_bin_dir: "/usr/pgsql-{{ postgresql_version }}/bin" -postgresql_log_dir: "/var/log/postgresql" -postgresql_unix_socket_dir: "/var/run/postgresql" -postgresql_home_dir: "/var/lib/pgsql" - -# stats_temp_directory (mount the statistics directory in tmpfs) -postgresql_stats_temp_directory_path: "/var/lib/pgsql_stats_tmp" # or 'none' -postgresql_stats_temp_directory_size: "1024m" - -postgresql_version_terse: "{{ postgresql_version | replace('.', '') }}" - -# Repository -yum_repository: [] -# - name: "repo name" -# description: "repo description" -# baseurl: "https://my-repo.url/" -# gpgkey: "https://my-repo-key.url/" -# gpgcheck: "yes" - -install_epel_repo: true # or 'false' (installed from the package "epel-release-latest.noarch.rpm") -install_postgresql_repo: true # or 'false' (installed from the package "pgdg-redhat-repo-latest.noarch.rpm") -install_scl_repo: true # or 'false' (Redhat 7 family only) - -# Packages (for yum repo) -os_specific_packages: - RedHat-7: - - python - - python-devel - - python-psycopg2 - - python-setuptools - - libselinux-python - - libsemanage-python - - policycoreutils-python - RedHat-8: - - python2 - - python3-libselinux - - python3-libsemanage - - python3-policycoreutils -system_packages: - - "{{ os_specific_packages[ansible_os_family ~ '-' ~ ansible_distribution_major_version] }}" - - python3 - - python3-devel - - python3-psycopg2 - - python3-setuptools - - python3-pip - - curl - - less - - sudo - - vim - - gcc - - jq - - iptables - - acl - -# The glibc-langpack package includes the basic information required to support the language in your applications. -# for RHEL version 8 (only) -glibc_langpack: - - "glibc-langpack-en" - - "glibc-langpack-ru" # optional -# - "glibc-langpack-de" - -postgresql_packages: - - postgresql{{ postgresql_version_terse }} - - postgresql{{ postgresql_version_terse }}-server - - postgresql{{ postgresql_version_terse }}-contrib - - postgresql{{ postgresql_version_terse }}-devel -# - pg_repack{{ postgresql_version_terse }} - -# Extra packages -etcd_package_repo: "https://github.com/etcd-io/etcd/releases/download/%7B%7B%20etcd_ver%20%7D%7D/etcd-%7B%7B%20etcd_ver%20%7D%7D-linux-amd64.tar.gz" -wal_g_package_repo: "https://github.com/wal-g/wal-g/releases/download/%7B%7B%20wal_g_ver%20%7D%7D/wal-g.linux-amd64.tar.gz" -vip_manager_package_repo: "https://github.com/cybertec-postgresql/vip-manager/releases/download/v%7B%7B%20vip_manager_version%20%7D%7D/vip-manager_%7B%7B%20vip_manager_version%20%7D%7D-1_amd64.rpm" -# (if with_haproxy_load_balancing: true) -haproxy_installation_method: "rpm" # (default)"rpm" or "src" -haproxy_install_repo: true # or 'false' # only for RedHat/CentOS version 7. -# for RedHat/CentOS/OracleLinux 7 the haproxy version 1.8 (LTS) will be installed from the "rh-haproxy18" package from the Software Collections (SCL) repository. 
-# you can preload the haproxy rpm packages to your YUM repository (in this case specify "haproxy_install_repo: false"). -confd_package_repo: "https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64" -# (optional) if haproxy_installation_method: 'src' -haproxy_major: "1.8" -haproxy_version: "1.8.25" # version to build from source -lua_src_repo: "https://www.lua.org/ftp/lua-5.3.5.tar.gz" # required to build haproxy -haproxy_src_repo: "https://www.haproxy.org/download/%7B%7B%20haproxy_major%20%7D%7D/src/haproxy-%7B%7B%20haproxy_version%20%7D%7D.tar.gz" -haproxy_compile_requirements: - - unzip - - gzip - - make - - gcc - - gcc-c++ - - pcre-devel - - zlib-devel - - readline-devel - - openssl - - openssl-devel - - openssl-libs - - systemd-devel -# You can also use your own repository for extra packages. You need to preload all the packages and change these URLs. -installation_method: "repo" # (default)"repo" or "file" -# Patroni package will be installed from the pip repository (by default). -# You also have the option of choosing a Patroni installation method using the rpm package. -patroni_installation_method: "pip" # (default)"pip" or "rpm" -# (if patroni_installation_method: "pip") -# Packages from your repository will be used to install instead of the pip repository. -pip_package_repo: "https://bootstrap.pypa.io/get-pip.py" # the latest pip3 for python3 (or use "pip-<version>.tar.gz"). -patroni_pip_requirements_repo: [] -# - "http://my-repo.url/setuptools-41.2.0.zip" -# - "http://my-repo.url/setuptools_scm-3.3.3.tar.gz" -# - "http://my-repo.url/urllib3-1.24.3.tar.gz" -# - "http://my-repo.url/boto-2.49.0.tar.gz" # (interfaces to Amazon Web Services) -# - "http://my-repo.url/PyYAML-5.1.2.tar.gz" -# - "http://my-repo.url/chardet-3.0.4.tar.gz" -# - "http://my-repo.url/idna-2.8.tar.gz" -# - "http://my-repo.url/certifi-2019.9.11.tar.gz" -# - "http://my-repo.url/requests-2.22.0.tar.gz" -# - "http://my-repo.url/six-1.12.0.tar.gz" -# - "http://my-repo.url/kazoo-2.6.1.tar.gz" -# - "http://my-repo.url/dnspython-1.16.0.zip" -# - "http://my-repo.url/python-etcd-0.4.5.tar.gz" -# - "http://my-repo.url/Click-7.0.tar.gz" -# - "http://my-repo.url/prettytable-0.7.2.tar.gz" -# - "http://my-repo.url/pytz-2019.2.tar.gz" -# - "http://my-repo.url/tzlocal-2.0.0.tar.gz" -# - "http://my-repo.url/wheel-0.33.6.tar.gz" -# - "http://my-repo.url/python-dateutil-2.8.0.tar.gz" -# - "http://my-repo.url/psutil-5.6.3.tar.gz" -# - "http://my-repo.url/cdiff-1.0.tar.gz" -patroni_pip_package_repo: [] -# - "http://my-repo.url/patroni-1.6.0.tar.gz" -# (if patroni_installation_method: "rpm") -# You can preload the patroni rpm package to your YUM repository, or explicitly specify the path to the package in this variable: -patroni_rpm_package_repo: [] -# - "https://github.com/cybertec-postgresql/patroni-packaging/releases/download/1.6.5-1/patroni-1.6.5-1.rhel7.x86_64.rpm" # (package for RHEL/CentOS 7) -# ================================================================================================= # -# (optional) if installation_method: "file" -# You can also download the necessary packages into postgresql_cluster/files/ directory. -# Packages from this directory will be used for installation. 
-pip_package_file: "pip-19.2.3.tar.gz" # https://pypi.org/project/pip/#files -patroni_pip_requirements_file: - - "setuptools-41.2.0.zip" # https://pypi.org/project/setuptools/#files - - "setuptools_scm-3.3.3.tar.gz" # https://pypi.org/project/setuptools-scm/#files - - "urllib3-1.24.3.tar.gz" # https://pypi.org/project/urllib3/1.24.3/#files - - "boto-2.49.0.tar.gz" # https://pypi.org/project/boto/#files # (interfaces to Amazon Web Services) - - "PyYAML-5.1.2.tar.gz" # https://pypi.org/project/PyYAML/#files - - "chardet-3.0.4.tar.gz" # https://pypi.org/project/chardet/#files # (required for "requests") - - "idna-2.8.tar.gz" # https://pypi.org/project/idna/#files # (required for "requests") - - "certifi-2019.9.11.tar.gz" # https://pypi.org/project/certifi/#files # (required for "requests") - - "requests-2.22.0.tar.gz" # https://pypi.org/project/requests/#files - - "six-1.12.0.tar.gz" # https://pypi.org/project/six/#files - - "kazoo-2.6.1.tar.gz" # https://pypi.org/project/kazoo/#files - - "dnspython-1.16.0.zip" # https://pypi.org/project/dnspython/#files # (required for "python-etcd") - - "python-etcd-0.4.5.tar.gz" # https://pypi.org/project/python-etcd/#files - - "Click-7.0.tar.gz" # https://pypi.org/project/click/#files - - "prettytable-0.7.2.tar.gz" # https://pypi.org/project/PrettyTable/#files - - "pytz-2019.2.tar.gz" # https://pypi.org/project/pytz/#files # (required for "tzlocal") - - "tzlocal-2.0.0.tar.gz" # https://pypi.org/project/tzlocal/#files - - "wheel-0.33.6.tar.gz" # https://pypi.org/project/wheel/#files - - "python-dateutil-2.8.0.tar.gz" # https://pypi.org/project/python-dateutil/#files - - "psutil-5.6.3.tar.gz" # https://pypi.org/project/psutil/#files - - "cdiff-1.0.tar.gz" # https://pypi.org/project/cdiff/#files -patroni_pip_package_file: - - "patroni-1.6.0.tar.gz" # https://pypi.org/project/patroni/#files - -# ( if patroni_installation_type: "rpm" and installation_method: "file" ) -patroni_rpm_package_file: "patroni-1.6.0-1.rhel7.x86_64.rpm" # (package for RHEL/CentOS 7) https://github.com/cybertec-postgresql/patroni-packaging/releases/ - -# additional packages -etcd_package_file: "etcd-v3.3.15-linux-amd64.tar.gz" # https://github.com/etcd-io/etcd/releases -wal_g_package_file: "wal-g.linux-amd64.tar.gz" # https://github.com/wal-g/wal-g/releases -vip_manager_package_file: "vip-manager_0.6-1_amd64.rpm" # https://github.com/cybertec-postgresql/vip-manager/releases -## (if with_haproxy_load_balancing: true) -haproxy_package_file: [] -# - "rh-haproxy18-runtime-3.1-2.el7.x86_64.rpm" -# - "rh-haproxy18-haproxy-1.8.17-1.el7.x86_64.rpm" -confd_package_file: "confd-0.16.0-linux-amd64" # https://github.com/kelseyhightower/confd/releases -# (optional) if haproxy_installation_method: 'src' -lua_src_file: "lua-5.3.5.tar.gz" # https://www.lua.org/ftp/lua-5.3.5.tar.gz (required for build haproxy) -haproxy_src_file: "haproxy-1.8.21.tar.gz" # http://www.haproxy.org/download/1.8/src/ - -# ------------------------------------------------------------------------------------------------- # -# (optional) Specify additional rpm packages if required (for any installation_method) -# this packages will be installed before all other packages. 
-packages_from_file: [] -# - "python3-psycopg2-2.7.7-2.el7.x86_64.rpm" # https://mirror.linux-ia64.org/epel/7/x86_64/Packages/p/ # (required for patroni rpm) -# - "libyaml-0.1.4-11.el7_0.x86_64.rpm" # (required for patroni rpm) -# - "jq-1.5-1.el7.x86_64.rpm" # https://mirror.linux-ia64.org/epel/7/x86_64/Packages/j/ -# - "other-package-name_1_amd64.rpm" -# - "" -# ---------------------------------------------------------------------------------------------------------------------------------- -# Attention! If you want to use the installation method "file", -# you need to independently determine all the necessary dependencies of rpm packages for your version of the Linux distribution. -# ---------------------------------------------------------------------------------------------------------------------------------- -... diff --git a/vars/main.yml b/vars/main.yml deleted file mode 100644 index 5a55797d7..000000000 --- a/vars/main.yml +++ /dev/null @@ -1,346 +0,0 @@ ---- -# yamllint disable rule:line-length -# yamllint disable rule:comments-indentation -# Proxy variables (optional) for downloading packages using a proxy server -proxy_env: {} -# http_proxy: http://10.128.64.9:3128 -# https_proxy: http://10.128.64.9:3128 -# ------------------------------------------- -# Cluster variables -cluster_vip: "" # ip address for client access to databases in the cluster (optional) -vip_interface: "{{ ansible_default_ipv4.interface }}" # interface name (ex. "ens32") -patroni_cluster_name: "postgres-cluster" # specify the cluster name -patroni_install_version: "latest" # or specific version (example 1.5.6) -patroni_superuser_username: "postgres" -patroni_superuser_password: "postgres-pass" # please change password -patroni_replication_username: "replicator" -patroni_replication_password: "replicator-pass" # please change password -synchronous_mode: false # or 'true' to enable synchronous database replication -synchronous_mode_strict: false # if 'true', block all client writes to the master when a synchronous replica is not available -synchronous_node_count: 1 # number of synchronous standby databases -# Load Balancing -with_haproxy_load_balancing: false # or 'true' if you want to install and configure load balancing -haproxy_listen_port: - master: 5000 - replicas: 5001 - replicas_sync: 5002 - replicas_async: 5003 - stats: 7000 -haproxy_maxconn: - global: 100000 - master: 10000 - replica: 10000 -haproxy_timeout: - client: "60m" - server: "60m" -# vip-manager (if cluster_vip is specified and with_haproxy_load_balancing: false) -vip_manager_version: "1.0" # version to install -vip_manager_conf: "/etc/patroni/vip-manager.yml" -vip_manager_interval: "1000" # time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. 
-vip_manager_iface: "{{ vip_interface }}" # interface to which the virtual ip will be added -vip_manager_ip: "{{ cluster_vip }}" # the virtual ip address to manage -vip_manager_mask: "24" # netmask for the virtual ip - - -# DCS (Distributed Consensus Store) -dcs_exists: false # or 'true' if you do not want to install and configure the etcd cluster -dcs_type: "etcd" - -# if dcs_exists: false and dcs_type: "etcd" -etcd_ver: "v3.3.25" # version for deploy etcd cluster -etcd_data_dir: "/var/lib/etcd" -etcd_cluster_name: "etcd-{{ patroni_cluster_name }}" # ETCD_INITIAL_CLUSTER_TOKEN - -# if dcs_exists: true and dcs_type: "etcd" - specify ip address your etcd cluster in the "patroni_etcd_hosts" variable -# example (use existing cluster of 3 nodes) -patroni_etcd_hosts: [] -# - { host: "10.128.64.140", port: "2379" } -# - { host: "10.128.64.142", port: "2379" } -# - { host: "10.128.64.143", port: "2379" } - -# more options you can specify in the roles/patroni/templates/patroni.yml.j2 -# https://patroni.readthedocs.io/en/latest/SETTINGS.html#etcd -# https://patroni.readthedocs.io/en/latest/SETTINGS.html#consul - - -# PostgreSQL variables -postgresql_version: "13" -# postgresql_data_dir: see vars/Debian.yml or vars/RedHat.yml -postgresql_port: "5432" -postgresql_encoding: "UTF8" # for bootstrap only (initdb) -postgresql_locale: "en_US.UTF-8" # for bootstrap only (initdb) -postgresql_data_checksums: true # for bootstrap only (initdb) - -# (optional) list of users to be created (if not already exists) -postgresql_users: [] -# - {name: "mydb-user", password: "mydb-user-pass"} -# - {name: "", password: ""} -# - {name: "", password: ""} -# - {name: "", password: ""} - -# (optional) list of databases to be created (if not already exists) -postgresql_databases: [] -# - {db: "mydatabase", encoding: "UTF8", lc_collate: "ru_RU.UTF-8", lc_ctype: "ru_RU.UTF-8", owner: "mydb-user"} -# - {db: "", encoding: "UTF8", lc_collate: "en_US.UTF-8", lc_ctype: "en_US.UTF-8", owner: ""} -# - {db: "", encoding: "UTF8", lc_collate: "en_US.UTF-8", lc_ctype: "en_US.UTF-8", owner: ""} - -# (optional) list of database extensions to be created (if not already exists) -postgresql_extensions: [] -# - {ext: "pg_stat_statements", db: "postgres"} -# - {ext: "pg_stat_statements", db: "mydatabase"} -# - {ext: "pg_stat_statements", db: ""} -# - {ext: "pg_stat_statements", db: ""} -# - {ext: "pg_repack", db: ""} # postgresql--repack package is required -# - {ext: "pg_stat_kcache", db: ""} # postgresql--pg-stat-kcache package is required -# - {ext: "", db: ""} -# - {ext: "", db: ""} - - -# postgresql parameters to bootstrap dcs (are parameters for example) -postgresql_parameters: - - {option: "max_connections", value: "100"} - - {option: "superuser_reserved_connections", value: "5"} - - {option: "max_locks_per_transaction", value: "64"} # raise this value (ex. 
512) if you have queries that touch many different tables (partitioning) - - {option: "max_prepared_transactions", value: "0"} - - {option: "huge_pages", value: "try"} # or "on" if you set "vm_nr_hugepages" in kernel parameters - - {option: "shared_buffers", value: "512MB"} # please change this value - - {option: "work_mem", value: "128MB"} # please change this value - - {option: "maintenance_work_mem", value: "256MB"} # please change this value - - {option: "effective_cache_size", value: "4GB"} # please change this value - - {option: "checkpoint_timeout", value: "15min"} - - {option: "checkpoint_completion_target", value: "0.9"} - - {option: "wal_compression", value: "on"} - - {option: "min_wal_size", value: "2GB"} # for PostgreSQL 9.5 and above (for 9.4 use "checkpoint_segments") - - {option: "max_wal_size", value: "4GB"} # for PostgreSQL 9.5 and above (for 9.4 use "checkpoint_segments") - - {option: "wal_buffers", value: "32MB"} - - {option: "default_statistics_target", value: "1000"} - - {option: "seq_page_cost", value: "1"} - - {option: "random_page_cost", value: "4"} # "1.1" for SSD storage. Also, if your database fits in shared_buffers - - {option: "effective_io_concurrency", value: "2"} # "200" for SSD storage - - {option: "synchronous_commit", value: "on"} # or 'off' if you can afford to lose single transactions in case of a crash - - {option: "autovacuum", value: "on"} # never turn off the autovacuum! - - {option: "autovacuum_max_workers", value: "5"} - - {option: "autovacuum_vacuum_scale_factor", value: "0.01"} - - {option: "autovacuum_analyze_scale_factor", value: "0.02"} - - {option: "autovacuum_vacuum_cost_limit", value: "200"} # or 500/1000 - - {option: "autovacuum_vacuum_cost_delay", value: "20"} - - {option: "autovacuum_naptime", value: "1s"} - - {option: "max_files_per_process", value: "4096"} - - {option: "archive_mode", value: "on"} - - {option: "archive_timeout", value: "1800s"} - - {option: "archive_command", value: "cd ."} # not doing anything yet with WAL-s -# - {option: "archive_command", value: "wal-g wal-push %p"} # archive WAL-s using WAL-G -# - {option: "archive_command", value: "pgbackrest --stanza={{ pgbackrest_stanza }} archive-push %p"} # archive WAL-s using pgbackrest - - {option: "wal_level", value: "replica"} # "replica" for PostgreSQL 9.6 and above (for 9.4, 9.5 use "hot_standby") - - {option: "wal_keep_segments", value: "130"} - - {option: "max_wal_senders", value: "10"} - - {option: "max_replication_slots", value: "10"} - - {option: "hot_standby", value: "on"} - - {option: "wal_log_hints", value: "on"} - - {option: "shared_preload_libraries", value: "pg_stat_statements,auto_explain"} - - {option: "pg_stat_statements.max", value: "10000"} - - {option: "pg_stat_statements.track", value: "all"} - - {option: "pg_stat_statements.save", value: "off"} - - {option: "auto_explain.log_min_duration", value: "10s"} # 10 sec (by default). 
Decrease this value if necessary - - {option: "auto_explain.log_analyze", value: "true"} - - {option: "auto_explain.log_buffers", value: "true"} - - {option: "auto_explain.log_timing", value: "false"} - - {option: "auto_explain.log_triggers", value: "true"} - - {option: "auto_explain.log_verbose", value: "true"} - - {option: "auto_explain.log_nested_statements", value: "true"} - - {option: "track_io_timing", value: "on"} - - {option: "log_lock_waits", value: "on"} - - {option: "log_temp_files", value: "0"} - - {option: "track_activities", value: "on"} - - {option: "track_counts", value: "on"} - - {option: "track_functions", value: "all"} - - {option: "log_checkpoints", value: "on"} - - {option: "logging_collector", value: "on"} - - {option: "log_truncate_on_rotation", value: "on"} - - {option: "log_rotation_age", value: "1d"} - - {option: "log_rotation_size", value: "0"} - - {option: "log_line_prefix", value: "'%t [%p-%l] %r %q%u@%d '"} - - {option: "log_filename", value: "'postgresql-%a.log'"} - - {option: "log_directory", value: "{{ postgresql_log_dir }}"} -# - {option: "idle_in_transaction_session_timeout", value: "600000"} # for PostgreSQL 9.6 and above -# - {option: "max_worker_processes", value: "24"} -# - {option: "max_parallel_workers", value: "24"} # for PostgreSQL 10 and above -# - {option: "max_parallel_workers_per_gather", value: "4"} # for PostgreSQL 9.6 and above -# - {option: "max_parallel_maintenance_workers", value: "2"} # for PostgreSQL 11 and above -# - {option: "hot_standby_feedback", value: "on"} # allows feedback from a hot standby to the primary that will avoid query conflicts -# - {option: "max_standby_streaming_delay", value: "30s"} -# - {option: "wal_receiver_status_interval", value: "10s"} -# - {option: "old_snapshot_threshold", value: "60min"} # for PostgreSQL 9.6 and above (1min-60d) -# - {option: "", value: ""} -# - {option: "", value: ""} - - -# specify additional hosts that will be added to the pg_hba.conf -postgresql_pg_hba: - - {type: "local", database: "all", user: "postgres", address: "", method: "trust"} # "local=trust" required for ansible modules "postgresql_(user,db,ext)" - - {type: "local", database: "all", user: "all", address: "", method: "peer"} - - {type: "host", database: "all", user: "all", address: "127.0.0.1/32", method: "md5"} - - {type: "host", database: "all", user: "all", address: "::1/128", method: "md5"} -# - {type: "host", database: "mydatabase", user: "mydb-user", address: "192.168.0.0/24", method: "md5"} -# - {type: "host", database: "all", user: "all", address: "192.168.0.0/24", method: "ident", options: "map=main"} # use pg_ident - -# list of lines that Patroni will use to generate pg_ident.conf -postgresql_pg_ident: [] -# - {mapname: "main", system_username: "postgres", pg_username: "backup"} -# - {mapname: "", system_username: "", pg_username: ""} - - -# PgBouncer parameters -pgbouncer_install: true # or 'false' if you do not want to install and configure the pgbouncer service -pgbouncer_conf_dir: "/etc/pgbouncer" -pgbouncer_log_dir: "/var/log/pgbouncer" -pgbouncer_listen_port: 6432 -pgbouncer_max_client_conn: 10000 -pgbouncer_max_db_connections: 1000 -pgbouncer_default_pool_size: 20 -pgbouncer_default_pool_mode: "session" -pgbouncer_generate_userlist: true # generate userlist.txt (all users and passwords md5) FROM pg_shadow - -pgbouncer_pools: - - {name: "postgres", dbname: "postgres", pool_parameters: ""} -# - {name: "mydatabase", dbname: "mydatabase", pool_parameters: "pool_size=20 pool_mode=transaction"} -# - {name: "", 
dbname: "", pool_parameters: ""} -# - {name: "", dbname: "", pool_parameters: ""} - - -# Extended variables (optional) -patroni_ttl: 30 -patroni_loop_wait: 10 -patroni_retry_timeout: 10 -patroni_maximum_lag_on_failover: 1048576 -patroni_master_start_timeout: 300 - -# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#standby-cluster -patroni_standby_cluster: - host: "" # an address of remote master - port: "5432" # a port of remote master -# primary_slot_name: "" # which slot on the remote master to use for replication (optional) -# restore_command: "" # command to restore WAL records from the remote master to standby leader (optional) -# recovery_min_apply_delay: "" # how long to wait before actually apply WAL records on a standby leader (optional) - -patroni_log_destination: stderr # or 'logfile' -# if patroni_log_destination: logfile -patroni_log_dir: /var/log/patroni -patroni_log_level: info -patroni_log_traceback_level: error -patroni_log_format: "%(asctime)s %(levelname)s: %(message)s" -patroni_log_dateformat: "" -patroni_log_max_queue_size: 1000 -patroni_log_file_num: 4 -patroni_log_file_size: 25000000 # bytes -patroni_log_loggers_patroni_postmaster: warning -patroni_log_loggers_urllib3: warning # or 'debug' - -patroni_postgresql_use_pg_rewind: true # or 'false' -# try to use pg_rewind on the former leader when it joins cluster as a replica. - -patroni_remove_data_directory_on_rewind_failure: false # or 'true' (if use_pg_rewind: 'true') -# avoid removing the data directory on an unsuccessful rewind -# if 'true', Patroni will remove the PostgreSQL data directory and recreate the replica. - -patroni_remove_data_directory_on_diverged_timelines: false # or 'true' -# if 'true', Patroni will remove the PostgreSQL data directory and recreate the replica -# if it notices that timelines are diverging and the former master can not start streaming from the new master. 
- -# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#bootstrap -patroni_cluster_bootstrap_method: "initdb" # or "wal-g", "pgbackrest" - -# https://patroni.readthedocs.io/en/latest/replica_bootstrap.html#building-replicas -patroni_create_replica_methods: -# - pgbackrest -# - wal_g - - basebackup - -pgbackrest: - - {option: "command", value: "/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --delta restore"} - - {option: "keep_data", value: "True"} - - {option: "no_params", value: "True"} -wal_g: - - {option: "command", value: "wal-g backup-fetch {{ postgresql_data_dir }} LATEST"} - - {option: "no_params", value: "True"} -basebackup: - - {option: "max-rate", value: "100M"} - - {option: "checkpoint", value: "fast"} - -# "restore_command" written to recovery.conf when configuring follower (create replica) -postgresql_restore_command: "" -# postgresql_restore_command: "wal-g wal-fetch %f %p" # restore WAL-s using WAL-G -# postgresql_restore_command: "pgbackrest --stanza={{ pgbackrest_stanza }} archive-get %f %p" # restore WAL-s using pgbackrest - - -# WAL-G -wal_g_install: false # or 'true' -wal_g_ver: "v0.2.19" -wal_g_json: # more options see https://github.com/wal-g/wal-g#configuration - - {option: "AWS_ACCESS_KEY_ID", value: "minio"} - - {option: "AWS_SECRET_ACCESS_KEY", value: "miniosecret"} - - {option: "AWS_ENDPOINT", value: "http://172.26.9.200:9000/"} - - {option: "WALG_S3_PREFIX", value: "s3://bucket"} - - {option: "AWS_S3_FORCE_PATH_STYLE", value: "true"} - - {option: "WALG_COMPRESSION_METHOD", value: "brotli"} - - {option: "PGDATA", value: "{{ postgresql_data_dir }}"} - - {option: "PGHOST", value: "{{ postgresql_unix_socket_dir }}"} -# - {option: "AWS_REGION", value: "us-east-1"} -# - {option: "WALG_S3_CA_CERT_FILE", value: "/path/to/custom/ca/file"} -# - {option: "", value: ""} -wal_g_patroni_cluster_bootstrap_command: "wal-g backup-fetch {{ postgresql_data_dir }} LATEST" - -# pgBackRest -pgbackrest_install: false # or 'true' -pgbackrest_install_from_pgdg_repo: true # or 'false' -pgbackrest_stanza: "stanza_name" # specify your --stanza -pgbackrest_repo_type: "posix" # or "s3" -pgbackrest_repo_host: "10.128.64.50" # dedicated repository host (if repo_type: "posix") -pgbackrest_repo_user: "postgres" # if "repo_host" is set -pgbackrest_conf_file: "/etc/pgbackrest/pgbackrest.conf" -# see more options https://pgbackrest.org/configuration.html -pgbackrest_conf: - global: # [global] section - - {option: "log-level-file", value: "detail"} - - {option: "log-path", value: "/var/log/pgbackrest"} - - {option: "repo1-type", value: "{{ pgbackrest_repo_type |lower }}"} - - {option: "repo1-host", value: "{{ pgbackrest_repo_host }}"} - - {option: "repo1-host-user", value: "{{ pgbackrest_repo_user }}"} -# - {option: "", value: ""} - stanza: # [stanza_name] section - - {option: "pg1-path", value: "{{ postgresql_data_dir }}"} - - {option: "process-max", value: "2"} - - {option: "recovery-option", value: "recovery_target_action=promote"} -# - {option: "", value: ""} -pgbackrest_patroni_cluster_restore_command: - '/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --delta restore' # restore from latest backup -# '/usr/bin/pgbackrest --stanza={{ pgbackrest_stanza }} --type=time "--target=2020-06-01 11:00:00+03" --delta restore' # Point-in-Time Recovery (example) - -# PITR mode (if patroni_cluster_bootstrap_method: "pgbackrest" or "wal-g"): -# 1) The database cluster directory will be cleaned (for "wal-g") or overwritten (for "pgbackrest" --delta restore). 
-# 2) The patroni cluster "{{ patroni_cluster_name }}" will also be removed from the DCS (if it exists) before recovery. -disable_archive_command: true # or 'false' to not disable archive_command after restore -keep_patroni_dynamic_json: true # or 'false' to remove patroni.dynamic.json after restore (if exists) -# Netdata - https://github.com/netdata/netdata -netdata_install: false # or 'true' to install Netdata on postgresql cluster nodes (with kickstart.sh) -netdata_install_options: "--stable-channel --disable-telemetry --dont-wait" -netdata_conf: - web_bind_to: "*" - # https://learn.netdata.cloud/docs/store/change-metrics-storage - memory_mode: "dbengine" # The long-term metrics storage with efficient RAM and disk usage. - page_cache_size: 64 # Determines the amount of RAM in MiB that is dedicated to caching Netdata metric values. - dbengine_disk_space: 1024 # Determines the amount of disk space in MiB that is dedicated to storing Netdata metric values. -... diff --git a/vars/system.yml b/vars/system.yml deleted file mode 100644 index 95d7cb1a9..000000000 --- a/vars/system.yml +++ /dev/null @@ -1,160 +0,0 @@ ---- -# yamllint disable rule:line-length -# yamllint disable rule:comments-indentation -# DNS servers (/etc/resolv.conf) -nameservers: [] -# - "8.8.8.8" # example (Google Public DNS) -# - "9.9.9.9" # (Quad9 Public DNS) -# /etc/hosts (optional) -etc_hosts: [] -# - "10.128.64.143 pgbackrest.minio.local minio.local s3.eu-west-3.amazonaws.com" # example (MinIO) -# - "" -ntp_enabled: false # or 'true' if you want to install and configure the ntp service -ntp_servers: [] -# - "10.128.64.44" -# - "10.128.64.45" -timezone: "" -# timezone: "Europe/Moscow" -# Generate locale -# (except RHEL >= 8, which uses glibc-langpack) -locale_gen: - - {language_country: "en_US", encoding: "UTF-8"} - - {language_country: "ru_RU", encoding: "UTF-8"} # optional -# - {language_country: "", encoding: ""} -# Set system locale (LANG,LC_ALL) -locale: "en_US.utf-8" -# Kernel parameters -sysctl_set: true # or 'false' -# these parameters are examples! 
Specify kernel options for your system -sysctl_conf: - etcd_cluster: [] - master: [] - replica: [] - postgres_cluster: - - {name: "vm.swappiness", value: "1"} - - {name: "vm.min_free_kbytes", value: "102400"} - - {name: "vm.dirty_expire_centisecs", value: "1000"} - - {name: "vm.dirty_background_bytes", value: "67108864"} - - {name: "vm.dirty_bytes", value: "536870912"} -# - {name: "vm.nr_hugepages", value: "9510"} # example "9510"=18GB - - {name: "vm.zone_reclaim_mode", value: "0"} - - {name: "kernel.numa_balancing", value: "0"} - - {name: "kernel.sched_migration_cost_ns", value: "5000000"} - - {name: "kernel.sched_autogroup_enabled", value: "0"} - - {name: "net.ipv4.ip_nonlocal_bind", value: "1"} - - {name: "net.ipv4.ip_forward", value: "1"} - - {name: "net.ipv4.ip_local_port_range", value: "10000 65535"} - - {name: "net.netfilter.nf_conntrack_max", value: "1048576"} - - {name: "net.core.netdev_max_backlog", value: "10000"} - - {name: "net.ipv4.tcp_max_syn_backlog", value: "8192"} - - {name: "net.core.somaxconn", value: "65535"} - - {name: "net.ipv4.tcp_tw_reuse", value: "1"} -# - {name: "", value: ""} - balancers: - - {name: "net.ipv4.ip_nonlocal_bind", value: "1"} - - {name: "net.ipv4.ip_forward", value: "1"} - - {name: "net.ipv4.ip_local_port_range", value: "10000 65535"} - - {name: "net.netfilter.nf_conntrack_max", value: "1048576"} - - {name: "net.core.netdev_max_backlog", value: "10000"} - - {name: "net.ipv4.tcp_max_syn_backlog", value: "8192"} - - {name: "net.core.somaxconn", value: "65535"} - - {name: "net.ipv4.tcp_tw_reuse", value: "1"} -# - {name: "", value: ""} - - -# Transparent Huge Pages -disable_thp: true # or 'false' - - -# Max open file limit -set_limits: true # or 'false' -limits_user: "postgres" -soft_nofile: 65536 -hard_nofile: 200000 - - -# I/O Scheduler (optional) -set_scheduler: false # or 'true' -scheduler: - - {sched: "deadline", nr_requests: "1024", device: "sda"} -# - {sched: "noop" , nr_requests: "1024", device: "sdb"} -# - {sched: "" , nr_requests: "1024", device: ""} - -# Non-multiqueue I/O schedulers: -# cfq - for desktop systems and slow SATA drives -# deadline - for SAS drives (recommended for databases) -# noop - for SSD drives -# Multiqueue I/O schedulers (blk-mq): -# mq-deadline - (recommended for databases) -# none - (ideal for fast random I/O devices such as NVMe) -# bfq - (avoid for databases) -# kyber - - -# SSH Keys (optional) -enable_ssh_key_based_authentication: false # or 'true' for configure SSH Key-Based Authentication -ssh_key_user: "postgres" -ssh_key_state: "present" -ssh_known_hosts: "{{ groups['postgres_cluster'] }}" - - -# sudo -sudo_users: - - name: "postgres" - nopasswd: "yes" # or "no" to require a password - commands: "ALL" -# - name: "joe" # other user (example) -# nopasswd: "no" -# commands: "/usr/bin/find, /usr/bin/less, /usr/bin/tail, /bin/kill" - - -# Firewall (ansible-role-firewall) -firewall_enabled_at_boot: true - -firewall_allowed_tcp_ports_for: - master: [] - replica: [] - postgres_cluster: - - "{{ ansible_ssh_port | default(22) }}" - - "{{ postgresql_port }}" - - "{{ pgbouncer_listen_port }}" - - "8008" # Patroni REST API port - - "19999" # Netdata -# - "10050" # Zabbix agent -# - "" - etcd_cluster: - - "{{ ansible_ssh_port | default(22) }}" - - "2379" # ETCD port - - "2380" # ETCD port -# - "" - balancers: - - "{{ ansible_ssh_port | default(22) }}" - - "{{ haproxy_listen_port.master }}" # HAProxy (read/write) master - - "{{ haproxy_listen_port.replicas }}" # HAProxy (read only) all replicas - - "{{ 
haproxy_listen_port.replicas_sync }}" # HAProxy (read only) synchronous replica only - - "{{ haproxy_listen_port.replicas_async }}" # HAProxy (read only) asynchronous replicas only - - "{{ haproxy_listen_port.stats }}" # HAProxy stats -# - "" - -firewall_additional_rules_for: - master: [] - replica: [] - postgres_cluster: [] - etcd_cluster: [] - balancers: - - "iptables -p vrrp -A INPUT -j ACCEPT" # Keepalived (vrrp) - - "iptables -p vrrp -A OUTPUT -j ACCEPT" # Keepalived (vrrp) - -# disable firewalld (installed by default on RHEL/CentOS) or ufw (installed by default on Ubuntu) -firewall_disable_firewalld: true -firewall_disable_ufw: true - -...
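Since vars/system.yml keys its firewall lists by inventory group, opening an extra port is a matter of extending the relevant list. A minimal sketch for the postgres_cluster group, where port 9100 is a hypothetical monitoring-agent port; note that Ansible replaces a redefined dictionary rather than merging it, so a real override must restate the other groups' lists as well:

firewall_allowed_tcp_ports_for:
  master: []
  replica: []
  postgres_cluster:
    - "{{ ansible_ssh_port | default(22) }}"
    - "{{ postgresql_port }}"
    - "{{ pgbouncer_listen_port }}"
    - "8008"   # Patroni REST API port
    - "19999"  # Netdata
    - "9100"   # hypothetical: monitoring agent
  # etcd_cluster and balancers: restate the original lists here unchanged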