diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index ee03ff8d2..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,105 +0,0 @@ ---- -version: 2.1 - -orbs: - prometheus: prometheus/prometheus@0.17.1 - -executors: - # This must match .promu.yml. - golang: - docker: - - image: cimg/go:1.24 - -jobs: - test: - executor: golang - - steps: - - prometheus/setup_environment - - run: GOHOSTARCH=386 GOARCH=386 make test - - run: make - - prometheus/store_artifact: - file: postgres_exporter - - integration: - docker: - - image: cimg/go:1.24 - - image: << parameters.postgres_image >> - environment: - POSTGRES_DB: circle_test - POSTGRES_USER: postgres - POSTGRES_PASSWORD: test - - parameters: - postgres_image: - type: string - - environment: - DATA_SOURCE_NAME: 'postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable' - GOOPTS: '-v -tags integration' - - steps: - - checkout - - setup_remote_docker - - run: docker version - - run: make build - - run: make test - -workflows: - version: 2 - postgres_exporter: - jobs: - - test: - filters: - tags: - only: /.*/ - - integration: - matrix: - parameters: - postgres_image: - - circleci/postgres:11 - - circleci/postgres:12 - - circleci/postgres:13 - - cimg/postgres:14.9 - - cimg/postgres:15.4 - - cimg/postgres:16.0 - - cimg/postgres:17.0 - - prometheus/build: - name: build - parallelism: 3 - promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" - filters: - tags: - ignore: /^v.*/ - branches: - ignore: /^(main|master|release-.*|.*build-all.*)$/ - - prometheus/build: - name: build_all - parallelism: 12 - filters: - branches: - only: /^(main|master|release-.*|.*build-all.*)$/ - tags: - only: /^v.*/ - - prometheus/publish_master: - context: org-context - docker_hub_organization: prometheuscommunity - quay_io_organization: prometheuscommunity - requires: - - test - - build_all - filters: - branches: - only: master - - prometheus/publish_release: - context: org-context - docker_hub_organization: prometheuscommunity - quay_io_organization: prometheuscommunity - requires: - - test - - build_all - filters: - tags: - only: /^v.*/ - branches: - ignore: /.*/ diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 22f42bc20..000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve. -title: '' -assignees: '' ---- - - - -**What did you do?** - -**What did you expect to see?** - -**What did you see instead? Under which circumstances?** - -**Environment** - -* System information: - - insert output of `uname -srm` here - -* postgres_exporter version: - - insert output of `postgres_exporter --version` here - -* postgres_exporter flags: - -``` -insert list of flags used here -``` - -* PostgreSQL version: - - insert PostgreSQL version here - -* Logs: -``` -insert logs relevant to the issue here -``` diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index d70ee5512..000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,5 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: Prometheus community support - url: https://prometheus.io/community/ - about: List of communication channels for the Prometheus community. 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index ee6d97a85..000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project. -title: '' -labels: '' -assignees: '' ---- - - -## Proposal -**Use case. Why is this important?** - -*“Nice to have” is not a good use case. :)* diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 202ae2366..000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "gomod" - directory: "/" - schedule: - interval: "monthly" diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml deleted file mode 100644 index dcca16ff3..000000000 --- a/.github/workflows/container_description.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -name: Push README to Docker Hub -on: - push: - paths: - - "README.md" - - "README-containers.md" - - ".github/workflows/container_description.yml" - branches: [ main, master ] - -permissions: - contents: read - -jobs: - PushDockerHubReadme: - runs-on: ubuntu-latest - name: Push README to Docker Hub - if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. - steps: - - name: git checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set docker hub repo name - run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - - name: Push README to Dockerhub - uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 - env: - DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }} - DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }} - with: - destination_container_repo: ${{ env.DOCKER_REPO_NAME }} - provider: dockerhub - short_description: ${{ env.DOCKER_REPO_NAME }} - # Empty string results in README-containers.md being pushed if it - # exists. Otherwise, README.md is pushed. - readme_file: '' - - PushQuayIoReadme: - runs-on: ubuntu-latest - name: Push README to quay.io - if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. - steps: - - name: git checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set quay.io org name - run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - - name: Set quay.io repo name - run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - - name: Push README to quay.io - uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 - env: - DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }} - with: - destination_container_repo: ${{ env.DOCKER_REPO_NAME }} - provider: quay - # Empty string results in README-containers.md being pushed if it - # exists. Otherwise, README.md is pushed. 
- readme_file: '' diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index 672dd424d..000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# This action is synced from https://github.com/prometheus/prometheus -name: golangci-lint -on: - push: - paths: - - "go.sum" - - "go.mod" - - "**.go" - - "scripts/errcheck_excludes.txt" - - ".github/workflows/golangci-lint.yml" - - ".golangci.yml" - pull_request: - -permissions: # added using https://github.com/step-security/secure-repo - contents: read - -jobs: - golangci: - permissions: - contents: read # for actions/checkout to fetch code - pull-requests: read # for golangci/golangci-lint-action to fetch pull requests - name: lint - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 - with: - go-version: 1.24.x - - name: Install snmp_exporter/generator dependencies - run: sudo apt-get update && sudo apt-get -y install libsnmp-dev - if: github.repository == 'prometheus/snmp_exporter' - - name: Lint - uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 - with: - args: --verbose - version: v2.1.5 diff --git a/.gitignore b/.gitignore deleted file mode 100644 index e6ae827a2..000000000 --- a/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -/.build -/postgres_exporter -/postgres_exporter_integration_test -*.tar.gz -*.test -*-stamp -/.idea -/.vscode -*.iml -/cover.out -/cover.*.out -/.coverage -/bin -/release -/*.prom -/.metrics.*.*.prom -/.metrics.*.*.prom.unique -/.assets-branch -/.metrics.*.added -/.metrics.*.removed -/tools/src -/vendor diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index 4b58b08b6..000000000 --- a/.golangci.yml +++ /dev/null @@ -1,36 +0,0 @@ -version: "2" -linters: - enable: - - misspell - - revive - settings: - errcheck: - exclude-functions: - - (github.com/go-kit/log.Logger).Log - revive: - rules: - - name: unused-parameter - severity: warning - disabled: true - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - rules: - - linters: - - errcheck - path: _test.go - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/.promu.yml b/.promu.yml deleted file mode 100644 index 5f915289f..000000000 --- a/.promu.yml +++ /dev/null @@ -1,19 +0,0 @@ -go: - # This must match .circleci/config.yml.
- version: 1.24 -repository: - path: github.com/prometheus-community/postgres_exporter -build: - binaries: - - name: postgres_exporter - path: ./cmd/postgres_exporter - ldflags: | - -X github.com/prometheus/common/version.Version={{.Version}} - -X github.com/prometheus/common/version.Revision={{.Revision}} - -X github.com/prometheus/common/version.Branch={{.Branch}} - -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} - -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} -tarball: - files: - - LICENSE - - NOTICE diff --git a/.yamllint b/.yamllint deleted file mode 100644 index 8d09c375f..000000000 --- a/.yamllint +++ /dev/null @@ -1,25 +0,0 @@ ---- -extends: default -ignore: | - **/node_modules - -rules: - braces: - max-spaces-inside: 1 - level: error - brackets: - max-spaces-inside: 1 - level: error - commas: disable - comments: disable - comments-indentation: disable - document-start: disable - indentation: - spaces: consistent - indent-sequences: consistent - key-duplicates: - ignore: | - config/testdata/section_key_dup.bad.yml - line-length: disable - truthy: - check-keys: false diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index d01ceb504..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,342 +0,0 @@ -## 0.17.1 / 2025-02-26 - -* [BUGFIX] Fix: Handle incoming labels with invalid UTF-8 #1131 - -## 0.17.0 / 2025-02-16 - -## What's Changed -* [ENHANCEMENT] Add Postgres 17 for CI test by @khiemdoan in https://github.com/prometheus-community/postgres_exporter/pull/1105 -* [ENHANCEMENT] Add wait/backend to pg_stat_activity by @fgalind1 in https://github.com/prometheus-community/postgres_exporter/pull/1106 -* [ENHANCEMENT] Export last replay age in replication collector by @bitfehler in https://github.com/prometheus-community/postgres_exporter/pull/1085 -* [BUGFIX] Fix pg_long_running_transactions time by @jyothikirant-sayukth in https://github.com/prometheus-community/postgres_exporter/pull/1092 -* [BUGFIX] Fix to replace dashes with underscores in the metric names by @aagarwalla-fx in https://github.com/prometheus-community/postgres_exporter/pull/1103 -* [BUGFIX] Checkpoint-related columns in PG 17 have been moved from pg_stat_bgwriter to pg_stat_checkpointer by @n-rodriguez in https://github.com/prometheus-community/postgres_exporter/pull/1072 -* [BUGFIX] Fix pg_stat_statements for PG17 by @NevermindZ4 in https://github.com/prometheus-community/postgres_exporter/pull/1114 -* [BUGFIX] Handle pg_replication_slots on pg<13 by @michael-todorovic in https://github.com/prometheus-community/postgres_exporter/pull/1098 -* [BUGFIX] Fix missing dsn sanitization for logging by @sysadmind in https://github.com/prometheus-community/postgres_exporter/pull/1104 - -## New Contributors -* @jyothikirant-sayukth made their first contribution in https://github.com/prometheus-community/postgres_exporter/pull/1092 -* @aagarwalla-fx made their first contribution in https://github.com/prometheus-community/postgres_exporter/pull/1103 -* @NevermindZ4 made their first contribution in https://github.com/prometheus-community/postgres_exporter/pull/1114 -* @michael-todorovic made their first contribution in https://github.com/prometheus-community/postgres_exporter/pull/1098 -* @fgalind1 made their first contribution in https://github.com/prometheus-community/postgres_exporter/pull/1106 - -**Full Changelog**: https://github.com/prometheus-community/postgres_exporter/compare/v0.16.0...v0.17.0 - -## 0.16.0 / 2024-11-10 - -BREAKING CHANGES: - -The logging system
has been replaced with log/slog from the stdlib. This change is being made across the Prometheus ecosystem. The logging output has changed, but the messages and levels remain the same. The `ts` label for the timestamp has been replaced with `time`, the precision is lower, and the timezone is not forced to UTC. The `caller` field has been replaced by the `source` field, which now includes the full path to the source file. The `level` field now exposes the log level in capital letters. - -* [CHANGE] Replace logging system #1073 -* [ENHANCEMENT] Add save_wal_size and wal_status to replication_slot collector #1027 -* [ENHANCEMENT] Add roles collector and connection limit metrics to database collector #997 -* [ENHANCEMENT] Excluded databases log message is now info level #1003 -* [ENHANCEMENT] Add active_time to stat_database collector #961 -* [ENHANCEMENT] Add slot_type label to replication_slot collector #960 -* [BUGFIX] Fix walreceiver collector when no repmgr #1086 -* [BUGFIX] Remove logging errors on replicas #1048 -* [BUGFIX] Fix active_time query on postgres>=14 #1045 - -## 0.15.0 / 2023-10-27 - -* [ENHANCEMENT] Add 1kB and 2kB units #915 -* [BUGFIX] Add error log when probe collector creation fails #918 -* [BUGFIX] Fix test build failures on 32-bit arch #919 -* [BUGFIX] Adjust collector to use separate connection per scrape #936 - -## 0.14.0 / 2023-09-11 - -* [CHANGE] Add `state` label to pg_process_idle_seconds #862 -* [CHANGE] Change database connections to one per scrape #882 #902 -* [ENHANCEMENT] Add wal collector #858 -* [ENHANCEMENT] Add database_wraparound collector #834 -* [ENHANCEMENT] Add stat_activity_autovacuum collector #840 -* [ENHANCEMENT] Add stat_wal_receiver collector #844 -* [ENHANCEMENT] Add xlog_location collector #849 -* [ENHANCEMENT] Add statio_user_indexes collector #845 -* [ENHANCEMENT] Add long_running_transactions collector #836 -* [ENHANCEMENT] Add pg_stat_user_tables_size_bytes metric #904 -* [BUGFIX] Fix tests on 32-bit systems #857 -* [BUGFIX] Fix pg_stat_statements metrics on Postgres 13+ #874 #876 -* [BUGFIX] Fix pg_stat_database metrics for NULL stats_reset #877 -* [BUGFIX] Fix pg_replication_lag_seconds on Postgres 10+ when master is idle #895 - -## 0.13.2 / 2023-07-21 - -* [BUGFIX] Fix type issues on pg_postmaster metrics #828 -* [BUGFIX] Fix pg_replication collector instantiation #854 -* [BUGFIX] Fix pg_process_idle metrics #855 - -## 0.13.1 / 2023-06-27 - -* [BUGFIX] Make collectors not fail on null values #823 - -## 0.13.0 / 2023-06-21 - -BREAKING CHANGES: - -Please note, the following features are deprecated and may be removed in a future release: -- `auto-discover-databases` -- `extend.query-path` -- `constantLabels` -- `exclude-databases` -- `include-databases` - -This exporter is meant to monitor PostgreSQL servers, not the user data/databases. If -you need a generic SQL report exporter, https://github.com/burningalchemist/sql_exporter -is recommended.
- -* [CHANGE] Adjust log level for collector startup #784 -* [CHANGE] Move queries from queries.yaml to collectors #801 -* [CHANGE] Deprecate extend queries feature #811 -* [CHANGE] Deprecate additional database features #815 -* [CHANGE] Convert pg_stat_database to new collector #685 -* [ENHANCEMENT] Supports alternate postgres:// prefix in URLs #787 -* [BUGFIX] Fix pg_setting different help values #771 -* [BUGFIX] Fix column type for pg_replication_slots #777 -* [BUGFIX] Fix pg_stat_database collector #809 - -## 0.12.1 / 2023-06-12 -* [BUGFIX] Fix column type for pg_replication_slots #777 - -## 0.12.0 / 2023-03-21 - -BREAKING CHANGES: - -This release changes support for multiple postgres servers to use the -multi-target exporter pattern. This makes it much easier to monitor multiple -PostgreSQL servers from a single exporter by passing the target via URL -params. See the Multi-Target Support section of the README. - -* [CHANGE] Add multi-target support #618 -* [CHANGE] Add usename and application_name to pg_stat_activity metrics #673 -* [FEATURE] Add replication metrics from pg_replication_slots #747 -* [BUGFIX] Add dsn type for handling datasources #678 -* [BUGFIX] Add 64kB unit for postgres 15 #740 -* [BUGFIX] Add 4kB unit for postgres compiled with small blocks #699 - -## 0.11.1 / 2022-08-01 - -* [BUGFIX] Fix checkpoint_write_time value type #666 -* [BUGFIX] Fix checkpoint_sync_time value type #667 - -## 0.11.0 / 2022-07-28 - -NOTE: pg_stat_bgwriter counter metrics had the `_total` suffix added #556 - -* [CHANGE] refactor pg_stat_bgwriter metrics into standalone collector #556 -* [FEATURE] Add pg_database collector #613 -* [ENHANCEMENT] Add pg_database_size_bytes metric #613 -* [BUGFIX] Avoid parsing error from bogus Azure Flexible Server custom GUC #587 -* [BUGFIX] Fix pg_stat_archiver error in 9.4 and earlier. #599 -* [BUGFIX] Sanitize setting values because of Aurora irregularity #620 - -## 0.10.1 / 2022-01-14 - -* [BUGFIX] Fix broken log-level for values other than debug. #560 - -## 0.10.0 / 2021-07-08 - -* [ENHANCEMENT] Add ability to set included databases when autoDiscoverDatabases is enabled #499 -* [BUGFIX] fix pg_replication_slots on postgresql versions 9.4 <> 10.0 #537 - -## 0.9.0 / 2021-03-01 - -First release under the Prometheus Community organisation. - -* [CHANGE] Update build to use standard Prometheus promu/Dockerfile -* [ENHANCEMENT] Remove duplicate column in queries.yml #433 -* [ENHANCEMENT] Add query for 'pg_replication_slots' #465 -* [ENHANCEMENT] Allow a custom prefix for metric namespace #387 -* [ENHANCEMENT] Improve PostgreSQL replication lag detection #395 -* [ENHANCEMENT] Support connstring syntax when discovering databases #473 -* [ENHANCEMENT] Detect SIReadLock locks in the pg_locks metric #421 -* [BUGFIX] Fix pg_database_size_bytes metric in queries.yaml #357 -* [BUGFIX] Don't ignore errors in parseUserQueries #362 -* [BUGFIX] Fix queries.yaml for AWS RDS #370 -* [BUGFIX] Recover when connection cannot be established at startup #415 -* [BUGFIX] Don't retry if an error occurs #426 -* [BUGFIX] Do not panic on incorrect env #457 - -## 0.8.0 / 2019-11-25 - -* Add a build info metric (#323) -* Re-add pg_stat_bgwriter metrics which were accidentally removed in the previous version. (resolves #336) -* Export pg_stat_archiver metrics (#324) -* Add support for 'DATA_SOURCE_URI_FILE' envvar. -* Resolve #329 -* Added new field "master" to queries.yaml. 
(credit to @sfalkon) - - If "master" is true, the query will be called on only one database per instance -* Change queries.yaml to work with the autoDiscoverDatabases option (credit to @sfalkon) - - added the current database name to metrics, because any database in the cluster may have the same table names - - added "master" field for query instance metrics. - -## 0.7.0 / 2019-11-01 - -Introduces some more significant changes, hence the minor version bump in -such a short time frame. - -* Rename pg_database_size to pg_database_size_bytes in queries.yml. -* Add pg_stat_statements to sample queries.yml file. -* Add support for optional namespace caching. (#319) -* Fix some autodiscovery problems (#314) (resolves #308) -* Yaml parsing refactor (#299) -* Don't stop generating fingerprint while encountering value with "=" sign (#318) - (may resolve problems with passwords and special characters). - -## 0.6.0 / 2019-10-30 - -* Add SQL for grant connect (#303) -* Expose pg_current_wal_lsn_bytes (#307) -* [minor] fix landing page content-type (#305) -* Updated lib/pg driver to 1.2.0 in order to support stronger SCRAM-SHA-256 authentication. This drops support for Go < 1.11 and PostgreSQL < 9.4. (#304) -* Provide more helpful default values for tables that have never been vacuumed (#310) -* Add retries to getServer() (#316) -* Fix pg_up metric returning its last calculated value without explicit resetting (#291) -* Discover only databases that are not templates and allow connections (#297) -* Add --exclude-databases option (#298) - -## 0.5.1 / 2019-07-09 - -* Add application_name as a label for pg_stat_replication metrics (#285). - -## 0.5.0 / 2019-07-03 - -It's been far too long since I've done a release and we have a lot of accumulated changes. - -* Docker image now runs as a non-root user named "postgres_exporter" -* Add `--auto-discover-databases` option, which automatically discovers and scrapes all databases. -* Add support for boolean data types as metrics -* Replication lag is now expressed as a float and not truncated to an integer. -* When default metrics are disabled, no version metrics are collected anymore either. -* BUGFIX: Fix exporter panic when postgres server goes down. -* Add support for collecting metrics from multiple servers. -* PostgreSQL 11 is now supported in the integration tests. - -## 0.4.7 / 2018-10-02 - -* Added a query for v9.1 pg_stat_activity. -* Add `--constantLabels` flag to allow applying fixed constant labels to metrics. -* queries.yml: add pg_statio_user_tables. -* Support 'B' suffix in units. - -## 0.4.6 / 2018-04-15 - -* Fix issue #173 - 32 and 64mb unit sizes were not supported in pg_settings. - -## 0.4.5 / 2018-02-27 - -* Add command-line flag to disable default metrics (thanks @hsun-cnnxty) - -## 0.4.4 / 2018-03-21 - -* Bugfix for 0.4.3 which broke pg_up (it would always be 0). -* pg_up is now refreshed based on database Ping() every scrape. -* Re-release of 0.4.4 to fix version numbering. - -## 0.4.2 / 2018-02-19 - -* Adds the following environment variables for overriding defaults: - * `PG_EXPORTER_WEB_LISTEN_ADDRESS` - * `PG_EXPORTER_WEB_TELEMETRY_PATH` - * `PG_EXPORTER_EXTEND_QUERY_PATH` - -* Add Content-Type to HTTP landing page. -* Fix Makefile to produce .exe binaries for Windows. - -## 0.4.1 / 2017-11-30 - -* No code changes to v0.4.0 for the exporter. -* First release switching to tar-file based distribution.
-* First release with Windows and Darwin cross-builds. - -## 0.4.0 / 2017-11-29 - -* Fix panic due to inconsistent label cardinality when using queries.yaml with - queries which return extra columns. -* Add metric for whether the user queries YAML file parsed correctly. This also - includes the filename and SHA256 sum allowing tracking of updates. -* Add pg_up metric to indicate whether the exporter was able to connect and - Ping() the PG instance before a scrape. -* Fix broken link in landing page for `/metrics`. - -## 0.3.0 / 2017-10-23 - -* Add support for PostgreSQL 10. - -## 0.2.3 / 2017-09-07 - -* Add support for the 16kB unit when decoding pg_settings. (#101) - -## 0.2.2 / 2017-08-04 - -* Fix DSN logging. The exporter previously never actually logged the DSN when - database connections failed. This was also masking a logic error which could - potentially lead to a crash when DSN was unparseable, though no actual - crash could be produced in testing. - -## 0.2.1 / 2017-06-07 - -* Ignore functions that cannot be executed during replication recovery (#52) -* Add a `-version` flag finally. -* Add confirmed_flush_lsn to pg_stat_replication. - -## 0.2.0 / 2017-04-18 - -* Major change - use pg_settings to retrieve runtime variables. Adds >180 - new metrics and descriptions (big thanks to Matt Bostock for this work). - - Removes the following metrics: - ``` - pg_runtime_variable_max_connections - pg_runtime_variable_max_files_per_process - pg_runtime_variable_max_function_args - pg_runtime_variable_max_identifier_length - pg_runtime_variable_max_index_keys - pg_runtime_variable_max_locks_per_transaction - pg_runtime_variable_max_pred_locks_per_transaction - pg_runtime_variable_max_prepared_transactions - pg_runtime_variable_max_standby_archive_delay_milliseconds - pg_runtime_variable_max_standby_streaming_delay_milliseconds - pg_runtime_variable_max_wal_senders - ``` - - They are replaced by equivalent names under `pg_settings` with the exception of - ``` - pg_runtime_variable_max_standby_archive_delay_milliseconds - pg_runtime_variable_max_standby_streaming_delay_milliseconds - ``` - which are replaced with - ``` - pg_settings_max_standby_archive_delay_seconds - pg_settings_max_standby_streaming_delay_seconds - ``` - -## 0.1.3 / 2017-02-21 - -* Update the Go build to 1.7.5 to include a fix for NAT handling. -* Fix passwords leaking in DB url error message on connection failure. - -## 0.1.2 / 2017-02-07 - -* Use a connection pool of size 1 to reduce memory churn on target database. - -## 0.1.1 / 2016-11-29 - -* Fix pg_stat_replication metrics not being collected due to semantic version - filter problem. - -## 0.1.0 / 2016-11-21 - -* Change default port to 9187. -* Fix regressions with pg_stat_replication on older versions of Postgres. -* Add pg_static metric to store version strings as labels. -* Much more thorough testing structure. -* Move to semantic versioning for releases and docker image publications. - -## 0.0.1 / 2016-06-03 - -Initial release for publication. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index d325872bd..000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Prometheus Community Code of Conduct - -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 4d28795fc..000000000 --- a/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -ARG ARCH="amd64" -ARG OS="linux" -FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest -LABEL maintainer="The Prometheus Authors " - -ARG ARCH="amd64" -ARG OS="linux" -COPY .build/${OS}-${ARCH}/postgres_exporter /bin/postgres_exporter - -EXPOSE 9187 -USER nobody -ENTRYPOINT [ "/bin/postgres_exporter" ] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MAINTAINERS.md b/MAINTAINERS.md deleted file mode 100644 index a3032f51d..000000000 --- a/MAINTAINERS.md +++ /dev/null @@ -1,3 +0,0 @@ -* Ben Kochie @SuperQ -* William Rouesnel @wrouesnel -* Joe Adams @sysadmind diff --git a/Makefile b/Makefile deleted file mode 100644 index 114e3438f..000000000 --- a/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. -all:: - -# Needs to be defined before including Makefile.common to auto-generate targets -DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le -DOCKER_REPO ?= prometheuscommunity - -include Makefile.common - -DOCKER_IMAGE_NAME ?= postgres-exporter diff --git a/Makefile.common b/Makefile.common deleted file mode 100644 index 4de21512f..000000000 --- a/Makefile.common +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... 
- -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum 2> /dev/null),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.17.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -SKIP_GOLANGCI_LINT := -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.1.5 -GOLANGCI_FMT_OPTS ?= -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) - # If we're in CI and there is an Actions file, that means the linter - # is being run in Actions, so we don't need to run it here. - ifneq (,$(SKIP_GOLANGCI_LINT)) - GOLANGCI_LINT := - else ifeq (,$(CIRCLE_JOB)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint yamllint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" - $(GO) mod download - -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ - done - $(GO) mod tidy - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: $(GOLANGCI_LINT) - @echo ">> formatting code" - $(GO) fmt $(pkgs) -ifdef GOLANGCI_LINT - @echo ">> formatting code with golangci-lint" - $(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS) -endif - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-lint-fix -common-lint-fix: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint fix" - $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-yamllint -common-yamllint: - @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint 2> /dev/null)) - @echo "yamllint not installed so skipping" -else - yamllint . -endif - -# For backward-compatibility. 
-.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: - @echo ">> running check for unused/missing packages in go.mod" - $(GO) mod tidy - @git diff --exit-code -- go.sum go.mod - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker-repo-name -common-docker-repo-name: - @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" - -DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: common-proto -common-proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef - -govulncheck: install-govulncheck - govulncheck ./... 
- -install-govulncheck: - command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/NOTICE b/NOTICE deleted file mode 100644 index 815e05adc..000000000 --- a/NOTICE +++ /dev/null @@ -1,2 +0,0 @@ -Copyright 2018 William Rouesnel -Copyright 2021 The Prometheus Authors diff --git a/README-RDS.md b/README-RDS.md deleted file mode 100644 index 85bf8691f..000000000 --- a/README-RDS.md +++ /dev/null @@ -1,38 +0,0 @@ -# Using Postgres-Exporter with AWS:RDS - -### When using postgres-exporter with Amazon Web Services' RDS, the rolname "rdsadmin" and datname "rdsadmin" must be excluded. - -I had success running the docker container 'quay.io/prometheuscommunity/postgres-exporter:latest' -with queries.yaml as the PG_EXPORTER_EXTEND_QUERY_PATH. Errors -mentioned in issue #335 appeared and I had to modify the -'pg_stat_statements' query with the following: -`WHERE t2.rolname != 'rdsadmin'` - -Running postgres-exporter in a container like so: - ``` - DBNAME='postgres' - PGUSER='postgres' - PGPASS='psqlpasswd123' - PGHOST='name.blahblah.us-east-1.rds.amazonaws.com' - docker run --rm --detach \ - --name "postgresql_exporter_rds" \ - --publish 9187:9187 \ - --volume=/etc/prometheus/postgresql-exporter/queries.yaml:/var/lib/postgresql/queries.yaml \ - -e DATA_SOURCE_NAME="postgresql://${PGUSER}:${PGPASS}@${PGHOST}:5432/${DBNAME}?sslmode=disable" \ - -e PG_EXPORTER_EXCLUDE_DATABASES=rdsadmin \ - -e PG_EXPORTER_DISABLE_DEFAULT_METRICS=true \ - -e PG_EXPORTER_DISABLE_SETTINGS_METRICS=true \ - -e PG_EXPORTER_EXTEND_QUERY_PATH='/var/lib/postgresql/queries.yaml' \ - quay.io/prometheuscommunity/postgres-exporter - ``` - -### Expected changes to RDS: -+ See the Stack Overflow notes - (https://stackoverflow.com/questions/43926499/amazon-postgres-rds-pg-stat-statements-not-loaded#43931885) -+ You must also use a specific RDS parameter_group that includes the following: - ``` - shared_preload_libraries = "pg_stat_statements,pg_hint_plan" - ``` -+ Lastly, you must reboot the RDS instance. - diff --git a/README.md b/README.md index 01bd8b30d..1716c1c3d 100644 --- a/README.md +++ b/README.md @@ -1,447 +1,5 @@ -[![Build Status](https://circleci.com/gh/prometheus-community/postgres_exporter.svg?style=svg)](https://circleci.com/gh/prometheus-community/postgres_exporter) -[![Coverage Status](https://coveralls.io/repos/github/prometheus-community/postgres_exporter/badge.svg?branch=master)](https://coveralls.io/github/prometheus-community/postgres_exporter?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus-community/postgres_exporter)](https://goreportcard.com/report/github.com/prometheus-community/postgres_exporter) -[![Docker Pulls](https://img.shields.io/docker/pulls/prometheuscommunity/postgres-exporter.svg)](https://hub.docker.com/r/prometheuscommunity/postgres-exporter/tags) +# Assets Branch -# PostgreSQL Server Exporter +This is a branch to push various build assets to. -Prometheus exporter for PostgreSQL server metrics. - -CI Tested PostgreSQL versions: `11`, `12`, `13`, `14`, `15`, `16`, `17`.
- -## Quick Start -This package is available for Docker: -``` -# Start an example database -docker run --net=host -it --rm -e POSTGRES_PASSWORD=password postgres -# Connect to it -docker run \ - --net=host \ - -e DATA_SOURCE_URI="localhost:5432/postgres?sslmode=disable" \ - -e DATA_SOURCE_USER=postgres \ - -e DATA_SOURCE_PASS=password \ - quay.io/prometheuscommunity/postgres-exporter -``` - -Test with: -```bash -curl "http://localhost:9187/metrics" -``` - -Example Prometheus config: -```yaml -scrape_configs: - - job_name: postgres - static_configs: - - targets: ["127.0.0.1:9187"] # Replace IP with the hostname of the docker container if you're running the container in a separate network -``` - -To avoid having the password in an environment variable, use DATA_SOURCE_PASS_FILE with a mounted file containing the password. - -The container process runs with uid/gid 65534 (important for file permissions). - -## Multi-Target Support (BETA) -**This feature is in beta and may require changes in future releases. Feedback is welcome.** - -This exporter supports the [multi-target pattern](https://prometheus.io/docs/guides/multi-target-exporter/). This allows running a single instance of this exporter for multiple postgres targets. Using the multi-target functionality of this exporter is **optional** and meant for cases where it is impossible to install the exporter as a sidecar, for example SaaS-managed services. - -To use the multi-target functionality, send an HTTP request to the endpoint `/probe?target=foo:5432` where target is set to the DSN of the postgres instance to scrape metrics from. - -To avoid putting sensitive information like username and password in the URL, preconfigured auth modules are supported via the [auth_modules](#auth_modules) section of the config file. auth_modules for DSNs can be used with the `/probe` endpoint by specifying the `?auth_module=foo` HTTP parameter. - -Example Prometheus config: -```yaml -scrape_configs: - - job_name: 'postgres' - static_configs: - - targets: - - server1:5432 - - server2:5432 - metrics_path: /probe - params: - auth_module: [foo] - relabel_configs: - - source_labels: [__address__] - target_label: __param_target - - source_labels: [__param_target] - target_label: instance - - target_label: __address__ - replacement: 127.0.0.1:9116 # The postgres exporter's real hostname:port. -``` - -## Configuration File - -The configuration file controls the behavior of the exporter. It can be set using the `--config.file` command line flag and defaults to `postgres_exporter.yml`. - -### auth_modules -This section defines preset authentication and connection parameters for use in the [multi-target endpoint](#multi-target-support-beta). `auth_modules` is a map of modules with the key being the identifier which can be used in the `/probe` endpoint. -Currently only the `userpass` type is supported. - -Example: -```yaml -auth_modules: - foo1: # Set this to any name you want - type: userpass - userpass: - username: first - password: firstpass - options: - # options become key=value parameters of the DSN - sslmode: disable -``` - -## Building and running - - git clone https://github.com/prometheus-community/postgres_exporter.git - cd postgres_exporter - make build - ./postgres_exporter - -To build the Docker image: - - make promu - promu crossbuild -p linux/amd64 -p linux/armv7 -p linux/arm64 -p linux/ppc64le - make docker - -This will build the docker image as `prometheuscommunity/postgres_exporter:${branch}`.
- -### Flags - -* `help` - Show context-sensitive help (also try --help-long and --help-man). - - -* `[no-]collector.database` - Enable the `database` collector (default: enabled). - -* `[no-]collector.database_wraparound` - Enable the `database_wraparound` collector (default: disabled). - -* `[no-]collector.locks` - Enable the `locks` collector (default: enabled). - -* `[no-]collector.long_running_transactions` - Enable the `long_running_transactions` collector (default: disabled). - -* `[no-]collector.postmaster` - Enable the `postmaster` collector (default: disabled). - -* `[no-]collector.process_idle` - Enable the `process_idle` collector (default: disabled). - -* `[no-]collector.replication` - Enable the `replication` collector (default: enabled). - -* `[no-]collector.replication_slot` - Enable the `replication_slot` collector (default: enabled). - -* `[no-]collector.stat_activity_autovacuum` - Enable the `stat_activity_autovacuum` collector (default: disabled). - -* `[no-]collector.stat_bgwriter` - Enable the `stat_bgwriter` collector (default: enabled). - -* `[no-]collector.stat_database` - Enable the `stat_database` collector (default: enabled). - -* `[no-]collector.stat_progress_vacuum` - Enable the `stat_progress_vacuum` collector (default: enabled). - -* `[no-]collector.stat_statements` - Enable the `stat_statements` collector (default: disabled). - -* `[no-]collector.stat_statements.include_query` - Enable selecting statement query together with queryId. (default: disabled) - -* `--collector.stat_statements.query_length` - Maximum length of the statement text. Default is 120. - -* `[no-]collector.stat_user_tables` - Enable the `stat_user_tables` collector (default: enabled). - -* `[no-]collector.stat_wal_receiver` - Enable the `stat_wal_receiver` collector (default: disabled). - -* `[no-]collector.statio_user_indexes` - Enable the `statio_user_indexes` collector (default: disabled). - -* `[no-]collector.statio_user_tables` - Enable the `statio_user_tables` collector (default: enabled). - -* `[no-]collector.wal` - Enable the `wal` collector (default: enabled). - -* `[no-]collector.xlog_location` - Enable the `xlog_location` collector (default: disabled). - -* `config.file` - Set the config file path. Default is `postgres_exporter.yml` - -* `web.systemd-socket` - Use systemd socket activation listeners instead of port listeners (Linux only). Default is `false` - -* `web.listen-address` - Address to listen on for web interface and telemetry. Default is `:9187`. - -* `web.config.file` - Configuration file to use TLS and/or basic authentication. The format of the - file is described [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). - -* `web.telemetry-path` - Path under which to expose metrics. Default is `/metrics`. - -* `disable-default-metrics` - Use only metrics supplied from `queries.yaml` via `--extend.query-path`. Default is `false`. - -* `disable-settings-metrics` - Use the flag if you don't want to scrape `pg_settings`. Default is `false`. - -* `auto-discover-databases` (DEPRECATED) - Whether to discover the databases on a server dynamically. Default is `false`. - -* `extend.query-path` (DEPRECATED) - Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml) - for examples of the format. - -* `dumpmaps` - Do not run - print the internal representation of the metric maps. Useful when debugging a custom - queries file. 
-
-### Environment Variables
-
-The following environment variables configure the exporter:
-
-* `DATA_SOURCE_NAME`
-  The default legacy format. Accepts URI form and key=value form arguments. The
-  URI may contain the username and password to connect with.
-
-* `DATA_SOURCE_URI`
-  An alternative to `DATA_SOURCE_NAME` which exclusively accepts the hostname
-  without a username and password component. For example, `my_pg_hostname` or
-  `my_pg_hostname:5432/postgres?sslmode=disable`.
-
-* `DATA_SOURCE_URI_FILE`
-  The same as above but reads the URI from a file.
-
-* `DATA_SOURCE_USER`
-  When using `DATA_SOURCE_URI`, this environment variable is used to specify
-  the username.
-
-* `DATA_SOURCE_USER_FILE`
-  The same, but reads the username from a file.
-
-* `DATA_SOURCE_PASS`
-  When using `DATA_SOURCE_URI`, this environment variable is used to specify
-  the password to connect with.
-
-* `DATA_SOURCE_PASS_FILE`
-  The same as above but reads the password from a file.
-
-* `PG_EXPORTER_WEB_TELEMETRY_PATH`
-  Path under which to expose metrics. Default is `/metrics`.
-
-* `PG_EXPORTER_DISABLE_DEFAULT_METRICS`
-  Use only metrics supplied from `queries.yaml`. Value can be `true` or `false`. Default is `false`.
-
-* `PG_EXPORTER_DISABLE_SETTINGS_METRICS`
-  Use the variable if you don't want to scrape `pg_settings`. Value can be `true` or `false`. Default is `false`.
-
-* `PG_EXPORTER_AUTO_DISCOVER_DATABASES` (DEPRECATED)
-  Whether to discover the databases on a server dynamically. Value can be `true` or `false`. Default is `false`.
-
-* `PG_EXPORTER_EXTEND_QUERY_PATH`
-  Path to a YAML file containing custom queries to run. Check out [`queries.yaml`](queries.yaml)
-  for examples of the format.
-
-* `PG_EXPORTER_CONSTANT_LABELS` (DEPRECATED)
-  Labels to set in all metrics. A list of `label=value` pairs, separated by commas.
-
-* `PG_EXPORTER_EXCLUDE_DATABASES` (DEPRECATED)
-  A comma-separated list of databases to remove when autoDiscoverDatabases is enabled. Default is an empty string.
-
-* `PG_EXPORTER_INCLUDE_DATABASES` (DEPRECATED)
-  A comma-separated list of databases to only include when autoDiscoverDatabases is enabled. Default is an empty
-  string, which allows all databases.
-
-* `PG_EXPORTER_METRIC_PREFIX`
-  A prefix to use for each of the default metrics exported by postgres-exporter. Default is `pg`.
-
-Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.
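For example, the split URI/user/password variables might be combined like this (a sketch; the hostname and secret path are illustrative). Note that `DATA_SOURCE_NAME`, if set, takes precedence over these variables:

```bash
export DATA_SOURCE_URI="my_pg_hostname:5432/postgres?sslmode=disable"
export DATA_SOURCE_USER="postgres_exporter"
# Reading the password from a mounted file keeps it out of the environment.
export DATA_SOURCE_PASS_FILE="/run/secrets/pg_password"
./postgres_exporter
```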
-
-### Setting the Postgres server's data source name
-
-The PostgreSQL server's [data source name](http://en.wikipedia.org/wiki/Data_source_name)
-must be set via the `DATA_SOURCE_NAME` environment variable.
-
-For running it locally on a default Debian/Ubuntu install, this will work (transpose to init script as appropriate):
-
-    sudo -u postgres DATA_SOURCE_NAME="user=postgres host=/var/run/postgresql/ sslmode=disable" postgres_exporter
-
-You can also set a list of sources to scrape different instances from a single exporter setup. Just define a comma-separated string:
-
-    sudo -u postgres DATA_SOURCE_NAME="port=5432,port=6432" postgres_exporter
-
-See the [github.com/lib/pq](http://github.com/lib/pq) module for other ways to format the connection string.
-
-### Adding new metrics
-
-The exporter will attempt to dynamically export additional metrics if they are added in the
-future, but they will be marked as "untyped". Additional metric maps can be easily created
-from Postgres documentation by copying the tables and using the following Python snippet:
-
-```python
-from io import StringIO
-
-x = """tab separated raw text of a documentation table"""
-for line in StringIO(x):
-    column, ctype, description = line.split('\t')
-    print('"{0}" : {{ prometheus.CounterValue, prometheus.NewDesc("pg_stat_database_{0}", "{2}", nil, nil) }},'.format(column.strip(), ctype, description.strip()))
-```
-Adjust the value of the resultant prometheus value type appropriately. This helps build
-rich self-documenting metrics for the exporter.
-
-### Adding new metrics via a config file (DEPRECATED)
-
-This feature is deprecated in favor of built-in collector functions. For generic SQL database monitoring see the [sql_exporter](https://github.com/burningalchemist/sql_exporter).
-
-The `--extend.query-path` command-line argument specifies a YAML file containing additional queries to run.
-Some examples are provided in [queries.yaml](queries.yaml).
-
-### Disabling default metrics
-To work with non-officially-supported postgres versions (e.g. 8.2.15),
-or variants of postgres (e.g. Greenplum), you can disable the default metrics with the `--disable-default-metrics`
-flag. This removes all built-in metrics, and uses only metrics defined by queries in the `queries.yaml` file you supply
-(so you must supply one, otherwise the exporter will return nothing but internal statuses and not your database).
-
-### Automatically discover databases (DEPRECATED)
-To scrape metrics from all databases on a database server, the database DSNs can be dynamically discovered via the
-`--auto-discover-databases` flag. When true, `SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false and datname != current_database()` is run for all configured DSNs. From the
-result a new set of DSNs is created for which the metrics are scraped.
-
-In addition, the `--exclude-databases` option makes it possible to filter the result of the auto discovery and discard databases you do not need.
-
-If you want to include only a subset of databases, you can use the `--include-databases` option. The exporter still queries the
-`pg_database` table, but only scrapes a database if it is in the include list.
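Putting the deprecated discovery flags together (a sketch; the excluded database names are purely illustrative):

```bash
# Scrape every connectable database on the server, minus two we don't care about.
./postgres_exporter \
  --auto-discover-databases \
  --exclude-databases="app_archive,app_staging"
```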
-
-### Running as non-superuser
-
-To collect metrics from `pg_stat*` views as a non-superuser in PostgreSQL
-server versions >= 10, you can grant the user the `pg_monitor` or `pg_read_all_stats` [built-in roles](https://www.postgresql.org/docs/current/predefined-roles.html). If
-you need to monitor older PostgreSQL servers, you will have to create functions
-and views as a superuser, and assign permissions separately to those.
-
-```sql
--- To use IF statements, hence to be able to check if the user exists before
--- attempting creation, we need to switch to procedural SQL (PL/pgSQL)
--- instead of standard SQL.
--- More: https://www.postgresql.org/docs/9.3/plpgsql-overview.html
--- To preserve compatibility with <9.0, DO blocks are not used; instead,
--- a function is created and dropped.
-CREATE OR REPLACE FUNCTION __tmp_create_user() returns void as $$
-BEGIN
-  IF NOT EXISTS (
-          SELECT   -- SELECT list can stay empty for this
-          FROM pg_catalog.pg_user
-          WHERE usename = 'postgres_exporter') THEN
-    CREATE USER postgres_exporter;
-  END IF;
-END;
-$$ language plpgsql;
-
-SELECT __tmp_create_user();
-DROP FUNCTION __tmp_create_user();
-
-ALTER USER postgres_exporter WITH PASSWORD 'password';
-ALTER USER postgres_exporter SET SEARCH_PATH TO postgres_exporter,pg_catalog;
-
--- If deploying as non-superuser (for example in AWS RDS), uncomment the GRANT
--- line below and replace <master_user> with your root user.
--- GRANT postgres_exporter TO <master_user>;
-
-GRANT CONNECT ON DATABASE postgres TO postgres_exporter;
-```
-
-Run the following command if you use PostgreSQL 10 or newer:
-```sql
-GRANT pg_monitor TO postgres_exporter;
-```
-
-Run the following SQL commands only if you use a PostgreSQL version older than 10.
-In PostgreSQL, views run with the permissions of the user that created them so
-they can act as security barriers. Functions need to be created to share this
-data with the non-superuser. Only creating the views will leave out the most
-important bits of data.
-```sql
-CREATE SCHEMA IF NOT EXISTS postgres_exporter;
-GRANT USAGE ON SCHEMA postgres_exporter TO postgres_exporter;
-
-CREATE OR REPLACE FUNCTION get_pg_stat_activity() RETURNS SETOF pg_stat_activity AS
-$$ SELECT * FROM pg_catalog.pg_stat_activity; $$
-LANGUAGE sql
-VOLATILE
-SECURITY DEFINER;
-
-CREATE OR REPLACE VIEW postgres_exporter.pg_stat_activity
-AS
-  SELECT * from get_pg_stat_activity();
-
-GRANT SELECT ON postgres_exporter.pg_stat_activity TO postgres_exporter;
-
-CREATE OR REPLACE FUNCTION get_pg_stat_replication() RETURNS SETOF pg_stat_replication AS
-$$ SELECT * FROM pg_catalog.pg_stat_replication; $$
-LANGUAGE sql
-VOLATILE
-SECURITY DEFINER;
-
-CREATE OR REPLACE VIEW postgres_exporter.pg_stat_replication
-AS
-  SELECT * FROM get_pg_stat_replication();
-
-GRANT SELECT ON postgres_exporter.pg_stat_replication TO postgres_exporter;
-
-CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-CREATE OR REPLACE FUNCTION get_pg_stat_statements() RETURNS SETOF pg_stat_statements AS
-$$ SELECT * FROM public.pg_stat_statements; $$
-LANGUAGE sql
-VOLATILE
-SECURITY DEFINER;
-
-CREATE OR REPLACE VIEW postgres_exporter.pg_stat_statements
-AS
-  SELECT * FROM get_pg_stat_statements();
-
-GRANT SELECT ON postgres_exporter.pg_stat_statements TO postgres_exporter;
-```
-
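To sanity-check the grants, you can connect as the new role and read one of the views (a sketch using `psql`; it assumes the pre-PostgreSQL-10 statements above were applied on the `postgres` database):

```bash
# Should return a row count rather than a permission error.
PGPASSWORD=password psql -h localhost -U postgres_exporter -d postgres \
  -c "SELECT count(*) FROM postgres_exporter.pg_stat_activity;"
```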
-> **NOTE**
-> Remember to use the `postgres` database name in the connection string:
-> ```
-> DATA_SOURCE_NAME=postgresql://postgres_exporter:password@localhost:5432/postgres?sslmode=disable
-> ```
-
-
-## Running the tests
-```
-# Run the unit tests
-make test
-# Start the test database with docker
-docker run -p 5432:5432 -e POSTGRES_DB=circle_test -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=test -d postgres
-# Run the integration tests
-DATA_SOURCE_NAME='postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable' GOOPTS='-v -tags integration' make test
-```
+It is currently used for tracking expected metrics from the exporter.
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index fed02d85c..000000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Reporting a security issue
-
-The Prometheus security policy, including how to report vulnerabilities, can be
-found here:
-
-
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 7cca7711a..000000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.17.1
diff --git a/cmd/postgres_exporter/datasource.go b/cmd/postgres_exporter/datasource.go
deleted file mode 100644
index 7a22e177c..000000000
--- a/cmd/postgres_exporter/datasource.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"fmt"
-	"net/url"
-	"os"
-	"regexp"
-	"strings"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-func (e *Exporter) discoverDatabaseDSNs() []string {
-	// connstring syntax is complex (and not sure if even regular).
- // we don't need to parse it, so just superficially validate that it starts - // with a valid-ish keyword pair - connstringRe := regexp.MustCompile(`^ *[a-zA-Z0-9]+ *= *[^= ]+`) - - dsns := make(map[string]struct{}) - for _, dsn := range e.dsn { - var dsnURI *url.URL - var dsnConnstring string - - if strings.HasPrefix(dsn, "postgresql://") || strings.HasPrefix(dsn, "postgres://") { - var err error - dsnURI, err = url.Parse(dsn) - if err != nil { - logger.Error("Unable to parse DSN as URI", "dsn", loggableDSN(dsn), "err", err) - continue - } - } else if connstringRe.MatchString(dsn) { - dsnConnstring = dsn - } else { - logger.Error("Unable to parse DSN as either URI or connstring", "dsn", loggableDSN(dsn)) - continue - } - - server, err := e.servers.GetServer(dsn) - if err != nil { - logger.Error("Error opening connection to database", "dsn", loggableDSN(dsn), "err", err) - continue - } - dsns[dsn] = struct{}{} - - // If autoDiscoverDatabases is true, set first dsn as master database (Default: false) - server.master = true - - databaseNames, err := queryDatabases(server) - if err != nil { - logger.Error("Error querying databases", "dsn", loggableDSN(dsn), "err", err) - continue - } - for _, databaseName := range databaseNames { - if contains(e.excludeDatabases, databaseName) { - continue - } - - if len(e.includeDatabases) != 0 && !contains(e.includeDatabases, databaseName) { - continue - } - - if dsnURI != nil { - dsnURI.Path = databaseName - dsn = dsnURI.String() - } else { - // replacing one dbname with another is complicated. - // just append new dbname to override. - dsn = fmt.Sprintf("%s dbname=%s", dsnConnstring, databaseName) - } - dsns[dsn] = struct{}{} - } - } - - result := make([]string, len(dsns)) - index := 0 - for dsn := range dsns { - result[index] = dsn - index++ - } - - return result -} - -func (e *Exporter) scrapeDSN(ch chan<- prometheus.Metric, dsn string) error { - server, err := e.servers.GetServer(dsn) - - if err != nil { - return &ErrorConnectToServer{fmt.Sprintf("Error opening connection to database (%s): %s", loggableDSN(dsn), err.Error())} - } - - // Check if autoDiscoverDatabases is false, set dsn as master database (Default: false) - if !e.autoDiscoverDatabases { - server.master = true - } - - // Check if map versions need to be updated - if err := e.checkMapVersions(ch, server); err != nil { - logger.Warn("Proceeding with outdated query maps, as the Postgres version could not be determined", "err", err) - } - - return server.Scrape(ch, e.disableSettingsMetrics) -} - -// try to get the DataSource -// DATA_SOURCE_NAME always wins so we do not break older versions -// reading secrets from files wins over secrets in environment variables -// DATA_SOURCE_NAME > DATA_SOURCE_{USER|PASS}_FILE > DATA_SOURCE_{USER|PASS} -func getDataSources() ([]string, error) { - var dsn = os.Getenv("DATA_SOURCE_NAME") - if len(dsn) != 0 { - return strings.Split(dsn, ","), nil - } - - var user, pass, uri string - - dataSourceUserFile := os.Getenv("DATA_SOURCE_USER_FILE") - if len(dataSourceUserFile) != 0 { - fileContents, err := os.ReadFile(dataSourceUserFile) - if err != nil { - return nil, fmt.Errorf("failed loading data source user file %s: %s", dataSourceUserFile, err.Error()) - } - user = strings.TrimSpace(string(fileContents)) - } else { - user = os.Getenv("DATA_SOURCE_USER") - } - - dataSourcePassFile := os.Getenv("DATA_SOURCE_PASS_FILE") - if len(dataSourcePassFile) != 0 { - fileContents, err := os.ReadFile(dataSourcePassFile) - if err != nil { - return nil, fmt.Errorf("failed 
loading data source pass file %s: %s", dataSourcePassFile, err.Error()) - } - pass = strings.TrimSpace(string(fileContents)) - } else { - pass = os.Getenv("DATA_SOURCE_PASS") - } - - ui := url.UserPassword(user, pass).String() - dataSrouceURIFile := os.Getenv("DATA_SOURCE_URI_FILE") - if len(dataSrouceURIFile) != 0 { - fileContents, err := os.ReadFile(dataSrouceURIFile) - if err != nil { - return nil, fmt.Errorf("failed loading data source URI file %s: %s", dataSrouceURIFile, err.Error()) - } - uri = strings.TrimSpace(string(fileContents)) - } else { - uri = os.Getenv("DATA_SOURCE_URI") - } - - // No datasources found. This allows us to support the multi-target pattern - // without an explicit datasource. - if uri == "" { - return []string{}, nil - } - - dsn = "postgresql://" + ui + "@" + uri - - return []string{dsn}, nil -} diff --git a/cmd/postgres_exporter/main.go b/cmd/postgres_exporter/main.go deleted file mode 100644 index 093ddd301..000000000 --- a/cmd/postgres_exporter/main.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "net/http" - "os" - "strings" - - "github.com/alecthomas/kingpin/v2" - "github.com/prometheus-community/postgres_exporter/collector" - "github.com/prometheus-community/postgres_exporter/config" - "github.com/prometheus/client_golang/prometheus" - versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/promslog" - "github.com/prometheus/common/promslog/flag" - "github.com/prometheus/common/version" - "github.com/prometheus/exporter-toolkit/web" - "github.com/prometheus/exporter-toolkit/web/kingpinflag" -) - -var ( - c = config.Handler{ - Config: &config.Config{}, - } - - configFile = kingpin.Flag("config.file", "Postgres exporter configuration file.").Default("postgres_exporter.yml").String() - webConfig = kingpinflag.AddFlags(kingpin.CommandLine, ":9187") - metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").Envar("PG_EXPORTER_WEB_TELEMETRY_PATH").String() - disableDefaultMetrics = kingpin.Flag("disable-default-metrics", "Do not include default metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_DEFAULT_METRICS").Bool() - disableSettingsMetrics = kingpin.Flag("disable-settings-metrics", "Do not include pg_settings metrics.").Default("false").Envar("PG_EXPORTER_DISABLE_SETTINGS_METRICS").Bool() - autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically. (DEPRECATED)").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool() - queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run. 
(DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String() - onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool() - constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,). (DEPRECATED)").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String() - excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String() - includeDatabases = kingpin.Flag("include-databases", "A list of databases to include when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_INCLUDE_DATABASES").String() - metricPrefix = kingpin.Flag("metric-prefix", "A metric prefix can be used to have non-default (not \"pg\") prefixes for each of the metrics").Default("pg").Envar("PG_EXPORTER_METRIC_PREFIX").String() - logger = promslog.NewNopLogger() -) - -// Metric name parts. -const ( - // Namespace for all metrics. - namespace = "pg" - // Subsystems. - exporter = "exporter" - // The name of the exporter. - exporterName = "postgres_exporter" - // Metric label used for static string data thats handy to send to Prometheus - // e.g. version - staticLabelName = "static" - // Metric label used for server identification. - serverLabelName = "server" -) - -func main() { - kingpin.Version(version.Print(exporterName)) - promslogConfig := &promslog.Config{} - flag.AddFlags(kingpin.CommandLine, promslogConfig) - kingpin.HelpFlag.Short('h') - kingpin.Parse() - logger = promslog.New(promslogConfig) - - if *onlyDumpMaps { - dumpMaps() - return - } - - if err := c.ReloadConfig(*configFile, logger); err != nil { - // This is not fatal, but it means that auth must be provided for every dsn. - logger.Warn("Error loading config", "err", err) - } - - dsns, err := getDataSources() - if err != nil { - logger.Error("Failed reading data sources", "err", err.Error()) - os.Exit(1) - } - - excludedDatabases := strings.Split(*excludeDatabases, ",") - logger.Info("Excluded databases", "databases", fmt.Sprintf("%v", excludedDatabases)) - - if *queriesPath != "" { - logger.Warn("The extended queries.yaml config is DEPRECATED", "file", *queriesPath) - } - - if *autoDiscoverDatabases || *excludeDatabases != "" || *includeDatabases != "" { - logger.Warn("Scraping additional databases via auto discovery is DEPRECATED") - } - - if *constantLabelsList != "" { - logger.Warn("Constant labels on all metrics is DEPRECATED") - } - - opts := []ExporterOpt{ - DisableDefaultMetrics(*disableDefaultMetrics), - DisableSettingsMetrics(*disableSettingsMetrics), - AutoDiscoverDatabases(*autoDiscoverDatabases), - WithUserQueriesPath(*queriesPath), - WithConstantLabels(*constantLabelsList), - ExcludeDatabases(excludedDatabases), - IncludeDatabases(*includeDatabases), - } - - exporter := NewExporter(dsns, opts...) - defer func() { - exporter.servers.Close() - }() - - prometheus.MustRegister(versioncollector.NewCollector(exporterName)) - - prometheus.MustRegister(exporter) - - // TODO(@sysadmind): Remove this with multi-target support. 
We are removing multiple DSN support - dsn := "" - if len(dsns) > 0 { - dsn = dsns[0] - } - - pe, err := collector.NewPostgresCollector( - logger, - excludedDatabases, - dsn, - []string{}, - ) - if err != nil { - logger.Warn("Failed to create PostgresCollector", "err", err.Error()) - } else { - prometheus.MustRegister(pe) - } - - http.Handle(*metricsPath, promhttp.Handler()) - - if *metricsPath != "/" && *metricsPath != "" { - landingConfig := web.LandingConfig{ - Name: "Postgres Exporter", - Description: "Prometheus PostgreSQL server Exporter", - Version: version.Info(), - Links: []web.LandingLinks{ - { - Address: *metricsPath, - Text: "Metrics", - }, - }, - } - landingPage, err := web.NewLandingPage(landingConfig) - if err != nil { - logger.Error("error creating landing page", "err", err) - os.Exit(1) - } - http.Handle("/", landingPage) - } - - http.HandleFunc("/probe", handleProbe(logger, excludedDatabases)) - - srv := &http.Server{} - if err := web.ListenAndServe(srv, webConfig, logger); err != nil { - logger.Error("Error running HTTP server", "err", err) - os.Exit(1) - } -} diff --git a/cmd/postgres_exporter/namespace.go b/cmd/postgres_exporter/namespace.go deleted file mode 100644 index ac7a23739..000000000 --- a/cmd/postgres_exporter/namespace.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "database/sql" - "errors" - "fmt" - "time" - - "github.com/blang/semver/v4" - "github.com/lib/pq" - "github.com/prometheus/client_golang/prometheus" -) - -// Query within a namespace mapping and emit metrics. Returns fatal errors if -// the scrape fails, and a slice of errors if they were non-fatal. -func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { - // Check for a query override for this namespace - query, found := server.queryOverrides[namespace] - - // Was this query disabled (i.e. nothing sensible can be queried on cu - // version of PostgreSQL? - if query == "" && found { - // Return success (no pertinent data) - return []prometheus.Metric{}, []error{}, nil - } - - // Don't fail on a bad scrape of one metric - var rows *sql.Rows - var err error - - if !found { - // I've no idea how to avoid this properly at the moment, but this is - // an admin tool so you're not injecting SQL right? 
- rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas - } else { - rows, err = server.db.Query(query) - } - if err != nil { - return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) - } - defer rows.Close() // nolint: errcheck - - var columnNames []string - columnNames, err = rows.Columns() - if err != nil { - return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err)) - } - - // Make a lookup map for the column indices - var columnIdx = make(map[string]int, len(columnNames)) - for i, n := range columnNames { - columnIdx[n] = i - } - - var columnData = make([]interface{}, len(columnNames)) - var scanArgs = make([]interface{}, len(columnNames)) - for i := range columnData { - scanArgs[i] = &columnData[i] - } - - nonfatalErrors := []error{} - - metrics := make([]prometheus.Metric, 0) - - for rows.Next() { - err = rows.Scan(scanArgs...) - if err != nil { - return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err)) - } - - // Get the label values for this row. - labels := make([]string, len(mapping.labels)) - for idx, label := range mapping.labels { - labels[idx], _ = dbToString(columnData[columnIdx[label]]) - } - - // Loop over column names, and match to scan data. Unknown columns - // will be filled with an untyped metric number *if* they can be - // converted to float64s. NULLs are allowed and treated as NaN. - for idx, columnName := range columnNames { - var metric prometheus.Metric - if metricMapping, ok := mapping.columnMappings[columnName]; ok { - // Is this a metricy metric? - if metricMapping.discard { - continue - } - - if metricMapping.histogram { - var keys []float64 - err = pq.Array(&keys).Scan(columnData[idx]) - if err != nil { - return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "buckets:", namespace, err)) - } - - var values []int64 - valuesIdx, ok := columnIdx[columnName+"_bucket"] - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_bucket"))) - continue - } - err = pq.Array(&values).Scan(columnData[valuesIdx]) - if err != nil { - return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving", columnName, "bucket values:", namespace, err)) - } - - buckets := make(map[float64]uint64, len(keys)) - for i, key := range keys { - if i >= len(values) { - break - } - buckets[key] = uint64(values[i]) - } - - idx, ok = columnIdx[columnName+"_sum"] - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_sum"))) - continue - } - sum, ok := dbToFloat64(columnData[idx]) - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_sum", columnData[idx]))) - continue - } - - idx, ok = columnIdx[columnName+"_count"] - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Missing column: ", namespace, columnName+"_count"))) - continue - } - count, ok := dbToUint64(columnData[idx]) - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName+"_count", columnData[idx]))) - continue - } - - metric = prometheus.MustNewConstHistogram( - metricMapping.desc, - count, sum, buckets, - labels..., - ) - } else { - value, ok := 
dbToFloat64(columnData[idx]) - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx]))) - continue - } - // Generate the metric - metric = prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...) - } - } else { - // Unknown metric. Report as untyped if scan to float64 works, else note an error too. - metricLabel := fmt.Sprintf("%s_%s", namespace, columnName) - desc := prometheus.NewDesc(metricLabel, fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, server.labels) - - // Its not an error to fail here, since the values are - // unexpected anyway. - value, ok := dbToFloat64(columnData[idx]) - if !ok { - nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding: ", namespace, columnName, err))) - continue - } - metric = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...) - } - metrics = append(metrics, metric) - } - } - return metrics, nonfatalErrors, nil -} - -// Iterate through all the namespace mappings in the exporter and run their -// queries. -func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error { - // Return a map of namespace -> errors - namespaceErrors := make(map[string]error) - - scrapeStart := time.Now() - - for namespace, mapping := range server.metricMap { - logger.Debug("Querying namespace", "namespace", namespace) - - if mapping.master && !server.master { - logger.Debug("Query skipped...") - continue - } - - // check if the query is to be run on specific database server version range or not - if len(server.runonserver) > 0 { - serVersion, _ := semver.Parse(server.lastMapVersion.String()) - runServerRange, _ := semver.ParseRange(server.runonserver) - if !runServerRange(serVersion) { - logger.Debug("Query skipped for this database version", "version", server.lastMapVersion.String(), "target_version", server.runonserver) - continue - } - } - - scrapeMetric := false - // Check if the metric is cached - server.cacheMtx.Lock() - cachedMetric, found := server.metricCache[namespace] - server.cacheMtx.Unlock() - // If found, check if needs refresh from cache - if found { - if scrapeStart.Sub(cachedMetric.lastScrape).Seconds() > float64(mapping.cacheSeconds) { - scrapeMetric = true - } - } else { - scrapeMetric = true - } - - var metrics []prometheus.Metric - var nonFatalErrors []error - var err error - if scrapeMetric { - metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping) - } else { - metrics = cachedMetric.metrics - } - - // Serious error - a namespace disappeared - if err != nil { - namespaceErrors[namespace] = err - logger.Info("error finding namespace", "err", err) - } - // Non-serious errors - likely version or parsing problems. 
- if len(nonFatalErrors) > 0 { - for _, err := range nonFatalErrors { - logger.Info("error querying namespace", "err", err) - } - } - - // Emit the metrics into the channel - for _, metric := range metrics { - ch <- metric - } - - if scrapeMetric { - // Only cache if metric is meaningfully cacheable - if mapping.cacheSeconds > 0 { - server.cacheMtx.Lock() - server.metricCache[namespace] = cachedMetrics{ - metrics: metrics, - lastScrape: scrapeStart, - } - server.cacheMtx.Unlock() - } - } - } - - return namespaceErrors -} diff --git a/cmd/postgres_exporter/pg_setting.go b/cmd/postgres_exporter/pg_setting.go deleted file mode 100644 index 5b13e160f..000000000 --- a/cmd/postgres_exporter/pg_setting.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "math" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - settingUnits = []string{ - "ms", "s", "min", "h", "d", - "B", "kB", "MB", "GB", "TB", - } -) - -// Query the pg_settings view containing runtime variables -func querySettings(ch chan<- prometheus.Metric, server *Server) error { - logger.Debug("Querying pg_setting view", "server", server) - - // pg_settings docs: https://www.postgresql.org/docs/current/static/view-pg-settings.html - // - // NOTE: If you add more vartypes here, you must update the supported - // types in normaliseUnit() below - query := "SELECT name, setting, COALESCE(unit, ''), short_desc, vartype FROM pg_settings WHERE vartype IN ('bool', 'integer', 'real') AND name != 'sync_commit_cancel_wait';" - - rows, err := server.db.Query(query) - if err != nil { - return fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) - } - defer rows.Close() // nolint: errcheck - - for rows.Next() { - s := &pgSetting{} - err = rows.Scan(&s.name, &s.setting, &s.unit, &s.shortDesc, &s.vartype) - if err != nil { - return fmt.Errorf("Error retrieving rows on %q: %s %v", server, namespace, err) - } - - ch <- s.metric(server.labels) - } - - return nil -} - -// pgSetting is represents a PostgreSQL runtime variable as returned by the -// pg_settings view. 
-type pgSetting struct { - name, setting, unit, shortDesc, vartype string -} - -func (s *pgSetting) metric(labels prometheus.Labels) prometheus.Metric { - var ( - err error - name = strings.ReplaceAll(strings.ReplaceAll(s.name, ".", "_"), "-", "_") - unit = s.unit // nolint: ineffassign - shortDesc = fmt.Sprintf("Server Parameter: %s", s.name) - subsystem = "settings" - val float64 - ) - - switch s.vartype { - case "bool": - if s.setting == "on" { - val = 1 - } - case "integer", "real": - if val, unit, err = s.normaliseUnit(); err != nil { - // Panic, since we should recognise all units - // and don't want to silently exlude metrics - panic(err) - } - - if len(unit) > 0 { - name = fmt.Sprintf("%s_%s", name, unit) - shortDesc = fmt.Sprintf("%s [Units converted to %s.]", shortDesc, unit) - } - default: - // Panic because we got a type we didn't ask for - panic(fmt.Sprintf("Unsupported vartype %q", s.vartype)) - } - - desc := newDesc(subsystem, name, shortDesc, labels) - return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, val) -} - -// Removes units from any of the setting values. -// This is mostly because of a irregularity regarding AWS RDS Aurora -// https://github.com/prometheus-community/postgres_exporter/issues/619 -func (s *pgSetting) sanitizeValue() { - for _, unit := range settingUnits { - if strings.HasSuffix(s.setting, unit) { - endPos := len(s.setting) - len(unit) - 1 - s.setting = s.setting[:endPos] - return - } - } -} - -// TODO: fix linter override -// nolint: nakedret -func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { - s.sanitizeValue() - - val, err = strconv.ParseFloat(s.setting, 64) - if err != nil { - return val, unit, fmt.Errorf("Error converting setting %q value %q to float: %s", s.name, s.setting, err) - } - - // Units defined in: https://www.postgresql.org/docs/current/static/config-setting.html - switch s.unit { - case "": - return - case "ms", "s", "min", "h", "d": - unit = "seconds" - case "B", "kB", "MB", "GB", "TB", "1kB", "2kB", "4kB", "8kB", "16kB", "32kB", "64kB", "16MB", "32MB", "64MB": - unit = "bytes" - default: - err = fmt.Errorf("unknown unit for runtime variable: %q", s.unit) - return - } - - // -1 is special, don't modify the value - if val == -1 { - return - } - - switch s.unit { - case "ms": - val /= 1000 - case "min": - val *= 60 - case "h": - val *= 60 * 60 - case "d": - val *= 60 * 60 * 24 - case "kB": - val *= math.Pow(2, 10) - case "MB": - val *= math.Pow(2, 20) - case "GB": - val *= math.Pow(2, 30) - case "TB": - val *= math.Pow(2, 40) - case "1kB": - val *= math.Pow(2, 10) - case "2kB": - val *= math.Pow(2, 11) - case "4kB": - val *= math.Pow(2, 12) - case "8kB": - val *= math.Pow(2, 13) - case "16kB": - val *= math.Pow(2, 14) - case "32kB": - val *= math.Pow(2, 15) - case "64kB": - val *= math.Pow(2, 16) - case "16MB": - val *= math.Pow(2, 24) - case "32MB": - val *= math.Pow(2, 25) - case "64MB": - val *= math.Pow(2, 26) - } - - return -} diff --git a/cmd/postgres_exporter/pg_setting_test.go b/cmd/postgres_exporter/pg_setting_test.go deleted file mode 100644 index 6923da630..000000000 --- a/cmd/postgres_exporter/pg_setting_test.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !integration -// +build !integration - -package main - -import ( - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - . "gopkg.in/check.v1" -) - -type PgSettingSuite struct{} - -var _ = Suite(&PgSettingSuite{}) - -var fixtures = []fixture{ - { - p: pgSetting{ - name: "seconds_fixture_metric", - setting: "5", - unit: "s", - shortDesc: "Foo foo foo", - vartype: "integer", - }, - n: normalised{ - val: 5, - unit: "seconds", - err: "", - }, - d: `Desc{fqName: "pg_settings_seconds_fixture_metric_seconds", help: "Server Parameter: seconds_fixture_metric [Units converted to seconds.]", constLabels: {}, variableLabels: {}}`, - v: 5, - }, - { - p: pgSetting{ - name: "milliseconds_fixture_metric", - setting: "5000", - unit: "ms", - shortDesc: "Foo foo foo", - vartype: "integer", - }, - n: normalised{ - val: 5, - unit: "seconds", - err: "", - }, - d: `Desc{fqName: "pg_settings_milliseconds_fixture_metric_seconds", help: "Server Parameter: milliseconds_fixture_metric [Units converted to seconds.]", constLabels: {}, variableLabels: {}}`, - v: 5, - }, - { - p: pgSetting{ - name: "eight_kb_fixture_metric", - setting: "17", - unit: "8kB", - shortDesc: "Foo foo foo", - vartype: "integer", - }, - n: normalised{ - val: 139264, - unit: "bytes", - err: "", - }, - d: `Desc{fqName: "pg_settings_eight_kb_fixture_metric_bytes", help: "Server Parameter: eight_kb_fixture_metric [Units converted to bytes.]", constLabels: {}, variableLabels: {}}`, - v: 139264, - }, - { - p: pgSetting{ - name: "16_kb_real_fixture_metric", - setting: "3.0", - unit: "16kB", - shortDesc: "Foo foo foo", - vartype: "real", - }, - n: normalised{ - val: 49152, - unit: "bytes", - err: "", - }, - d: `Desc{fqName: "pg_settings_16_kb_real_fixture_metric_bytes", help: "Server Parameter: 16_kb_real_fixture_metric [Units converted to bytes.]", constLabels: {}, variableLabels: {}}`, - v: 49152, - }, - { - p: pgSetting{ - name: "16_mb_real_fixture_metric", - setting: "3.0", - unit: "16MB", - shortDesc: "Foo foo foo", - vartype: "real", - }, - n: normalised{ - val: 5.0331648e+07, - unit: "bytes", - err: "", - }, - d: `Desc{fqName: "pg_settings_16_mb_real_fixture_metric_bytes", help: "Server Parameter: 16_mb_real_fixture_metric [Units converted to bytes.]", constLabels: {}, variableLabels: {}}`, - v: 5.0331648e+07, - }, - { - p: pgSetting{ - name: "32_mb_real_fixture_metric", - setting: "3.0", - unit: "32MB", - shortDesc: "Foo foo foo", - vartype: "real", - }, - n: normalised{ - val: 1.00663296e+08, - unit: "bytes", - err: "", - }, - d: `Desc{fqName: "pg_settings_32_mb_real_fixture_metric_bytes", help: "Server Parameter: 32_mb_real_fixture_metric [Units converted to bytes.]", constLabels: {}, variableLabels: {}}`, - v: 1.00663296e+08, - }, - { - p: pgSetting{ - name: "64_mb_real_fixture_metric", - setting: "3.0", - unit: "64MB", - shortDesc: "Foo foo foo", - vartype: "real", - }, - n: normalised{ - val: 2.01326592e+08, - unit: "bytes", - err: "", - }, - d: `Desc{fqName: "pg_settings_64_mb_real_fixture_metric_bytes", help: "Server Parameter: 64_mb_real_fixture_metric [Units 
converted to bytes.]", constLabels: {}, variableLabels: {}}`, - v: 2.01326592e+08, - }, - { - p: pgSetting{ - name: "bool_on_fixture_metric", - setting: "on", - unit: "", - shortDesc: "Foo foo foo", - vartype: "bool", - }, - n: normalised{ - val: 1, - unit: "", - err: "", - }, - d: `Desc{fqName: "pg_settings_bool_on_fixture_metric", help: "Server Parameter: bool_on_fixture_metric", constLabels: {}, variableLabels: {}}`, - v: 1, - }, - { - p: pgSetting{ - name: "bool_off_fixture_metric", - setting: "off", - unit: "", - shortDesc: "Foo foo foo", - vartype: "bool", - }, - n: normalised{ - val: 0, - unit: "", - err: "", - }, - d: `Desc{fqName: "pg_settings_bool_off_fixture_metric", help: "Server Parameter: bool_off_fixture_metric", constLabels: {}, variableLabels: {}}`, - v: 0, - }, - { - p: pgSetting{ - name: "special_minus_one_value", - setting: "-1", - unit: "d", - shortDesc: "foo foo foo", - vartype: "integer", - }, - n: normalised{ - val: -1, - unit: "seconds", - err: "", - }, - d: `Desc{fqName: "pg_settings_special_minus_one_value_seconds", help: "Server Parameter: special_minus_one_value [Units converted to seconds.]", constLabels: {}, variableLabels: {}}`, - v: -1, - }, - { - p: pgSetting{ - name: "rds.rds_superuser_reserved_connections", - setting: "2", - unit: "", - shortDesc: "Sets the number of connection slots reserved for rds_superusers.", - vartype: "integer", - }, - n: normalised{ - val: 2, - unit: "", - err: "", - }, - d: `Desc{fqName: "pg_settings_rds_rds_superuser_reserved_connections", help: "Server Parameter: rds.rds_superuser_reserved_connections", constLabels: {}, variableLabels: {}}`, - v: 2, - }, - { - p: pgSetting{ - name: "unknown_unit", - setting: "10", - unit: "nonexistent", - shortDesc: "foo foo foo", - vartype: "integer", - }, - n: normalised{ - val: 10, - unit: "", - err: `unknown unit for runtime variable: "nonexistent"`, - }, - }, -} - -func (s *PgSettingSuite) TestNormaliseUnit(c *C) { - for _, f := range fixtures { - switch f.p.vartype { - case "integer", "real": - val, unit, err := f.p.normaliseUnit() - - c.Check(val, Equals, f.n.val) - c.Check(unit, Equals, f.n.unit) - - if err == nil { - c.Check("", Equals, f.n.err) - } else { - c.Check(err.Error(), Equals, f.n.err) - } - } - } -} - -func (s *PgSettingSuite) TestMetric(c *C) { - defer func() { - if r := recover(); r != nil { - if r.(error).Error() != `unknown unit for runtime variable: "nonexistent"` { - panic(r) - } - } - }() - - for _, f := range fixtures { - d := &dto.Metric{} - m := f.p.metric(prometheus.Labels{}) - m.Write(d) // nolint: errcheck - - c.Check(m.Desc().String(), Equals, f.d) - c.Check(d.GetGauge().GetValue(), Equals, f.v) - } -} - -type normalised struct { - val float64 - unit string - err string -} - -type fixture struct { - p pgSetting - n normalised - d string - v float64 -} diff --git a/cmd/postgres_exporter/postgres_exporter.go b/cmd/postgres_exporter/postgres_exporter.go deleted file mode 100644 index a76479611..000000000 --- a/cmd/postgres_exporter/postgres_exporter.go +++ /dev/null @@ -1,705 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "crypto/sha256" - "database/sql" - "errors" - "fmt" - "math" - "os" - "regexp" - "strings" - "time" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -// ColumnUsage should be one of several enum values which describe how a -// queried row is to be converted to a Prometheus metric. -type ColumnUsage int - -const ( - // DISCARD ignores a column - DISCARD ColumnUsage = iota - // LABEL identifies a column as a label - LABEL ColumnUsage = iota - // COUNTER identifies a column as a counter - COUNTER ColumnUsage = iota - // GAUGE identifies a column as a gauge - GAUGE ColumnUsage = iota - // MAPPEDMETRIC identifies a column as a mapping of text values - MAPPEDMETRIC ColumnUsage = iota - // DURATION identifies a column as a text duration (and converted to milliseconds) - DURATION ColumnUsage = iota - // HISTOGRAM identifies a column as a histogram - HISTOGRAM ColumnUsage = iota -) - -// UnmarshalYAML implements the yaml.Unmarshaller interface. -func (cu *ColumnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error { - var value string - if err := unmarshal(&value); err != nil { - return err - } - - columnUsage, err := stringToColumnUsage(value) - if err != nil { - return err - } - - *cu = columnUsage - return nil -} - -// MappingOptions is a copy of ColumnMapping used only for parsing -type MappingOptions struct { - Usage string `yaml:"usage"` - Description string `yaml:"description"` - Mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC - SupportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). -} - -// Mapping represents a set of MappingOptions -type Mapping map[string]MappingOptions - -// Regex used to get the "short-version" from the postgres version field. -var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) -var lowestSupportedVersion = semver.MustParse("9.1.0") - -// Parses the version of postgres into the short version string we can use to -// match behaviors. -func parseVersion(versionString string) (semver.Version, error) { - submatches := versionRegex.FindStringSubmatch(versionString) - if len(submatches) > 1 { - return semver.ParseTolerant(submatches[1]) - } - return semver.Version{}, - errors.New(fmt.Sprintln("Could not find a postgres version in string:", versionString)) -} - -// ColumnMapping is the user-friendly representation of a prometheus descriptor map -type ColumnMapping struct { - usage ColumnUsage `yaml:"usage"` - description string `yaml:"description"` - mapping map[string]float64 `yaml:"metric_mapping"` // Optional column mapping for MAPPEDMETRIC - supportedVersions semver.Range `yaml:"pg_version"` // Semantic version ranges which are supported. Unsupported columns are not queried (internally converted to DISCARD). 
-} - -// UnmarshalYAML implements yaml.Unmarshaller -func (cm *ColumnMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain ColumnMapping - return unmarshal((*plain)(cm)) -} - -// intermediateMetricMap holds the partially loaded metric map parsing. -// This is mainly so we can parse cacheSeconds around. -type intermediateMetricMap struct { - columnMappings map[string]ColumnMapping - master bool - cacheSeconds uint64 -} - -// MetricMapNamespace groups metric maps under a shared set of labels. -type MetricMapNamespace struct { - labels []string // Label names for this namespace - columnMappings map[string]MetricMap // Column mappings in this namespace - master bool // Call query only for master database - cacheSeconds uint64 // Number of seconds this metric namespace can be cached. 0 disables. -} - -// MetricMap stores the prometheus metric description which a given column will -// be mapped to by the collector -type MetricMap struct { - discard bool // Should metric be discarded during mapping? - histogram bool // Should metric be treated as a histogram? - vtype prometheus.ValueType // Prometheus valuetype - desc *prometheus.Desc // Prometheus descriptor - conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64 -} - -// ErrorConnectToServer is a connection to PgSQL server error -type ErrorConnectToServer struct { - Msg string -} - -// Error returns error -func (e *ErrorConnectToServer) Error() string { - return e.Msg -} - -// TODO: revisit this with the semver system -func dumpMaps() { - // TODO: make this function part of the exporter - for name, cmap := range builtinMetricMaps { - query, ok := queryOverrides[name] - if !ok { - fmt.Println(name) - } else { - for _, queryOverride := range query { - fmt.Println(name, queryOverride.versionRange, queryOverride.query) - } - } - - for column, details := range cmap.columnMappings { - fmt.Printf(" %-40s %v\n", column, details) - } - fmt.Println() - } -} - -var builtinMetricMaps = map[string]intermediateMetricMap{ - "pg_stat_database_conflicts": { - map[string]ColumnMapping{ - "datid": {LABEL, "OID of a database", nil, nil}, - "datname": {LABEL, "Name of this database", nil, nil}, - "confl_tablespace": {COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil, nil}, - "confl_lock": {COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil, nil}, - "confl_snapshot": {COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil, nil}, - "confl_bufferpin": {COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil, nil}, - "confl_deadlock": {COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil, nil}, - }, - true, - 0, - }, - "pg_stat_replication": { - map[string]ColumnMapping{ - "procpid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange("<9.2.0")}, - "pid": {DISCARD, "Process ID of a WAL sender process", nil, semver.MustParseRange(">=9.2.0")}, - "usesysid": {DISCARD, "OID of the user logged into this WAL sender process", nil, nil}, - "usename": {DISCARD, "Name of the user logged into this WAL sender process", nil, nil}, - "application_name": {LABEL, "Name of the application that is connected to this WAL sender", nil, nil}, - "client_addr": {LABEL, "IP address of the client connected to this WAL sender. 
If this field is null, it indicates that the client is connected via a Unix socket on the server machine.", nil, nil}, - "client_hostname": {DISCARD, "Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when log_hostname is enabled.", nil, nil}, - "client_port": {DISCARD, "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used", nil, nil}, - "backend_start": {DISCARD, "with time zone Time when this process was started, i.e., when the client connected to this WAL sender", nil, nil}, - "backend_xmin": {DISCARD, "The current backend's xmin horizon.", nil, nil}, - "state": {LABEL, "Current WAL sender state", nil, nil}, - "sent_location": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange("<10.0.0")}, - "write_location": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, - "flush_location": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange("<10.0.0")}, - "replay_location": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange("<10.0.0")}, - "sent_lsn": {DISCARD, "Last transaction log position sent on this connection", nil, semver.MustParseRange(">=10.0.0")}, - "write_lsn": {DISCARD, "Last transaction log position written to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, - "flush_lsn": {DISCARD, "Last transaction log position flushed to disk by this standby server", nil, semver.MustParseRange(">=10.0.0")}, - "replay_lsn": {DISCARD, "Last transaction log position replayed into the database on this standby server", nil, semver.MustParseRange(">=10.0.0")}, - "sync_priority": {DISCARD, "Priority of this standby server for being chosen as the synchronous standby", nil, nil}, - "sync_state": {DISCARD, "Synchronous state of this standby server", nil, nil}, - "slot_name": {LABEL, "A unique, cluster-wide identifier for the replication slot", nil, semver.MustParseRange(">=9.2.0")}, - "plugin": {DISCARD, "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots", nil, nil}, - "slot_type": {DISCARD, "The slot type - physical or logical", nil, nil}, - "datoid": {DISCARD, "The OID of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, - "database": {DISCARD, "The name of the database this slot is associated with, or null. Only logical slots have an associated database", nil, nil}, - "active": {DISCARD, "True if this slot is currently actively being used", nil, nil}, - "active_pid": {DISCARD, "Process ID of a WAL sender process", nil, nil}, - "xmin": {DISCARD, "The oldest transaction that this slot needs the database to retain. VACUUM cannot remove tuples deleted by any later transaction", nil, nil}, - "catalog_xmin": {DISCARD, "The oldest transaction affecting the system catalogs that this slot needs the database to retain. 
VACUUM cannot remove catalog tuples deleted by any later transaction", nil, nil}, - "restart_lsn": {DISCARD, "The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be automatically removed during checkpoints", nil, nil}, - "pg_current_xlog_location": {DISCARD, "pg_current_xlog_location", nil, nil}, - "pg_current_wal_lsn": {DISCARD, "pg_current_xlog_location", nil, semver.MustParseRange(">=10.0.0")}, - "pg_current_wal_lsn_bytes": {GAUGE, "WAL position in bytes", nil, semver.MustParseRange(">=10.0.0")}, - "pg_xlog_location_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=9.2.0 <10.0.0")}, - "pg_wal_lsn_diff": {GAUGE, "Lag in bytes between master and slave", nil, semver.MustParseRange(">=10.0.0")}, - "confirmed_flush_lsn": {DISCARD, "LSN position a consumer of a slot has confirmed flushing the data received", nil, nil}, - "write_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, - "flush_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level remote_flush incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, - "replay_lag": {DISCARD, "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. 
This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.", nil, semver.MustParseRange(">=10.0.0")}, - }, - true, - 0, - }, - "pg_replication_slots": { - map[string]ColumnMapping{ - "slot_name": {LABEL, "Name of the replication slot", nil, nil}, - "database": {LABEL, "Name of the database", nil, nil}, - "active": {GAUGE, "Flag indicating if the slot is active", nil, nil}, - "pg_wal_lsn_diff": {GAUGE, "Replication lag in bytes", nil, nil}, - }, - true, - 0, - }, - "pg_stat_archiver": { - map[string]ColumnMapping{ - "archived_count": {COUNTER, "Number of WAL files that have been successfully archived", nil, nil}, - "last_archived_wal": {DISCARD, "Name of the last WAL file successfully archived", nil, nil}, - "last_archived_time": {DISCARD, "Time of the last successful archive operation", nil, nil}, - "failed_count": {COUNTER, "Number of failed attempts for archiving WAL files", nil, nil}, - "last_failed_wal": {DISCARD, "Name of the WAL file of the last failed archival operation", nil, nil}, - "last_failed_time": {DISCARD, "Time of the last failed archival operation", nil, nil}, - "stats_reset": {DISCARD, "Time at which these statistics were last reset", nil, nil}, - "last_archive_age": {GAUGE, "Time in seconds since last WAL segment was successfully archived", nil, nil}, - }, - true, - 0, - }, - "pg_stat_activity": { - map[string]ColumnMapping{ - "datname": {LABEL, "Name of this database", nil, nil}, - "state": {LABEL, "connection state", nil, semver.MustParseRange(">=9.2.0")}, - "usename": {LABEL, "connection usename", nil, nil}, - "application_name": {LABEL, "connection application_name", nil, nil}, - "backend_type": {LABEL, "connection backend_type", nil, nil}, - "wait_event_type": {LABEL, "connection wait_event_type", nil, nil}, - "wait_event": {LABEL, "connection wait_event", nil, nil}, - "count": {GAUGE, "number of connections in this state", nil, nil}, - "max_tx_duration": {GAUGE, "max duration in seconds any active transaction has been running", nil, nil}, - }, - true, - 0, - }, -} - -// Turn the MetricMap column mapping into a prometheus descriptor mapping. -func makeDescMap(pgVersion semver.Version, serverLabels prometheus.Labels, metricMaps map[string]intermediateMetricMap) map[string]MetricMapNamespace { - var metricMap = make(map[string]MetricMapNamespace) - - for namespace, intermediateMappings := range metricMaps { - thisMap := make(map[string]MetricMap) - - namespace = strings.Replace(namespace, "pg", *metricPrefix, 1) - - // Get the constant labels - var variableLabels []string - for columnName, columnMapping := range intermediateMappings.columnMappings { - if columnMapping.usage == LABEL { - variableLabels = append(variableLabels, columnName) - } - } - - for columnName, columnMapping := range intermediateMappings.columnMappings { - // Check column version compatibility for the current map - // Force to discard if not compatible. - if columnMapping.supportedVersions != nil { - if !columnMapping.supportedVersions(pgVersion) { - // It's very useful to be able to see what columns are being - // rejected. - logger.Debug("Column is being forced to discard due to version incompatibility", "column", columnName) - thisMap[columnName] = MetricMap{ - discard: true, - conversion: func(_ interface{}) (float64, bool) { - return math.NaN(), true - }, - } - continue - } - } - - // Determine how to convert the column based on its usage. 
- // nolint: dupl - switch columnMapping.usage { - case DISCARD, LABEL: - thisMap[columnName] = MetricMap{ - discard: true, - conversion: func(_ interface{}) (float64, bool) { - return math.NaN(), true - }, - } - case COUNTER: - thisMap[columnName] = MetricMap{ - vtype: prometheus.CounterValue, - desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), - conversion: func(in interface{}) (float64, bool) { - return dbToFloat64(in) - }, - } - case GAUGE: - thisMap[columnName] = MetricMap{ - vtype: prometheus.GaugeValue, - desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), - conversion: func(in interface{}) (float64, bool) { - return dbToFloat64(in) - }, - } - case HISTOGRAM: - thisMap[columnName] = MetricMap{ - histogram: true, - vtype: prometheus.UntypedValue, - desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), - conversion: func(in interface{}) (float64, bool) { - return dbToFloat64(in) - }, - } - thisMap[columnName+"_bucket"] = MetricMap{ - histogram: true, - discard: true, - } - thisMap[columnName+"_sum"] = MetricMap{ - histogram: true, - discard: true, - } - thisMap[columnName+"_count"] = MetricMap{ - histogram: true, - discard: true, - } - case MAPPEDMETRIC: - thisMap[columnName] = MetricMap{ - vtype: prometheus.GaugeValue, - desc: prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, variableLabels, serverLabels), - conversion: func(in interface{}) (float64, bool) { - text, ok := in.(string) - if !ok { - return math.NaN(), false - } - - val, ok := columnMapping.mapping[text] - if !ok { - return math.NaN(), false - } - return val, true - }, - } - case DURATION: - thisMap[columnName] = MetricMap{ - vtype: prometheus.GaugeValue, - desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, variableLabels, serverLabels), - conversion: func(in interface{}) (float64, bool) { - var durationString string - switch t := in.(type) { - case []byte: - durationString = string(t) - case string: - durationString = t - default: - logger.Error("Duration conversion metric was not a string") - return math.NaN(), false - } - - if durationString == "-1" { - return math.NaN(), false - } - - d, err := time.ParseDuration(durationString) - if err != nil { - logger.Error("Failed converting result to metric", "column", columnName, "in", in, "err", err) - return math.NaN(), false - } - return float64(d / time.Millisecond), true - }, - } - } - } - - metricMap[namespace] = MetricMapNamespace{variableLabels, thisMap, intermediateMappings.master, intermediateMappings.cacheSeconds} - } - - return metricMap -} - -type cachedMetrics struct { - metrics []prometheus.Metric - lastScrape time.Time -} - -// Exporter collects Postgres metrics. It implements prometheus.Collector. -type Exporter struct { - // Holds a reference to the built-in column mappings. Currently this is for testing purposes - // only, since it just points to the global.
- builtinMetricMaps map[string]intermediateMetricMap - - disableDefaultMetrics, disableSettingsMetrics, autoDiscoverDatabases bool - - excludeDatabases []string - includeDatabases []string - dsn []string - userQueriesPath string - constantLabels prometheus.Labels - duration prometheus.Gauge - error prometheus.Gauge - psqlUp prometheus.Gauge - userQueriesError *prometheus.GaugeVec - totalScrapes prometheus.Counter - - // servers are used to allow re-using the DB connection between scrapes. - // servers contains metrics map and query overrides. - servers *Servers -} - -// ExporterOpt configures Exporter. -type ExporterOpt func(*Exporter) - -// DisableDefaultMetrics configures default metrics export. -func DisableDefaultMetrics(b bool) ExporterOpt { - return func(e *Exporter) { - e.disableDefaultMetrics = b - } -} - -// DisableSettingsMetrics configures pg_settings export. -func DisableSettingsMetrics(b bool) ExporterOpt { - return func(e *Exporter) { - e.disableSettingsMetrics = b - } -} - -// AutoDiscoverDatabases allows scraping all databases on a database server. -func AutoDiscoverDatabases(b bool) ExporterOpt { - return func(e *Exporter) { - e.autoDiscoverDatabases = b - } -} - -// ExcludeDatabases allows to filter out result from AutoDiscoverDatabases -func ExcludeDatabases(s []string) ExporterOpt { - return func(e *Exporter) { - e.excludeDatabases = s - } -} - -// IncludeDatabases allows to filter result from AutoDiscoverDatabases -func IncludeDatabases(s string) ExporterOpt { - return func(e *Exporter) { - if len(s) > 0 { - e.includeDatabases = strings.Split(s, ",") - } - } -} - -// WithUserQueriesPath configures user's queries path. -func WithUserQueriesPath(p string) ExporterOpt { - return func(e *Exporter) { - e.userQueriesPath = p - } -} - -// WithConstantLabels configures constant labels. -func WithConstantLabels(s string) ExporterOpt { - return func(e *Exporter) { - e.constantLabels = parseConstLabels(s) - } -} - -func parseConstLabels(s string) prometheus.Labels { - labels := make(prometheus.Labels) - - s = strings.TrimSpace(s) - if len(s) == 0 { - return labels - } - - parts := strings.Split(s, ",") - for _, p := range parts { - keyValue := strings.Split(strings.TrimSpace(p), "=") - if len(keyValue) != 2 { - logger.Error(`Wrong constant labels format, should be "key=value"`, "input", p) - continue - } - key := strings.TrimSpace(keyValue[0]) - value := strings.TrimSpace(keyValue[1]) - if key == "" || value == "" { - continue - } - labels[key] = value - } - - return labels -} - -// NewExporter returns a new PostgreSQL exporter for the provided DSN. 
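To make the parseConstLabels behaviour above concrete, here is a small sketch of its input/output contract (assuming it runs inside this package; the label values are invented for the example):

```go
// Well-formed pairs are collected; malformed ones are logged and skipped.
labels := parseConstLabels("env=prod, region=eu-west-1, bogus, team=")
// labels == prometheus.Labels{"env": "prod", "region": "eu-west-1"}
// "bogus" has no '=' and is skipped with an error log; "team=" has an
// empty value and is dropped silently.
```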
-func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter { - e := &Exporter{ - dsn: dsn, - builtinMetricMaps: builtinMetricMaps, - } - - for _, opt := range opts { - opt(e) - } - - e.setupInternalMetrics() - e.servers = NewServers(ServerWithLabels(e.constantLabels)) - - return e -} - -func (e *Exporter) setupInternalMetrics() { - e.duration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: exporter, - Name: "last_scrape_duration_seconds", - Help: "Duration of the last scrape of metrics from PostgreSQL.", - ConstLabels: e.constantLabels, - }) - e.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: exporter, - Name: "scrapes_total", - Help: "Total number of times PostgreSQL was scraped for metrics.", - ConstLabels: e.constantLabels, - }) - e.error = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: exporter, - Name: "last_scrape_error", - Help: "Whether the last scrape of metrics from PostgreSQL resulted in an error (1 for error, 0 for success).", - ConstLabels: e.constantLabels, - }) - e.psqlUp = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "up", - Help: "Whether the last scrape of metrics from PostgreSQL was able to connect to the server (1 for yes, 0 for no).", - ConstLabels: e.constantLabels, - }) - e.userQueriesError = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: exporter, - Name: "user_queries_load_error", - Help: "Whether there was an error loading and parsing the user queries file (1 for error, 0 for success).", - ConstLabels: e.constantLabels, - }, []string{"filename", "hashsum"}) -} - -// Describe implements prometheus.Collector. -func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { -} - -// Collect implements prometheus.Collector. -func (e *Exporter) Collect(ch chan<- prometheus.Metric) { - e.scrape(ch) - - ch <- e.duration - ch <- e.totalScrapes - ch <- e.error - ch <- e.psqlUp - e.userQueriesError.Collect(ch) -} - -func newDesc(subsystem, name, help string, labels prometheus.Labels) *prometheus.Desc { - return prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, name), - help, nil, labels, - ) -} - -func checkPostgresVersion(db *sql.DB, server string) (semver.Version, string, error) { - logger.Debug("Querying PostgreSQL version", "server", server) - versionRow := db.QueryRow("SELECT version();") - var versionString string - err := versionRow.Scan(&versionString) - if err != nil { - return semver.Version{}, "", fmt.Errorf("Error scanning version string on %q: %v", server, err) - } - semanticVersion, err := parseVersion(versionString) - if err != nil { - return semver.Version{}, "", fmt.Errorf("Error parsing version string on %q: %v", server, err) - } - - return semanticVersion, versionString, nil -} - -// Check and update the exporter's query maps if the version has changed. -func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, server *Server) error { - semanticVersion, versionString, err := checkPostgresVersion(server.db, server.String()) - if err != nil { - return fmt.Errorf("Error fetching version string on %q: %v", server, err) - } - - if !e.disableDefaultMetrics && semanticVersion.LT(lowestSupportedVersion) { - logger.Warn("PostgreSQL version is lower than our lowest supported version", "server", server, "version", semanticVersion, "lowest_supported_version", lowestSupportedVersion) - } - - // Check if semantic version changed and recalculate maps if needed.
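As an aside before the version-change handling below: a minimal sketch of how the pieces defined so far are typically wired together. The DSN, constant labels, and listen address are placeholders, and this compresses what the package's real entry point does:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func runExporter() error {
	dsn := []string{"postgresql://postgres:password@localhost:5432/postgres?sslmode=disable"}

	exporter := NewExporter(dsn,
		DisableDefaultMetrics(false),
		WithConstantLabels("cluster=demo"),
	)
	defer exporter.servers.Close()

	// Exporter implements prometheus.Collector via Describe/Collect above,
	// so registering it is all that is needed to expose its metrics.
	prometheus.MustRegister(exporter)

	http.Handle("/metrics", promhttp.Handler())
	return http.ListenAndServe(":9187", nil)
}
```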
- if semanticVersion.NE(server.lastMapVersion) || server.metricMap == nil { - logger.Info("Semantic version changed", "server", server, "from", server.lastMapVersion, "to", semanticVersion) - server.mappingMtx.Lock() - - // Get Default Metrics only for master database - if !e.disableDefaultMetrics && server.master { - server.metricMap = makeDescMap(semanticVersion, server.labels, e.builtinMetricMaps) - server.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides) - } else { - server.metricMap = make(map[string]MetricMapNamespace) - server.queryOverrides = make(map[string]string) - } - - server.lastMapVersion = semanticVersion - - if e.userQueriesPath != "" { - // Clear the metric while a reload is happening - e.userQueriesError.Reset() - - // Calculate the hashsum of the user queries file - userQueriesData, err := os.ReadFile(e.userQueriesPath) - if err != nil { - logger.Error("Failed to reload user queries", "path", e.userQueriesPath, "err", err) - e.userQueriesError.WithLabelValues(e.userQueriesPath, "").Set(1) - } else { - hashsumStr := fmt.Sprintf("%x", sha256.Sum256(userQueriesData)) - - if err := addQueries(userQueriesData, semanticVersion, server); err != nil { - logger.Error("Failed to reload user queries", "path", e.userQueriesPath, "err", err) - e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(1) - } else { - // Mark user queries as successfully loaded - e.userQueriesError.WithLabelValues(e.userQueriesPath, hashsumStr).Set(0) - } - } - } - - server.mappingMtx.Unlock() - } - - // Output the version as a special metric only for master database - versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName), - "Version string as reported by postgres", []string{"version", "short_version"}, server.labels) - - if !e.disableDefaultMetrics && server.master { - ch <- prometheus.MustNewConstMetric(versionDesc, - prometheus.UntypedValue, 1, versionString, semanticVersion.String()) - } - return nil -} - -func (e *Exporter) scrape(ch chan<- prometheus.Metric) { - defer func(begun time.Time) { - e.duration.Set(time.Since(begun).Seconds()) - }(time.Now()) - - e.totalScrapes.Inc() - - dsns := e.dsn - if e.autoDiscoverDatabases { - dsns = e.discoverDatabaseDSNs() - } - - var errorsCount int - var connectionErrorsCount int - - for _, dsn := range dsns { - if err := e.scrapeDSN(ch, dsn); err != nil { - errorsCount++ - - logger.Error("error scraping dsn", "err", err, "dsn", loggableDSN(dsn)) - - if _, ok := err.(*ErrorConnectToServer); ok { - connectionErrorsCount++ - } - } - } - - switch { - case connectionErrorsCount >= len(dsns): - e.psqlUp.Set(0) - default: - e.psqlUp.Set(1) // Didn't fail, can mark connection as up for this scrape. - } - - switch errorsCount { - case 0: - e.error.Set(0) - default: - e.error.Set(1) - } -} diff --git a/cmd/postgres_exporter/postgres_exporter_integration_test.go b/cmd/postgres_exporter/postgres_exporter_integration_test.go deleted file mode 100644 index 7043c1e88..000000000 --- a/cmd/postgres_exporter/postgres_exporter_integration_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These are specialized integration tests. We only build them on demand, since -// they require an external docker environment that takes a lot of additional -// work to keep working. -//go:build integration -// +build integration - -package main - -import ( - "fmt" - "os" - "strings" - "testing" - - _ "github.com/lib/pq" - "github.com/prometheus/client_golang/prometheus" - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -type IntegrationSuite struct { - e *Exporter -} - -var _ = Suite(&IntegrationSuite{}) - -func (s *IntegrationSuite) SetUpSuite(c *C) { - dsn := os.Getenv("DATA_SOURCE_NAME") - c.Assert(dsn, Not(Equals), "") - - exporter := NewExporter(strings.Split(dsn, ",")) - c.Assert(exporter, NotNil) - // Assign the exporter to the suite - s.e = exporter - - prometheus.MustRegister(exporter) -} - -// TODO: it would be nice if this didn't mostly just recreate the scrape function -func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) { - // Setup a dummy channel to consume metrics - ch := make(chan prometheus.Metric, 100) - go func() { - for range ch { - } - }() - - for _, dsn := range s.e.dsn { - // Open a database connection - server, err := NewServer(dsn) - c.Assert(server, NotNil) - c.Assert(err, IsNil) - - // Do a version update - err = s.e.checkMapVersions(ch, server) - c.Assert(err, IsNil) - - err = querySettings(ch, server) - if !c.Check(err, Equals, nil) { - fmt.Println("## ERRORS FOUND") - fmt.Println(err) - } - - // This should never happen in our test cases. - errMap := queryNamespaceMappings(ch, server) - if !c.Check(len(errMap), Equals, 0) { - fmt.Println("## NAMESPACE ERRORS FOUND") - for namespace, err := range errMap { - fmt.Println(namespace, ":", err) - } - } - server.Close() - } -} - -// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash -// the exporter. Related to https://github.com/prometheus-community/postgres_exporter/issues/93 -// although not a replication of the scenario. -func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) { - // Setup a dummy channel to consume metrics - ch := make(chan prometheus.Metric, 100) - go func() { - for range ch { - } - }() - - // Send a bad DSN - exporter := NewExporter([]string{"invalid dsn"}) - c.Assert(exporter, NotNil) - exporter.scrape(ch) - - // Send a DSN to a non-listening port. - exporter = NewExporter([]string{"postgresql://nothing:nothing@127.0.0.1:1/nothing"}) - c.Assert(exporter, NotNil) - exporter.scrape(ch) -} - -// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out -// of an exporter to test that the default metric handling code can cope with unknown columns.
-func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) { - // Setup a dummy channel to consume metrics - ch := make(chan prometheus.Metric, 100) - go func() { - for range ch { - } - }() - - dsn := os.Getenv("DATA_SOURCE_NAME") - c.Assert(dsn, Not(Equals), "") - - exporter := NewExporter(strings.Split(dsn, ",")) - c.Assert(exporter, NotNil) - - // Convert the default maps into a list of empty maps. - emptyMaps := make(map[string]intermediateMetricMap, 0) - for k := range exporter.builtinMetricMaps { - emptyMaps[k] = intermediateMetricMap{ - map[string]ColumnMapping{}, - true, - 0, - } - } - exporter.builtinMetricMaps = emptyMaps - - // scrape the exporter and make sure it works - exporter.scrape(ch) -} - -// TestExtendQueriesDoesntCrash tests that specifying extend.query-path doesn't -// crash. -func (s *IntegrationSuite) TestExtendQueriesDoesntCrash(c *C) { - // Setup a dummy channel to consume metrics - ch := make(chan prometheus.Metric, 100) - go func() { - for range ch { - } - }() - - dsn := os.Getenv("DATA_SOURCE_NAME") - c.Assert(dsn, Not(Equals), "") - - exporter := NewExporter( - strings.Split(dsn, ","), - WithUserQueriesPath("../user_queries_test.yaml"), - ) - c.Assert(exporter, NotNil) - - // scrape the exporter and make sure it works - exporter.scrape(ch) -} - -func (s *IntegrationSuite) TestAutoDiscoverDatabases(c *C) { - dsn := os.Getenv("DATA_SOURCE_NAME") - - exporter := NewExporter( - strings.Split(dsn, ","), - ) - c.Assert(exporter, NotNil) - - dsns := exporter.discoverDatabaseDSNs() - - c.Assert(len(dsns), Equals, 2) -} diff --git a/cmd/postgres_exporter/postgres_exporter_test.go b/cmd/postgres_exporter/postgres_exporter_test.go deleted file mode 100644 index 0f36febf4..000000000 --- a/cmd/postgres_exporter/postgres_exporter_test.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !integration -// +build !integration - -package main - -import ( - "math" - "os" - "reflect" - "testing" - "time" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" - . "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
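A note on running the suites above before the unit tests begin: the integration tests are driven entirely by the DATA_SOURCE_NAME environment variable, and since NewExporter takes a slice of DSNs, the value is split on commas. A hedged sketch of preparing such an environment (host names and credentials are invented):

```go
package main

import (
	"os"
	"strings"
)

func exampleDataSources() []string {
	// Two PostgreSQL servers scraped by one exporter; the tests split the
	// environment value on "," exactly like this.
	os.Setenv("DATA_SOURCE_NAME",
		"postgresql://postgres:test@pg1:5432/test1?sslmode=disable,"+
			"postgresql://postgres:test@pg2:5432/test2?sslmode=disable")

	return strings.Split(os.Getenv("DATA_SOURCE_NAME"), ",")
}
```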
-func Test(t *testing.T) { TestingT(t) } - -type FunctionalSuite struct { -} - -var _ = Suite(&FunctionalSuite{}) - -func (s *FunctionalSuite) SetUpSuite(c *C) { - -} - -func (s *FunctionalSuite) TestSemanticVersionColumnDiscard(c *C) { - testMetricMap := map[string]intermediateMetricMap{ - "test_namespace": { - map[string]ColumnMapping{ - "metric_which_stays": {COUNTER, "This metric should not be eliminated", nil, nil}, - "metric_which_discards": {COUNTER, "This metric should be forced to DISCARD", nil, nil}, - }, - true, - 0, - }, - } - - { - // No metrics should be eliminated - resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, - Equals, - false, - ) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, - Equals, - false, - ) - } - - // nolint: dupl - { - // Update the map so the discard metric should be eliminated - discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] - discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") - testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = discardableMetric - - // Discard metric should be discarded - resultMap := makeDescMap(semver.MustParse("0.0.1"), prometheus.Labels{}, testMetricMap) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, - Equals, - false, - ) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, - Equals, - true, - ) - } - - // nolint: dupl - { - // Update the map so the discard metric should be kept but has a version - discardableMetric := testMetricMap["test_namespace"].columnMappings["metric_which_discards"] - discardableMetric.supportedVersions = semver.MustParseRange(">0.0.1") - testMetricMap["test_namespace"].columnMappings["metric_which_discards"] = discardableMetric - - // Discard metric should be kept this time, since the version matches - resultMap := makeDescMap(semver.MustParse("0.0.2"), prometheus.Labels{}, testMetricMap) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_stays"].discard, - Equals, - false, - ) - c.Check( - resultMap["test_namespace"].columnMappings["metric_which_discards"].discard, - Equals, - false, - ) - } -} - -// test read username and password from file -func (s *FunctionalSuite) TestEnvironmentSettingWithSecretsFiles(c *C) { - err := os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") - - err = os.Setenv("DATA_SOURCE_PASS_FILE", "./tests/userpass_file") - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_PASS_FILE") - - err = os.Setenv("DATA_SOURCE_URI", "localhost:5432/?sslmode=disable") - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_URI") - - var expected = "postgresql://custom_username$&+,%2F%3A;=%3F%40:custom_password$&+,%2F%3A;=%3F%40@localhost:5432/?sslmode=disable" - - dsn, err := getDataSources() - if err != nil { - c.Errorf("Unexpected error reading datasources") - } - - if len(dsn) == 0 { - c.Errorf("Expected one data source, zero found") - } - if dsn[0] != expected { - c.Errorf("Expected Username to be read from file.
Found=%v, expected=%v", dsn[0], expected) - } -} - -// test read DATA_SOURCE_NAME from environment -func (s *FunctionalSuite) TestEnvironmentSettingWithDsn(c *C) { - envDsn := "postgresql://user:password@localhost:5432/?sslmode=enabled" - err := os.Setenv("DATA_SOURCE_NAME", envDsn) - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_NAME") - - dsn, err := getDataSources() - if err != nil { - c.Errorf("Unexpected error reading datasources") - } - - if len(dsn) == 0 { - c.Errorf("Expected one data source, zero found") - } - if dsn[0] != envDsn { - c.Errorf("Expected DATA_SOURCE_NAME from environment to be used. Found=%v, expected=%v", dsn[0], envDsn) - } -} - -// test DATA_SOURCE_NAME is used even if username and password environment variables are set -func (s *FunctionalSuite) TestEnvironmentSettingWithDsnAndSecrets(c *C) { - envDsn := "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled" - err := os.Setenv("DATA_SOURCE_NAME", envDsn) - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_NAME") - - err = os.Setenv("DATA_SOURCE_USER_FILE", "./tests/username_file") - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_USER_FILE") - - err = os.Setenv("DATA_SOURCE_PASS", "envUserPass") - c.Assert(err, IsNil) - defer UnsetEnvironment(c, "DATA_SOURCE_PASS") - - dsn, err := getDataSources() - if err != nil { - c.Errorf("Unexpected error reading datasources") - } - - if len(dsn) == 0 { - c.Errorf("Expected one data source, zero found") - } - if dsn[0] != envDsn { - c.Errorf("Expected DATA_SOURCE_NAME from environment to be used. Found=%v, expected=%v", dsn[0], envDsn) - } -} - -func (s *FunctionalSuite) TestPostgresVersionParsing(c *C) { - type TestCase struct { - input string - expected string - } - - cases := []TestCase{ - { - input: "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc (Debian 6.3.0-18) 6.3.0 20170516, 64-bit", - expected: "10.1.0", - }, - { - input: "PostgreSQL 9.5.4, compiled by Visual C++ build 1800, 64-bit", - expected: "9.5.4", - }, - { - input: "EnterpriseDB 9.6.5.10 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16), 64-bit", - expected: "9.6.5", - }, - } - - for _, cs := range cases { - ver, err := parseVersion(cs.input) - c.Assert(err, IsNil) - c.Assert(ver.String(), Equals, cs.expected) - } -} - -func (s *FunctionalSuite) TestParseFingerprint(c *C) { - cases := []struct { - url string - fingerprint string - err string - }{ - { - url: "postgresql://userDsn:passwordDsn@localhost:55432/?sslmode=disabled", - fingerprint: "localhost:55432", - }, - { - url: "postgresql://userDsn:passwordDsn%3D@localhost:55432/?sslmode=disabled", - fingerprint: "localhost:55432", - }, - { - url: "port=1234", - fingerprint: "localhost:1234", - }, - { - url: "host=example", - fingerprint: "example:5432", - }, - { - url: "xyz", - err: "malformed dsn \"xyz\"", - }, - } - - for _, cs := range cases { - f, err := parseFingerprint(cs.url) - if cs.err == "" { - c.Assert(err, IsNil) - } else { - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, cs.err) - } - c.Assert(f, Equals, cs.fingerprint) - } -} - -func (s *FunctionalSuite) TestParseConstLabels(c *C) { - cases := []struct { - s string - labels prometheus.Labels - }{ - { - s: "a=b", - labels: prometheus.Labels{ - "a": "b", - }, - }, - { - s: "", - labels: prometheus.Labels{}, - }, - { - s: "a=b, c=d", - labels: prometheus.Labels{ - "a": "b", - "c": "d", - }, - }, - { - s: "a=b, xyz", - labels: prometheus.Labels{ - "a": "b", - }, - }, - { - s: "a=", - labels: prometheus.Labels{}, - }, - } - -
for _, cs := range cases { - labels := parseConstLabels(cs.s) - if !reflect.DeepEqual(labels, cs.labels) { - c.Fatalf("labels not equal (%v -> %v)", labels, cs.labels) - } - } -} - -func UnsetEnvironment(c *C, d string) { - err := os.Unsetenv(d) - c.Assert(err, IsNil) -} - -type isNaNChecker struct { - *CheckerInfo -} - -var IsNaN Checker = &isNaNChecker{ - &CheckerInfo{Name: "IsNaN", Params: []string{"value"}}, -} - -func (checker *isNaNChecker) Check(params []interface{}, names []string) (result bool, error string) { - param, ok := (params[0]).(float64) - if !ok { - return false, "obtained value type is not a float" - } - return math.IsNaN(param), "" -} - -// test boolean metric type gets converted to float -func (s *FunctionalSuite) TestBooleanConversionToValueAndString(c *C) { - - type TestCase struct { - input interface{} - expectedString string - expectedValue float64 - expectedCount uint64 - expectedOK bool - } - - cases := []TestCase{ - { - input: true, - expectedString: "true", - expectedValue: 1.0, - expectedCount: 1, - expectedOK: true, - }, - { - input: false, - expectedString: "false", - expectedValue: 0.0, - expectedCount: 0, - expectedOK: true, - }, - { - input: nil, - expectedString: "", - expectedValue: math.NaN(), - expectedCount: 0, - expectedOK: true, - }, - { - input: TestCase{}, - expectedString: "", - expectedValue: math.NaN(), - expectedCount: 0, - expectedOK: false, - }, - { - input: 123.0, - expectedString: "123", - expectedValue: 123.0, - expectedCount: 123, - expectedOK: true, - }, - { - input: "123", - expectedString: "123", - expectedValue: 123.0, - expectedCount: 123, - expectedOK: true, - }, - { - input: []byte("123"), - expectedString: "123", - expectedValue: 123.0, - expectedCount: 123, - expectedOK: true, - }, - { - input: time.Unix(1600000000, 0), - expectedString: "1600000000", - expectedValue: 1600000000.0, - expectedCount: 1600000000, - expectedOK: true, - }, - } - - for _, cs := range cases { - value, ok := dbToFloat64(cs.input) - if math.IsNaN(cs.expectedValue) { - c.Assert(value, IsNaN) - } else { - c.Assert(value, Equals, cs.expectedValue) - } - c.Assert(ok, Equals, cs.expectedOK) - - count, ok := dbToUint64(cs.input) - c.Assert(count, Equals, cs.expectedCount) - c.Assert(ok, Equals, cs.expectedOK) - - str, ok := dbToString(cs.input) - c.Assert(str, Equals, cs.expectedString) - c.Assert(ok, Equals, cs.expectedOK) - } -} - -func (s *FunctionalSuite) TestParseUserQueries(c *C) { - userQueriesData, err := os.ReadFile("./tests/user_queries_ok.yaml") - if err == nil { - metricMaps, newQueryOverrides, err := parseUserQueries(userQueriesData) - c.Assert(err, Equals, nil) - c.Assert(metricMaps, NotNil) - c.Assert(newQueryOverrides, NotNil) - - if len(metricMaps) != 2 { - c.Errorf("Expected 2 metrics from user file, got %d", len(metricMaps)) - } - } -} diff --git a/cmd/postgres_exporter/probe.go b/cmd/postgres_exporter/probe.go deleted file mode 100644 index 2c8c7652e..000000000 --- a/cmd/postgres_exporter/probe.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "log/slog" - "net/http" - - "github.com/prometheus-community/postgres_exporter/collector" - "github.com/prometheus-community/postgres_exporter/config" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -func handleProbe(logger *slog.Logger, excludeDatabases []string) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - conf := c.GetConfig() - params := r.URL.Query() - target := params.Get("target") - if target == "" { - http.Error(w, "target is required", http.StatusBadRequest) - return - } - var authModule config.AuthModule - authModuleName := params.Get("auth_module") - if authModuleName == "" { - logger.Info("no auth_module specified, using default") - } else { - var ok bool - authModule, ok = conf.AuthModules[authModuleName] - if !ok { - http.Error(w, fmt.Sprintf("auth_module %s not found", authModuleName), http.StatusBadRequest) - return - } - if authModule.UserPass.Username == "" || authModule.UserPass.Password == "" { - http.Error(w, fmt.Sprintf("auth_module %s has no username or password", authModuleName), http.StatusBadRequest) - return - } - } - - dsn, err := authModule.ConfigureTarget(target) - if err != nil { - logger.Error("failed to configure target", "err", err) - http.Error(w, fmt.Sprintf("could not configure dsn for target: %v", err), http.StatusBadRequest) - return - } - - // TODO(@sysadmind): Timeout - - tl := logger.With("target", target) - - registry := prometheus.NewRegistry() - - opts := []ExporterOpt{ - DisableDefaultMetrics(*disableDefaultMetrics), - DisableSettingsMetrics(*disableSettingsMetrics), - AutoDiscoverDatabases(*autoDiscoverDatabases), - WithUserQueriesPath(*queriesPath), - WithConstantLabels(*constantLabelsList), - ExcludeDatabases(excludeDatabases), - IncludeDatabases(*includeDatabases), - } - - dsns := []string{dsn.GetConnectionString()} - exporter := NewExporter(dsns, opts...) - defer func() { - exporter.servers.Close() - }() - registry.MustRegister(exporter) - - // Run the probe - pc, err := collector.NewProbeCollector(tl, excludeDatabases, registry, dsn) - if err != nil { - logger.Error("Error creating probe collector", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Cleanup underlying connections to prevent connection leaks - defer pc.Close() - - // TODO(@sysadmind): Remove the registry.MustRegister() call below and instead handle the collection here. That will allow - // for the passing of context, handling of timeouts, and more control over the collection. - // The current NewProbeCollector() implementation relies on the MustNewConstMetric() call to create the metrics which is not - // ideal to use without the registry.MustRegister() call. - _ = ctx - - registry.MustRegister(pc) - - // TODO check success, etc - h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{}) - h.ServeHTTP(w, r) - } -} diff --git a/cmd/postgres_exporter/queries.go b/cmd/postgres_exporter/queries.go deleted file mode 100644 index 80be72d54..000000000 --- a/cmd/postgres_exporter/queries.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
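Stepping back to the probe handler defined above, before queries.go begins: it implements the Prometheus multi-target pattern, where each scrape names its target (and optionally an auth module from the exporter's config file) in the query string. A hedged usage sketch; the port, DSN, and module name are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func probeOnce() (string, error) {
	target := url.QueryEscape("postgresql://localhost:5432/postgres?sslmode=disable")

	// auth_module must name a module from the exporter's config file;
	// "example" is a placeholder here.
	resp, err := http.Get(fmt.Sprintf(
		"http://localhost:9187/probe?target=%s&auth_module=example", target))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// body now holds Prometheus text-format metrics for that one target.
	return string(body), nil
}
```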
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - - "github.com/blang/semver/v4" - "gopkg.in/yaml.v2" -) - -// UserQuery represents a user defined query -type UserQuery struct { - Query string `yaml:"query"` - Metrics []Mapping `yaml:"metrics"` - Master bool `yaml:"master"` // Whether to query only the master database - CacheSeconds uint64 `yaml:"cache_seconds"` // Number of seconds to cache the namespace result metrics for. - RunOnServer string `yaml:"runonserver"` // Which server versions to run the query on -} - -// UserQueries represents a set of UserQuery objects -type UserQueries map[string]UserQuery - -// OverrideQuery instances are run in place of simple namespace lookups, and provide -// advanced functionality. But they have a tendency to be postgres version specific. -// There aren't too many versions, so we simply store customized versions using -// the semver matching we do for columns. -type OverrideQuery struct { - versionRange semver.Range - query string -} - -// Overriding queries for namespaces above. -// TODO: validate this is a closed set in tests, and there are no overlaps -var queryOverrides = map[string][]OverrideQuery{ - "pg_stat_replication": { - { - semver.MustParseRange(">=10.0.0"), - ` - SELECT *, - (case pg_is_in_recovery() when 't' then pg_last_wal_receive_lsn() else pg_current_wal_lsn() end) AS pg_current_wal_lsn, - (case pg_is_in_recovery() when 't' then pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_lsn('0/0'))::float else pg_wal_lsn_diff(pg_current_wal_lsn(), pg_lsn('0/0'))::float end) AS pg_current_wal_lsn_bytes, - (case pg_is_in_recovery() when 't' then pg_wal_lsn_diff(pg_last_wal_receive_lsn(), replay_lsn)::float else pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn)::float end) AS pg_wal_lsn_diff - FROM pg_stat_replication - `, - }, - { - semver.MustParseRange(">=9.2.0 <10.0.0"), - ` - SELECT *, - (case pg_is_in_recovery() when 't' then pg_last_xlog_receive_location() else pg_current_xlog_location() end) AS pg_current_xlog_location, - (case pg_is_in_recovery() when 't' then pg_xlog_location_diff(pg_last_xlog_receive_location(), replay_location)::float else pg_xlog_location_diff(pg_current_xlog_location(), replay_location)::float end) AS pg_xlog_location_diff - FROM pg_stat_replication - `, - }, - { - semver.MustParseRange("<9.2.0"), - ` - SELECT *, - (case pg_is_in_recovery() when 't' then pg_last_xlog_receive_location() else pg_current_xlog_location() end) AS pg_current_xlog_location - FROM pg_stat_replication - `, - }, - }, - - "pg_replication_slots": { - { - semver.MustParseRange(">=9.4.0 <10.0.0"), - ` - SELECT slot_name, database, active, - (case pg_is_in_recovery() when 't' then pg_xlog_location_diff(pg_last_xlog_receive_location(), restart_lsn) else pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) end) as pg_xlog_location_diff - FROM pg_replication_slots - `, - }, - { - semver.MustParseRange(">=10.0.0"), - ` - SELECT slot_name, database, active, - (case pg_is_in_recovery() when 't' then pg_wal_lsn_diff(pg_last_wal_receive_lsn(), restart_lsn) else pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) end) as pg_wal_lsn_diff - FROM
pg_replication_slots - `, - }, - }, - - "pg_stat_archiver": { - { - semver.MustParseRange(">=9.4.0"), - ` - SELECT *, - extract(epoch from now() - last_archived_time) AS last_archive_age - FROM pg_stat_archiver - `, - }, - }, - - "pg_stat_activity": { - // This query only works on PostgreSQL 9.2 and newer - { - semver.MustParseRange(">=9.2.0"), - ` - SELECT - pg_database.datname, - tmp.state, - tmp2.usename, - tmp2.application_name, - tmp2.backend_type, - tmp2.wait_event_type, - tmp2.wait_event, - COALESCE(count,0) as count, - COALESCE(max_tx_duration,0) as max_tx_duration - FROM - ( - VALUES ('active'), - ('idle'), - ('idle in transaction'), - ('idle in transaction (aborted)'), - ('fastpath function call'), - ('disabled') - ) AS tmp(state) CROSS JOIN pg_database - LEFT JOIN - ( - SELECT - datname, - state, - usename, - application_name, - backend_type, - wait_event_type, - wait_event, - count(*) AS count, - MAX(EXTRACT(EPOCH FROM now() - xact_start))::float AS max_tx_duration - FROM pg_stat_activity - GROUP BY datname,state,usename,application_name,backend_type,wait_event_type,wait_event) AS tmp2 - ON tmp.state = tmp2.state AND pg_database.datname = tmp2.datname - `, - }, - { - semver.MustParseRange("<9.2.0"), - ` - SELECT - datname, - 'unknown' AS state, - usename, - application_name, - COALESCE(count(*),0) AS count, - COALESCE(MAX(EXTRACT(EPOCH FROM now() - xact_start))::float,0) AS max_tx_duration - FROM pg_stat_activity GROUP BY datname,usename,application_name - `, - }, - }, -} - -// Convert the query override file to the version-specific query override file -// for the exporter. -func makeQueryOverrideMap(pgVersion semver.Version, queryOverrides map[string][]OverrideQuery) map[string]string { - resultMap := make(map[string]string) - for name, overrideDef := range queryOverrides { - // Find a matching semver. We make it an error to have overlapping - // ranges at test-time, so only 1 should ever match. - matched := false - for _, queryDef := range overrideDef { - if queryDef.versionRange(pgVersion) { - resultMap[name] = queryDef.query - matched = true - break - } - } - if !matched { - logger.Warn("No query matched override, disabling metric space", "name", name) - resultMap[name] = "" - } - } - - return resultMap -} - -func parseUserQueries(content []byte) (map[string]intermediateMetricMap, map[string]string, error) { - var userQueries UserQueries - - err := yaml.Unmarshal(content, &userQueries) - if err != nil { - return nil, nil, err - } - - // Stores the loaded map representation - metricMaps := make(map[string]intermediateMetricMap) - newQueryOverrides := make(map[string]string) - - for metric, specs := range userQueries { - logger.Debug("New user metric namespace from YAML metric", "metric", metric, "cache_seconds", specs.CacheSeconds) - newQueryOverrides[metric] = specs.Query - metricMap, ok := metricMaps[metric] - if !ok { - // Namespace for metric not found - add it. - newMetricMap := make(map[string]ColumnMapping) - metricMap = intermediateMetricMap{ - columnMappings: newMetricMap, - master: specs.Master, - cacheSeconds: specs.CacheSeconds, - } - metricMaps[metric] = metricMap - } - for _, metric := range specs.Metrics { - for name, mappingOption := range metric { - var columnMapping ColumnMapping - tmpUsage, err := stringToColumnUsage(mappingOption.Usage) - if err != nil { - return nil, nil, err - } - columnMapping.usage = tmpUsage - columnMapping.description = mappingOption.Description - - // TODO: we should support this - columnMapping.mapping = nil - // Should we support this for users?
- columnMapping.supportedVersions = nil - - metricMap.columnMappings[name] = columnMapping - } - } - } - return metricMaps, newQueryOverrides, nil -} - -// Add queries to the builtinMetricMaps and queryOverrides maps. Added queries do not -// respect version requirements, because it is assumed that the user knows -// what they are doing with their version of postgres. -// -// This function modifies metricMap and queryOverrideMap to contain the new -// queries. -// TODO: test code for all this. -// TODO: the YAML this supports is "non-standard" - we should move away from it. -func addQueries(content []byte, pgVersion semver.Version, server *Server) error { - metricMaps, newQueryOverrides, err := parseUserQueries(content) - if err != nil { - return err - } - // Convert the loaded metric map into exporter representation - partialExporterMap := makeDescMap(pgVersion, server.labels, metricMaps) - - // Merge the two maps (which are now quite flattened) - for k, v := range partialExporterMap { - _, found := server.metricMap[k] - if found { - logger.Debug("Overriding metric from user YAML file", "metric", k) - } else { - logger.Debug("Adding new metric from user YAML file", "metric", k) - } - server.metricMap[k] = v - } - - // Merge the query override map - for k, v := range newQueryOverrides { - _, found := server.queryOverrides[k] - if found { - logger.Debug("Overriding query override from user YAML file", "query_override", k) - } else { - logger.Debug("Adding new query override from user YAML file", "query_override", k) - } - server.queryOverrides[k] = v - } - return nil -} - -func queryDatabases(server *Server) ([]string, error) { - rows, err := server.db.Query("SELECT datname FROM pg_database WHERE datallowconn = true AND datistemplate = false AND datname != current_database()") - if err != nil { - return nil, fmt.Errorf("Error retrieving databases: %v", err) - } - defer rows.Close() // nolint: errcheck - - var databaseName string - result := make([]string, 0) - for rows.Next() { - err = rows.Scan(&databaseName) - if err != nil { - return nil, errors.New(fmt.Sprintln("Error retrieving rows:", err)) - } - result = append(result, databaseName) - } - - return result, nil -} diff --git a/cmd/postgres_exporter/server.go b/cmd/postgres_exporter/server.go deleted file mode 100644 index 3d2ecde91..000000000 --- a/cmd/postgres_exporter/server.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "database/sql" - "fmt" - "sync" - "time" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -// Server describes a connection to Postgres. -// Also it contains metrics map and query overrides. -type Server struct { - db *sql.DB - labels prometheus.Labels - master bool - runonserver string - - // Last version used to calculate metric map. If mismatch on scrape, - // then maps are recalculated.
- lastMapVersion semver.Version - // Currently active metric map - metricMap map[string]MetricMapNamespace - // Currently active query overrides - queryOverrides map[string]string - mappingMtx sync.RWMutex - // Currently cached metrics - metricCache map[string]cachedMetrics - cacheMtx sync.Mutex -} - -// ServerOpt configures a server. -type ServerOpt func(*Server) - -// ServerWithLabels configures a set of labels. -func ServerWithLabels(labels prometheus.Labels) ServerOpt { - return func(s *Server) { - for k, v := range labels { - s.labels[k] = v - } - } -} - -// NewServer establishes a new connection using DSN. -func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { - fingerprint, err := parseFingerprint(dsn) - if err != nil { - return nil, err - } - - db, err := sql.Open("postgres", dsn) - if err != nil { - return nil, err - } - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - - logger.Info("Established new database connection", "fingerprint", fingerprint) - - s := &Server{ - db: db, - master: false, - labels: prometheus.Labels{ - serverLabelName: fingerprint, - }, - metricCache: make(map[string]cachedMetrics), - } - - for _, opt := range opts { - opt(s) - } - - return s, nil -} - -// Close disconnects from Postgres. -func (s *Server) Close() error { - return s.db.Close() -} - -// Ping checks connection availability and possibly invalidates the connection if it fails. -func (s *Server) Ping() error { - if err := s.db.Ping(); err != nil { - if cerr := s.Close(); cerr != nil { - logger.Error("Error while closing non-pinging DB connection", "server", s, "err", cerr) - } - return err - } - return nil -} - -// String returns server's fingerprint. -func (s *Server) String() string { - return s.labels[serverLabelName] -} - -// Scrape loads metrics. -func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool) error { - s.mappingMtx.RLock() - defer s.mappingMtx.RUnlock() - - var err error - - if !disableSettingsMetrics && s.master { - if err = querySettings(ch, s); err != nil { - err = fmt.Errorf("error retrieving settings: %s", err) - return err - } - } - - errMap := queryNamespaceMappings(ch, s) - if len(errMap) == 0 { - return nil - } - err = fmt.Errorf("queryNamespaceMappings errors encountered") - for namespace, errStr := range errMap { - err = fmt.Errorf("%s, namespace: %s error: %s", err, namespace, errStr) - } - - return err -} - -// Servers contains a collection of servers to Postgres. -type Servers struct { - m sync.Mutex - servers map[string]*Server - opts []ServerOpt -} - -// NewServers creates a collection of servers to Postgres. -func NewServers(opts ...ServerOpt) *Servers { - return &Servers{ - servers: make(map[string]*Server), - opts: opts, - } -} - -// GetServer returns established connection from a collection. -func (s *Servers) GetServer(dsn string) (*Server, error) { - s.m.Lock() - defer s.m.Unlock() - var err error - var ok bool - errCount := 0 // start at zero because we increment before doing work - retries := 1 - var server *Server - for { - if errCount++; errCount > retries { - return nil, err - } - server, ok = s.servers[dsn] - if !ok { - server, err = NewServer(dsn, s.opts...) - if err != nil { - time.Sleep(time.Duration(errCount) * time.Second) - continue - } - s.servers[dsn] = server - } - if err = server.Ping(); err != nil { - delete(s.servers, dsn) - time.Sleep(time.Duration(errCount) * time.Second) - continue - } - break - } - return server, nil -} - -// Close disconnects from all known servers. 
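To illustrate the connection-caching contract of Server and Servers above (a sketch assuming in-package access; the DSN and label are invented): GetServer returns a cached connection per DSN, pinging it first and evicting it from the cache when the ping fails, so the next call reconnects.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func exampleServers() error {
	servers := NewServers(ServerWithLabels(prometheus.Labels{"cluster": "demo"}))
	defer servers.Close()

	// The first call dials and caches; later calls with the same DSN reuse
	// the connection. A failed Ping drops it from the cache.
	server, err := servers.GetServer("postgresql://postgres:password@localhost:5432/postgres?sslmode=disable")
	if err != nil {
		return err
	}
	_ = server.String() // the fingerprint label, e.g. "localhost:5432"
	return nil
}
```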
-func (s *Servers) Close() { - s.m.Lock() - defer s.m.Unlock() - for _, server := range s.servers { - if err := server.Close(); err != nil { - logger.Error("Failed to close connection", "server", server, "err", err) - } - } -} diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile deleted file mode 100755 index f12569faf..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM postgres:11 -MAINTAINER Daniel Dent (https://www.danieldent.com) -ENV PG_MAX_WAL_SENDERS 8 -ENV PG_WAL_KEEP_SEGMENTS 8 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping -COPY setup-replication.sh /docker-entrypoint-initdb.d/ -COPY docker-entrypoint.sh /docker-entrypoint.sh -RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 b/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 deleted file mode 100644 index 1689000de..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/Dockerfile.p2 +++ /dev/null @@ -1,8 +0,0 @@ -FROM postgres:{{VERSION}} -MAINTAINER Daniel Dent (https://www.danieldent.com) -ENV PG_MAX_WAL_SENDERS 8 -ENV PG_WAL_KEEP_SEGMENTS 8 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y inetutils-ping -COPY setup-replication.sh /docker-entrypoint-initdb.d/ -COPY docker-entrypoint.sh /docker-entrypoint.sh -RUN chmod +x /docker-entrypoint-initdb.d/setup-replication.sh /docker-entrypoint.sh diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/README.md b/cmd/postgres_exporter/tests/docker-postgres-replication/README.md deleted file mode 100644 index 86106b678..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Replicated postgres cluster in docker. - -Upstream is forked from https://github.com/DanielDent/docker-postgres-replication - -My version lives at https://github.com/wrouesnel/docker-postgres-replication - -This very simple docker-compose file lets us stand up a replicated postgres -cluster so we can test streaming. - -# TODO: -Pull in p2 and template the Dockerfile so we can test multiple versions. diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml deleted file mode 100644 index 3ea59a85b..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-compose.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -version: '2' - -services: - pg-master: - build: '.' - image: 'danieldent/postgres-replication' - restart: 'always' - environment: - POSTGRES_USER: 'postgres' - POSTGRES_PASSWORD: 'postgres' - PGDATA: '/var/lib/postgresql/data/pgdata' - volumes: - - '/var/lib/postgresql/data' - expose: - - '5432' - - pg-slave: - build: '.' 
- image: 'danieldent/postgres-replication' - restart: 'always' - environment: - POSTGRES_USER: 'postgres' - POSTGRES_PASSWORD: 'postgres' - PGDATA: '/var/lib/postgresql/data/pgdata' - REPLICATE_FROM: 'pg-master' - volumes: - - '/var/lib/postgresql/data' - expose: - - '5432' - links: - - 'pg-master' diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh deleted file mode 100755 index 24d15c89d..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/docker-entrypoint.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash - -# Backwards compatibility for old variable names (deprecated) -if [ "x$PGUSER" != "x" ]; then - POSTGRES_USER=$PGUSER -fi -if [ "x$PGPASSWORD" != "x" ]; then - POSTGRES_PASSWORD=$PGPASSWORD -fi - -# Forwards-compatibility for old variable names (pg_basebackup uses them) -if [ "x$PGPASSWORD" = "x" ]; then - export PGPASSWORD=$POSTGRES_PASSWORD -fi - -# Based on official postgres package's entrypoint script (https://hub.docker.com/_/postgres/) -# Modified to be able to set up a slave. The docker-entrypoint-initdb.d hook provided is inadequate. - -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- postgres "$@" -fi - -if [ "$1" = 'postgres' ]; then - mkdir -p "$PGDATA" - chmod 700 "$PGDATA" - chown -R postgres "$PGDATA" - - mkdir -p /run/postgresql - chmod g+s /run/postgresql - chown -R postgres /run/postgresql - - # look specifically for PG_VERSION, as it is expected in the DB dir - if [ ! -s "$PGDATA/PG_VERSION" ]; then - if [ "x$REPLICATE_FROM" == "x" ]; then - eval "gosu postgres initdb $POSTGRES_INITDB_ARGS" - else - until /bin/ping -c 1 -W 1 ${REPLICATE_FROM} - do - echo "Waiting for master to ping..." - sleep 1s - done - until gosu postgres pg_basebackup -h ${REPLICATE_FROM} -D ${PGDATA} -U ${POSTGRES_USER} -vP -w - do - echo "Waiting for master to connect..." - sleep 1s - done - fi - - # check password first so we can output the warning before postgres - # messes it up - if [ ! -z "$POSTGRES_PASSWORD" ]; then - pass="PASSWORD '$POSTGRES_PASSWORD'" - authMethod=md5 - else - # The - option suppresses leading tabs but *not* spaces. :) - cat >&2 <<-'EOWARN' - **************************************************** - WARNING: No password has been set for the database. - This will allow anyone with access to the - Postgres port to access your database. In - Docker's default configuration, this is - effectively any other container on the same - system. - - Use "-e POSTGRES_PASSWORD=password" to set - it in "docker run". 
- **************************************************** - EOWARN - - pass= - authMethod=trust - fi - - if [ "x$REPLICATE_FROM" == "x" ]; then - - { echo; echo "host replication all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null - { echo; echo "host all all 0.0.0.0/0 $authMethod"; } | gosu postgres tee -a "$PGDATA/pg_hba.conf" > /dev/null - - # internal start of server in order to allow set-up using psql-client - # does not listen on external TCP/IP and waits until start finishes - gosu postgres pg_ctl -D "$PGDATA" \ - -o "-c listen_addresses='localhost'" \ - -w start - - : ${POSTGRES_USER:=postgres} - : ${POSTGRES_DB:=$POSTGRES_USER} - export POSTGRES_USER POSTGRES_DB - - psql=( "psql" "-v" "ON_ERROR_STOP=1" ) - - if [ "$POSTGRES_DB" != 'postgres' ]; then - "${psql[@]}" --username postgres <<-EOSQL - CREATE DATABASE "$POSTGRES_DB" ; - EOSQL - echo - fi - - if [ "$POSTGRES_USER" = 'postgres' ]; then - op='ALTER' - else - op='CREATE' - fi - "${psql[@]}" --username postgres <<-EOSQL - $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ; - EOSQL - echo - - fi - - psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" ) - - echo - for f in /docker-entrypoint-initdb.d/*; do - case "$f" in - *.sh) echo "$0: running $f"; . "$f" ;; - *.sql) echo "$0: running $f"; "${psql[@]}" < "$f"; echo ;; - *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;; - *) echo "$0: ignoring $f" ;; - esac - echo - done - - if [ "x$REPLICATE_FROM" == "x" ]; then - gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop - fi - - echo - echo 'PostgreSQL init process complete; ready for start up.' - echo - fi - - # We need this health check so we know when it's started up. - touch /tmp/.postgres_init_complete - - exec gosu postgres "$@" -fi - -exec "$@" diff --git a/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh b/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh deleted file mode 100755 index 460c54891..000000000 --- a/cmd/postgres_exporter/tests/docker-postgres-replication/setup-replication.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -if [ "x$REPLICATE_FROM" == "x" ]; then - -cat >> ${PGDATA}/postgresql.conf <<EOF -max_wal_senders = $PG_MAX_WAL_SENDERS -wal_level = hot_standby -wal_keep_segments = $PG_WAL_KEEP_SEGMENTS -hot_standby = on -EOF - -else - -cat > ${PGDATA}/recovery.conf <<EOF -standby_mode = on -primary_conninfo = 'host=${REPLICATE_FROM} port=5432 user=${POSTGRES_USER} password=${POSTGRES_PASSWORD}' -EOF - -chown postgres ${PGDATA}/recovery.conf -chmod 600 ${PGDATA}/recovery.conf - -fi diff --git a/cmd/postgres_exporter/tests/test-smoke b/cmd/postgres_exporter/tests/test-smoke deleted file mode 100755 --- a/cmd/postgres_exporter/tests/test-smoke +++ /dev/null -#!/bin/bash -# Test the postgres exporter works end to end against a real database. - -echo "Exporter Binary: $postgres_exporter" 1>&2 -echo "Test Binary: $test_binary" 1>&2 - -[ -z "$postgres_exporter" ] && echo "Missing exporter binary" && exit 1 -[ -z "$test_binary" ] && echo "Missing test binary" && exit 1 - -cd "$DIR" || exit 1 - -VERSIONS=( \ - 9.4 \ - 9.5 \ - 9.6 \ - 10 \ - 11 \ -) - -wait_for_postgres(){ - local container=$1 - local ip=$2 - local port=$3 - if [ -z "$ip" ]; then - echo "No IP specified." 1>&2 - exit 1 - fi - - if [ -z "$port" ]; then - echo "No port specified." 1>&2 - exit 1 - fi - - local wait_start - wait_start=$(date +%s) || exit 1 - echo "Waiting for postgres to start listening..." - while ! docker exec "$container" pg_isready --host="$ip" --port="$port" &> /dev/null; do - if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then - echo "Timed out waiting for postgres to start!" 1>&2 - exit 1 - fi - sleep 1 - done - echo "Postgres is online at $ip:$port" -} - -wait_for_exporter() { - local wait_start - wait_start=$(date +%s) || exit 1 - echo "Waiting for exporter to start..." - while ! nc -z localhost "$exporter_port" ; do - if [ $(( $(date +%s) - wait_start )) -gt "$TIMEOUT" ]; then - echo "Timed out waiting for exporter!"
1>&2 - exit 1 - fi - sleep 1 - done - echo "Exporter is online at localhost:$exporter_port" -} - -smoketest_postgres() { - local version=$1 - local CONTAINER_NAME=postgres_exporter-test-smoke - local TIMEOUT=30 - local IMAGE_NAME=postgres - - local CUR_IMAGE=$IMAGE_NAME:$version - - echo "#######################" - echo "Standalone Postgres $version" - echo "#######################" - local docker_cmd="docker run -d -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD $CUR_IMAGE" - echo "Docker Cmd: $docker_cmd" - - CONTAINER_NAME=$($docker_cmd) - standalone_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $CONTAINER_NAME) - # shellcheck disable=SC2064 - trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; exit 1" EXIT INT TERM - wait_for_postgres "$CONTAINER_NAME" "$standalone_ip" 5432 - - # Run the test binary. - DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $test_binary || exit $? - - # Extract a raw metric list. - DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$standalone_ip:5432/?sslmode=disable" $postgres_exporter \ - --log.level=debug --web.listen-address=:$exporter_port & - exporter_pid=$! - # shellcheck disable=SC2064 - trap "docker logs $CONTAINER_NAME ; docker kill $CONTAINER_NAME ; docker rm -v $CONTAINER_NAME; kill $exporter_pid; exit 1" EXIT INT TERM - wait_for_exporter - - # Dump the metrics to a file. - if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.single.$version.prom" ; then - echo "Failed on postgres $version (standalone $DOCKER_IMAGE)" 1>&2 - kill $exporter_pid - exit 1 - fi - - # HACK test: check pg_up is a 1 - TODO: expand integration tests to include metric consumption - if ! grep 'pg_up.* 1' $METRICS_DIR/.metrics.single.$version.prom ; then - echo "pg_up metric was not 1 despite exporter and database being up" - kill $exporter_pid - exit 1 - fi - - kill $exporter_pid - docker kill "$CONTAINER_NAME" - docker rm -v "$CONTAINER_NAME" - trap - EXIT INT TERM - - echo "#######################" - echo "Replicated Postgres $version" - echo "#######################" - old_pwd=$(pwd) - cd docker-postgres-replication || exit 1 - - if ! VERSION="$version" p2 -t Dockerfile.p2 -o Dockerfile ; then - echo "Templating failed" 1>&2 - exit 1 - fi - trap "docker-compose logs; docker-compose down ; docker-compose rm -v; exit 1" EXIT INT TERM - local compose_cmd="POSTGRES_PASSWORD=$POSTGRES_PASSWORD docker-compose up -d --force-recreate --build" - echo "Compose Cmd: $compose_cmd" - eval "$compose_cmd" - - master_container=$(docker-compose ps -q pg-master) - slave_container=$(docker-compose ps -q pg-slave) - master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$master_container") - slave_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$slave_container") - echo "Got master IP: $master_ip" - wait_for_postgres "$master_container" "$master_ip" 5432 - wait_for_postgres "$slave_container" "$slave_ip" 5432 - - DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $test_binary || exit $? - - DATA_SOURCE_NAME="postgresql://postgres:$POSTGRES_PASSWORD@$master_ip:5432/?sslmode=disable" $postgres_exporter \ - --log.level=debug --web.listen-address=:$exporter_port & - exporter_pid=$! 
- # shellcheck disable=SC2064
- trap "docker-compose logs; docker-compose down ; docker-compose rm -v ; kill $exporter_pid; exit 1" EXIT INT TERM
- wait_for_exporter
-
- if ! wget -q -O - http://localhost:$exporter_port/metrics 1> "$METRICS_DIR/.metrics.replicated.$version.prom" ; then
- echo "Failed on postgres $version (replicated $CUR_IMAGE)" 1>&2
- exit 1
- fi
-
- kill $exporter_pid
- docker-compose down
- docker-compose rm -v
- trap - EXIT INT TERM
-
- cd "$old_pwd" || exit 1
-}
-
-# Start pulling the docker images in advance
-for version in "${VERSIONS[@]}"; do
- docker pull "postgres:$version" > /dev/null &
-done
-
-for version in "${VERSIONS[@]}"; do
- echo "Testing postgres version $version"
- smoketest_postgres "$version"
-done
diff --git a/cmd/postgres_exporter/tests/user_queries_ok.yaml b/cmd/postgres_exporter/tests/user_queries_ok.yaml
deleted file mode 100644
index e5ecec948..000000000
--- a/cmd/postgres_exporter/tests/user_queries_ok.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-pg_locks_mode:
- query: "WITH q_locks AS (select * from pg_locks where pid != pg_backend_pid() and database = (select oid from pg_database where datname = current_database())) SELECT (select current_database()) as datname,
- lockmodes AS tag_lockmode, coalesce((select count(*) FROM q_locks WHERE mode = lockmodes), 0) AS count FROM
- unnest('{AccessShareLock, ExclusiveLock, RowShareLock, RowExclusiveLock, ShareLock, ShareRowExclusiveLock, AccessExclusiveLock, ShareUpdateExclusiveLock}'::text[]) lockmodes;"
- metrics:
- - datname:
- usage: "LABEL"
- description: "Database name"
- - tag_lockmode:
- usage: "LABEL"
- description: "Lock type"
- - count:
- usage: "GAUGE"
- description: "Number of locks"
-pg_wal:
- query: "select current_database() as datname, case when pg_is_in_recovery() = false then pg_xlog_location_diff(pg_current_xlog_location(), '0/0')::int8 else pg_xlog_location_diff(pg_last_xlog_replay_location(), '0/0')::int8 end as xlog_location_b;"
- metrics:
- - datname:
- usage: "LABEL"
- description: "Database name"
- - xlog_location_b:
- usage: "COUNTER"
- description: "Current transaction log write location"
diff --git a/cmd/postgres_exporter/tests/user_queries_test.yaml b/cmd/postgres_exporter/tests/user_queries_test.yaml
deleted file mode 100644
index c9a396551..000000000
--- a/cmd/postgres_exporter/tests/user_queries_test.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-random:
- query: |
- WITH data AS (SELECT floor(random()*10) AS d FROM generate_series(1,100)),
- metrics AS (SELECT SUM(d) AS sum, COUNT(*) AS count FROM data),
- buckets AS (SELECT le, SUM(CASE WHEN d <= le THEN 1 ELSE 0 END) AS d
- FROM data, UNNEST(ARRAY[1, 2, 4, 8]) AS le GROUP BY le)
- SELECT
- sum AS histogram_sum,
- count AS histogram_count,
- ARRAY_AGG(le) AS histogram,
- ARRAY_AGG(d) AS histogram_bucket,
- ARRAY_AGG(le) AS missing,
- ARRAY_AGG(le) AS missing_sum,
- ARRAY_AGG(d) AS missing_sum_bucket,
- ARRAY_AGG(le) AS missing_count,
- ARRAY_AGG(d) AS missing_count_bucket,
- sum AS missing_count_sum,
- ARRAY_AGG(le) AS unexpected_sum,
- ARRAY_AGG(d) AS unexpected_sum_bucket,
- 'data' AS unexpected_sum_sum,
- ARRAY_AGG(le) AS unexpected_count,
- ARRAY_AGG(d) AS unexpected_count_bucket,
- sum AS unexpected_count_sum,
- 'nan'::varchar AS unexpected_count_count,
- ARRAY_AGG(le) AS unexpected_bytes,
- ARRAY_AGG(d) AS unexpected_bytes_bucket,
- sum AS unexpected_bytes_sum,
- 'nan'::bytea AS unexpected_bytes_count
- FROM metrics, buckets GROUP BY 1,2
- metrics:
- - histogram:
- usage: "HISTOGRAM"
- description: "Random data"
- - missing:
-
usage: "HISTOGRAM" - description: "nonfatal error" - - missing_sum: - usage: "HISTOGRAM" - description: "nonfatal error" - - missing_count: - usage: "HISTOGRAM" - description: "nonfatal error" - - unexpected_sum: - usage: "HISTOGRAM" - description: "nonfatal error" - - unexpected_count: - usage: "HISTOGRAM" - description: "nonfatal error" - - unexpected_bytes: - usage: "HISTOGRAM" - description: "nonfatal error" diff --git a/cmd/postgres_exporter/tests/username_file b/cmd/postgres_exporter/tests/username_file deleted file mode 100644 index 0650cfd36..000000000 --- a/cmd/postgres_exporter/tests/username_file +++ /dev/null @@ -1 +0,0 @@ -custom_username$&+,/:;=?@ diff --git a/cmd/postgres_exporter/tests/userpass_file b/cmd/postgres_exporter/tests/userpass_file deleted file mode 100644 index a9caa8dee..000000000 --- a/cmd/postgres_exporter/tests/userpass_file +++ /dev/null @@ -1 +0,0 @@ -custom_password$&+,/:;=?@ diff --git a/cmd/postgres_exporter/util.go b/cmd/postgres_exporter/util.go deleted file mode 100644 index 8907e7c5f..000000000 --- a/cmd/postgres_exporter/util.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "math" - "net/url" - "strconv" - "strings" - "time" - - "github.com/lib/pq" -) - -func contains(a []string, x string) bool { - for _, n := range a { - if x == n { - return true - } - } - return false -} - -// convert a string to the corresponding ColumnUsage -func stringToColumnUsage(s string) (ColumnUsage, error) { - var u ColumnUsage - var err error - switch s { - case "DISCARD": - u = DISCARD - - case "LABEL": - u = LABEL - - case "COUNTER": - u = COUNTER - - case "GAUGE": - u = GAUGE - - case "HISTOGRAM": - u = HISTOGRAM - - case "MAPPEDMETRIC": - u = MAPPEDMETRIC - - case "DURATION": - u = DURATION - - default: - err = fmt.Errorf("wrong ColumnUsage given : %s", s) - } - - return u, err -} - -// Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte -// types are mapped as NaN and !ok -func dbToFloat64(t interface{}) (float64, bool) { - switch v := t.(type) { - case int64: - return float64(v), true - case float64: - return v, true - case time.Time: - return float64(v.Unix()), true - case []byte: - // Try and convert to string and then parse to a float64 - strV := string(v) - result, err := strconv.ParseFloat(strV, 64) - if err != nil { - logger.Info("Could not parse []byte", "err", err) - return math.NaN(), false - } - return result, true - case string: - result, err := strconv.ParseFloat(v, 64) - if err != nil { - logger.Info("Could not parse string", "err", err) - return math.NaN(), false - } - return result, true - case bool: - if v { - return 1.0, true - } - return 0.0, true - case nil: - return math.NaN(), true - default: - return math.NaN(), false - } -} - -// Convert database.sql types to uint64 for Prometheus consumption. Null types are mapped to 0. 
string and []byte -// types are mapped as 0 and !ok -func dbToUint64(t interface{}) (uint64, bool) { - switch v := t.(type) { - case uint64: - return v, true - case int64: - return uint64(v), true - case float64: - return uint64(v), true - case time.Time: - return uint64(v.Unix()), true - case []byte: - // Try and convert to string and then parse to a uint64 - strV := string(v) - result, err := strconv.ParseUint(strV, 10, 64) - if err != nil { - logger.Info("Could not parse []byte", "err", err) - return 0, false - } - return result, true - case string: - result, err := strconv.ParseUint(v, 10, 64) - if err != nil { - logger.Info("Could not parse string", "err", err) - return 0, false - } - return result, true - case bool: - if v { - return 1, true - } - return 0, true - case nil: - return 0, true - default: - return 0, false - } -} - -// Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings. -func dbToString(t interface{}) (string, bool) { - switch v := t.(type) { - case int64: - return fmt.Sprintf("%v", v), true - case float64: - return fmt.Sprintf("%v", v), true - case time.Time: - return fmt.Sprintf("%v", v.Unix()), true - case nil: - return "", true - case []byte: - // Try and convert to string - return string(v), true - case string: - return strings.ToValidUTF8(v, "�"), true - case bool: - if v { - return "true", true - } - return "false", true - default: - return "", false - } -} - -func parseFingerprint(url string) (string, error) { - dsn, err := pq.ParseURL(url) - if err != nil { - dsn = url - } - - pairs := strings.Split(dsn, " ") - kv := make(map[string]string, len(pairs)) - for _, pair := range pairs { - splitted := strings.SplitN(pair, "=", 2) - if len(splitted) != 2 { - return "", fmt.Errorf("malformed dsn %q", dsn) - } - // Newer versions of pq.ParseURL quote values so trim them off if they exist - key := strings.Trim(splitted[0], "'\"") - value := strings.Trim(splitted[1], "'\"") - kv[key] = value - } - - var fingerprint string - - if host, ok := kv["host"]; ok { - fingerprint += host - } else { - fingerprint += "localhost" - } - - if port, ok := kv["port"]; ok { - fingerprint += ":" + port - } else { - fingerprint += ":5432" - } - - return fingerprint, nil -} - -func loggableDSN(dsn string) string { - pDSN, err := url.Parse(dsn) - if err != nil { - return "could not parse DATA_SOURCE_NAME" - } - // Blank user info if not nil - if pDSN.User != nil { - pDSN.User = url.UserPassword(pDSN.User.Username(), "PASSWORD_REMOVED") - } - - return pDSN.String() -} diff --git a/collector/collector.go b/collector/collector.go deleted file mode 100644 index 298bc36ee..000000000 --- a/collector/collector.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
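For intuition about the DSN helpers removed above: parseFingerprint reduces a connection string to host:port, and loggableDSN masks the password before anything is logged. A self-contained sketch of the masking step using only the standard library; the sample DSN is made up:

```go
package main

import (
	"fmt"
	"net/url"
)

// maskDSN blanks the password in a URL-style DSN, the same idea as
// loggableDSN above: keep the DSN recognizable in logs without leaking
// credentials.
func maskDSN(dsn string) string {
	u, err := url.Parse(dsn)
	if err != nil {
		return "could not parse DATA_SOURCE_NAME"
	}
	if u.User != nil {
		u.User = url.UserPassword(u.User.Username(), "PASSWORD_REMOVED")
	}
	return u.String()
}

func main() {
	// Prints postgresql://postgres:PASSWORD_REMOVED@localhost:5432/circle_test?sslmode=disable
	fmt.Println(maskDSN("postgresql://postgres:secret@localhost:5432/circle_test?sslmode=disable"))
}
```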
- -package collector - -import ( - "context" - "errors" - "fmt" - "log/slog" - "sync" - "time" - - "github.com/alecthomas/kingpin/v2" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - factories = make(map[string]func(collectorConfig) (Collector, error)) - initiatedCollectorsMtx = sync.Mutex{} - initiatedCollectors = make(map[string]Collector) - collectorState = make(map[string]*bool) - forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled -) - -const ( - // Namespace for all metrics. - namespace = "pg" - - collectorFlagPrefix = "collector." - defaultEnabled = true - defaultDisabled = false -) - -var ( - scrapeDurationDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "scrape", "collector_duration_seconds"), - "postgres_exporter: Duration of a collector scrape.", - []string{"collector"}, - nil, - ) - scrapeSuccessDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "scrape", "collector_success"), - "postgres_exporter: Whether a collector succeeded.", - []string{"collector"}, - nil, - ) -) - -type Collector interface { - Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error -} - -type collectorConfig struct { - logger *slog.Logger - excludeDatabases []string -} - -func registerCollector(name string, isDefaultEnabled bool, createFunc func(collectorConfig) (Collector, error)) { - var helpDefaultState string - if isDefaultEnabled { - helpDefaultState = "enabled" - } else { - helpDefaultState = "disabled" - } - - // Create flag for this collector - flagName := collectorFlagPrefix + name - flagHelp := fmt.Sprintf("Enable the %s collector (default: %s).", name, helpDefaultState) - defaultValue := fmt.Sprintf("%v", isDefaultEnabled) - - flag := kingpin.Flag(flagName, flagHelp).Default(defaultValue).Action(collectorFlagAction(name)).Bool() - collectorState[name] = flag - - // Register the create function for this collector - factories[name] = createFunc -} - -// PostgresCollector implements the prometheus.Collector interface. -type PostgresCollector struct { - Collectors map[string]Collector - logger *slog.Logger - - instance *instance -} - -type Option func(*PostgresCollector) error - -// NewPostgresCollector creates a new PostgresCollector. 
-func NewPostgresCollector(logger *slog.Logger, excludeDatabases []string, dsn string, filters []string, options ...Option) (*PostgresCollector, error) { - p := &PostgresCollector{ - logger: logger, - } - // Apply options to customize the collector - for _, o := range options { - err := o(p) - if err != nil { - return nil, err - } - } - - f := make(map[string]bool) - for _, filter := range filters { - enabled, exist := collectorState[filter] - if !exist { - return nil, fmt.Errorf("missing collector: %s", filter) - } - if !*enabled { - return nil, fmt.Errorf("disabled collector: %s", filter) - } - f[filter] = true - } - collectors := make(map[string]Collector) - initiatedCollectorsMtx.Lock() - defer initiatedCollectorsMtx.Unlock() - for key, enabled := range collectorState { - if !*enabled || (len(f) > 0 && !f[key]) { - continue - } - if collector, ok := initiatedCollectors[key]; ok { - collectors[key] = collector - } else { - collector, err := factories[key](collectorConfig{ - logger: logger.With("collector", key), - excludeDatabases: excludeDatabases, - }) - if err != nil { - return nil, err - } - collectors[key] = collector - initiatedCollectors[key] = collector - } - } - - p.Collectors = collectors - - if dsn == "" { - return nil, errors.New("empty dsn") - } - - instance, err := newInstance(dsn) - if err != nil { - return nil, err - } - p.instance = instance - - return p, nil -} - -// Describe implements the prometheus.Collector interface. -func (p PostgresCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- scrapeDurationDesc - ch <- scrapeSuccessDesc -} - -// Collect implements the prometheus.Collector interface. -func (p PostgresCollector) Collect(ch chan<- prometheus.Metric) { - ctx := context.TODO() - - // copy the instance so that concurrent scrapes have independent instances - inst := p.instance.copy() - - // Set up the database connection for the collector. - err := inst.setup() - if err != nil { - p.logger.Error("Error opening connection to database", "err", err) - return - } - defer inst.Close() - - wg := sync.WaitGroup{} - wg.Add(len(p.Collectors)) - for name, c := range p.Collectors { - go func(name string, c Collector) { - execute(ctx, name, c, inst, ch, p.logger) - wg.Done() - }(name, c) - } - wg.Wait() -} - -func execute(ctx context.Context, name string, c Collector, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) { - begin := time.Now() - err := c.Update(ctx, instance, ch) - duration := time.Since(begin) - var success float64 - - if err != nil { - if IsNoDataError(err) { - logger.Debug("collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) - } else { - logger.Error("collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) - } - success = 0 - } else { - logger.Debug("collector succeeded", "name", name, "duration_seconds", duration.Seconds()) - success = 1 - } - ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) - ch <- prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name) -} - -// collectorFlagAction generates a new action function for the given collector -// to track whether it has been explicitly enabled or disabled from the command line. -// A new action function is needed for each collector flag because the ParseContext -// does not contain information about which flag called the action. 
-// See: https://github.com/alecthomas/kingpin/issues/294 -func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error { - return func(ctx *kingpin.ParseContext) error { - forcedCollectors[collector] = true - return nil - } -} - -// ErrNoData indicates the collector found no data to collect, but had no other error. -var ErrNoData = errors.New("collector returned no data") - -func IsNoDataError(err error) bool { - return err == ErrNoData -} diff --git a/collector/collector_test.go b/collector/collector_test.go deleted file mode 100644 index d3b473b43..000000000 --- a/collector/collector_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "strings" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" -) - -type labelMap map[string]string - -type MetricResult struct { - labels labelMap - value float64 - metricType dto.MetricType -} - -func readMetric(m prometheus.Metric) MetricResult { - pb := &dto.Metric{} - m.Write(pb) - labels := make(labelMap, len(pb.Label)) - for _, v := range pb.Label { - labels[v.GetName()] = v.GetValue() - } - if pb.Gauge != nil { - return MetricResult{labels: labels, value: pb.GetGauge().GetValue(), metricType: dto.MetricType_GAUGE} - } - if pb.Counter != nil { - return MetricResult{labels: labels, value: pb.GetCounter().GetValue(), metricType: dto.MetricType_COUNTER} - } - if pb.Untyped != nil { - return MetricResult{labels: labels, value: pb.GetUntyped().GetValue(), metricType: dto.MetricType_UNTYPED} - } - panic("Unsupported metric type") -} - -func sanitizeQuery(q string) string { - q = strings.Join(strings.Fields(q), " ") - q = strings.ReplaceAll(q, "(", "\\(") - q = strings.ReplaceAll(q, "?", "\\?") - q = strings.ReplaceAll(q, ")", "\\)") - q = strings.ReplaceAll(q, "[", "\\[") - q = strings.ReplaceAll(q, "]", "\\]") - q = strings.ReplaceAll(q, "{", "\\{") - q = strings.ReplaceAll(q, "}", "\\}") - q = strings.ReplaceAll(q, "*", "\\*") - q = strings.ReplaceAll(q, "^", "\\^") - q = strings.ReplaceAll(q, "$", "\\$") - return q -} diff --git a/collector/instance.go b/collector/instance.go deleted file mode 100644 index a365697d6..000000000 --- a/collector/instance.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
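sanitizeQuery above exists because sqlmock compiles expected queries as regular expressions, so literal SQL must have its whitespace collapsed and its metacharacters escaped before it can match. A sketch of an alternative using the standard library's regexp.QuoteMeta, which escapes every metacharacter (including '.' and '+', which the hand-rolled version above leaves bare) in a single call:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// quoteQuery normalizes whitespace and escapes regexp metacharacters so a
// literal SQL string can be used as a sqlmock expectation. Unlike the
// character-by-character sanitizeQuery above, regexp.QuoteMeta covers the
// full metacharacter set at once.
func quoteQuery(q string) string {
	return regexp.QuoteMeta(strings.Join(strings.Fields(q), " "))
}

func main() {
	q := `SELECT datname, age(d.datfrozenxid) FROM pg_database d WHERE d.datallowconn`
	fmt.Println(quoteQuery(q))
}
```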
- -package collector - -import ( - "database/sql" - "fmt" - "regexp" - - "github.com/blang/semver/v4" -) - -type instance struct { - dsn string - db *sql.DB - version semver.Version -} - -func newInstance(dsn string) (*instance, error) { - i := &instance{ - dsn: dsn, - } - - // "Create" a database handle to verify the DSN provided is valid. - // Open is not guaranteed to create a connection. - db, err := sql.Open("postgres", dsn) - if err != nil { - return nil, err - } - db.Close() - - return i, nil -} - -// copy returns a copy of the instance. -func (i *instance) copy() *instance { - return &instance{ - dsn: i.dsn, - } -} - -func (i *instance) setup() error { - db, err := sql.Open("postgres", i.dsn) - if err != nil { - return err - } - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - i.db = db - - version, err := queryVersion(i.db) - if err != nil { - return fmt.Errorf("error querying postgresql version: %w", err) - } else { - i.version = version - } - return nil -} - -func (i *instance) getDB() *sql.DB { - return i.db -} - -func (i *instance) Close() error { - return i.db.Close() -} - -// Regex used to get the "short-version" from the postgres version field. -// The result of SELECT version() is something like "PostgreSQL 9.6.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 6.2.1 20160830, 64-bit" -var versionRegex = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`) -var serverVersionRegex = regexp.MustCompile(`^((\d+)(\.\d+)?(\.\d+)?)`) - -func queryVersion(db *sql.DB) (semver.Version, error) { - var version string - err := db.QueryRow("SELECT version();").Scan(&version) - if err != nil { - return semver.Version{}, err - } - submatches := versionRegex.FindStringSubmatch(version) - if len(submatches) > 1 { - return semver.ParseTolerant(submatches[1]) - } - - // We could also try to parse the version from the server_version field. - // This is of the format 13.3 (Debian 13.3-1.pgdg100+1) - err = db.QueryRow("SHOW server_version;").Scan(&version) - if err != nil { - return semver.Version{}, err - } - submatches = serverVersionRegex.FindStringSubmatch(version) - if len(submatches) > 1 { - return semver.ParseTolerant(submatches[1]) - } - return semver.Version{}, fmt.Errorf("could not parse version from %q", version) -} diff --git a/collector/pg_database.go b/collector/pg_database.go deleted file mode 100644 index 4c0972080..000000000 --- a/collector/pg_database.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
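The two regular expressions in queryVersion above cover the two formats PostgreSQL reports: the verbose SELECT version() banner and the bare SHOW server_version value. A standalone check of what they capture, using the sample strings from the comments above:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	versionRegex       = regexp.MustCompile(`^\w+ ((\d+)(\.\d+)?(\.\d+)?)`)
	serverVersionRegex = regexp.MustCompile(`^((\d+)(\.\d+)?(\.\d+)?)`)
)

func main() {
	// Output of SELECT version():
	v := "PostgreSQL 9.6.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 6.2.1 20160830, 64-bit"
	fmt.Println(versionRegex.FindStringSubmatch(v)[1]) // 9.6.2

	// Output of SHOW server_version:
	sv := "13.3 (Debian 13.3-1.pgdg100+1)"
	fmt.Println(serverVersionRegex.FindStringSubmatch(sv)[1]) // 13.3
}
```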
- -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const databaseSubsystem = "database" - -func init() { - registerCollector(databaseSubsystem, defaultEnabled, NewPGDatabaseCollector) -} - -type PGDatabaseCollector struct { - log *slog.Logger - excludedDatabases []string -} - -func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { - exclude := config.excludeDatabases - if exclude == nil { - exclude = []string{} - } - return &PGDatabaseCollector{ - log: config.logger, - excludedDatabases: exclude, - }, nil -} - -var ( - pgDatabaseSizeDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - databaseSubsystem, - "size_bytes", - ), - "Disk space used by the database", - []string{"datname"}, nil, - ) - pgDatabaseConnectionLimitsDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - databaseSubsystem, - "connection_limit", - ), - "Connection limit set for the database", - []string{"datname"}, nil, - ) - - pgDatabaseQuery = "SELECT pg_database.datname, pg_database.datconnlimit FROM pg_database;" - pgDatabaseSizeQuery = "SELECT pg_database_size($1)" -) - -// Update implements Collector and exposes database size and connection limits. -// It is called by the Prometheus registry when collecting metrics. -// The list of databases is retrieved from pg_database and filtered -// by the excludeDatabase config parameter. The tradeoff here is that -// we have to query the list of databases and then query the size of -// each database individually. This is because we can't filter the -// list of databases in the query because the list of excluded -// databases is dynamic. -func (c PGDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - // Query the list of databases - rows, err := db.QueryContext(ctx, - pgDatabaseQuery, - ) - if err != nil { - return err - } - defer rows.Close() - - var databases []string - - for rows.Next() { - var datname sql.NullString - var connLimit sql.NullInt64 - if err := rows.Scan(&datname, &connLimit); err != nil { - return err - } - - if !datname.Valid { - continue - } - database := datname.String - // Ignore excluded databases - // Filtering is done here instead of in the query to avoid - // a complicated NOT IN query with a variable number of parameters - if sliceContains(c.excludedDatabases, database) { - continue - } - - databases = append(databases, database) - - connLimitMetric := 0.0 - if connLimit.Valid { - connLimitMetric = float64(connLimit.Int64) - } - ch <- prometheus.MustNewConstMetric( - pgDatabaseConnectionLimitsDesc, - prometheus.GaugeValue, connLimitMetric, database, - ) - } - - // Query the size of the databases - for _, datname := range databases { - var size sql.NullFloat64 - err = db.QueryRowContext(ctx, pgDatabaseSizeQuery, datname).Scan(&size) - if err != nil { - return err - } - - sizeMetric := 0.0 - if size.Valid { - sizeMetric = size.Float64 - } - ch <- prometheus.MustNewConstMetric( - pgDatabaseSizeDesc, - prometheus.GaugeValue, sizeMetric, datname, - ) - - } - return rows.Err() -} - -func sliceContains(slice []string, s string) bool { - for _, item := range slice { - if item == s { - return true - } - } - return false -} diff --git a/collector/pg_database_test.go b/collector/pg_database_test.go deleted file mode 100644 index fe94166e9..000000000 --- a/collector/pg_database_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2023 The Prometheus Authors 
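The comment on PGDatabaseCollector.Update above explains why exclusion happens in Go: a NOT IN with a variable number of parameters is awkward. For comparison, a hypothetical sketch of pushing the exclusion into SQL with = ANY($1) via pq.Array (the DSN is illustrative, not from the original); the deleted code's filter-in-Go loop avoids needing this:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

// listDatabases pushes the exclusion list into SQL: pq.Array adapts a
// []string to a text[] parameter, so the list length can vary freely.
func listDatabases(ctx context.Context, db *sql.DB, excluded []string) ([]string, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT datname FROM pg_database WHERE NOT datname = ANY($1)`,
		pq.Array(excluded))
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}

func main() {
	db, err := sql.Open("postgres", "postgresql://postgres:test@localhost:5432/postgres?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	names, err := listDatabases(context.Background(), db, []string{"template0", "template1"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}
```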
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGDatabaseCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname", "datconnlimit"}). - AddRow("postgres", 15)) - - mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}). - AddRow(1024)) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGDatabaseCollector{} - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres"}, value: 15, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"datname": "postgres"}, value: 1024, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -// TODO add a null db test - -func TestPGDatabaseCollectorNullMetric(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - mock.ExpectQuery(sanitizeQuery(pgDatabaseQuery)).WillReturnRows(sqlmock.NewRows([]string{"datname", "datconnlimit"}). - AddRow("postgres", nil)) - - mock.ExpectQuery(sanitizeQuery(pgDatabaseSizeQuery)).WithArgs("postgres").WillReturnRows(sqlmock.NewRows([]string{"pg_database_size"}). 
- AddRow(nil)) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGDatabaseCollector{} - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"datname": "postgres"}, value: 0, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_database_wraparound.go b/collector/pg_database_wraparound.go deleted file mode 100644 index d170821b5..000000000 --- a/collector/pg_database_wraparound.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const databaseWraparoundSubsystem = "database_wraparound" - -func init() { - registerCollector(databaseWraparoundSubsystem, defaultDisabled, NewPGDatabaseWraparoundCollector) -} - -type PGDatabaseWraparoundCollector struct { - log *slog.Logger -} - -func NewPGDatabaseWraparoundCollector(config collectorConfig) (Collector, error) { - return &PGDatabaseWraparoundCollector{log: config.logger}, nil -} - -var ( - databaseWraparoundAgeDatfrozenxid = prometheus.NewDesc( - prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datfrozenxid_seconds"), - "Age of the oldest transaction ID that has not been frozen.", - []string{"datname"}, - prometheus.Labels{}, - ) - databaseWraparoundAgeDatminmxid = prometheus.NewDesc( - prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datminmxid_seconds"), - "Age of the oldest multi-transaction ID that has been replaced with a transaction ID.", - []string{"datname"}, - prometheus.Labels{}, - ) - - databaseWraparoundQuery = ` - SELECT - datname, - age(d.datfrozenxid) as age_datfrozenxid, - mxid_age(d.datminmxid) as age_datminmxid - FROM - pg_catalog.pg_database d - WHERE - d.datallowconn - ` -) - -func (c *PGDatabaseWraparoundCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - databaseWraparoundQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var datname sql.NullString - var ageDatfrozenxid, ageDatminmxid sql.NullFloat64 - - if err := rows.Scan(&datname, &ageDatfrozenxid, &ageDatminmxid); err != nil { - return err - } - - if !datname.Valid { - c.log.Debug("Skipping database with NULL name") - continue - } - if !ageDatfrozenxid.Valid { - c.log.Debug("Skipping stat emission with NULL age_datfrozenxid") - continue - } - if 
!ageDatminmxid.Valid { - c.log.Debug("Skipping stat emission with NULL age_datminmxid") - continue - } - - ageDatfrozenxidMetric := ageDatfrozenxid.Float64 - - ch <- prometheus.MustNewConstMetric( - databaseWraparoundAgeDatfrozenxid, - prometheus.GaugeValue, - ageDatfrozenxidMetric, datname.String, - ) - - ageDatminmxidMetric := ageDatminmxid.Float64 - ch <- prometheus.MustNewConstMetric( - databaseWraparoundAgeDatminmxid, - prometheus.GaugeValue, - ageDatminmxidMetric, datname.String, - ) - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_database_wraparound_test.go b/collector/pg_database_wraparound_test.go deleted file mode 100644 index d0a74c362..000000000 --- a/collector/pg_database_wraparound_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGDatabaseWraparoundCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - inst := &instance{db: db} - columns := []string{ - "datname", - "age_datfrozenxid", - "age_datminmxid", - } - rows := sqlmock.NewRows(columns). - AddRow("newreddit", 87126426, 0) - - mock.ExpectQuery(sanitizeQuery(databaseWraparoundQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGDatabaseWraparoundCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGDatabaseWraparoundCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"datname": "newreddit"}, value: 87126426, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"datname": "newreddit"}, value: 0, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_locks.go b/collector/pg_locks.go deleted file mode 100644 index add3e6d42..000000000 --- a/collector/pg_locks.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const locksSubsystem = "locks" - -func init() { - registerCollector(locksSubsystem, defaultEnabled, NewPGLocksCollector) -} - -type PGLocksCollector struct { - log *slog.Logger -} - -func NewPGLocksCollector(config collectorConfig) (Collector, error) { - return &PGLocksCollector{ - log: config.logger, - }, nil -} - -var ( - pgLocksDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - locksSubsystem, - "count", - ), - "Number of locks", - []string{"datname", "mode"}, nil, - ) - - pgLocksQuery = ` - SELECT - pg_database.datname as datname, - tmp.mode as mode, - COALESCE(count, 0) as count - FROM - ( - VALUES - ('accesssharelock'), - ('rowsharelock'), - ('rowexclusivelock'), - ('shareupdateexclusivelock'), - ('sharelock'), - ('sharerowexclusivelock'), - ('exclusivelock'), - ('accessexclusivelock'), - ('sireadlock') - ) AS tmp(mode) - CROSS JOIN pg_database - LEFT JOIN ( - SELECT - database, - lower(mode) AS mode, - count(*) AS count - FROM - pg_locks - WHERE - database IS NOT NULL - GROUP BY - database, - lower(mode) - ) AS tmp2 ON tmp.mode = tmp2.mode - and pg_database.oid = tmp2.database - ORDER BY - 1 - ` -) - -// Update implements Collector and exposes database locks. -// It is called by the Prometheus registry when collecting metrics. -func (c PGLocksCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - // Query the list of databases - rows, err := db.QueryContext(ctx, - pgLocksQuery, - ) - if err != nil { - return err - } - defer rows.Close() - - var datname, mode sql.NullString - var count sql.NullInt64 - - for rows.Next() { - if err := rows.Scan(&datname, &mode, &count); err != nil { - return err - } - - if !datname.Valid || !mode.Valid { - continue - } - - countMetric := 0.0 - if count.Valid { - countMetric = float64(count.Int64) - } - - ch <- prometheus.MustNewConstMetric( - pgLocksDesc, - prometheus.GaugeValue, countMetric, - datname.String, mode.String, - ) - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_locks_test.go b/collector/pg_locks_test.go deleted file mode 100644 index 99597ea2d..000000000 --- a/collector/pg_locks_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
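The VALUES list cross-joined with pg_database in pgLocksQuery above is a zero-fill: every (database, mode) pair yields a row even when no such lock is held, so the pg_locks_count series never disappears between scrapes. The same idea expressed in Go, assuming only sparse counts were available (a sketch, not exporter code):

```go
package main

import "fmt"

var lockModes = []string{
	"accesssharelock", "rowsharelock", "rowexclusivelock",
	"shareupdateexclusivelock", "sharelock", "sharerowexclusivelock",
	"exclusivelock", "accessexclusivelock", "sireadlock",
}

func main() {
	// Sparse counts as pg_locks would report them: an absent mode means
	// zero locks of that mode are currently held.
	observed := map[string]int{"accesssharelock": 3}

	// Zero-fill so every mode is always emitted, mirroring the SQL's
	// VALUES ... LEFT JOIN ... COALESCE(count, 0) construction.
	for _, mode := range lockModes {
		fmt.Printf("pg_locks_count{datname=%q,mode=%q} %d\n", "postgres", mode, observed[mode])
	}
}
```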
-package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGLocksCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - rows := sqlmock.NewRows([]string{"datname", "mode", "count"}). - AddRow("test", "exclusivelock", 42) - - mock.ExpectQuery(sanitizeQuery(pgLocksQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGLocksCollector{} - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGLocksCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "test", "mode": "exclusivelock"}, value: 42, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_long_running_transactions.go b/collector/pg_long_running_transactions.go deleted file mode 100644 index d7d1e6d30..000000000 --- a/collector/pg_long_running_transactions.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
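A pattern worth noting in the tests above and below: Update always runs in a goroutine because ch is unbuffered, so every send inside the collector blocks until the test loop receives. A stripped-down illustration with a plain int channel (hypothetical, not from the test files):

```go
package main

import "fmt"

// produce stands in for Collector.Update: it sends on an unbuffered channel
// and therefore blocks until someone receives. Running it in a goroutine,
// as the deleted tests do, lets the main loop drain the channel; calling it
// inline would deadlock on the first send.
func produce(ch chan<- int) {
	for i := 0; i < 3; i++ {
		ch <- i
	}
	close(ch)
}

func main() {
	ch := make(chan int)
	go produce(ch)
	for v := range ch {
		fmt.Println("metric", v)
	}
}
```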
- -package collector - -import ( - "context" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const longRunningTransactionsSubsystem = "long_running_transactions" - -func init() { - registerCollector(longRunningTransactionsSubsystem, defaultDisabled, NewPGLongRunningTransactionsCollector) -} - -type PGLongRunningTransactionsCollector struct { - log *slog.Logger -} - -func NewPGLongRunningTransactionsCollector(config collectorConfig) (Collector, error) { - return &PGLongRunningTransactionsCollector{log: config.logger}, nil -} - -var ( - longRunningTransactionsCount = prometheus.NewDesc( - "pg_long_running_transactions", - "Current number of long running transactions", - []string{}, - prometheus.Labels{}, - ) - - longRunningTransactionsAgeInSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, longRunningTransactionsSubsystem, "oldest_timestamp_seconds"), - "The current maximum transaction age in seconds", - []string{}, - prometheus.Labels{}, - ) - - longRunningTransactionsQuery = ` - SELECT - COUNT(*) as transactions, - MAX(EXTRACT(EPOCH FROM clock_timestamp() - pg_stat_activity.xact_start)) AS oldest_timestamp_seconds -FROM pg_catalog.pg_stat_activity -WHERE state IS DISTINCT FROM 'idle' -AND query NOT LIKE 'autovacuum:%' -AND pg_stat_activity.xact_start IS NOT NULL; - ` -) - -func (PGLongRunningTransactionsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - longRunningTransactionsQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var transactions, ageInSeconds float64 - - if err := rows.Scan(&transactions, &ageInSeconds); err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - longRunningTransactionsCount, - prometheus.GaugeValue, - transactions, - ) - ch <- prometheus.MustNewConstMetric( - longRunningTransactionsAgeInSeconds, - prometheus.GaugeValue, - ageInSeconds, - ) - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_long_running_transactions_test.go b/collector/pg_long_running_transactions_test.go deleted file mode 100644 index eedda7c65..000000000 --- a/collector/pg_long_running_transactions_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGLongRunningTransactionsCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - inst := &instance{db: db} - columns := []string{ - "transactions", - "age_in_seconds", - } - rows := sqlmock.NewRows(columns). 
- AddRow(20, 1200) - - mock.ExpectQuery(sanitizeQuery(longRunningTransactionsQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGLongRunningTransactionsCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGLongRunningTransactionsCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{}, value: 20, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{}, value: 1200, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go deleted file mode 100644 index b81e4f905..000000000 --- a/collector/pg_postmaster.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -const postmasterSubsystem = "postmaster" - -func init() { - registerCollector(postmasterSubsystem, defaultDisabled, NewPGPostmasterCollector) -} - -type PGPostmasterCollector struct { -} - -func NewPGPostmasterCollector(collectorConfig) (Collector, error) { - return &PGPostmasterCollector{}, nil -} - -var ( - pgPostMasterStartTimeSeconds = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - postmasterSubsystem, - "start_time_seconds", - ), - "Time at which postmaster started", - []string{}, nil, - ) - - pgPostmasterQuery = "SELECT extract(epoch from pg_postmaster_start_time) from pg_postmaster_start_time();" -) - -func (c *PGPostmasterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - row := db.QueryRowContext(ctx, - pgPostmasterQuery) - - var startTimeSeconds sql.NullFloat64 - err := row.Scan(&startTimeSeconds) - if err != nil { - return err - } - startTimeSecondsMetric := 0.0 - if startTimeSeconds.Valid { - startTimeSecondsMetric = startTimeSeconds.Float64 - } - ch <- prometheus.MustNewConstMetric( - pgPostMasterStartTimeSeconds, - prometheus.GaugeValue, startTimeSecondsMetric, - ) - return nil -} diff --git a/collector/pg_postmaster_test.go b/collector/pg_postmaster_test.go deleted file mode 100644 index 8405b4225..000000000 --- a/collector/pg_postmaster_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
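The pg_postmaster_start_time_seconds gauge produced above is the raw epoch value; uptime is left to the consumer (in PromQL, typically time() - pg_postmaster_start_time_seconds). A hypothetical sketch of the same derivation in Go, with the scraped value hard-coded:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for a scraped pg_postmaster_start_time_seconds sample
	// (epoch seconds; this value is the one used in the test above).
	startTimeSeconds := 1685739904.0

	start := time.Unix(int64(startTimeSeconds), 0)
	fmt.Printf("postmaster started %s ago\n", time.Since(start).Round(time.Second))
}
```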
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgPostmasterCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}). - AddRow(1685739904)) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGPostmasterCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, value: 1685739904, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPgPostmasterCollectorNullTime(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - mock.ExpectQuery(sanitizeQuery(pgPostmasterQuery)).WillReturnRows(sqlmock.NewRows([]string{"pg_postmaster_start_time"}). - AddRow(nil)) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGPostmasterCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, value: 0, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go deleted file mode 100644 index 7f3ff6f0f..000000000 --- a/collector/pg_process_idle.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/lib/pq" - "github.com/prometheus/client_golang/prometheus" -) - -func init() { - // Making this default disabled because we have no tests for it - registerCollector(processIdleSubsystem, defaultDisabled, NewPGProcessIdleCollector) -} - -type PGProcessIdleCollector struct { - log *slog.Logger -} - -const processIdleSubsystem = "process_idle" - -func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { - return &PGProcessIdleCollector{log: config.logger}, nil -} - -var pgProcessIdleSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), - "Idle time of server processes", - []string{"state", "application_name"}, - prometheus.Labels{}, -) - -func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - row := db.QueryRowContext(ctx, - `WITH - metrics AS ( - SELECT - state, - application_name, - SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_idle_seconds_sum, - COUNT(*) AS process_idle_seconds_count - FROM pg_stat_activity - WHERE state ~ '^idle' - GROUP BY state, application_name - ), - buckets AS ( - SELECT - state, - application_name, - le, - SUM( - CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le - THEN 1 - ELSE 0 - END - )::bigint AS bucket - FROM - pg_stat_activity, - UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le - GROUP BY state, application_name, le - ORDER BY state, application_name, le - ) - SELECT - state, - application_name, - process_idle_seconds_sum as seconds_sum, - process_idle_seconds_count as seconds_count, - ARRAY_AGG(le) AS seconds, - ARRAY_AGG(bucket) AS seconds_bucket - FROM metrics JOIN buckets USING (state, application_name) - GROUP BY 1, 2, 3, 4;`) - - var state sql.NullString - var applicationName sql.NullString - var secondsSum sql.NullFloat64 - var secondsCount sql.NullInt64 - var seconds []float64 - var secondsBucket []int64 - - err := row.Scan(&state, &applicationName, &secondsSum, &secondsCount, pq.Array(&seconds), pq.Array(&secondsBucket)) - if err != nil { - return err - } - - var buckets = make(map[float64]uint64, len(seconds)) - for i, second := range seconds { - if i >= len(secondsBucket) { - break - } - buckets[second] = uint64(secondsBucket[i]) - } - - stateLabel := "unknown" - if state.Valid { - stateLabel = state.String - } - - applicationNameLabel := "unknown" - if applicationName.Valid { - applicationNameLabel = applicationName.String - } - - var secondsCountMetric uint64 - if secondsCount.Valid { - secondsCountMetric = uint64(secondsCount.Int64) - } - secondsSumMetric := 0.0 - if secondsSum.Valid { - secondsSumMetric = secondsSum.Float64 - } - ch <- prometheus.MustNewConstHistogram( - pgProcessIdleSeconds, - secondsCountMetric, secondsSumMetric, buckets, - stateLabel, applicationNameLabel, - ) - return nil -} diff --git a/collector/pg_replication.go b/collector/pg_replication.go deleted file mode 100644 index 7f8b2fbd7..000000000 --- a/collector/pg_replication.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
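The process_idle query above does the histogram bucketing in SQL and ships the le cut-points and cumulative counts back as two parallel arrays; the Go side only zips them into the map MustNewConstHistogram expects. A reduced sketch of that zip step, with hard-coded arrays standing in for the values scanned via pq.Array:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var idleDesc = prometheus.NewDesc(
	"pg_process_idle_seconds",
	"Idle time of server processes",
	[]string{"state", "application_name"},
	nil,
)

func main() {
	// Stand-ins for the scanned seconds / seconds_bucket arrays. The SQL
	// already produces cumulative counts per upper bound.
	seconds := []float64{1, 2, 5, 15, 30, 60, 90, 120, 300}
	secondsBucket := []int64{0, 1, 1, 2, 4, 4, 5, 5, 5}

	// Zip the parallel arrays into the cumulative-bucket map that
	// MustNewConstHistogram expects: upper bound -> cumulative count.
	buckets := make(map[float64]uint64, len(seconds))
	for i, le := range seconds {
		if i >= len(secondsBucket) {
			break
		}
		buckets[le] = uint64(secondsBucket[i])
	}

	m := prometheus.MustNewConstHistogram(idleDesc, 5, 437.0, buckets, "idle", "psql")
	fmt.Println(m.Desc())
}
```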
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" -) - -const replicationSubsystem = "replication" - -func init() { - registerCollector(replicationSubsystem, defaultEnabled, NewPGReplicationCollector) -} - -type PGReplicationCollector struct { -} - -func NewPGReplicationCollector(collectorConfig) (Collector, error) { - return &PGReplicationCollector{}, nil -} - -var ( - pgReplicationLag = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSubsystem, - "lag_seconds", - ), - "Replication lag behind master in seconds", - []string{}, nil, - ) - pgReplicationIsReplica = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSubsystem, - "is_replica", - ), - "Indicates if the server is a replica", - []string{}, nil, - ) - pgReplicationLastReplay = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSubsystem, - "last_replay_seconds", - ), - "Age of last replay in seconds", - []string{}, nil, - ) - - pgReplicationQuery = `SELECT - CASE - WHEN NOT pg_is_in_recovery() THEN 0 - WHEN pg_last_wal_receive_lsn () = pg_last_wal_replay_lsn () THEN 0 - ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) - END AS lag, - CASE - WHEN pg_is_in_recovery() THEN 1 - ELSE 0 - END as is_replica, - GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) as last_replay` -) - -func (c *PGReplicationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - row := db.QueryRowContext(ctx, - pgReplicationQuery, - ) - - var lag float64 - var isReplica int64 - var replayAge float64 - err := row.Scan(&lag, &isReplica, &replayAge) - if err != nil { - return err - } - ch <- prometheus.MustNewConstMetric( - pgReplicationLag, - prometheus.GaugeValue, lag, - ) - ch <- prometheus.MustNewConstMetric( - pgReplicationIsReplica, - prometheus.GaugeValue, float64(isReplica), - ) - ch <- prometheus.MustNewConstMetric( - pgReplicationLastReplay, - prometheus.GaugeValue, replayAge, - ) - return nil -} diff --git a/collector/pg_replication_slot.go b/collector/pg_replication_slot.go deleted file mode 100644 index e6c9773eb..000000000 --- a/collector/pg_replication_slot.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
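The replication-slot collector below switches between two queries by server version, because pg_replication_slots only gained the safe_wal_size and wal_status columns in PostgreSQL 13. A minimal sketch of that semver gate, assuming the same blang/semver/v4 package the file imports:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// pickSlotQuery mirrors the version gate in the collector below: servers
// older than 13 must use the narrower query without safe_wal_size and
// wal_status.
func pickSlotQuery(v semver.Version) string {
	if v.GTE(semver.MustParse("13.0.0")) {
		return "new query (includes safe_wal_size, wal_status)"
	}
	return "old query"
}

func main() {
	for _, raw := range []string{"9.6.2", "13.3.7"} {
		fmt.Println(raw, "->", pickSlotQuery(semver.MustParse(raw)))
	}
}
```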
- -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -const replicationSlotSubsystem = "replication_slot" - -func init() { - registerCollector(replicationSlotSubsystem, defaultEnabled, NewPGReplicationSlotCollector) -} - -type PGReplicationSlotCollector struct { - log *slog.Logger -} - -func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) { - return &PGReplicationSlotCollector{log: config.logger}, nil -} - -var ( - pgReplicationSlotCurrentWalDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_current_wal_lsn", - ), - "current wal lsn value", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_confirmed_flush_lsn", - ), - "last lsn confirmed flushed to the replication slot", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotIsActiveDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_is_active", - ), - "whether the replication slot is active or not", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotSafeWal = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "safe_wal_size_bytes", - ), - "number of bytes that can be written to WAL such that this slot is not in danger of getting in state lost", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotWalStatus = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "wal_status", - ), - "availability of WAL files claimed by this slot", - []string{"slot_name", "slot_type", "wal_status"}, nil, - ) - pgReplicationSlotQuery = `SELECT - slot_name, - slot_type, - CASE WHEN pg_is_in_recovery() THEN - pg_last_wal_receive_lsn() - '0/0' - ELSE - pg_current_wal_lsn() - '0/0' - END AS current_wal_lsn, - COALESCE(confirmed_flush_lsn, '0/0') - '0/0' AS confirmed_flush_lsn, - active - FROM pg_replication_slots;` - pgReplicationSlotNewQuery = `SELECT - slot_name, - slot_type, - CASE WHEN pg_is_in_recovery() THEN - pg_last_wal_receive_lsn() - '0/0' - ELSE - pg_current_wal_lsn() - '0/0' - END AS current_wal_lsn, - COALESCE(confirmed_flush_lsn, '0/0') - '0/0' AS confirmed_flush_lsn, - active, - safe_wal_size, - wal_status - FROM pg_replication_slots;` -) - -func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - query := pgReplicationSlotQuery - abovePG13 := instance.version.GTE(semver.MustParse("13.0.0")) - if abovePG13 { - query = pgReplicationSlotNewQuery - } - - db := instance.getDB() - rows, err := db.QueryContext(ctx, - query) - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var slotName sql.NullString - var slotType sql.NullString - var walLSN sql.NullFloat64 - var flushLSN sql.NullFloat64 - var isActive sql.NullBool - var safeWalSize sql.NullInt64 - var walStatus sql.NullString - - r := []any{ - &slotName, - &slotType, - &walLSN, - &flushLSN, - &isActive, - } - - if abovePG13 { - r = append(r, &safeWalSize) - r = append(r, &walStatus) - } - - err := rows.Scan(r...) 
- if err != nil { - return err - } - - isActiveValue := 0.0 - if isActive.Valid && isActive.Bool { - isActiveValue = 1.0 - } - slotNameLabel := "unknown" - if slotName.Valid { - slotNameLabel = slotName.String - } - slotTypeLabel := "unknown" - if slotType.Valid { - slotTypeLabel = slotType.String - } - - var walLSNMetric float64 - if walLSN.Valid { - walLSNMetric = walLSN.Float64 - } - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentWalDesc, - prometheus.GaugeValue, walLSNMetric, slotNameLabel, slotTypeLabel, - ) - if isActive.Valid && isActive.Bool { - var flushLSNMetric float64 - if flushLSN.Valid { - flushLSNMetric = flushLSN.Float64 - } - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentFlushDesc, - prometheus.GaugeValue, flushLSNMetric, slotNameLabel, slotTypeLabel, - ) - } - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotIsActiveDesc, - prometheus.GaugeValue, isActiveValue, slotNameLabel, slotTypeLabel, - ) - - if safeWalSize.Valid { - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotSafeWal, - prometheus.GaugeValue, float64(safeWalSize.Int64), slotNameLabel, slotTypeLabel, - ) - } - - if walStatus.Valid { - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotWalStatus, - prometheus.GaugeValue, 1, slotNameLabel, slotTypeLabel, walStatus.String, - ) - } - } - return rows.Err() -} diff --git a/collector/pg_replication_slot_test.go b/collector/pg_replication_slot_test.go deleted file mode 100644 index 981b5db62..000000000 --- a/collector/pg_replication_slot_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgReplicationSlotCollectorActive(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} - rows := sqlmock.NewRows(columns). 
- AddRow("test_slot", "physical", 5, 3, true, 323906992, "reserved") - mock.ExpectQuery(sanitizeQuery(pgReplicationSlotNewQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationSlotCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 5, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 3, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 1, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 323906992, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "reserved"}, value: 1, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPgReplicationSlotCollectorInActive(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} - rows := sqlmock.NewRows(columns). - AddRow("test_slot", "physical", 6, 12, false, -4000, "extended") - mock.ExpectQuery(sanitizeQuery(pgReplicationSlotNewQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationSlotCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 6, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: -4000, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "extended"}, value: 1, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } - -} - -func TestPgReplicationSlotCollectorActiveNil(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} - rows := sqlmock.NewRows(columns). 
- AddRow("test_slot", "physical", 6, 12, nil, nil, "lost") - mock.ExpectQuery(sanitizeQuery(pgReplicationSlotNewQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationSlotCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 6, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "lost"}, value: 1, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPgReplicationSlotCollectorTestNilValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} - rows := sqlmock.NewRows(columns). - AddRow(nil, nil, nil, nil, true, nil, nil) - mock.ExpectQuery(sanitizeQuery(pgReplicationSlotNewQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationSlotCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"slot_name": "unknown", "slot_type": "unknown"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "unknown", "slot_type": "unknown"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "unknown", "slot_type": "unknown"}, value: 1, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go deleted file mode 100644 index a48e9fd69..000000000 --- a/collector/pg_replication_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgReplicationCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{"lag", "is_replica", "last_replay"} - rows := sqlmock.NewRows(columns). - AddRow(1000, 1, 3) - mock.ExpectQuery(sanitizeQuery(pgReplicationQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGReplicationCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGReplicationCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, value: 1000, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{}, value: 1, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{}, value: 3, metricType: dto.MetricType_GAUGE}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_roles.go b/collector/pg_roles.go deleted file mode 100644 index 626dbb44f..000000000 --- a/collector/pg_roles.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const rolesSubsystem = "roles" - -func init() { - registerCollector(rolesSubsystem, defaultEnabled, NewPGRolesCollector) -} - -type PGRolesCollector struct { - log *slog.Logger -} - -func NewPGRolesCollector(config collectorConfig) (Collector, error) { - return &PGRolesCollector{ - log: config.logger, - }, nil -} - -var ( - pgRolesConnectionLimitsDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - rolesSubsystem, - "connection_limit", - ), - "Connection limit set for the role", - []string{"rolname"}, nil, - ) - - pgRolesConnectionLimitsQuery = "SELECT pg_roles.rolname, pg_roles.rolconnlimit FROM pg_roles" -) - -// Update implements Collector and exposes roles connection limits. -// It is called by the Prometheus registry when collecting metrics. 
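-// The query below deliberately skips rows whose rolname or rolconnlimit
-// scans as NULL: unlike most collectors in this package, it drops the row
-// entirely instead of exporting a placeholder value.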
-func (c PGRolesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - // Query the list of databases - rows, err := db.QueryContext(ctx, - pgRolesConnectionLimitsQuery, - ) - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var rolname sql.NullString - var connLimit sql.NullInt64 - if err := rows.Scan(&rolname, &connLimit); err != nil { - return err - } - - if !rolname.Valid { - continue - } - rolnameLabel := rolname.String - - if !connLimit.Valid { - continue - } - connLimitMetric := float64(connLimit.Int64) - - ch <- prometheus.MustNewConstMetric( - pgRolesConnectionLimitsDesc, - prometheus.GaugeValue, connLimitMetric, rolnameLabel, - ) - } - - return rows.Err() -} diff --git a/collector/pg_roles_test.go b/collector/pg_roles_test.go deleted file mode 100644 index 182a120f9..000000000 --- a/collector/pg_roles_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGRolesCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - mock.ExpectQuery(sanitizeQuery(pgRolesConnectionLimitsQuery)).WillReturnRows(sqlmock.NewRows([]string{"rolname", "rolconnlimit"}). - AddRow("postgres", 15)) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGRolesCollector{} - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGRolesCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"rolname": "postgres"}, value: 15, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_stat_activity_autovacuum.go b/collector/pg_stat_activity_autovacuum.go deleted file mode 100644 index f08029d18..000000000 --- a/collector/pg_stat_activity_autovacuum.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const statActivityAutovacuumSubsystem = "stat_activity_autovacuum" - -func init() { - registerCollector(statActivityAutovacuumSubsystem, defaultDisabled, NewPGStatActivityAutovacuumCollector) -} - -type PGStatActivityAutovacuumCollector struct { - log *slog.Logger -} - -func NewPGStatActivityAutovacuumCollector(config collectorConfig) (Collector, error) { - return &PGStatActivityAutovacuumCollector{log: config.logger}, nil -} - -var ( - statActivityAutovacuumAgeInSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statActivityAutovacuumSubsystem, "timestamp_seconds"), - "Start timestamp of the vacuum process in seconds", - []string{"relname"}, - prometheus.Labels{}, - ) - - statActivityAutovacuumQuery = ` - SELECT - SPLIT_PART(query, '.', 2) AS relname, - EXTRACT(EPOCH FROM xact_start) AS timestamp_seconds - FROM - pg_catalog.pg_stat_activity - WHERE - query LIKE 'autovacuum:%' - ` -) - -func (PGStatActivityAutovacuumCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - statActivityAutovacuumQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var relname string - var ageInSeconds float64 - - if err := rows.Scan(&relname, &ageInSeconds); err != nil { - return err - } - - ch <- prometheus.MustNewConstMetric( - statActivityAutovacuumAgeInSeconds, - prometheus.GaugeValue, - ageInSeconds, relname, - ) - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_stat_activity_autovacuum_test.go b/collector/pg_stat_activity_autovacuum_test.go deleted file mode 100644 index a6fcdbcad..000000000 --- a/collector/pg_stat_activity_autovacuum_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatActivityAutovacuumCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - inst := &instance{db: db} - columns := []string{ - "relname", - "timestamp_seconds", - } - rows := sqlmock.NewRows(columns). 
- AddRow("test", 3600) - - mock.ExpectQuery(sanitizeQuery(statActivityAutovacuumQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatActivityAutovacuumCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatActivityAutovacuumCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"relname": "test"}, value: 3600, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go deleted file mode 100644 index 6e3bd09cb..000000000 --- a/collector/pg_stat_bgwriter.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -const bgWriterSubsystem = "stat_bgwriter" - -func init() { - registerCollector(bgWriterSubsystem, defaultEnabled, NewPGStatBGWriterCollector) -} - -type PGStatBGWriterCollector struct { -} - -func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) { - return &PGStatBGWriterCollector{}, nil -} - -var ( - statBGWriterCheckpointsTimedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), - "Number of scheduled checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), - "Number of requested checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCheckpointDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), - "Number of buffers written during checkpoints", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), - 
"Number of buffers written by the background writer", - []string{}, - prometheus.Labels{}, - ) - statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), - "Number of times the background writer stopped a cleaning scan because it had written too many buffers", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), - "Number of buffers written directly by a backend", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), - "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersAllocDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), - "Number of buffers allocated", - []string{}, - prometheus.Labels{}, - ) - statBGWriterStatsResetDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), - "Time at which these statistics were last reset", - []string{}, - prometheus.Labels{}, - ) - - statBGWriterQueryBefore17 = `SELECT - checkpoints_timed - ,checkpoints_req - ,checkpoint_write_time - ,checkpoint_sync_time - ,buffers_checkpoint - ,buffers_clean - ,maxwritten_clean - ,buffers_backend - ,buffers_backend_fsync - ,buffers_alloc - ,stats_reset - FROM pg_stat_bgwriter;` - - statBGWriterQueryAfter17 = `SELECT - buffers_clean - ,maxwritten_clean - ,buffers_alloc - ,stats_reset - FROM pg_stat_bgwriter;` -) - -func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - if instance.version.GE(semver.MustParse("17.0.0")) { - db := instance.getDB() - row := db.QueryRowContext(ctx, statBGWriterQueryAfter17) - - var bc, mwc, ba sql.NullInt64 - var sr sql.NullTime - - err := row.Scan(&bc, &mwc, &ba, &sr) - if err != nil { - return err - } - - bcMetric := 0.0 - if bc.Valid { - bcMetric = float64(bc.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCleanDesc, - prometheus.CounterValue, - bcMetric, - ) - mwcMetric := 0.0 - if mwc.Valid { - mwcMetric = float64(mwc.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterMaxwrittenCleanDesc, - prometheus.CounterValue, - mwcMetric, - ) - baMetric := 0.0 - if ba.Valid { - baMetric = float64(ba.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersAllocDesc, - prometheus.CounterValue, - baMetric, - ) - srMetric := 0.0 - if sr.Valid { - srMetric = float64(sr.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterStatsResetDesc, - prometheus.CounterValue, - srMetric, - ) - } else { - db := instance.getDB() - row := db.QueryRowContext(ctx, statBGWriterQueryBefore17) - - var cpt, cpr, bcp, bc, mwc, bb, bbf, ba sql.NullInt64 - var cpwt, cpst sql.NullFloat64 - var sr sql.NullTime - - err := row.Scan(&cpt, &cpr, &cpwt, &cpst, &bcp, &bc, &mwc, &bb, &bbf, &ba, &sr) - if err != nil { - return err - } - - cptMetric := 0.0 - if cpt.Valid { - cptMetric = float64(cpt.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsTimedDesc, - prometheus.CounterValue, - cptMetric, - ) - cprMetric := 0.0 - if cpr.Valid { - cprMetric = float64(cpr.Int64) - } - ch <- 
prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqDesc, - prometheus.CounterValue, - cprMetric, - ) - cpwtMetric := 0.0 - if cpwt.Valid { - cpwtMetric = float64(cpwt.Float64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqTimeDesc, - prometheus.CounterValue, - cpwtMetric, - ) - cpstMetric := 0.0 - if cpst.Valid { - cpstMetric = float64(cpst.Float64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsSyncTimeDesc, - prometheus.CounterValue, - cpstMetric, - ) - bcpMetric := 0.0 - if bcp.Valid { - bcpMetric = float64(bcp.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCheckpointDesc, - prometheus.CounterValue, - bcpMetric, - ) - bcMetric := 0.0 - if bc.Valid { - bcMetric = float64(bc.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCleanDesc, - prometheus.CounterValue, - bcMetric, - ) - mwcMetric := 0.0 - if mwc.Valid { - mwcMetric = float64(mwc.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterMaxwrittenCleanDesc, - prometheus.CounterValue, - mwcMetric, - ) - bbMetric := 0.0 - if bb.Valid { - bbMetric = float64(bb.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendDesc, - prometheus.CounterValue, - bbMetric, - ) - bbfMetric := 0.0 - if bbf.Valid { - bbfMetric = float64(bbf.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendFsyncDesc, - prometheus.CounterValue, - bbfMetric, - ) - baMetric := 0.0 - if ba.Valid { - baMetric = float64(ba.Int64) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersAllocDesc, - prometheus.CounterValue, - baMetric, - ) - srMetric := 0.0 - if sr.Valid { - srMetric = float64(sr.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statBGWriterStatsResetDesc, - prometheus.CounterValue, - srMetric, - ) - } - - return nil -} diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go deleted file mode 100644 index 6fde2fb6a..000000000 --- a/collector/pg_stat_bgwriter_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
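The bgwriter collector above selects its query by server version because PostgreSQL 17 moved the checkpoint and backend-write counters out of pg_stat_bgwriter and into pg_stat_checkpointer. The selection idiom, isolated into a sketch (pickBGWriterQuery is illustrative, not an exporter function, and the column lists are abbreviated):

```go
package main

import "github.com/blang/semver/v4"

// pickBGWriterQuery returns the column list that matches the server's
// catalog layout; PostgreSQL 17 dropped most bgwriter counters.
func pickBGWriterQuery(version semver.Version) string {
	if version.GTE(semver.MustParse("17.0.0")) {
		return "SELECT buffers_clean, maxwritten_clean, buffers_alloc, stats_reset FROM pg_stat_bgwriter"
	}
	// Pre-17 servers still expose the checkpoint and backend columns here.
	return "SELECT checkpoints_timed, checkpoints_req, buffers_clean, buffers_alloc, stats_reset FROM pg_stat_bgwriter"
}
```

Keeping the version check next to the query, as Update does above, means the Scan destinations and the column list can be reviewed side by side.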
-package collector - -import ( - "context" - "testing" - "time" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatBGWriterCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{ - "checkpoints_timed", - "checkpoints_req", - "checkpoint_write_time", - "checkpoint_sync_time", - "buffers_checkpoint", - "buffers_clean", - "maxwritten_clean", - "buffers_backend", - "buffers_backend_fsync", - "buffers_alloc", - "stats_reset"} - - srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") - if err != nil { - t.Fatalf("Error parsing time: %s", err) - } - - rows := sqlmock.NewRows(columns). - AddRow(354, 4945, 289097744, 1242257, int64(3275602074), 89320867, 450139, 2034563757, 0, int64(2725688749), srT) - mock.ExpectQuery(sanitizeQuery(statBGWriterQueryBefore17)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatBGWriterCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 354}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2725688749}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStatBGWriterCollectorNullValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{ - "checkpoints_timed", - "checkpoints_req", - "checkpoint_write_time", - "checkpoint_sync_time", - "buffers_checkpoint", - "buffers_clean", - "maxwritten_clean", - "buffers_backend", - "buffers_backend_fsync", - "buffers_alloc", - "stats_reset"} - - rows := sqlmock.NewRows(columns). 
-		AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
-	mock.ExpectQuery(sanitizeQuery(statBGWriterQueryBefore17)).WillReturnRows(rows)
-
-	ch := make(chan prometheus.Metric)
-	go func() {
-		defer close(ch)
-		c := PGStatBGWriterCollector{}
-
-		if err := c.Update(context.Background(), inst, ch); err != nil {
-			t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err)
-		}
-	}()
-
-	expected := []MetricResult{
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0},
-	}
-
-	convey.Convey("Metrics comparison", t, func() {
-		for _, expect := range expected {
-			m := readMetric(<-ch)
-			convey.So(expect, convey.ShouldResemble, m)
-		}
-	})
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
diff --git a/collector/pg_stat_checkpointer.go b/collector/pg_stat_checkpointer.go
deleted file mode 100644
index 31e9c5d62..000000000
--- a/collector/pg_stat_checkpointer.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
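The checkpointer collector that follows repeats one conversion many times: scan into sql.NullInt64, sql.NullFloat64, or sql.NullTime, then fall back to 0 when the column is NULL so a metric is still emitted. Helpers like these would capture that convention (a sketch only; the exporter inlines the checks instead):

```go
package main

import "database/sql"

// nullInt64ToFloat returns the scanned value, or 0 when the column was NULL,
// matching how the collectors in this package treat missing statistics.
func nullInt64ToFloat(v sql.NullInt64) float64 {
	if v.Valid {
		return float64(v.Int64)
	}
	return 0
}

// nullTimeToUnix converts a stats_reset-style timestamp to a Unix epoch
// expressed as float64, again defaulting to 0 for NULL.
func nullTimeToUnix(v sql.NullTime) float64 {
	if v.Valid {
		return float64(v.Time.Unix())
	}
	return 0
}
```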
- -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -const statCheckpointerSubsystem = "stat_checkpointer" - -func init() { - // WARNING: - // Disabled by default because this set of metrics is only available from Postgres 17 - registerCollector(statCheckpointerSubsystem, defaultDisabled, NewPGStatCheckpointerCollector) -} - -type PGStatCheckpointerCollector struct { - log *slog.Logger -} - -func NewPGStatCheckpointerCollector(config collectorConfig) (Collector, error) { - return &PGStatCheckpointerCollector{log: config.logger}, nil -} - -var ( - statCheckpointerNumTimedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "num_timed_total"), - "Number of scheduled checkpoints due to timeout", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerNumRequestedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "num_requested_total"), - "Number of requested checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerRestartpointsTimedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_timed_total"), - "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerRestartpointsReqDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_req_total"), - "Number of requested restartpoints", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerRestartpointsDoneDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "restartpoints_done_total"), - "Number of restartpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerWriteTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "write_time_total"), - "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerSyncTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "sync_time_total"), - "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerBuffersWrittenDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "buffers_written_total"), - "Number of buffers written during checkpoints and restartpoints", - []string{}, - prometheus.Labels{}, - ) - statCheckpointerStatsResetDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statCheckpointerSubsystem, "stats_reset_total"), - "Time at which these statistics were last reset", - []string{}, - prometheus.Labels{}, - ) - - statCheckpointerQuery = `SELECT - num_timed - ,num_requested - ,restartpoints_timed - ,restartpoints_req - ,restartpoints_done - ,write_time - ,sync_time - ,buffers_written - ,stats_reset - FROM pg_stat_checkpointer;` -) - -func (c PGStatCheckpointerCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - - before17 := instance.version.LT(semver.MustParse("17.0.0")) - if before17 { - 
c.log.Warn("pg_stat_checkpointer collector is not available on PostgreSQL < 17.0.0, skipping") - return nil - } - - row := db.QueryRowContext(ctx, statCheckpointerQuery) - - // num_timed = nt = bigint - // num_requested = nr = bigint - // restartpoints_timed = rpt = bigint - // restartpoints_req = rpr = bigint - // restartpoints_done = rpd = bigint - // write_time = wt = double precision - // sync_time = st = double precision - // buffers_written = bw = bigint - // stats_reset = sr = timestamp - - var nt, nr, rpt, rpr, rpd, bw sql.NullInt64 - var wt, st sql.NullFloat64 - var sr sql.NullTime - - err := row.Scan(&nt, &nr, &rpt, &rpr, &rpd, &wt, &st, &bw, &sr) - if err != nil { - return err - } - - ntMetric := 0.0 - if nt.Valid { - ntMetric = float64(nt.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerNumTimedDesc, - prometheus.CounterValue, - ntMetric, - ) - - nrMetric := 0.0 - if nr.Valid { - nrMetric = float64(nr.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerNumRequestedDesc, - prometheus.CounterValue, - nrMetric, - ) - - rptMetric := 0.0 - if rpt.Valid { - rptMetric = float64(rpt.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerRestartpointsTimedDesc, - prometheus.CounterValue, - rptMetric, - ) - - rprMetric := 0.0 - if rpr.Valid { - rprMetric = float64(rpr.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerRestartpointsReqDesc, - prometheus.CounterValue, - rprMetric, - ) - - rpdMetric := 0.0 - if rpd.Valid { - rpdMetric = float64(rpd.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerRestartpointsDoneDesc, - prometheus.CounterValue, - rpdMetric, - ) - - wtMetric := 0.0 - if wt.Valid { - wtMetric = float64(wt.Float64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerWriteTimeDesc, - prometheus.CounterValue, - wtMetric, - ) - - stMetric := 0.0 - if st.Valid { - stMetric = float64(st.Float64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerSyncTimeDesc, - prometheus.CounterValue, - stMetric, - ) - - bwMetric := 0.0 - if bw.Valid { - bwMetric = float64(bw.Int64) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerBuffersWrittenDesc, - prometheus.CounterValue, - bwMetric, - ) - - srMetric := 0.0 - if sr.Valid { - srMetric = float64(sr.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statCheckpointerStatsResetDesc, - prometheus.CounterValue, - srMetric, - ) - - return nil -} diff --git a/collector/pg_stat_checkpointer_test.go b/collector/pg_stat_checkpointer_test.go deleted file mode 100644 index 9a8dd7f21..000000000 --- a/collector/pg_stat_checkpointer_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package collector - -import ( - "context" - "testing" - "time" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatCheckpointerCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("17.0.0")} - - columns := []string{ - "num_timed", - "num_requested", - "restartpoints_timed", - "restartpoints_req", - "restartpoints_done", - "write_time", - "sync_time", - "buffers_written", - "stats_reset"} - - srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") - if err != nil { - t.Fatalf("Error parsing time: %s", err) - } - - rows := sqlmock.NewRows(columns). - AddRow(354, 4945, 289097744, 1242257, int64(3275602074), 89320867, 450139, 2034563757, srT) - mock.ExpectQuery(sanitizeQuery(statCheckpointerQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatCheckpointerCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatCheckpointerCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 354}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStatCheckpointerCollectorNullValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("17.0.0")} - - columns := []string{ - "num_timed", - "num_requested", - "restartpoints_timed", - "restartpoints_req", - "restartpoints_done", - "write_time", - "sync_time", - "buffers_written", - "stats_reset"} - - rows := sqlmock.NewRows(columns). 
- AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil) - mock.ExpectQuery(sanitizeQuery(statCheckpointerQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatCheckpointerCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatCheckpointerCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{}, metricType: dto.MetricType_COUNTER, value: 0}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_stat_database.go b/collector/pg_stat_database.go deleted file mode 100644 index b9210740f..000000000 --- a/collector/pg_stat_database.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "fmt" - "log/slog" - "strings" - - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -const statDatabaseSubsystem = "stat_database" - -func init() { - registerCollector(statDatabaseSubsystem, defaultEnabled, NewPGStatDatabaseCollector) -} - -type PGStatDatabaseCollector struct { - log *slog.Logger -} - -func NewPGStatDatabaseCollector(config collectorConfig) (Collector, error) { - return &PGStatDatabaseCollector{log: config.logger}, nil -} - -var ( - statDatabaseNumbackends = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "numbackends", - ), - "Number of backends currently connected to this database. 
This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseXactCommit = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "xact_commit", - ), - "Number of transactions in this database that have been committed", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseXactRollback = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "xact_rollback", - ), - "Number of transactions in this database that have been rolled back", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlksRead = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blks_read", - ), - "Number of disk blocks read in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlksHit = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blks_hit", - ), - "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupReturned = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_returned", - ), - "Number of rows returned by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupFetched = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_fetched", - ), - "Number of rows fetched by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupInserted = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_inserted", - ), - "Number of rows inserted by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupUpdated = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_updated", - ), - "Number of rows updated by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupDeleted = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_deleted", - ), - "Number of rows deleted by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseConflicts = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "conflicts", - ), - "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTempFiles = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "temp_files", - ), - "Number of temporary files created by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTempBytes = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "temp_bytes", - ), - "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseDeadlocks = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "deadlocks", - ), - "Number of deadlocks detected in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlkReadTime = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blk_read_time", - ), - "Time spent reading data file blocks by backends in this database, in milliseconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlkWriteTime = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blk_write_time", - ), - "Time spent writing data file blocks by backends in this database, in milliseconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseStatsReset = prometheus.NewDesc(prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "stats_reset", - ), - "Time at which these statistics were last reset", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "active_time_seconds_total", - ), - "Time spent executing SQL statements in this database, in seconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) -) - -func statDatabaseQuery(columns []string) string { - return fmt.Sprintf("SELECT %s FROM pg_stat_database;", strings.Join(columns, ",")) -} - -func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - - columns := []string{ - "datid", - "datname", - "numbackends", - "xact_commit", - "xact_rollback", - "blks_read", - "blks_hit", - "tup_returned", - "tup_fetched", - "tup_inserted", - "tup_updated", - "tup_deleted", - "conflicts", - "temp_files", - "temp_bytes", - "deadlocks", - "blk_read_time", - "blk_write_time", - "stats_reset", - } - - activeTimeAvail := instance.version.GTE(semver.MustParse("14.0.0")) - if activeTimeAvail { - columns = append(columns, "active_time") - } - - rows, err := db.QueryContext(ctx, - statDatabaseQuery(columns), - ) - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var datid, datname sql.NullString - var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime, activeTime sql.NullFloat64 - var statsReset sql.NullTime - - r := []any{ - &datid, - &datname, - &numBackends, - &xactCommit, - &xactRollback, - &blksRead, - &blksHit, - &tupReturned, - &tupFetched, - &tupInserted, - &tupUpdated, - &tupDeleted, - &conflicts, - &tempFiles, - &tempBytes, - &deadlocks, - &blkReadTime, - &blkWriteTime, - &statsReset, - } - - if activeTimeAvail { - r = append(r, &activeTime) - } - - err := rows.Scan(r...) 
- if err != nil { - return err - } - - if !datid.Valid { - c.log.Debug("Skipping collecting metric because it has no datid") - continue - } - if !datname.Valid { - c.log.Debug("Skipping collecting metric because it has no datname") - continue - } - if !numBackends.Valid { - c.log.Debug("Skipping collecting metric because it has no numbackends") - continue - } - if !xactCommit.Valid { - c.log.Debug("Skipping collecting metric because it has no xact_commit") - continue - } - if !xactRollback.Valid { - c.log.Debug("Skipping collecting metric because it has no xact_rollback") - continue - } - if !blksRead.Valid { - c.log.Debug("Skipping collecting metric because it has no blks_read") - continue - } - if !blksHit.Valid { - c.log.Debug("Skipping collecting metric because it has no blks_hit") - continue - } - if !tupReturned.Valid { - c.log.Debug("Skipping collecting metric because it has no tup_returned") - continue - } - if !tupFetched.Valid { - c.log.Debug("Skipping collecting metric because it has no tup_fetched") - continue - } - if !tupInserted.Valid { - c.log.Debug("Skipping collecting metric because it has no tup_inserted") - continue - } - if !tupUpdated.Valid { - c.log.Debug("Skipping collecting metric because it has no tup_updated") - continue - } - if !tupDeleted.Valid { - c.log.Debug("Skipping collecting metric because it has no tup_deleted") - continue - } - if !conflicts.Valid { - c.log.Debug("Skipping collecting metric because it has no conflicts") - continue - } - if !tempFiles.Valid { - c.log.Debug("Skipping collecting metric because it has no temp_files") - continue - } - if !tempBytes.Valid { - c.log.Debug("Skipping collecting metric because it has no temp_bytes") - continue - } - if !deadlocks.Valid { - c.log.Debug("Skipping collecting metric because it has no deadlocks") - continue - } - if !blkReadTime.Valid { - c.log.Debug("Skipping collecting metric because it has no blk_read_time") - continue - } - if !blkWriteTime.Valid { - c.log.Debug("Skipping collecting metric because it has no blk_write_time") - continue - } - if activeTimeAvail && !activeTime.Valid { - c.log.Debug("Skipping collecting metric because it has no active_time") - continue - } - - statsResetMetric := 0.0 - if !statsReset.Valid { - c.log.Debug("No metric for stats_reset, will collect 0 instead") - } - if statsReset.Valid { - statsResetMetric = float64(statsReset.Time.Unix()) - } - - labels := []string{datid.String, datname.String} - - ch <- prometheus.MustNewConstMetric( - statDatabaseNumbackends, - prometheus.GaugeValue, - numBackends.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseXactCommit, - prometheus.CounterValue, - xactCommit.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseXactRollback, - prometheus.CounterValue, - xactRollback.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseBlksRead, - prometheus.CounterValue, - blksRead.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseBlksHit, - prometheus.CounterValue, - blksHit.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTupReturned, - prometheus.CounterValue, - tupReturned.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTupFetched, - prometheus.CounterValue, - tupFetched.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTupInserted, - prometheus.CounterValue, - tupInserted.Float64, - labels..., - ) - - ch <- 
prometheus.MustNewConstMetric( - statDatabaseTupUpdated, - prometheus.CounterValue, - tupUpdated.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTupDeleted, - prometheus.CounterValue, - tupDeleted.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseConflicts, - prometheus.CounterValue, - conflicts.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTempFiles, - prometheus.CounterValue, - tempFiles.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseTempBytes, - prometheus.CounterValue, - tempBytes.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseDeadlocks, - prometheus.CounterValue, - deadlocks.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseBlkReadTime, - prometheus.CounterValue, - blkReadTime.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseBlkWriteTime, - prometheus.CounterValue, - blkWriteTime.Float64, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseStatsReset, - prometheus.CounterValue, - statsResetMetric, - labels..., - ) - - if activeTimeAvail { - ch <- prometheus.MustNewConstMetric( - statDatabaseActiveTime, - prometheus.CounterValue, - activeTime.Float64/1000.0, - labels..., - ) - } - } - return nil -} diff --git a/collector/pg_stat_database_test.go b/collector/pg_stat_database_test.go deleted file mode 100644 index e6194ca2e..000000000 --- a/collector/pg_stat_database_test.go +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - "time" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/promslog" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatDatabaseCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("14.0.0")} - - columns := []string{ - "datid", - "datname", - "numbackends", - "xact_commit", - "xact_rollback", - "blks_read", - "blks_hit", - "tup_returned", - "tup_fetched", - "tup_inserted", - "tup_updated", - "tup_deleted", - "conflicts", - "temp_files", - "temp_bytes", - "deadlocks", - "blk_read_time", - "blk_write_time", - "stats_reset", - "active_time", - } - - srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") - if err != nil { - t.Fatalf("Error parsing time: %s", err) - } - - rows := sqlmock.NewRows(columns). 
- AddRow( - "pid", - "postgres", - 354, - 4945, - 289097744, - 1242257, - int64(3275602074), - 89320867, - 450139, - 2034563757, - 0, - int64(2725688749), - 23, - 52, - 74, - 925, - 16, - 823, - srT, - 33, - ) - - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery(columns))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatDatabaseCollector{ - log: promslog.NewNopLogger().With("collector", "pg_stat_database"), - } - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.033}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPGStatDatabaseCollectorNullValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") - if err != nil { - t.Fatalf("Error parsing time: %s", err) - } - inst := &instance{db: db, version: semver.MustParse("14.0.0")} - - columns := []string{ - "datid", - "datname", - "numbackends", - "xact_commit", - "xact_rollback", - "blks_read", - "blks_hit", - "tup_returned", - "tup_fetched", - "tup_inserted", -
"tup_updated", - "tup_deleted", - "conflicts", - "temp_files", - "temp_bytes", - "deadlocks", - "blk_read_time", - "blk_write_time", - "stats_reset", - "active_time", - } - - rows := sqlmock.NewRows(columns). - AddRow( - nil, - "postgres", - 354, - 4945, - 289097744, - 1242257, - int64(3275602074), - 89320867, - 450139, - 2034563757, - 0, - int64(2725688749), - 23, - 52, - 74, - 925, - 16, - 823, - srT, - 32, - ). - AddRow( - "pid", - "postgres", - 354, - 4945, - 289097744, - 1242257, - int64(3275602074), - 89320867, - 450139, - 2034563757, - 0, - int64(2725688749), - 23, - 52, - 74, - 925, - 16, - 823, - srT, - 32, - ) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery(columns))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatDatabaseCollector{ - log: promslog.NewNopLogger().With("collector", "pg_stat_database"), - } - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.032}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} -func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - 
inst := &instance{db: db, version: semver.MustParse("14.0.0")} - - columns := []string{ - "datid", - "datname", - "numbackends", - "xact_commit", - "xact_rollback", - "blks_read", - "blks_hit", - "tup_returned", - "tup_fetched", - "tup_inserted", - "tup_updated", - "tup_deleted", - "conflicts", - "temp_files", - "temp_bytes", - "deadlocks", - "blk_read_time", - "blk_write_time", - "stats_reset", - "active_time", - } - - srT, err := time.Parse("2006-01-02 15:04:05.00000-07", "2023-05-25 17:10:42.81132-07") - if err != nil { - t.Fatalf("Error parsing time: %s", err) - } - - rows := sqlmock.NewRows(columns). - AddRow( - "pid", - "postgres", - 354, - 4945, - 289097744, - 1242257, - int64(3275602074), - 89320867, - 450139, - 2034563757, - 0, - int64(2725688749), - 23, - 52, - 74, - 925, - 16, - 823, - srT, - 14, - ). - AddRow( - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - ). - AddRow( - "pid", - "postgres", - 355, - 4946, - 289097745, - 1242258, - int64(3275602075), - 89320868, - 450140, - 2034563758, - 1, - int64(2725688750), - 24, - 53, - 75, - 926, - 17, - 824, - srT, - 15, - ) - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery(columns))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatDatabaseCollector{ - log: promslog.NewNopLogger().With("collector", "pg_stat_database"), - } - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - {labels: labelMap{"datid": 
"pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.014}, - - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 355}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4946}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097745}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242258}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602075}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320868}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450140}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563758}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688750}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 24}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 53}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 75}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 926}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 17}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 824}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.015}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("14.0.0")} - - columns := []string{ - "datid", - "datname", - "numbackends", - "xact_commit", - "xact_rollback", - "blks_read", - "blks_hit", - "tup_returned", - "tup_fetched", - "tup_inserted", - "tup_updated", - "tup_deleted", - "conflicts", - "temp_files", - "temp_bytes", - "deadlocks", - "blk_read_time", - "blk_write_time", - "stats_reset", - "active_time", - } - - rows := sqlmock.NewRows(columns). 
- AddRow( - "pid", - "postgres", - 354, - 4945, - 289097744, - 1242257, - int64(3275602074), - 89320867, - 450139, - 2034563757, - 0, - int64(2725688749), - 23, - 52, - 74, - 925, - 16, - 823, - nil, - 7, - ) - - mock.ExpectQuery(sanitizeQuery(statDatabaseQuery(columns))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatDatabaseCollector{ - log: promslog.NewNopLogger().With("collector", "pg_stat_database"), - } - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 354}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 4945}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 289097744}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1242257}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 3275602074}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 89320867}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 450139}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2034563757}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 2725688749}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 23}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 52}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 74}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.007}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} diff --git a/collector/pg_stat_progress_vacuum.go b/collector/pg_stat_progress_vacuum.go deleted file mode 100644 index f8083a49f..000000000 --- a/collector/pg_stat_progress_vacuum.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const progressVacuumSubsystem = "stat_progress_vacuum" - -func init() { - registerCollector(progressVacuumSubsystem, defaultEnabled, NewPGStatProgressVacuumCollector) -} - -type PGStatProgressVacuumCollector struct { - log *slog.Logger -} - -func NewPGStatProgressVacuumCollector(config collectorConfig) (Collector, error) { - return &PGStatProgressVacuumCollector{log: config.logger}, nil -} - -var vacuumPhases = []string{ - "initializing", - "scanning heap", - "vacuuming indexes", - "vacuuming heap", - "cleaning up indexes", - "truncating heap", - "performing final cleanup", -} - -var ( - statProgressVacuumPhase = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "phase"), - "Current vacuum phase (1 = active, 0 = inactive). Label 'phase' is human-readable.", - []string{"datname", "relname", "phase"}, - nil, - ) - - statProgressVacuumHeapBlksTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "heap_blks"), - "Total number of heap blocks in the table being vacuumed.", - []string{"datname", "relname"}, - nil, - ) - - statProgressVacuumHeapBlksScanned = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "heap_blks_scanned"), - "Number of heap blocks scanned so far.", - []string{"datname", "relname"}, - nil, - ) - - statProgressVacuumHeapBlksVacuumed = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "heap_blks_vacuumed"), - "Number of heap blocks vacuumed so far.", - []string{"datname", "relname"}, - nil, - ) - - statProgressVacuumIndexVacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "index_vacuums"), - "Number of completed index vacuum cycles.", - []string{"datname", "relname"}, - nil, - ) - - statProgressVacuumMaxDeadTuples = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "max_dead_tuples"), - "Maximum number of dead tuples that can be stored before cleanup is performed.", - []string{"datname", "relname"}, - nil, - ) - - statProgressVacuumNumDeadTuples = prometheus.NewDesc( - prometheus.BuildFQName(namespace, progressVacuumSubsystem, "num_dead_tuples"), - "Current number of dead tuples found so far.", - []string{"datname", "relname"}, - nil, - ) - - // This is the view definition of pg_stat_progress_vacuum, albeit without the conversion - // of "phase" to a human-readable string. We will prefer the numeric representation. 
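- // pg_stat_get_progress_info('VACUUM') exposes generic param1..param20 - // slots; the seven consumed here are aliased below to match the column - // names of the pg_stat_progress_vacuum view.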
- statProgressVacuumQuery = `SELECT - d.datname, - s.relid::regclass::text AS relname, - s.param1 AS phase, - s.param2 AS heap_blks_total, - s.param3 AS heap_blks_scanned, - s.param4 AS heap_blks_vacuumed, - s.param5 AS index_vacuum_count, - s.param6 AS max_dead_tuples, - s.param7 AS num_dead_tuples - FROM - pg_stat_get_progress_info('VACUUM'::text) - s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) - LEFT JOIN - pg_database d ON s.datid = d.oid` -) - -func (c *PGStatProgressVacuumCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - statProgressVacuumQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var ( - datname sql.NullString - relname sql.NullString - phase sql.NullInt64 - heapBlksTotal sql.NullInt64 - heapBlksScanned sql.NullInt64 - heapBlksVacuumed sql.NullInt64 - indexVacuumCount sql.NullInt64 - maxDeadTuples sql.NullInt64 - numDeadTuples sql.NullInt64 - ) - - if err := rows.Scan( - &datname, - &relname, - &phase, - &heapBlksTotal, - &heapBlksScanned, - &heapBlksVacuumed, - &indexVacuumCount, - &maxDeadTuples, - &numDeadTuples, - ); err != nil { - return err - } - - datnameLabel := "unknown" - if datname.Valid { - datnameLabel = datname.String - } - relnameLabel := "unknown" - if relname.Valid { - relnameLabel = relname.String - } - - labels := []string{datnameLabel, relnameLabel} - - var phaseMetric *float64 - if phase.Valid { - v := float64(phase.Int64) - phaseMetric = &v - } - - for i, label := range vacuumPhases { - v := 0.0 - // Only the current phase should be 1.0. - if phaseMetric != nil && float64(i) == *phaseMetric { - v = 1.0 - } - labelsCopy := append(labels, label) - ch <- prometheus.MustNewConstMetric(statProgressVacuumPhase, prometheus.GaugeValue, v, labelsCopy...) - } - - heapTotal := 0.0 - if heapBlksTotal.Valid { - heapTotal = float64(heapBlksTotal.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumHeapBlksTotal, prometheus.GaugeValue, heapTotal, labels...) - - heapScanned := 0.0 - if heapBlksScanned.Valid { - heapScanned = float64(heapBlksScanned.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumHeapBlksScanned, prometheus.GaugeValue, heapScanned, labels...) - - heapVacuumed := 0.0 - if heapBlksVacuumed.Valid { - heapVacuumed = float64(heapBlksVacuumed.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumHeapBlksVacuumed, prometheus.GaugeValue, heapVacuumed, labels...) - - indexCount := 0.0 - if indexVacuumCount.Valid { - indexCount = float64(indexVacuumCount.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumIndexVacuumCount, prometheus.GaugeValue, indexCount, labels...) - - maxDead := 0.0 - if maxDeadTuples.Valid { - maxDead = float64(maxDeadTuples.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumMaxDeadTuples, prometheus.GaugeValue, maxDead, labels...) - - numDead := 0.0 - if numDeadTuples.Valid { - numDead = float64(numDeadTuples.Int64) - } - ch <- prometheus.MustNewConstMetric(statProgressVacuumNumDeadTuples, prometheus.GaugeValue, numDead, labels...) 
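- // NULL progress values are emitted as 0 rather than skipping the row, so - // every in-flight vacuum always yields a complete set of gauges.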
- } - - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_stat_progress_vacuum_test.go b/collector/pg_stat_progress_vacuum_test.go deleted file mode 100644 index 80572feb8..000000000 --- a/collector/pg_stat_progress_vacuum_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatProgressVacuumCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{ - "datname", "relname", "phase", "heap_blks_total", "heap_blks_scanned", - "heap_blks_vacuumed", "index_vacuum_count", "max_dead_tuples", "num_dead_tuples", - } - - rows := sqlmock.NewRows(columns).AddRow( - "postgres", "a_table", 3, 3000, 400, 200, 2, 500, 123) - - mock.ExpectQuery(sanitizeQuery(statProgressVacuumQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatProgressVacuumCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatProgressVacuumCollector.Update; %+v", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "initializing"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "scanning heap"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "vacuuming indexes"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "vacuuming heap"}, metricType: dto.MetricType_GAUGE, value: 1}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "cleaning up indexes"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "truncating heap"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table", "phase": "performing final cleanup"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 3000}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 400}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 200}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 2}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: 
dto.MetricType_GAUGE, value: 500}, - {labels: labelMap{"datname": "postgres", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 123}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(m, convey.ShouldResemble, expect) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %+v", err) - } -} - -func TestPGStatProgressVacuumCollectorNullValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{ - "datname", "relname", "phase", "heap_blks_total", "heap_blks_scanned", - "heap_blks_vacuumed", "index_vacuum_count", "max_dead_tuples", "num_dead_tuples", - } - - rows := sqlmock.NewRows(columns).AddRow( - "postgres", nil, nil, nil, nil, nil, nil, nil, nil) - - mock.ExpectQuery(sanitizeQuery(statProgressVacuumQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatProgressVacuumCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatProgressVacuumCollector.Update; %+v", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "initializing"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "scanning heap"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "vacuuming indexes"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "vacuuming heap"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "cleaning up indexes"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "truncating heap"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown", "phase": "performing final cleanup"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("There were unfulfilled expectations: %+v", err) - } -} diff --git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go deleted file mode 100644 index 9160d3c16..000000000 --- a/collector/pg_stat_statements.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "fmt" - "log/slog" - - "github.com/alecthomas/kingpin/v2" - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" -) - -const statStatementsSubsystem = "stat_statements" - -var ( - includeQueryFlag *bool = nil - statementLengthFlag *uint = nil -) - -func init() { - // WARNING: - // Disabled by default because this set of metrics can be quite expensive on a busy server - // Every unique query will cause a new timeseries to be created - registerCollector(statStatementsSubsystem, defaultDisabled, NewPGStatStatementsCollector) - - includeQueryFlag = kingpin.Flag( - fmt.Sprint(collectorFlagPrefix, statStatementsSubsystem, ".include_query"), - "Enable selecting statement query together with queryId. (default: disabled)"). - Default(fmt.Sprintf("%v", defaultDisabled)). - Bool() - statementLengthFlag = kingpin.Flag( - fmt.Sprint(collectorFlagPrefix, statStatementsSubsystem, ".query_length"), - "Maximum length of the statement text."). - Default("120"). - Uint() -} - -type PGStatStatementsCollector struct { - log *slog.Logger - includeQueryStatement bool - statementLength uint -} - -func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) { - return &PGStatStatementsCollector{ - log: config.logger, - includeQueryStatement: *includeQueryFlag, - statementLength: *statementLengthFlag, - }, nil -} - -var ( - statSTatementsCallsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), - "Number of times executed", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), - "Total time spent in the statement, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsRowsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), - "Total number of rows retrieved or affected by the statement", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsBlockReadSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), - "Total time the statement spent reading blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), - "Total time the statement spent writing blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - - statStatementsQuery = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "query_id"), - "SQL Query to queryid mapping", - []string{"queryid", "query"}, - prometheus.Labels{}, - ) -) - -const ( - pgStatStatementQuerySelect = `LEFT(pg_stat_statements.query, %d) as query,` - 
- pgStatStatementsQuery = `SELECT - pg_get_userbyid(userid) as user, - pg_database.datname, - pg_stat_statements.queryid, - %s - pg_stat_statements.calls as calls_total, - pg_stat_statements.total_time / 1000.0 as seconds_total, - pg_stat_statements.rows as rows_total, - pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, - pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total - FROM pg_stat_statements - JOIN pg_database - ON pg_database.oid = pg_stat_statements.dbid - WHERE - total_time > ( - SELECT percentile_cont(0.1) - WITHIN GROUP (ORDER BY total_time) - FROM pg_stat_statements - ) - ORDER BY seconds_total DESC - LIMIT 100;` - - pgStatStatementsNewQuery = `SELECT - pg_get_userbyid(userid) as user, - pg_database.datname, - pg_stat_statements.queryid, - %s - pg_stat_statements.calls as calls_total, - pg_stat_statements.total_exec_time / 1000.0 as seconds_total, - pg_stat_statements.rows as rows_total, - pg_stat_statements.blk_read_time / 1000.0 as block_read_seconds_total, - pg_stat_statements.blk_write_time / 1000.0 as block_write_seconds_total - FROM pg_stat_statements - JOIN pg_database - ON pg_database.oid = pg_stat_statements.dbid - WHERE - total_exec_time > ( - SELECT percentile_cont(0.1) - WITHIN GROUP (ORDER BY total_exec_time) - FROM pg_stat_statements - ) - ORDER BY seconds_total DESC - LIMIT 100;` - - pgStatStatementsQuery_PG17 = `SELECT - pg_get_userbyid(userid) as user, - pg_database.datname, - pg_stat_statements.queryid, - %s - pg_stat_statements.calls as calls_total, - pg_stat_statements.total_exec_time / 1000.0 as seconds_total, - pg_stat_statements.rows as rows_total, - pg_stat_statements.shared_blk_read_time / 1000.0 as block_read_seconds_total, - pg_stat_statements.shared_blk_write_time / 1000.0 as block_write_seconds_total - FROM pg_stat_statements - JOIN pg_database - ON pg_database.oid = pg_stat_statements.dbid - WHERE - total_exec_time > ( - SELECT percentile_cont(0.1) - WITHIN GROUP (ORDER BY total_exec_time) - FROM pg_stat_statements - ) - ORDER BY seconds_total DESC - LIMIT 100;` -) - -func (c PGStatStatementsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - var queryTemplate string - switch { - case instance.version.GE(semver.MustParse("17.0.0")): - queryTemplate = pgStatStatementsQuery_PG17 - case instance.version.GE(semver.MustParse("13.0.0")): - queryTemplate = pgStatStatementsNewQuery - default: - queryTemplate = pgStatStatementsQuery - } - var querySelect = "" - if c.includeQueryStatement { - querySelect = fmt.Sprintf(pgStatStatementQuerySelect, c.statementLength) - } - query := fmt.Sprintf(queryTemplate, querySelect) - - db := instance.getDB() - rows, err := db.QueryContext(ctx, query) - - var presentQueryIds = make(map[string]struct{}) - - if err != nil { - return err - } - defer rows.Close() - for rows.Next() { - var user, datname, queryid, statement sql.NullString - var callsTotal, rowsTotal sql.NullInt64 - var secondsTotal, blockReadSecondsTotal, blockWriteSecondsTotal sql.NullFloat64 - var columns []any - if c.includeQueryStatement { - columns = []any{&user, &datname, &queryid, &statement, &callsTotal, &secondsTotal, &rowsTotal, &blockReadSecondsTotal, &blockWriteSecondsTotal} - } else { - columns = []any{&user, &datname, &queryid, &callsTotal, &secondsTotal, &rowsTotal, &blockReadSecondsTotal, &blockWriteSecondsTotal} - } - if err := rows.Scan(columns...); err != nil { - return err - } - - userLabel := "unknown" - if user.Valid { - userLabel = user.String - } - 
datnameLabel := "unknown" - if datname.Valid { - datnameLabel = datname.String - } - queryidLabel := "unknown" - if queryid.Valid { - queryidLabel = queryid.String - } - - callsTotalMetric := 0.0 - if callsTotal.Valid { - callsTotalMetric = float64(callsTotal.Int64) - } - ch <- prometheus.MustNewConstMetric( - statSTatementsCallsTotal, - prometheus.CounterValue, - callsTotalMetric, - userLabel, datnameLabel, queryidLabel, - ) - - secondsTotalMetric := 0.0 - if secondsTotal.Valid { - secondsTotalMetric = secondsTotal.Float64 - } - ch <- prometheus.MustNewConstMetric( - statStatementsSecondsTotal, - prometheus.CounterValue, - secondsTotalMetric, - userLabel, datnameLabel, queryidLabel, - ) - - rowsTotalMetric := 0.0 - if rowsTotal.Valid { - rowsTotalMetric = float64(rowsTotal.Int64) - } - ch <- prometheus.MustNewConstMetric( - statStatementsRowsTotal, - prometheus.CounterValue, - rowsTotalMetric, - userLabel, datnameLabel, queryidLabel, - ) - - blockReadSecondsTotalMetric := 0.0 - if blockReadSecondsTotal.Valid { - blockReadSecondsTotalMetric = blockReadSecondsTotal.Float64 - } - ch <- prometheus.MustNewConstMetric( - statStatementsBlockReadSecondsTotal, - prometheus.CounterValue, - blockReadSecondsTotalMetric, - userLabel, datnameLabel, queryidLabel, - ) - - blockWriteSecondsTotalMetric := 0.0 - if blockWriteSecondsTotal.Valid { - blockWriteSecondsTotalMetric = blockWriteSecondsTotal.Float64 - } - ch <- prometheus.MustNewConstMetric( - statStatementsBlockWriteSecondsTotal, - prometheus.CounterValue, - blockWriteSecondsTotalMetric, - userLabel, datnameLabel, queryidLabel, - ) - - if c.includeQueryStatement { - _, ok := presentQueryIds[queryidLabel] - if !ok { - presentQueryIds[queryidLabel] = struct{}{} - - queryLabel := "unknown" - if statement.Valid { - queryLabel = statement.String - } - - ch <- prometheus.MustNewConstMetric( - statStatementsQuery, - prometheus.CounterValue, - 1, - queryidLabel, queryLabel, - ) - } - } - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go deleted file mode 100644 index 0497ba380..000000000 --- a/collector/pg_stat_statements_test.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package collector - -import ( - "context" - "fmt" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/blang/semver/v4" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStateStatementsCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("12.0.0")} - - columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). - AddRow("postgres", "postgres", 1500, 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsQuery, ""))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPGStateStatementsCollectorWithStatement(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("12.0.0")} - - columns := []string{"user", "datname", "queryid", "LEFT(pg_stat_statements.query, 100) as query", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow("postgres", "postgres", 1500, "select 1 from foo", 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsQuery, fmt.Sprintf(pgStatStatementQuerySelect, 100)))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{includeQueryStatement: true, statementLength: 100} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - {labels: labelMap{"queryid": "1500", "query": "select 1 from foo"}, metricType: dto.MetricType_COUNTER, value: 1}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStateStatementsCollectorNull(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow(nil, nil, nil, nil, nil, nil, nil, nil) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsNewQuery, ""))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPGStateStatementsCollectorNullWithStatement(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"user", "datname", "queryid", "LEFT(pg_stat_statements.query, 200) as query", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow(nil, nil, nil, nil, nil, nil, nil, nil, nil) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsNewQuery, fmt.Sprintf(pgStatStatementQuerySelect, 200)))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{includeQueryStatement: true, statementLength: 200} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"user": "unknown", "datname": "unknown", "queryid": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"queryid": "unknown", "query": "unknown"}, metricType: dto.MetricType_COUNTER, value: 1}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPGStateStatementsCollectorNewPG(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow("postgres", "postgres", 1500, 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsNewQuery, ""))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStateStatementsCollectorNewPGWithStatement(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("13.3.7")} - - columns := []string{"user", "datname", "queryid", "LEFT(pg_stat_statements.query, 300) as query", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow("postgres", "postgres", 1500, "select 1 from foo", 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsNewQuery, fmt.Sprintf(pgStatStatementQuerySelect, 300)))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{includeQueryStatement: true, statementLength: 300} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - {labels: labelMap{"queryid": "1500", "query": "select 1 from foo"}, metricType: dto.MetricType_COUNTER, value: 1}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStateStatementsCollector_PG17(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("17.0.0")} - - columns := []string{"user", "datname", "queryid", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow("postgres", "postgres", 1500, 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsQuery_PG17, ""))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} - -func TestPGStateStatementsCollector_PG17_WithStatement(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db, version: semver.MustParse("17.0.0")} - - columns := []string{"user", "datname", "queryid", "LEFT(pg_stat_statements.query, 300) as query", "calls_total", "seconds_total", "rows_total", "block_read_seconds_total", "block_write_seconds_total"} - rows := sqlmock.NewRows(columns). 
- AddRow("postgres", "postgres", 1500, "select 1 from foo", 5, 0.4, 100, 0.1, 0.2) - mock.ExpectQuery(sanitizeQuery(fmt.Sprintf(pgStatStatementsQuery_PG17, fmt.Sprintf(pgStatStatementQuerySelect, 300)))).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatStatementsCollector{includeQueryStatement: true, statementLength: 300} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.4}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 100}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.1}, - {labels: labelMap{"user": "postgres", "datname": "postgres", "queryid": "1500"}, metricType: dto.MetricType_COUNTER, value: 0.2}, - {labels: labelMap{"queryid": "1500", "query": "select 1 from foo"}, metricType: dto.MetricType_COUNTER, value: 1}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled exceptions: %s", err) - } -} diff --git a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go deleted file mode 100644 index ad8bcace7..000000000 --- a/collector/pg_stat_user_tables.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const userTableSubsystem = "stat_user_tables" - -func init() { - registerCollector(userTableSubsystem, defaultEnabled, NewPGStatUserTablesCollector) -} - -type PGStatUserTablesCollector struct { - log *slog.Logger -} - -func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) { - return &PGStatUserTablesCollector{log: config.logger}, nil -} - -var ( - statUserTablesSeqScan = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), - "Number of sequential scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesSeqTupRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), - "Number of live rows fetched by sequential scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesIdxScan = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), - "Number of index scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesIdxTupFetch = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), - "Number of live rows fetched by index scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupIns = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), - "Number of rows inserted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), - "Number of rows updated", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupDel = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), - "Number of rows deleted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupHotUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), - "Number of rows HOT updated (i.e., with no separate index update required)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNLiveTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), - "Estimated number of live rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNDeadTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), - "Estimated number of dead rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNModSinceAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), - "Estimated number of rows changed since last analyze", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastVacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), - "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAutovacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, 
"last_autovacuum"), - "Last time at which this table was vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), - "Last time at which this table was manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAutoanalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), - "Last time at which this table was analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesVacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), - "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAutovacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), - "Number of times this table has been vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAnalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), - "Number of times this table has been manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAutoanalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), - "Number of times this table has been analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserIndexSize = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "index_size_bytes"), - "Total disk space used by this index, in bytes", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTableSize = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "table_size_bytes"), - "Total disk space used by this table, in bytes", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - - statUserTablesQuery = `SELECT - current_database() datname, - schemaname, - relname, - seq_scan, - seq_tup_read, - idx_scan, - idx_tup_fetch, - n_tup_ins, - n_tup_upd, - n_tup_del, - n_tup_hot_upd, - n_live_tup, - n_dead_tup, - n_mod_since_analyze, - COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, - COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, - COALESCE(last_analyze, '1970-01-01Z') as last_analyze, - COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, - vacuum_count, - autovacuum_count, - analyze_count, - autoanalyze_count, - pg_indexes_size(relid) as indexes_size, - pg_table_size(relid) as table_size - FROM - pg_stat_user_tables` -) - -func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - statUserTablesQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var datname, schemaname, relname sql.NullString - var seqScan, seqTupRead, idxScan, idxTupFetch, nTupIns, nTupUpd, nTupDel, nTupHotUpd, nLiveTup, nDeadTup, - nModSinceAnalyze, vacuumCount, autovacuumCount, analyzeCount, autoanalyzeCount, indexSize, tableSize sql.NullInt64 - var 
lastVacuum, lastAutovacuum, lastAnalyze, lastAutoanalyze sql.NullTime - - if err := rows.Scan(&datname, &schemaname, &relname, &seqScan, &seqTupRead, &idxScan, &idxTupFetch, &nTupIns, &nTupUpd, &nTupDel, &nTupHotUpd, &nLiveTup, &nDeadTup, &nModSinceAnalyze, &lastVacuum, &lastAutovacuum, &lastAnalyze, &lastAutoanalyze, &vacuumCount, &autovacuumCount, &analyzeCount, &autoanalyzeCount, &indexSize, &tableSize); err != nil { - return err - } - - datnameLabel := "unknown" - if datname.Valid { - datnameLabel = datname.String - } - schemanameLabel := "unknown" - if schemaname.Valid { - schemanameLabel = schemaname.String - } - relnameLabel := "unknown" - if relname.Valid { - relnameLabel = relname.String - } - - seqScanMetric := 0.0 - if seqScan.Valid { - seqScanMetric = float64(seqScan.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesSeqScan, - prometheus.CounterValue, - seqScanMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - seqTupReadMetric := 0.0 - if seqTupRead.Valid { - seqTupReadMetric = float64(seqTupRead.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesSeqTupRead, - prometheus.CounterValue, - seqTupReadMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - idxScanMetric := 0.0 - if idxScan.Valid { - idxScanMetric = float64(idxScan.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesIdxScan, - prometheus.CounterValue, - idxScanMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - idxTupFetchMetric := 0.0 - if idxTupFetch.Valid { - idxTupFetchMetric = float64(idxTupFetch.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesIdxTupFetch, - prometheus.CounterValue, - idxTupFetchMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nTupInsMetric := 0.0 - if nTupIns.Valid { - nTupInsMetric = float64(nTupIns.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNTupIns, - prometheus.CounterValue, - nTupInsMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nTupUpdMetric := 0.0 - if nTupUpd.Valid { - nTupUpdMetric = float64(nTupUpd.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNTupUpd, - prometheus.CounterValue, - nTupUpdMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nTupDelMetric := 0.0 - if nTupDel.Valid { - nTupDelMetric = float64(nTupDel.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNTupDel, - prometheus.CounterValue, - nTupDelMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nTupHotUpdMetric := 0.0 - if nTupHotUpd.Valid { - nTupHotUpdMetric = float64(nTupHotUpd.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNTupHotUpd, - prometheus.CounterValue, - nTupHotUpdMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nLiveTupMetric := 0.0 - if nLiveTup.Valid { - nLiveTupMetric = float64(nLiveTup.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNLiveTup, - prometheus.GaugeValue, - nLiveTupMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nDeadTupMetric := 0.0 - if nDeadTup.Valid { - nDeadTupMetric = float64(nDeadTup.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNDeadTup, - prometheus.GaugeValue, - nDeadTupMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - nModSinceAnalyzeMetric := 0.0 - if nModSinceAnalyze.Valid { - nModSinceAnalyzeMetric = float64(nModSinceAnalyze.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesNModSinceAnalyze, - prometheus.GaugeValue, - nModSinceAnalyzeMetric, - datnameLabel, 
schemanameLabel, relnameLabel, - ) - - lastVacuumMetric := 0.0 - if lastVacuum.Valid { - lastVacuumMetric = float64(lastVacuum.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesLastVacuum, - prometheus.GaugeValue, - lastVacuumMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - lastAutovacuumMetric := 0.0 - if lastAutovacuum.Valid { - lastAutovacuumMetric = float64(lastAutovacuum.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesLastAutovacuum, - prometheus.GaugeValue, - lastAutovacuumMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - lastAnalyzeMetric := 0.0 - if lastAnalyze.Valid { - lastAnalyzeMetric = float64(lastAnalyze.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesLastAnalyze, - prometheus.GaugeValue, - lastAnalyzeMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - lastAutoanalyzeMetric := 0.0 - if lastAutoanalyze.Valid { - lastAutoanalyzeMetric = float64(lastAutoanalyze.Time.Unix()) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesLastAutoanalyze, - prometheus.GaugeValue, - lastAutoanalyzeMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - vacuumCountMetric := 0.0 - if vacuumCount.Valid { - vacuumCountMetric = float64(vacuumCount.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesVacuumCount, - prometheus.CounterValue, - vacuumCountMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - autovacuumCountMetric := 0.0 - if autovacuumCount.Valid { - autovacuumCountMetric = float64(autovacuumCount.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesAutovacuumCount, - prometheus.CounterValue, - autovacuumCountMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - analyzeCountMetric := 0.0 - if analyzeCount.Valid { - analyzeCountMetric = float64(analyzeCount.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesAnalyzeCount, - prometheus.CounterValue, - analyzeCountMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - autoanalyzeCountMetric := 0.0 - if autoanalyzeCount.Valid { - autoanalyzeCountMetric = float64(autoanalyzeCount.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTablesAutoanalyzeCount, - prometheus.CounterValue, - autoanalyzeCountMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - indexSizeMetric := 0.0 - if indexSize.Valid { - indexSizeMetric = float64(indexSize.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserIndexSize, - prometheus.GaugeValue, - indexSizeMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - tableSizeMetric := 0.0 - if tableSize.Valid { - tableSizeMetric = float64(tableSize.Int64) - } - ch <- prometheus.MustNewConstMetric( - statUserTableSize, - prometheus.GaugeValue, - tableSizeMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - } - - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go deleted file mode 100644 index 4649bdbc5..000000000 --- a/collector/pg_stat_user_tables_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - "time" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPGStatUserTablesCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - lastVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-02Z") - if err != nil { - t.Fatalf("Error parsing vacuum time: %s", err) - } - lastAutoVacuumTime, err := time.Parse("2006-01-02Z", "2023-06-03Z") - if err != nil { - t.Fatalf("Error parsing autovacuum time: %s", err) - } - lastAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-04Z") - if err != nil { - t.Fatalf("Error parsing analyze time: %s", err) - } - lastAutoAnalyzeTime, err := time.Parse("2006-01-02Z", "2023-06-05Z") - if err != nil { - t.Fatalf("Error parsing autoanalyze time: %s", err) - } - - columns := []string{ - "datname", - "schemaname", - "relname", - "seq_scan", - "seq_tup_read", - "idx_scan", - "idx_tup_fetch", - "n_tup_ins", - "n_tup_upd", - "n_tup_del", - "n_tup_hot_upd", - "n_live_tup", - "n_dead_tup", - "n_mod_since_analyze", - "last_vacuum", - "last_autovacuum", - "last_analyze", - "last_autoanalyze", - "vacuum_count", - "autovacuum_count", - "analyze_count", - "autoanalyze_count", - "index_size", - "table_size"} - rows := sqlmock.NewRows(columns).
- AddRow("postgres", - "public", - "a_table", - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 0, - lastVacuumTime, - lastAutoVacuumTime, - lastAnalyzeTime, - lastAutoAnalyzeTime, - 11, - 12, - 13, - 14, - 15, - 16) - mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows) - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatUserTablesCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 9}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 10}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685664000}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685750400}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685836800}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 1685923200}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 11}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 12}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 13}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 14}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 15}, - {labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_GAUGE, value: 16}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect 
:= range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPGStatUserTablesCollectorNullValues(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{ - "datname", - "schemaname", - "relname", - "seq_scan", - "seq_tup_read", - "idx_scan", - "idx_tup_fetch", - "n_tup_ins", - "n_tup_upd", - "n_tup_del", - "n_tup_hot_upd", - "n_live_tup", - "n_dead_tup", - "n_mod_since_analyze", - "last_vacuum", - "last_autovacuum", - "last_analyze", - "last_autoanalyze", - "vacuum_count", - "autovacuum_count", - "analyze_count", - "autoanalyze_count", - "index_size", - "table_size"} - rows := sqlmock.NewRows(columns). - AddRow("postgres", - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil) - mock.ExpectQuery(sanitizeQuery(statUserTablesQuery)).WillReturnRows(rows) - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatUserTablesCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err) - } - }() - - expected := []MetricResult{ - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname":
"postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - {labels: labelMap{"datname": "postgres", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_GAUGE, value: 0}, - } - - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} diff --git a/collector/pg_stat_walreceiver.go b/collector/pg_stat_walreceiver.go deleted file mode 100644 index ea0db4558..000000000 --- a/collector/pg_stat_walreceiver.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
-package collector - -import ( - "context" - "database/sql" - "fmt" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -func init() { - registerCollector(statWalReceiverSubsystem, defaultDisabled, NewPGStatWalReceiverCollector) -} - -type PGStatWalReceiverCollector struct { - log *slog.Logger -} - -const statWalReceiverSubsystem = "stat_wal_receiver" - -func NewPGStatWalReceiverCollector(config collectorConfig) (Collector, error) { - return &PGStatWalReceiverCollector{log: config.logger}, nil -} - -var ( - labelCats = []string{"upstream_host", "slot_name", "status"} - statWalReceiverReceiveStartLsn = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_lsn"), - "First write-ahead log location used when WAL receiver is started represented as a decimal", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverReceiveStartTli = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_tli"), - "First timeline number used when WAL receiver is started", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverFlushedLSN = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "flushed_lsn"), - "Last write-ahead log location already received and flushed to disk, the initial value of this field being the first log location used when WAL receiver is started represented as a decimal", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverReceivedTli = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "received_tli"), - "Timeline number of last write-ahead log location received and flushed to disk", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLastMsgSendTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_send_time"), - "Send time of last message received from origin WAL sender", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLastMsgReceiptTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_receipt_time"), - "Receipt time of last message received from origin WAL sender", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLatestEndLsn = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_lsn"), - "Last write-ahead log location reported to origin WAL sender as integer", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLatestEndTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_time"), - "Time of last write-ahead log location reported to origin WAL sender", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverUpstreamNode = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "upstream_node"), - "Node ID of the upstream node", - labelCats, - prometheus.Labels{}, - ) - - pgStatWalColumnQuery = ` - SELECT - column_name - FROM information_schema.columns - WHERE - table_name = 'pg_stat_wal_receiver' and - column_name = 'flushed_lsn' - ` - - pgStatWalReceiverQueryTemplate = ` - SELECT - trim(both '''' from substring(conninfo from 'host=([^ ]*)')) as upstream_host, - slot_name, - status, - (receive_start_lsn- '0/0') %% (2^52)::bigint as receive_start_lsn, - %s -receive_start_tli, - received_tli, - extract(epoch from last_msg_send_time) as last_msg_send_time, - extract(epoch from last_msg_receipt_time) as last_msg_receipt_time, - (latest_end_lsn - '0/0') %% (2^52)::bigint as latest_end_lsn, -
extract(epoch from latest_end_time) as latest_end_time, - substring(slot_name from 'repmgr_slot_([0-9]*)') as upstream_node - FROM pg_catalog.pg_stat_wal_receiver - ` -) - -func (c *PGStatWalReceiverCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - hasFlushedLSNRows, err := db.QueryContext(ctx, pgStatWalColumnQuery) - if err != nil { - return err - } - - hasFlushedLSN := hasFlushedLSNRows.Next() - var query string - if hasFlushedLSN { - query = fmt.Sprintf(pgStatWalReceiverQueryTemplate, "(flushed_lsn - '0/0') % (2^52)::bigint as flushed_lsn,\n") - } else { - query = fmt.Sprintf(pgStatWalReceiverQueryTemplate, "") - } - - hasFlushedLSNRows.Close() - - rows, err := db.QueryContext(ctx, query) - if err != nil { - return err - } - defer rows.Close() - for rows.Next() { - var upstreamHost, slotName, status sql.NullString - var receiveStartLsn, receiveStartTli, flushedLsn, receivedTli, latestEndLsn, upstreamNode sql.NullInt64 - var lastMsgSendTime, lastMsgReceiptTime, latestEndTime sql.NullFloat64 - - if hasFlushedLSN { - if err := rows.Scan(&upstreamHost, &slotName, &status, &receiveStartLsn, &receiveStartTli, &flushedLsn, &receivedTli, &lastMsgSendTime, &lastMsgReceiptTime, &latestEndLsn, &latestEndTime, &upstreamNode); err != nil { - return err - } - } else { - if err := rows.Scan(&upstreamHost, &slotName, &status, &receiveStartLsn, &receiveStartTli, &receivedTli, &lastMsgSendTime, &lastMsgReceiptTime, &latestEndLsn, &latestEndTime, &upstreamNode); err != nil { - return err - } - } - if !upstreamHost.Valid { - c.log.Debug("Skipping wal receiver stats because upstream host is null") - continue - } - - if !slotName.Valid { - c.log.Debug("Skipping wal receiver stats because slot_name is null") - continue - } - - if !status.Valid { - c.log.Debug("Skipping wal receiver stats because status is null") - continue - } - labels := []string{upstreamHost.String, slotName.String, status.String} - - if !receiveStartLsn.Valid { - c.log.Debug("Skipping wal receiver stats because receive_start_lsn is null") - continue - } - if !receiveStartTli.Valid { - c.log.Debug("Skipping wal receiver stats because receive_start_tli is null") - continue - } - if hasFlushedLSN && !flushedLsn.Valid { - c.log.Debug("Skipping wal receiver stats because flushed_lsn is null") - continue - } - if !receivedTli.Valid { - c.log.Debug("Skipping wal receiver stats because received_tli is null") - continue - } - if !lastMsgSendTime.Valid { - c.log.Debug("Skipping wal receiver stats because last_msg_send_time is null") - continue - } - if !lastMsgReceiptTime.Valid { - c.log.Debug("Skipping wal receiver stats because last_msg_receipt_time is null") - continue - } - if !latestEndLsn.Valid { - c.log.Debug("Skipping wal receiver stats because latest_end_lsn is null") - continue - } - if !latestEndTime.Valid { - c.log.Debug("Skipping wal receiver stats because latest_end_time is null") - continue - } - ch <- prometheus.MustNewConstMetric( - statWalReceiverReceiveStartLsn, - prometheus.CounterValue, - float64(receiveStartLsn.Int64), - labels...) - - ch <- prometheus.MustNewConstMetric( - statWalReceiverReceiveStartTli, - prometheus.GaugeValue, - float64(receiveStartTli.Int64), - labels...) - - if hasFlushedLSN { - ch <- prometheus.MustNewConstMetric( - statWalReceiverFlushedLSN, - prometheus.CounterValue, - float64(flushedLsn.Int64), - labels...)
- } - - ch <- prometheus.MustNewConstMetric( - statWalReceiverReceivedTli, - prometheus.GaugeValue, - float64(receivedTli.Int64), - labels...) - - ch <- prometheus.MustNewConstMetric( - statWalReceiverLastMsgSendTime, - prometheus.CounterValue, - float64(lastMsgSendTime.Float64), - labels...) - - ch <- prometheus.MustNewConstMetric( - statWalReceiverLastMsgReceiptTime, - prometheus.CounterValue, - float64(lastMsgReceiptTime.Float64), - labels...) - - ch <- prometheus.MustNewConstMetric( - statWalReceiverLatestEndLsn, - prometheus.CounterValue, - float64(latestEndLsn.Int64), - labels...) - - ch <- prometheus.MustNewConstMetric( - statWalReceiverLatestEndTime, - prometheus.CounterValue, - latestEndTime.Float64, - labels...) - - if !upstreamNode.Valid { - c.log.Debug("Skipping wal receiver stats upstream_node because it is null") - } else { - ch <- prometheus.MustNewConstMetric( - statWalReceiverUpstreamNode, - prometheus.GaugeValue, - float64(upstreamNode.Int64), - labels...) - } - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_stat_walreceiver_test.go b/collector/pg_stat_walreceiver_test.go deleted file mode 100644 index c81c9ecae..000000000 --- a/collector/pg_stat_walreceiver_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "fmt" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -var queryWithFlushedLSN = fmt.Sprintf(pgStatWalReceiverQueryTemplate, "(flushed_lsn - '0/0') % (2^52)::bigint as flushed_lsn,\n") -var queryWithNoFlushedLSN = fmt.Sprintf(pgStatWalReceiverQueryTemplate, "") - -func TestPGStatWalReceiverCollectorWithFlushedLSN(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - infoSchemaColumns := []string{ - "column_name", - } - - infoSchemaRows := sqlmock.NewRows(infoSchemaColumns). - AddRow( - "flushed_lsn", - ) - - mock.ExpectQuery(sanitizeQuery(pgStatWalColumnQuery)).WillReturnRows(infoSchemaRows) - - columns := []string{ - "upstream_host", - "slot_name", - "status", - "receive_start_lsn", - "receive_start_tli", - "flushed_lsn", - "received_tli", - "last_msg_send_time", - "last_msg_receipt_time", - "latest_end_lsn", - "latest_end_time", - "upstream_node", - } - rows := sqlmock.NewRows(columns). 
- AddRow( - "foo", - "bar", - "stopping", - int64(1200668684563608), - 1687321285, - int64(1200668684563609), - 1687321280, - 1687321275, - 1687321276, - int64(1200668684563610), - 1687321277, - 5, - ) - - mock.ExpectQuery(sanitizeQuery(queryWithFlushedLSN)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatWalReceiverCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatWalReceiverCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1200668684563608, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1687321285, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1200668684563609, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1687321280, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1687321275, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1687321276, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1200668684563610, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 1687321277, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "stopping"}, value: 5, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } - -} - -func TestPGStatWalReceiverCollectorWithNoFlushedLSN(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - infoSchemaColumns := []string{ - "column_name", - } - - infoSchemaRows := sqlmock.NewRows(infoSchemaColumns) - - mock.ExpectQuery(sanitizeQuery(pgStatWalColumnQuery)).WillReturnRows(infoSchemaRows) - - columns := []string{ - "upstream_host", - "slot_name", - "status", - "receive_start_lsn", - "receive_start_tli", - "received_tli", - "last_msg_send_time", - "last_msg_receipt_time", - "latest_end_lsn", - "latest_end_time", - "upstream_node", - } - rows := sqlmock.NewRows(columns).
- AddRow( - "foo", - "bar", - "starting", - int64(1200668684563608), - 1687321285, - 1687321280, - 1687321275, - 1687321276, - int64(1200668684563610), - 1687321277, - 5, - ) - mock.ExpectQuery(sanitizeQuery(queryWithNoFlushedLSN)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatWalReceiverCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatWalReceiverCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1200668684563608, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1687321285, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1687321280, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1687321275, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1687321276, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1200668684563610, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 1687321277, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"upstream_host": "foo", "slot_name": "bar", "status": "starting"}, value: 5, metricType: dto.MetricType_GAUGE}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } - -} diff --git a/collector/pg_statio_user_indexes.go b/collector/pg_statio_user_indexes.go deleted file mode 100644 index c53f52185..000000000 --- a/collector/pg_statio_user_indexes.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
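On the `(receive_start_lsn - '0/0') % (2^52)::bigint` expressions in the walreceiver query above (written with `%%` in the Go template because the string passes through fmt.Sprintf): subtracting the zero LSN turns a `pg_lsn` into a numeric byte position, and the modulo presumably keeps the value inside the range that float64, the type behind every Prometheus sample, can represent exactly. The helper below is illustrative only, not part of the exporter; it performs the same reduction on the textual LSN form.

```go
// Illustrative helper showing what (lsn - '0/0') % (2^52)::bigint computes:
// a pg_lsn such as "16/B374D848" is a 64-bit position (high<<32 | low),
// reduced mod 2^52 so it survives the trip through a float64 sample intact.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func lsnToMetric(lsn string) (float64, error) {
	parts := strings.Split(lsn, "/")
	if len(parts) != 2 {
		return 0, fmt.Errorf("invalid pg_lsn %q", lsn)
	}
	hi, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return 0, err
	}
	lo, err := strconv.ParseUint(parts[1], 16, 32)
	if err != nil {
		return 0, err
	}
	pos := hi<<32 | lo
	return float64(pos % (1 << 52)), nil // same reduction as the SQL expression
}

func main() {
	v, err := lsnToMetric("16/B374D848")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}
```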
-package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -func init() { - registerCollector(statioUserIndexesSubsystem, defaultDisabled, NewPGStatioUserIndexesCollector) -} - -type PGStatioUserIndexesCollector struct { - log *slog.Logger -} - -const statioUserIndexesSubsystem = "statio_user_indexes" - -func NewPGStatioUserIndexesCollector(config collectorConfig) (Collector, error) { - return &PGStatioUserIndexesCollector{log: config.logger}, nil -} - -var ( - statioUserIndexesIdxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_read_total"), - "Number of disk blocks read from this index", - []string{"schemaname", "relname", "indexrelname"}, - prometheus.Labels{}, - ) - statioUserIndexesIdxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_hit_total"), - "Number of buffer hits in this index", - []string{"schemaname", "relname", "indexrelname"}, - prometheus.Labels{}, - ) - - statioUserIndexesQuery = ` - SELECT - schemaname, - relname, - indexrelname, - idx_blks_read, - idx_blks_hit - FROM pg_statio_user_indexes - ` -) - -func (c *PGStatioUserIndexesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := db.QueryContext(ctx, - statioUserIndexesQuery) - - if err != nil { - return err - } - defer rows.Close() - for rows.Next() { - var schemaname, relname, indexrelname sql.NullString - var idxBlksRead, idxBlksHit sql.NullFloat64 - - if err := rows.Scan(&schemaname, &relname, &indexrelname, &idxBlksRead, &idxBlksHit); err != nil { - return err - } - schemanameLabel := "unknown" - if schemaname.Valid { - schemanameLabel = schemaname.String - } - relnameLabel := "unknown" - if relname.Valid { - relnameLabel = relname.String - } - indexrelnameLabel := "unknown" - if indexrelname.Valid { - indexrelnameLabel = indexrelname.String - } - labels := []string{schemanameLabel, relnameLabel, indexrelnameLabel} - - idxBlksReadMetric := 0.0 - if idxBlksRead.Valid { - idxBlksReadMetric = idxBlksRead.Float64 - } - ch <- prometheus.MustNewConstMetric( - statioUserIndexesIdxBlksRead, - prometheus.CounterValue, - idxBlksReadMetric, - labels..., - ) - - idxBlksHitMetric := 0.0 - if idxBlksHit.Valid { - idxBlksHitMetric = idxBlksHit.Float64 - } - ch <- prometheus.MustNewConstMetric( - statioUserIndexesIdxBlksHit, - prometheus.CounterValue, - idxBlksHitMetric, - labels..., - ) - } - if err := rows.Err(); err != nil { - return err - } - return nil -} diff --git a/collector/pg_statio_user_indexes_test.go b/collector/pg_statio_user_indexes_test.go deleted file mode 100644 index 174012162..000000000 --- a/collector/pg_statio_user_indexes_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
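The test file that follows checks the NULL-handling convention used throughout these collectors: a NULL label column is reported as "unknown" and a NULL numeric column as 0, so a row of all NULLs still produces a full set of samples. A compact restatement with hypothetical helper names (the collectors above inline this logic per column):

```go
// Hypothetical helpers restating the NULL-handling convention the collectors
// inline per column: NULL labels become "unknown", NULL values become 0.
package main

import (
	"database/sql"
	"fmt"
)

func labelOrUnknown(s sql.NullString) string {
	if s.Valid {
		return s.String
	}
	return "unknown"
}

func valueOrZero(f sql.NullFloat64) float64 {
	if f.Valid {
		return f.Float64
	}
	return 0
}

func main() {
	// Mirrors the null-row test below: every label falls back to "unknown"
	// and every counter to 0.
	fmt.Println(labelOrUnknown(sql.NullString{}), valueOrZero(sql.NullFloat64{}))
}
```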
-package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgStatioUserIndexesCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - inst := &instance{db: db} - columns := []string{ - "schemaname", - "relname", - "indexrelname", - "idx_blks_read", - "idx_blks_hit", - } - rows := sqlmock.NewRows(columns). - AddRow("public", "pgtest_accounts", "pgtest_accounts_pkey", 8, 9) - - mock.ExpectQuery(sanitizeQuery(statioUserIndexesQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatioUserIndexesCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"schemaname": "public", "relname": "pgtest_accounts", "indexrelname": "pgtest_accounts_pkey"}, value: 8, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"schemaname": "public", "relname": "pgtest_accounts", "indexrelname": "pgtest_accounts_pkey"}, value: 9, metricType: dto.MetricType_COUNTER}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestPgStatioUserIndexesCollectorNull(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - inst := &instance{db: db} - columns := []string{ - "schemaname", - "relname", - "indexrelname", - "idx_blks_read", - "idx_blks_hit", - } - rows := sqlmock.NewRows(columns). - AddRow(nil, nil, nil, nil, nil) - - mock.ExpectQuery(sanitizeQuery(statioUserIndexesQuery)).WillReturnRows(rows) - - ch := make(chan prometheus.Metric) - go func() { - defer close(ch) - c := PGStatioUserIndexesCollector{} - - if err := c.Update(context.Background(), inst, ch); err != nil { - t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) - } - }() - expected := []MetricResult{ - {labels: labelMap{"schemaname": "unknown", "relname": "unknown", "indexrelname": "unknown"}, value: 0, metricType: dto.MetricType_COUNTER}, - {labels: labelMap{"schemaname": "unknown", "relname": "unknown", "indexrelname": "unknown"}, value: 0, metricType: dto.MetricType_COUNTER}, - } - convey.Convey("Metrics comparison", t, func() { - for _, expect := range expected { - m := readMetric(<-ch) - convey.So(expect, convey.ShouldResemble, m) - } - }) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go deleted file mode 100644 index 48f6438f2..000000000 --- a/collector/pg_statio_user_tables.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "database/sql" - "log/slog" - - "github.com/prometheus/client_golang/prometheus" -) - -const statioUserTableSubsystem = "statio_user_tables" - -func init() { - registerCollector(statioUserTableSubsystem, defaultEnabled, NewPGStatIOUserTablesCollector) -} - -type PGStatIOUserTablesCollector struct { - log *slog.Logger -} - -func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) { - return &PGStatIOUserTablesCollector{log: config.logger}, nil -} - -var ( - statioUserTablesHeapBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), - "Number of disk blocks read from this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesHeapBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), - "Number of buffer hits in this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesIdxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), - "Number of disk blocks read from all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesIdxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), - "Number of buffer hits in all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesToastBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), - "Number of disk blocks read from this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesToastBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), - "Number of buffer hits in this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesTidxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), - "Number of disk blocks read from this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesTidxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), - "Number of buffer hits in this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - - statioUserTablesQuery = `SELECT - current_database() datname, - schemaname, - relname, - heap_blks_read, - heap_blks_hit, - idx_blks_read, - idx_blks_hit, - toast_blks_read, - toast_blks_hit, - tidx_blks_read, - tidx_blks_hit - FROM pg_statio_user_tables` -) - -func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - rows, err := 
db.QueryContext(ctx, - statioUserTablesQuery) - - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var datname, schemaname, relname sql.NullString - var heapBlksRead, heapBlksHit, idxBlksRead, idxBlksHit, toastBlksRead, toastBlksHit, tidxBlksRead, tidxBlksHit sql.NullInt64 - - if err := rows.Scan(&datname, &schemaname, &relname, &heapBlksRead, &heapBlksHit, &idxBlksRead, &idxBlksHit, &toastBlksRead, &toastBlksHit, &tidxBlksRead, &tidxBlksHit); err != nil { - return err - } - datnameLabel := "unknown" - if datname.Valid { - datnameLabel = datname.String - } - schemanameLabel := "unknown" - if schemaname.Valid { - schemanameLabel = schemaname.String - } - relnameLabel := "unknown" - if relname.Valid { - relnameLabel = relname.String - } - - heapBlksReadMetric := 0.0 - if heapBlksRead.Valid { - heapBlksReadMetric = float64(heapBlksRead.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesHeapBlksRead, - prometheus.CounterValue, - heapBlksReadMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - heapBlksHitMetric := 0.0 - if heapBlksHit.Valid { - heapBlksHitMetric = float64(heapBlksHit.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesHeapBlksHit, - prometheus.CounterValue, - heapBlksHitMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - idxBlksReadMetric := 0.0 - if idxBlksRead.Valid { - idxBlksReadMetric = float64(idxBlksRead.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesIdxBlksRead, - prometheus.CounterValue, - idxBlksReadMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - idxBlksHitMetric := 0.0 - if idxBlksHit.Valid { - idxBlksHitMetric = float64(idxBlksHit.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesIdxBlksHit, - prometheus.CounterValue, - idxBlksHitMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - toastBlksReadMetric := 0.0 - if toastBlksRead.Valid { - toastBlksReadMetric = float64(toastBlksRead.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesToastBlksRead, - prometheus.CounterValue, - toastBlksReadMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - toastBlksHitMetric := 0.0 - if toastBlksHit.Valid { - toastBlksHitMetric = float64(toastBlksHit.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesToastBlksHit, - prometheus.CounterValue, - toastBlksHitMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - tidxBlksReadMetric := 0.0 - if tidxBlksRead.Valid { - tidxBlksReadMetric = float64(tidxBlksRead.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesTidxBlksRead, - prometheus.CounterValue, - tidxBlksReadMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - - tidxBlksHitMetric := 0.0 - if tidxBlksHit.Valid { - tidxBlksHitMetric = float64(tidxBlksHit.Int64) - } - ch <- prometheus.MustNewConstMetric( - statioUserTablesTidxBlksHit, - prometheus.CounterValue, - tidxBlksHitMetric, - datnameLabel, schemanameLabel, relnameLabel, - ) - } - return rows.Err() -} diff --git a/collector/pg_statio_user_tables_test.go b/collector/pg_statio_user_tables_test.go deleted file mode 100644 index c7304a38c..000000000 --- a/collector/pg_statio_user_tables_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package collector
-
-import (
-	"context"
-	"testing"
-
-	"github.com/DATA-DOG/go-sqlmock"
-	"github.com/prometheus/client_golang/prometheus"
-	dto "github.com/prometheus/client_model/go"
-	"github.com/smartystreets/goconvey/convey"
-)
-
-func TestPGStatIOUserTablesCollector(t *testing.T) {
-	db, mock, err := sqlmock.New()
-	if err != nil {
-		t.Fatalf("Error opening a stub db connection: %s", err)
-	}
-	defer db.Close()
-
-	inst := &instance{db: db}
-
-	columns := []string{
-		"datname",
-		"schemaname",
-		"relname",
-		"heap_blks_read",
-		"heap_blks_hit",
-		"idx_blks_read",
-		"idx_blks_hit",
-		"toast_blks_read",
-		"toast_blks_hit",
-		"tidx_blks_read",
-		"tidx_blks_hit",
-	}
-	rows := sqlmock.NewRows(columns).
-		AddRow("postgres",
-			"public",
-			"a_table",
-			1,
-			2,
-			3,
-			4,
-			5,
-			6,
-			7,
-			8)
-	mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows)
-	ch := make(chan prometheus.Metric)
-	go func() {
-		defer close(ch)
-		c := PGStatIOUserTablesCollector{}
-
-		if err := c.Update(context.Background(), inst, ch); err != nil {
-			t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err)
-		}
-	}()
-
-	expected := []MetricResult{
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 1},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 2},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 3},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 4},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 5},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 6},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 7},
-		{labels: labelMap{"datname": "postgres", "schemaname": "public", "relname": "a_table"}, metricType: dto.MetricType_COUNTER, value: 8},
-	}
-
-	convey.Convey("Metrics comparison", t, func() {
-		for _, expect := range expected {
-			m := readMetric(<-ch)
-			convey.So(expect, convey.ShouldResemble, m)
-		}
-	})
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
-
-func TestPGStatIOUserTablesCollectorNullValues(t *testing.T) {
-	db, mock, err := sqlmock.New()
-	if err != nil {
-		t.Fatalf("Error opening a stub db connection: %s", err)
-	}
-	defer db.Close()
-
-	inst := &instance{db: db}
-
-	columns := []string{
-		"datname",
-		"schemaname",
-		"relname",
-		"heap_blks_read",
-		"heap_blks_hit",
-		"idx_blks_read",
-		"idx_blks_hit",
-		"toast_blks_read",
-		"toast_blks_hit",
-		"tidx_blks_read",
-		"tidx_blks_hit",
-	}
-	rows := sqlmock.NewRows(columns).
-		AddRow(nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil)
-	mock.ExpectQuery(sanitizeQuery(statioUserTablesQuery)).WillReturnRows(rows)
-	ch := make(chan prometheus.Metric)
-	go func() {
-		defer close(ch)
-		c := PGStatIOUserTablesCollector{}
-
-		if err := c.Update(context.Background(), inst, ch); err != nil {
-			t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err)
-		}
-	}()
-
-	expected := []MetricResult{
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-		{labels: labelMap{"datname": "unknown", "schemaname": "unknown", "relname": "unknown"}, metricType: dto.MetricType_COUNTER, value: 0},
-	}
-
-	convey.Convey("Metrics comparison", t, func() {
-		for _, expect := range expected {
-			m := readMetric(<-ch)
-			convey.So(expect, convey.ShouldResemble, m)
-		}
-	})
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
diff --git a/collector/pg_wal.go b/collector/pg_wal.go
deleted file mode 100644
index afa8fcef6..000000000
--- a/collector/pg_wal.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
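The null-value tests above pin down a convention shared by all of these collectors: a SQL NULL never drops a metric. A NULL label falls back to "unknown" and a NULL counter is exported as 0, so series stay stable even for rows with missing stats. A minimal self-contained sketch of that pattern (the helper names are illustrative, not from the deleted files):

```go
package main

import (
	"database/sql"
	"fmt"
)

// nullSafeLabel mirrors the collectors' label fallback: NULL becomes "unknown".
func nullSafeLabel(s sql.NullString) string {
	if s.Valid {
		return s.String
	}
	return "unknown"
}

// nullSafeValue mirrors the numeric fallback: NULL is exported as 0.
func nullSafeValue(n sql.NullInt64) float64 {
	if n.Valid {
		return float64(n.Int64)
	}
	return 0
}

func main() {
	// Zero-valued Null types model a NULL column scan.
	fmt.Println(nullSafeLabel(sql.NullString{}), nullSafeValue(sql.NullInt64{})) // unknown 0
}
```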
- -package collector - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" -) - -const walSubsystem = "wal" - -func init() { - registerCollector(walSubsystem, defaultEnabled, NewPGWALCollector) -} - -type PGWALCollector struct { -} - -func NewPGWALCollector(config collectorConfig) (Collector, error) { - return &PGWALCollector{}, nil -} - -var ( - pgWALSegments = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - walSubsystem, - "segments", - ), - "Number of WAL segments", - []string{}, nil, - ) - pgWALSize = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - walSubsystem, - "size_bytes", - ), - "Total size of WAL segments", - []string{}, nil, - ) - - pgWALQuery = ` - SELECT - COUNT(*) AS segments, - SUM(size) AS size - FROM pg_ls_waldir() - WHERE name ~ '^[0-9A-F]{24}$'` -) - -func (c PGWALCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { - db := instance.getDB() - row := db.QueryRowContext(ctx, - pgWALQuery, - ) - - var segments uint64 - var size uint64 - err := row.Scan(&segments, &size) - if err != nil { - return err - } - ch <- prometheus.MustNewConstMetric( - pgWALSegments, - prometheus.GaugeValue, float64(segments), - ) - ch <- prometheus.MustNewConstMetric( - pgWALSize, - prometheus.GaugeValue, float64(size), - ) - return nil -} diff --git a/collector/pg_wal_test.go b/collector/pg_wal_test.go deleted file mode 100644 index 745105a13..000000000 --- a/collector/pg_wal_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package collector - -import ( - "context" - "testing" - - "github.com/DATA-DOG/go-sqlmock" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/smartystreets/goconvey/convey" -) - -func TestPgWALCollector(t *testing.T) { - db, mock, err := sqlmock.New() - if err != nil { - t.Fatalf("Error opening a stub db connection: %s", err) - } - defer db.Close() - - inst := &instance{db: db} - - columns := []string{"segments", "size"} - rows := sqlmock.NewRows(columns). 
-		AddRow(47, 788529152)
-	mock.ExpectQuery(sanitizeQuery(pgWALQuery)).WillReturnRows(rows)
-
-	ch := make(chan prometheus.Metric)
-	go func() {
-		defer close(ch)
-		c := PGWALCollector{}
-
-		if err := c.Update(context.Background(), inst, ch); err != nil {
-			t.Errorf("Error calling PGWALCollector.Update: %s", err)
-		}
-	}()
-
-	expected := []MetricResult{
-		{labels: labelMap{}, value: 47, metricType: dto.MetricType_GAUGE},
-		{labels: labelMap{}, value: 788529152, metricType: dto.MetricType_GAUGE},
-	}
-
-	convey.Convey("Metrics comparison", t, func() {
-		for _, expect := range expected {
-			m := readMetric(<-ch)
-			convey.So(expect, convey.ShouldResemble, m)
-		}
-	})
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
diff --git a/collector/pg_xlog_location.go b/collector/pg_xlog_location.go
deleted file mode 100644
index 5f091471f..000000000
--- a/collector/pg_xlog_location.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package collector
-
-import (
-	"context"
-	"log/slog"
-
-	"github.com/blang/semver/v4"
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-const xlogLocationSubsystem = "xlog_location"
-
-func init() {
-	registerCollector(xlogLocationSubsystem, defaultDisabled, NewPGXlogLocationCollector)
-}
-
-type PGXlogLocationCollector struct {
-	log *slog.Logger
-}
-
-func NewPGXlogLocationCollector(config collectorConfig) (Collector, error) {
-	return &PGXlogLocationCollector{log: config.logger}, nil
-}
-
-var (
-	xlogLocationBytes = prometheus.NewDesc(
-		prometheus.BuildFQName(namespace, xlogLocationSubsystem, "bytes"),
-		"Postgres LSN (log sequence number) being generated on primary or replayed on replica (truncated to low 52 bits)",
-		[]string{},
-		prometheus.Labels{},
-	)
-
-	xlogLocationQuery = `
-	SELECT CASE
-		WHEN pg_is_in_recovery() THEN (pg_last_xlog_replay_location() - '0/0') % (2^52)::bigint
-		ELSE (pg_current_xlog_location() - '0/0') % (2^52)::bigint
-	END AS bytes
-	`
-)
-
-func (c PGXlogLocationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
-	db := instance.getDB()
-
-	// xlog was renamed to WAL in PostgreSQL 10
-	// https://wiki.postgresql.org/wiki/New_in_postgres_10#Renaming_of_.22xlog.22_to_.22wal.22_Globally_.28and_location.2Flsn.29
-	after10 := instance.version.Compare(semver.MustParse("10.0.0"))
-	if after10 >= 0 {
-		c.log.Warn("xlog_location collector is not available on PostgreSQL >= 10.0.0, skipping")
-		return nil
-	}
-
-	rows, err := db.QueryContext(ctx,
-		xlogLocationQuery)
-
-	if err != nil {
-		return err
-	}
-	defer rows.Close()
-
-	for rows.Next() {
-		var bytes float64
-
-		if err := rows.Scan(&bytes); err != nil {
-			return err
-		}
-
-		ch <- prometheus.MustNewConstMetric(
-			xlogLocationBytes,
-			prometheus.GaugeValue,
-			bytes,
-		)
-	}
-	if err := rows.Err(); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/collector/pg_xlog_location_test.go b/collector/pg_xlog_location_test.go
deleted file mode 100644
index 561a7df94..000000000
--- a/collector/pg_xlog_location_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package collector
-
-import (
-	"context"
-	"testing"
-
-	"github.com/DATA-DOG/go-sqlmock"
-	"github.com/prometheus/client_golang/prometheus"
-	dto "github.com/prometheus/client_model/go"
-	"github.com/smartystreets/goconvey/convey"
-)
-
-func TestPGXlogLocationCollector(t *testing.T) {
-	db, mock, err := sqlmock.New()
-	if err != nil {
-		t.Fatalf("Error opening a stub db connection: %s", err)
-	}
-	defer db.Close()
-	inst := &instance{db: db}
-	columns := []string{
-		"bytes",
-	}
-	rows := sqlmock.NewRows(columns).
-		AddRow(53401)
-
-	mock.ExpectQuery(sanitizeQuery(xlogLocationQuery)).WillReturnRows(rows)
-
-	ch := make(chan prometheus.Metric)
-	go func() {
-		defer close(ch)
-		c := PGXlogLocationCollector{}
-
-		if err := c.Update(context.Background(), inst, ch); err != nil {
-			t.Errorf("Error calling PGXlogLocationCollector.Update: %s", err)
-		}
-	}()
-	expected := []MetricResult{
-		{labels: labelMap{}, value: 53401, metricType: dto.MetricType_GAUGE},
-	}
-	convey.Convey("Metrics comparison", t, func() {
-		for _, expect := range expected {
-			m := readMetric(<-ch)
-			convey.So(expect, convey.ShouldResemble, m)
-		}
-	})
-	if err := mock.ExpectationsWereMet(); err != nil {
-		t.Errorf("there were unfulfilled expectations: %s", err)
-	}
-}
diff --git a/collector/probe.go b/collector/probe.go
deleted file mode 100644
index 54a06261f..000000000
--- a/collector/probe.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
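A note on the `% (2^52)::bigint` truncation in the xlog-location query above: the metric is exported as a float64 gauge, and a float64 carries 52 explicit mantissa bits, so an LSN byte offset truncated to its low 52 bits always survives the float conversion exactly, while a full 64-bit value generally would not. A quick self-contained check (the sample LSN is made up):

```go
package main

import "fmt"

func main() {
	const mask = uint64(1)<<52 - 1    // low 52 bits, matching % (2^52)::bigint
	lsn := uint64(0xDE0BF4A1C2E3F5A7) // hypothetical raw WAL byte position
	truncated := lsn & mask           // equivalent to lsn % 2^52

	// The truncated value round-trips through float64 without precision loss.
	fmt.Println(uint64(float64(truncated)) == truncated) // true
	// The untruncated 64-bit value is rounded by the conversion.
	fmt.Println(uint64(float64(lsn)) == lsn) // false
}
```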
- -package collector - -import ( - "context" - "log/slog" - "sync" - - "github.com/prometheus-community/postgres_exporter/config" - "github.com/prometheus/client_golang/prometheus" -) - -type ProbeCollector struct { - registry *prometheus.Registry - collectors map[string]Collector - logger *slog.Logger - instance *instance -} - -func NewProbeCollector(logger *slog.Logger, excludeDatabases []string, registry *prometheus.Registry, dsn config.DSN) (*ProbeCollector, error) { - collectors := make(map[string]Collector) - initiatedCollectorsMtx.Lock() - defer initiatedCollectorsMtx.Unlock() - for key, enabled := range collectorState { - // TODO: Handle filters - // if !*enabled || (len(f) > 0 && !f[key]) { - // continue - // } - if !*enabled { - continue - } - if collector, ok := initiatedCollectors[key]; ok { - collectors[key] = collector - } else { - collector, err := factories[key]( - collectorConfig{ - logger: logger.With("collector", key), - excludeDatabases: excludeDatabases, - }) - if err != nil { - return nil, err - } - collectors[key] = collector - initiatedCollectors[key] = collector - } - } - - instance, err := newInstance(dsn.GetConnectionString()) - if err != nil { - return nil, err - } - - return &ProbeCollector{ - registry: registry, - collectors: collectors, - logger: logger, - instance: instance, - }, nil -} - -func (pc *ProbeCollector) Describe(ch chan<- *prometheus.Desc) { -} - -func (pc *ProbeCollector) Collect(ch chan<- prometheus.Metric) { - // Set up the database connection for the collector. - err := pc.instance.setup() - if err != nil { - pc.logger.Error("Error opening connection to database", "err", err) - return - } - defer pc.instance.Close() - - wg := sync.WaitGroup{} - wg.Add(len(pc.collectors)) - for name, c := range pc.collectors { - go func(name string, c Collector) { - execute(context.TODO(), name, c, pc.instance, ch, pc.logger) - wg.Done() - }(name, c) - } - wg.Wait() -} - -func (pc *ProbeCollector) Close() error { - return pc.instance.Close() -} diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 52c66513a..000000000 --- a/config/config.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
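For orientation, here is a hedged sketch of how the `ProbeCollector` above could be wired into a `/probe`-style multi-target handler: one fresh registry and one collector per probed target, so targets' metrics stay isolated. The HTTP wiring, port, and use of an empty `AuthModule` are assumptions for illustration, not code from this repository:

```go
package main

import (
	"log/slog"
	"net/http"
	"os"

	"github.com/prometheus-community/postgres_exporter/collector"
	"github.com/prometheus-community/postgres_exporter/config"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	http.HandleFunc("/probe", func(w http.ResponseWriter, r *http.Request) {
		// An empty AuthModule leaves the target's own credentials untouched.
		dsn, err := config.AuthModule{}.ConfigureTarget(r.URL.Query().Get("target"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		// A registry per probe keeps per-target series from mixing.
		reg := prometheus.NewRegistry()
		pc, err := collector.NewProbeCollector(logger, nil, reg, dsn)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		reg.MustRegister(pc)
		promhttp.HandlerFor(reg, promhttp.HandlerOpts{}).ServeHTTP(w, r)
	})

	logger.Error("server exited", "err", http.ListenAndServe(":9187", nil))
}
```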
- -package config - -import ( - "fmt" - "log/slog" - "os" - "sync" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "gopkg.in/yaml.v3" -) - -var ( - configReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "postgres_exporter", - Name: "config_last_reload_successful", - Help: "Postgres exporter config loaded successfully.", - }) - - configReloadSeconds = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "postgres_exporter", - Name: "config_last_reload_success_timestamp_seconds", - Help: "Timestamp of the last successful configuration reload.", - }) -) - -type Config struct { - AuthModules map[string]AuthModule `yaml:"auth_modules"` -} - -type AuthModule struct { - Type string `yaml:"type"` - UserPass UserPass `yaml:"userpass,omitempty"` - // Add alternative auth modules here - Options map[string]string `yaml:"options"` -} - -type UserPass struct { - Username string `yaml:"username"` - Password string `yaml:"password"` -} - -type Handler struct { - sync.RWMutex - Config *Config -} - -func (ch *Handler) GetConfig() *Config { - ch.RLock() - defer ch.RUnlock() - return ch.Config -} - -func (ch *Handler) ReloadConfig(f string, logger *slog.Logger) error { - config := &Config{} - var err error - defer func() { - if err != nil { - configReloadSuccess.Set(0) - } else { - configReloadSuccess.Set(1) - configReloadSeconds.SetToCurrentTime() - } - }() - - yamlReader, err := os.Open(f) - if err != nil { - return fmt.Errorf("error opening config file %q: %s", f, err) - } - defer yamlReader.Close() - decoder := yaml.NewDecoder(yamlReader) - decoder.KnownFields(true) - - if err = decoder.Decode(config); err != nil { - return fmt.Errorf("error parsing config file %q: %s", f, err) - } - - ch.Lock() - ch.Config = config - ch.Unlock() - return nil -} - -func (m AuthModule) ConfigureTarget(target string) (DSN, error) { - dsn, err := dsnFromString(target) - if err != nil { - return DSN{}, err - } - - // Set the credentials from the authentication module - // TODO(@sysadmind): What should the order of precedence be? - if m.Type == "userpass" { - if m.UserPass.Username != "" { - dsn.username = m.UserPass.Username - } - if m.UserPass.Password != "" { - dsn.password = m.UserPass.Password - } - } - - for k, v := range m.Options { - dsn.query.Set(k, v) - } - - return dsn, nil -} diff --git a/config/config_test.go b/config/config_test.go deleted file mode 100644 index fa59c9b40..000000000 --- a/config/config_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
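`Handler.ReloadConfig` above is safe to call repeatedly, since it swaps the config under the handler's write lock and updates the `config_last_reload_*` gauges on every attempt. A sketch of a typical reload-on-SIGHUP loop; the signal wiring and the `postgres_exporter.yml` path are assumptions for illustration, not taken from this repository:

```go
package main

import (
	"log/slog"
	"os"
	"os/signal"
	"syscall"

	"github.com/prometheus-community/postgres_exporter/config"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	ch := &config.Handler{Config: &config.Config{}}

	if err := ch.ReloadConfig("postgres_exporter.yml", logger); err != nil {
		logger.Error("initial config load failed", "err", err)
		os.Exit(1)
	}

	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	for range hup { // each SIGHUP re-reads the file; failures keep the old config
		if err := ch.ReloadConfig("postgres_exporter.yml", logger); err != nil {
			logger.Error("config reload failed", "err", err)
		}
	}
}
```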
- -package config - -import ( - "testing" -) - -func TestLoadConfig(t *testing.T) { - ch := &Handler{ - Config: &Config{}, - } - - err := ch.ReloadConfig("testdata/config-good.yaml", nil) - if err != nil { - t.Errorf("error loading config: %s", err) - } -} - -func TestLoadBadConfigs(t *testing.T) { - ch := &Handler{ - Config: &Config{}, - } - - tests := []struct { - input string - want string - }{ - { - input: "testdata/config-bad-auth-module.yaml", - want: "error parsing config file \"testdata/config-bad-auth-module.yaml\": yaml: unmarshal errors:\n line 3: field pretendauth not found in type config.AuthModule", - }, - { - input: "testdata/config-bad-extra-field.yaml", - want: "error parsing config file \"testdata/config-bad-extra-field.yaml\": yaml: unmarshal errors:\n line 8: field doesNotExist not found in type config.AuthModule", - }, - } - - for _, test := range tests { - t.Run(test.input, func(t *testing.T) { - got := ch.ReloadConfig(test.input, nil) - if got == nil || got.Error() != test.want { - t.Fatalf("ReloadConfig(%q) = %v, want %s", test.input, got, test.want) - } - }) - } -} diff --git a/config/dsn.go b/config/dsn.go deleted file mode 100644 index 168d00d62..000000000 --- a/config/dsn.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "fmt" - "net/url" - "regexp" - "strings" - "unicode" -) - -// DSN represents a parsed datasource. It contains fields for the individual connection components. -type DSN struct { - scheme string - username string - password string - host string - path string - query url.Values -} - -// String makes a dsn safe to print by excluding any passwords. This allows dsn to be used in -// strings and log messages without needing to call a redaction function first. -func (d DSN) String() string { - if d.password != "" { - return fmt.Sprintf("%s://%s:******@%s%s?%s", d.scheme, d.username, d.host, d.path, d.query.Encode()) - } - - if d.username != "" { - return fmt.Sprintf("%s://%s@%s%s?%s", d.scheme, d.username, d.host, d.path, d.query.Encode()) - } - - return fmt.Sprintf("%s://%s%s?%s", d.scheme, d.host, d.path, d.query.Encode()) -} - -// GetConnectionString returns the URL to pass to the driver for database connections. This value should not be logged. -func (d DSN) GetConnectionString() string { - u := url.URL{ - Scheme: d.scheme, - Host: d.host, - Path: d.path, - RawQuery: d.query.Encode(), - } - - // Username and Password - if d.username != "" { - u.User = url.UserPassword(d.username, d.password) - } - - return u.String() -} - -// dsnFromString parses a connection string into a dsn. It will attempt to parse the string as -// a URL and as a set of key=value pairs. If both attempts fail, dsnFromString will return an error. 
-func dsnFromString(in string) (DSN, error) {
-	if strings.HasPrefix(in, "postgresql://") || strings.HasPrefix(in, "postgres://") {
-		return dsnFromURL(in)
-	}
-
-	// Try to parse as key=value pairs
-	d, err := dsnFromKeyValue(in)
-	if err == nil {
-		return d, nil
-	}
-
-	// Parse the string as a URL, with the scheme prefixed
-	d, err = dsnFromURL(fmt.Sprintf("postgresql://%s", in))
-	if err == nil {
-		return d, nil
-	}
-
-	return DSN{}, fmt.Errorf("could not understand DSN")
-}
-
-// dsnFromURL parses the input as a URL and returns the dsn representation.
-func dsnFromURL(in string) (DSN, error) {
-	u, err := url.Parse(in)
-	if err != nil {
-		return DSN{}, err
-	}
-	pass, _ := u.User.Password()
-	user := u.User.Username()
-
-	query := u.Query()
-
-	if queryPass := query.Get("password"); queryPass != "" {
-		if pass == "" {
-			pass = queryPass
-		}
-	}
-	query.Del("password")
-
-	if queryUser := query.Get("user"); queryUser != "" {
-		if user == "" {
-			user = queryUser
-		}
-	}
-	query.Del("user")
-
-	d := DSN{
-		scheme:   u.Scheme,
-		username: user,
-		password: pass,
-		host:     u.Host,
-		path:     u.Path,
-		query:    query,
-	}
-
-	return d, nil
-}
-
-// dsnFromKeyValue parses the input as a set of key=value pairs and returns the dsn representation.
-func dsnFromKeyValue(in string) (DSN, error) {
-	// Attempt to confirm at least one key=value pair before starting the rune parser
-	connstringRe := regexp.MustCompile(`^ *[a-zA-Z0-9]+ *= *[^= ]+`)
-	if !connstringRe.MatchString(in) {
-		return DSN{}, fmt.Errorf("input is not a key-value DSN")
-	}
-
-	// Anything other than known fields should be part of the querystring
-	query := url.Values{}
-
-	pairs, err := parseKeyValue(in)
-	if err != nil {
-		return DSN{}, fmt.Errorf("failed to parse key-value DSN: %v", err)
-	}
-
-	// Build the dsn from the key=value pairs
-	d := DSN{
-		scheme: "postgresql",
-	}
-
-	hostname := ""
-	port := ""
-
-	for k, v := range pairs {
-		switch k {
-		case "host":
-			hostname = v
-		case "port":
-			port = v
-		case "user":
-			d.username = v
-		case "password":
-			d.password = v
-		default:
-			query.Set(k, v)
-		}
-	}
-
-	if hostname == "" {
-		hostname = "localhost"
-	}
-
-	if port == "" {
-		d.host = hostname
-	} else {
-		d.host = fmt.Sprintf("%s:%s", hostname, port)
-	}
-
-	d.query = query
-
-	return d, nil
-}
-
-// parseKeyValue is a key=value parser. It loops over each rune to split out keys and values,
-// attempting to honor quoted values. parseKeyValue will return an error if it is unable
-// to properly parse the input.
-func parseKeyValue(in string) (map[string]string, error) {
-	out := map[string]string{}
-
-	inPart := false
-	inQuote := false
-	part := []rune{}
-	key := ""
-	for _, c := range in {
-		switch {
-		case unicode.In(c, unicode.Quotation_Mark):
-			if inQuote {
-				inQuote = false
-			} else {
-				inQuote = true
-			}
-		case unicode.In(c, unicode.White_Space):
-			if inPart {
-				if inQuote {
-					part = append(part, c)
-				} else {
-					// Are we finishing a key=value?
-					if key == "" {
-						return out, fmt.Errorf("invalid input")
-					}
-					out[key] = string(part)
-					inPart = false
-					part = []rune{}
-				}
-			} else {
-				// Are we finishing a key=value?
- if key == "" { - return out, fmt.Errorf("invalid input") - } - out[key] = string(part) - inPart = false - part = []rune{} - // Do something with the value - } - case c == '=': - if inPart { - inPart = false - key = string(part) - part = []rune{} - } else { - return out, fmt.Errorf("invalid input") - } - default: - inPart = true - part = append(part, c) - } - } - - if key != "" && len(part) > 0 { - out[key] = string(part) - } - - return out, nil -} diff --git a/config/dsn_test.go b/config/dsn_test.go deleted file mode 100644 index 68340cd09..000000000 --- a/config/dsn_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "net/url" - "reflect" - "testing" -) - -// Test_dsn_String is designed to test different dsn combinations for their string representation. -// dsn.String() is designed to be safe to print, redacting any password information and these test -// cases are intended to cover known cases. -func Test_dsn_String(t *testing.T) { - type fields struct { - scheme string - username string - password string - host string - path string - query url.Values - } - tests := []struct { - name string - fields fields - want string - }{ - { - name: "Without Password", - fields: fields{ - scheme: "postgresql", - username: "test", - host: "localhost:5432", - query: url.Values{}, - }, - want: "postgresql://test@localhost:5432?", - }, - { - name: "With Password", - fields: fields{ - scheme: "postgresql", - username: "test", - password: "supersecret", - host: "localhost:5432", - query: url.Values{}, - }, - want: "postgresql://test:******@localhost:5432?", - }, - { - name: "With Password and Query String", - fields: fields{ - scheme: "postgresql", - username: "test", - password: "supersecret", - host: "localhost:5432", - query: url.Values{ - "ssldisable": []string{"true"}, - }, - }, - want: "postgresql://test:******@localhost:5432?ssldisable=true", - }, - { - name: "With Password, Path, and Query String", - fields: fields{ - scheme: "postgresql", - username: "test", - password: "supersecret", - host: "localhost:5432", - path: "/somevalue", - query: url.Values{ - "ssldisable": []string{"true"}, - }, - }, - want: "postgresql://test:******@localhost:5432/somevalue?ssldisable=true", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - d := DSN{ - scheme: tt.fields.scheme, - username: tt.fields.username, - password: tt.fields.password, - host: tt.fields.host, - path: tt.fields.path, - query: tt.fields.query, - } - if got := d.String(); got != tt.want { - t.Errorf("dsn.String() = %v, want %v", got, tt.want) - } - }) - } -} - -// Test_dsnFromString tests the dsnFromString function with known variations -// of connection string inputs to ensure that it properly parses the input into -// a dsn. 
-func Test_dsnFromString(t *testing.T) { - - tests := []struct { - name string - input string - want DSN - wantErr bool - }{ - { - name: "Key value with password", - input: "host=host.example.com user=postgres port=5432 password=s3cr3t", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - username: "postgres", - password: "s3cr3t", - query: url.Values{}, - }, - wantErr: false, - }, - { - name: "Key value with quoted password and space", - input: "host=host.example.com user=postgres port=5432 password=\"s3cr 3t\"", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - username: "postgres", - password: "s3cr 3t", - query: url.Values{}, - }, - wantErr: false, - }, - { - name: "Key value with different order", - input: "password=abcde host=host.example.com user=postgres port=5432", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - username: "postgres", - password: "abcde", - query: url.Values{}, - }, - wantErr: false, - }, - { - name: "Key value with different order, quoted password, duplicate password", - input: "password=abcde host=host.example.com user=postgres port=5432 password=\"s3cr 3t\"", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - username: "postgres", - password: "s3cr 3t", - query: url.Values{}, - }, - wantErr: false, - }, - { - name: "URL with user in query string", - input: "postgresql://host.example.com:5432/tsdb?user=postgres", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - path: "/tsdb", - query: url.Values{}, - username: "postgres", - }, - wantErr: false, - }, - { - name: "URL with user and password", - input: "postgresql://user:s3cret@host.example.com:5432/tsdb?user=postgres", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - path: "/tsdb", - query: url.Values{}, - username: "user", - password: "s3cret", - }, - wantErr: false, - }, - { - name: "Alternative URL prefix", - input: "postgres://user:s3cret@host.example.com:5432/tsdb?user=postgres", - want: DSN{ - scheme: "postgres", - host: "host.example.com:5432", - path: "/tsdb", - query: url.Values{}, - username: "user", - password: "s3cret", - }, - wantErr: false, - }, - { - name: "URL with user and password in query string", - input: "postgresql://host.example.com:5432/tsdb?user=postgres&password=s3cr3t", - want: DSN{ - scheme: "postgresql", - host: "host.example.com:5432", - path: "/tsdb", - query: url.Values{}, - username: "postgres", - password: "s3cr3t", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := dsnFromString(tt.input) - if (err != nil) != tt.wantErr { - t.Errorf("dsnFromString() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("dsnFromString() = %+v, want %+v", got, tt.want) - } - }) - } -} diff --git a/config/testdata/config-bad-auth-module.yaml b/config/testdata/config-bad-auth-module.yaml deleted file mode 100644 index 8f718dd5a..000000000 --- a/config/testdata/config-bad-auth-module.yaml +++ /dev/null @@ -1,7 +0,0 @@ -auth_modules: - foo: - pretendauth: - username: test - password: pass - options: - extra: "1" diff --git a/config/testdata/config-bad-extra-field.yaml b/config/testdata/config-bad-extra-field.yaml deleted file mode 100644 index f6ff6d6cf..000000000 --- a/config/testdata/config-bad-extra-field.yaml +++ /dev/null @@ -1,8 +0,0 @@ -auth_modules: - foo: - userpass: - username: test - password: pass - options: - extra: "1" - doesNotExist: test diff 
--git a/config/testdata/config-good.yaml b/config/testdata/config-good.yaml
deleted file mode 100644
index 13453e26f..000000000
--- a/config/testdata/config-good.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-auth_modules:
-  first:
-    type: userpass
-    userpass:
-      username: first
-      password: firstpass
-    options:
-      sslmode: disable
diff --git a/gh-assets-clone.sh b/gh-assets-clone.sh
deleted file mode 100755
index 506485e06..000000000
--- a/gh-assets-clone.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# Script to set up the assets clone of the repository using GIT_ASSETS_BRANCH and
-# GIT_API_KEY.
-
-[ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1
-
-setup_git() {
-  git config --global user.email "travis@travis-ci.org" || exit 1
-  git config --global user.name "Travis CI" || exit 1
-}
-
-# Constants
-ASSETS_DIR=".assets-branch"
-
-# Clone the assets branch with the correct credentials
-git clone --single-branch -b "$GIT_ASSETS_BRANCH" \
-    "https://$%7BGIT_API_KEY%7D@github.com/$%7BTRAVIS_REPO_SLUG%7D.git" "$ASSETS_DIR" || exit 1
-
diff --git a/gh-metrics-push.sh b/gh-metrics-push.sh
deleted file mode 100755
index 37f335de8..000000000
--- a/gh-metrics-push.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-# Script to copy and push new metric versions to the assets branch.
-
-[ ! -z "$GIT_ASSETS_BRANCH" ] || exit 1
-[ ! -z "$GIT_API_KEY" ] || exit 1
-
-version=$(git describe HEAD) || exit 1
-
-# Constants
-ASSETS_DIR=".assets-branch"
-METRICS_DIR="$ASSETS_DIR/metriclists"
-
-# Ensure metrics dir exists
-mkdir -p "$METRICS_DIR/"
-
-# Remove old files so we spot deletions (glob must stay outside the quotes)
-rm -f "$METRICS_DIR"/.*.unique
-
-# Copy new files
-cp -f -t "$METRICS_DIR/" ./.metrics.*.prom.unique || exit 1
-
-# Enter the assets dir and push.
-cd "$ASSETS_DIR" || exit 1
-
-git add "metriclists" || exit 1
-git commit -m "Added unique metrics for build from $version" || exit 1
-git push origin "$GIT_ASSETS_BRANCH" || exit 1
-
-exit 0
\ No newline at end of file
diff --git a/go.mod b/go.mod
deleted file mode 100644
index 9acdafc81..000000000
--- a/go.mod
+++ /dev/null
@@ -1,47 +0,0 @@
-module github.com/prometheus-community/postgres_exporter
-
-go 1.23.0
-
-toolchain go1.24.1
-
-require (
-	github.com/DATA-DOG/go-sqlmock v1.5.2
-	github.com/alecthomas/kingpin/v2 v2.4.0
-	github.com/blang/semver/v4 v4.0.0
-	github.com/lib/pq v1.10.9
-	github.com/prometheus/client_golang v1.22.0
-	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.63.0
-	github.com/prometheus/exporter-toolkit v0.14.0
-	github.com/smartystreets/goconvey v1.8.1
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
-	gopkg.in/yaml.v2 v2.4.0
-	gopkg.in/yaml.v3 v3.0.1
-)
-
-require (
-	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/gopherjs/gopherjs v1.17.2 // indirect
-	github.com/jpillora/backoff v1.0.0 // indirect
-	github.com/jtolds/gls v4.20.0+incompatible // indirect
-	github.com/kr/pretty v0.3.1 // indirect
-	github.com/kr/text v0.2.0 // indirect
-	github.com/mdlayher/socket v0.4.1 // indirect
-	github.com/mdlayher/vsock v1.2.1 // indirect
-	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/rogpeppe/go-internal v1.10.0 // indirect
-	github.com/smarty/assertions v1.15.0 // indirect
-
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index cd073f325..000000000 --- a/go.sum +++ /dev/null @@ -1,97 +0,0 @@ -github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= -github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= -github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= -github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= -github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= -github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= -github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= -github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= -github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= -github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/metriclists/.metrics.replicated.10.prom.unique b/metriclists/.metrics.replicated.10.prom.unique new file mode 100644 index 000000000..bfa7be0bc --- /dev/null +++ b/metriclists/.metrics.replicated.10.prom.unique @@ -0,0 +1,281 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold 
+pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_backend_flush_after_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_flush_after_bytes +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_flush_after_bytes +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_gathermerge +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_gin_pending_list_limit_bytes +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_idle_in_transaction_session_timeout_seconds +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_replication_commands +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction 
+pg_settings_max_logical_replication_workers +pg_settings_max_parallel_workers +pg_settings_max_parallel_workers_per_gather +pg_settings_max_pred_locks_per_page +pg_settings_max_pred_locks_per_relation +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_sync_workers_per_subscription +pg_settings_max_wal_senders +pg_settings_max_wal_size_bytes +pg_settings_max_worker_processes +pg_settings_min_parallel_index_scan_size_bytes +pg_settings_min_parallel_table_scan_size_bytes +pg_settings_min_wal_size_bytes +pg_settings_old_snapshot_threshold_seconds +pg_settings_operator_precedence_warning +pg_settings_parallel_setup_cost +pg_settings_parallel_tuple_cost +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_replacement_sort_tuples +pg_settings_restart_after_crash +pg_settings_row_security +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_ssl +pg_settings_ssl_prefer_server_ciphers +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_syslog_sequence_numbers +pg_settings_syslog_split_messages +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_commit_timestamp +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_compression +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_retrieve_retry_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_wal_writer_flush_after_bytes +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset 
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_current_wal_lsn_bytes
+pg_stat_replication_pg_wal_lsn_diff
+pg_up
+postgres_exporter_build_info
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
+process_virtual_memory_max_bytes
+promhttp_metric_handler_requests_in_flight
+promhttp_metric_handler_requests_total
diff --git a/metriclists/.metrics.replicated.11.prom.unique b/metriclists/.metrics.replicated.11.prom.unique
new file mode 100644
index 000000000..87d9a439d
--- /dev/null
+++ b/metriclists/.metrics.replicated.11.prom.unique
@@ -0,0 +1,299 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_multixact_freeze_max_age
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_autovacuum_work_mem_bytes
+pg_settings_backend_flush_after_bytes
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_flush_after_bytes
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_flush_after_bytes
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_data_checksums
+pg_settings_data_directory_mode
+pg_settings_data_sync_retry
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_gathermerge
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_parallel_append
+pg_settings_enable_parallel_hash
+pg_settings_enable_partition_pruning
+pg_settings_enable_partitionwise_aggregate
+pg_settings_enable_partitionwise_join
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_gin_pending_list_limit_bytes
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_idle_in_transaction_session_timeout_seconds
+pg_settings_ignore_checksum_failure
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_jit
+pg_settings_jit_above_cost
+pg_settings_jit_debugging_support
+pg_settings_jit_dump_bitcode
+pg_settings_jit_expressions
+pg_settings_jit_inline_above_cost
+pg_settings_jit_optimize_above_cost
+pg_settings_jit_profiling_support
+pg_settings_jit_tuple_deforming
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lock_timeout_seconds
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_replication_commands
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_logical_replication_workers
+pg_settings_max_parallel_maintenance_workers
+pg_settings_max_parallel_workers
+pg_settings_max_parallel_workers_per_gather
+pg_settings_max_pred_locks_per_page
+pg_settings_max_pred_locks_per_relation
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_replication_slots
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_sync_workers_per_subscription
+pg_settings_max_wal_senders
+pg_settings_max_wal_size_bytes
+pg_settings_max_worker_processes
+pg_settings_min_parallel_index_scan_size_bytes
+pg_settings_min_parallel_table_scan_size_bytes
+pg_settings_min_wal_size_bytes
+pg_settings_old_snapshot_threshold_seconds
+pg_settings_operator_precedence_warning
+pg_settings_parallel_leader_participation
+pg_settings_parallel_setup_cost
+pg_settings_parallel_tuple_cost
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_restart_after_crash
+pg_settings_row_security
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_ssl
+pg_settings_ssl_passphrase_command_supports_reload
+pg_settings_ssl_prefer_server_ciphers
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_syslog_sequence_numbers
+pg_settings_syslog_split_messages
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size_bytes
+pg_settings_track_commit_timestamp
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cleanup_index_scale_factor
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_vacuum_multixact_freeze_min_age
+pg_settings_vacuum_multixact_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_compression
+pg_settings_wal_keep_segments
+pg_settings_wal_log_hints
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_receiver_timeout_seconds
+pg_settings_wal_retrieve_retry_interval_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_timeout_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_wal_writer_flush_after_bytes
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_archiver_archived_count
+pg_stat_archiver_failed_count
+pg_stat_archiver_last_archive_age
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_checkpoint_sync_time
+pg_stat_bgwriter_checkpoint_write_time
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_current_wal_lsn_bytes
+pg_stat_replication_pg_wal_lsn_diff
+pg_up
+postgres_exporter_build_info
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
+process_virtual_memory_max_bytes
+promhttp_metric_handler_requests_in_flight
+promhttp_metric_handler_requests_total
diff --git a/metriclists/.metrics.replicated.9.1.prom.unique b/metriclists/.metrics.replicated.9.1.prom.unique
new file mode 100644
index 000000000..bf250e2ff
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.1.prom.unique
@@ -0,0 +1,227 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_mode
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_segments
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_replication_timeout_seconds
+pg_settings_restart_after_crash
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_silent_mode
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_renegotiation_limit_bytes
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_counts
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_keep_segments
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_delay_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_up
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
diff --git a/metriclists/.metrics.replicated.9.2.prom.unique b/metriclists/.metrics.replicated.9.2.prom.unique
new file mode 100644
index 000000000..a8d5d3c22
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.2.prom.unique
@@ -0,0 +1,236 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_mode
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_segments
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_replication_timeout_seconds
+pg_settings_restart_after_crash
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_renegotiation_limit_bytes
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_keep_segments
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_writer_delay_seconds
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_checkpoint_sync_time
+pg_stat_bgwriter_checkpoint_write_time
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_xlog_location_diff
+pg_up
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
diff --git a/metriclists/.metrics.replicated.9.3.prom.unique b/metriclists/.metrics.replicated.9.3.prom.unique
new file mode 100644
index 000000000..f9bf4dd93
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.3.prom.unique
@@ -0,0 +1,243 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_mode
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_multixact_freeze_max_age
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_segments
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_data_checksums
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_ignore_checksum_failure
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lock_timeout_seconds
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_restart_after_crash
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_renegotiation_limit_bytes
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_vacuum_multixact_freeze_min_age
+pg_settings_vacuum_multixact_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_keep_segments
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_receiver_timeout_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_timeout_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_checkpoint_sync_time
+pg_stat_bgwriter_checkpoint_write_time
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_xlog_location_diff
+pg_up
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
diff --git a/metriclists/.metrics.replicated.9.4.prom.unique b/metriclists/.metrics.replicated.9.4.prom.unique
new file mode 100644
index 000000000..60e66b11c
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.4.prom.unique
@@ -0,0 +1,256 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_mode
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_multixact_freeze_max_age
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_autovacuum_work_mem_bytes
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_segments
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_data_checksums
+pg_settings_data_sync_retry
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_ignore_checksum_failure
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lock_timeout_seconds
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_replication_slots
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_max_worker_processes
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_restart_after_crash
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_prefer_server_ciphers
+pg_settings_ssl_renegotiation_limit_bytes
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_vacuum_multixact_freeze_min_age
+pg_settings_vacuum_multixact_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_keep_segments
+pg_settings_wal_log_hints
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_receiver_timeout_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_timeout_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_archiver_archived_count
+pg_stat_archiver_failed_count
+pg_stat_archiver_last_archive_age
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_checkpoint_sync_time
+pg_stat_bgwriter_checkpoint_write_time
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_xlog_location_diff
+pg_up
+postgres_exporter_build_info
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
+process_virtual_memory_max_bytes
+promhttp_metric_handler_requests_in_flight
+promhttp_metric_handler_requests_total
diff --git a/metriclists/.metrics.replicated.9.5.prom.unique b/metriclists/.metrics.replicated.9.5.prom.unique
new file mode 100644
index 000000000..47c702002
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.5.prom.unique
@@ -0,0 +1,262 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_multixact_freeze_max_age
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_autovacuum_work_mem_bytes
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_data_checksums
+pg_settings_data_sync_retry
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_gin_pending_list_limit_bytes
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_ignore_checksum_failure
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lock_timeout_seconds
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_replication_commands
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_replication_slots
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_max_wal_size_bytes
+pg_settings_max_worker_processes
+pg_settings_min_wal_size_bytes
+pg_settings_operator_precedence_warning
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_restart_after_crash
+pg_settings_row_security
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_prefer_server_ciphers
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_commit_timestamp
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_vacuum_multixact_freeze_min_age
+pg_settings_vacuum_multixact_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_compression
+pg_settings_wal_keep_segments
+pg_settings_wal_log_hints
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_receiver_timeout_seconds
+pg_settings_wal_retrieve_retry_interval_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_timeout_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_archiver_archived_count
+pg_stat_archiver_failed_count
+pg_stat_archiver_last_archive_age
+pg_stat_bgwriter_buffers_alloc
+pg_stat_bgwriter_buffers_backend
+pg_stat_bgwriter_buffers_backend_fsync
+pg_stat_bgwriter_buffers_checkpoint
+pg_stat_bgwriter_buffers_clean
+pg_stat_bgwriter_checkpoints_req
+pg_stat_bgwriter_checkpoints_timed
+pg_stat_bgwriter_checkpoint_sync_time
+pg_stat_bgwriter_checkpoint_write_time
+pg_stat_bgwriter_maxwritten_clean
+pg_stat_bgwriter_stats_reset
+pg_stat_database_blk_read_time
+pg_stat_database_blks_hit
+pg_stat_database_blks_read
+pg_stat_database_blk_write_time
+pg_stat_database_conflicts
+pg_stat_database_conflicts_confl_bufferpin
+pg_stat_database_conflicts_confl_deadlock
+pg_stat_database_conflicts_confl_lock
+pg_stat_database_conflicts_confl_snapshot
+pg_stat_database_conflicts_confl_tablespace
+pg_stat_database_deadlocks
+pg_stat_database_numbackends
+pg_stat_database_stats_reset
+pg_stat_database_temp_bytes
+pg_stat_database_temp_files
+pg_stat_database_tup_deleted
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_stat_replication_pg_xlog_location_diff
+pg_up
+postgres_exporter_build_info
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
+process_virtual_memory_max_bytes
+promhttp_metric_handler_requests_in_flight
+promhttp_metric_handler_requests_total
diff --git a/metriclists/.metrics.replicated.9.6.prom.unique b/metriclists/.metrics.replicated.9.6.prom.unique
new file mode 100644
index 000000000..addb3969c
--- /dev/null
+++ b/metriclists/.metrics.replicated.9.6.prom.unique
@@ -0,0 +1,275 @@
+go_gc_duration_seconds
+go_gc_duration_seconds_count
+go_gc_duration_seconds_sum
+go_goroutines
+go_info
+go_memstats_alloc_bytes
+go_memstats_alloc_bytes_total
+go_memstats_buck_hash_sys_bytes
+go_memstats_frees_total
+go_memstats_gc_cpu_fraction
+go_memstats_gc_sys_bytes
+go_memstats_heap_alloc_bytes
+go_memstats_heap_idle_bytes
+go_memstats_heap_inuse_bytes
+go_memstats_heap_objects
+go_memstats_heap_released_bytes
+go_memstats_heap_sys_bytes
+go_memstats_last_gc_time_seconds
+go_memstats_lookups_total
+go_memstats_mallocs_total
+go_memstats_mcache_inuse_bytes
+go_memstats_mcache_sys_bytes
+go_memstats_mspan_inuse_bytes
+go_memstats_mspan_sys_bytes
+go_memstats_next_gc_bytes
+go_memstats_other_sys_bytes
+go_memstats_stack_inuse_bytes
+go_memstats_stack_sys_bytes
+go_memstats_sys_bytes
+go_threads
+pg_exporter_last_scrape_duration_seconds
+pg_exporter_last_scrape_error
+pg_exporter_scrapes_total
+pg_locks_count
+pg_settings_allow_system_table_mods
+pg_settings_archive_timeout_seconds
+pg_settings_array_nulls
+pg_settings_authentication_timeout_seconds
+pg_settings_autovacuum
+pg_settings_autovacuum_analyze_scale_factor
+pg_settings_autovacuum_analyze_threshold
+pg_settings_autovacuum_freeze_max_age
+pg_settings_autovacuum_max_workers
+pg_settings_autovacuum_multixact_freeze_max_age
+pg_settings_autovacuum_naptime_seconds
+pg_settings_autovacuum_vacuum_cost_delay_seconds
+pg_settings_autovacuum_vacuum_cost_limit
+pg_settings_autovacuum_vacuum_scale_factor
+pg_settings_autovacuum_vacuum_threshold
+pg_settings_autovacuum_work_mem_bytes
+pg_settings_backend_flush_after_bytes
+pg_settings_bgwriter_delay_seconds
+pg_settings_bgwriter_flush_after_bytes
+pg_settings_bgwriter_lru_maxpages
+pg_settings_bgwriter_lru_multiplier
+pg_settings_block_size
+pg_settings_bonjour
+pg_settings_check_function_bodies
+pg_settings_checkpoint_completion_target
+pg_settings_checkpoint_flush_after_bytes
+pg_settings_checkpoint_timeout_seconds
+pg_settings_checkpoint_warning_seconds
+pg_settings_commit_delay
+pg_settings_commit_siblings
+pg_settings_cpu_index_tuple_cost
+pg_settings_cpu_operator_cost
+pg_settings_cpu_tuple_cost
+pg_settings_cursor_tuple_fraction
+pg_settings_data_checksums
+pg_settings_data_sync_retry
+pg_settings_db_user_namespace
+pg_settings_deadlock_timeout_seconds
+pg_settings_debug_assertions
+pg_settings_debug_pretty_print
+pg_settings_debug_print_parse
+pg_settings_debug_print_plan
+pg_settings_debug_print_rewritten
+pg_settings_default_statistics_target
+pg_settings_default_transaction_deferrable
+pg_settings_default_transaction_read_only
+pg_settings_default_with_oids
+pg_settings_effective_cache_size_bytes
+pg_settings_effective_io_concurrency
+pg_settings_enable_bitmapscan
+pg_settings_enable_hashagg
+pg_settings_enable_hashjoin
+pg_settings_enable_indexonlyscan
+pg_settings_enable_indexscan
+pg_settings_enable_material
+pg_settings_enable_mergejoin
+pg_settings_enable_nestloop
+pg_settings_enable_seqscan
+pg_settings_enable_sort
+pg_settings_enable_tidscan
+pg_settings_escape_string_warning
+pg_settings_exit_on_error
+pg_settings_extra_float_digits
+pg_settings_from_collapse_limit
+pg_settings_fsync
+pg_settings_full_page_writes
+pg_settings_geqo
+pg_settings_geqo_effort
+pg_settings_geqo_generations
+pg_settings_geqo_pool_size
+pg_settings_geqo_seed
+pg_settings_geqo_selection_bias
+pg_settings_geqo_threshold
+pg_settings_gin_fuzzy_search_limit
+pg_settings_gin_pending_list_limit_bytes
+pg_settings_hot_standby
+pg_settings_hot_standby_feedback
+pg_settings_idle_in_transaction_session_timeout_seconds
+pg_settings_ignore_checksum_failure
+pg_settings_ignore_system_indexes
+pg_settings_integer_datetimes
+pg_settings_join_collapse_limit
+pg_settings_krb_caseins_users
+pg_settings_lock_timeout_seconds
+pg_settings_lo_compat_privileges
+pg_settings_log_autovacuum_min_duration_seconds
+pg_settings_log_checkpoints
+pg_settings_log_connections
+pg_settings_log_disconnections
+pg_settings_log_duration
+pg_settings_log_executor_stats
+pg_settings_log_file_mode
+pg_settings_logging_collector
+pg_settings_log_hostname
+pg_settings_log_lock_waits
+pg_settings_log_min_duration_statement_seconds
+pg_settings_log_parser_stats
+pg_settings_log_planner_stats
+pg_settings_log_replication_commands
+pg_settings_log_rotation_age_seconds
+pg_settings_log_rotation_size_bytes
+pg_settings_log_statement_stats
+pg_settings_log_temp_files_bytes
+pg_settings_log_truncate_on_rotation
+pg_settings_maintenance_work_mem_bytes
+pg_settings_max_connections
+pg_settings_max_files_per_process
+pg_settings_max_function_args
+pg_settings_max_identifier_length
+pg_settings_max_index_keys
+pg_settings_max_locks_per_transaction
+pg_settings_max_parallel_workers_per_gather
+pg_settings_max_pred_locks_per_transaction
+pg_settings_max_prepared_transactions
+pg_settings_max_replication_slots
+pg_settings_max_stack_depth_bytes
+pg_settings_max_standby_archive_delay_seconds
+pg_settings_max_standby_streaming_delay_seconds
+pg_settings_max_wal_senders
+pg_settings_max_wal_size_bytes
+pg_settings_max_worker_processes
+pg_settings_min_parallel_relation_size_bytes
+pg_settings_min_wal_size_bytes
+pg_settings_old_snapshot_threshold_seconds
+pg_settings_operator_precedence_warning
+pg_settings_parallel_setup_cost
+pg_settings_parallel_tuple_cost
+pg_settings_password_encryption
+pg_settings_port
+pg_settings_post_auth_delay_seconds
+pg_settings_pre_auth_delay_seconds
+pg_settings_quote_all_identifiers
+pg_settings_random_page_cost
+pg_settings_replacement_sort_tuples
+pg_settings_restart_after_crash
+pg_settings_row_security
+pg_settings_segment_size_bytes
+pg_settings_seq_page_cost
+pg_settings_server_version_num
+pg_settings_shared_buffers_bytes
+pg_settings_sql_inheritance
+pg_settings_ssl
+pg_settings_ssl_prefer_server_ciphers
+pg_settings_standard_conforming_strings
+pg_settings_statement_timeout_seconds
+pg_settings_superuser_reserved_connections
+pg_settings_synchronize_seqscans
+pg_settings_syslog_sequence_numbers
+pg_settings_syslog_split_messages
+pg_settings_tcp_keepalives_count
+pg_settings_tcp_keepalives_idle_seconds
+pg_settings_tcp_keepalives_interval_seconds
+pg_settings_temp_buffers_bytes
+pg_settings_temp_file_limit_bytes
+pg_settings_trace_notify
+pg_settings_trace_sort
+pg_settings_track_activities
+pg_settings_track_activity_query_size
+pg_settings_track_commit_timestamp
+pg_settings_track_counts
+pg_settings_track_io_timing
+pg_settings_transaction_deferrable
+pg_settings_transaction_read_only
+pg_settings_transform_null_equals
+pg_settings_unix_socket_permissions
+pg_settings_update_process_title
+pg_settings_vacuum_cost_delay_seconds
+pg_settings_vacuum_cost_limit
+pg_settings_vacuum_cost_page_dirty
+pg_settings_vacuum_cost_page_hit
+pg_settings_vacuum_cost_page_miss
+pg_settings_vacuum_defer_cleanup_age
+pg_settings_vacuum_freeze_min_age
+pg_settings_vacuum_freeze_table_age
+pg_settings_vacuum_multixact_freeze_min_age
+pg_settings_vacuum_multixact_freeze_table_age
+pg_settings_wal_block_size
+pg_settings_wal_buffers_bytes
+pg_settings_wal_compression
+pg_settings_wal_keep_segments
+pg_settings_wal_log_hints
+pg_settings_wal_receiver_status_interval_seconds
+pg_settings_wal_receiver_timeout_seconds
+pg_settings_wal_retrieve_retry_interval_seconds
+pg_settings_wal_segment_size_bytes
+pg_settings_wal_sender_timeout_seconds
+pg_settings_wal_writer_delay_seconds
+pg_settings_wal_writer_flush_after_bytes
+pg_settings_work_mem_bytes
+pg_settings_zero_damaged_pages
+pg_stat_activity_count
+pg_stat_activity_max_tx_duration
+pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_stat_replication_pg_xlog_location_diff +pg_up +postgres_exporter_build_info +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes +process_virtual_memory_max_bytes +promhttp_metric_handler_requests_in_flight +promhttp_metric_handler_requests_total diff --git a/metriclists/.metrics.single.10.prom.unique b/metriclists/.metrics.single.10.prom.unique new file mode 100644 index 000000000..10147214e --- /dev/null +++ b/metriclists/.metrics.single.10.prom.unique @@ -0,0 +1,279 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_backend_flush_after_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_flush_after_bytes +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier 
+pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_flush_after_bytes +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_gathermerge +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_gin_pending_list_limit_bytes +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_idle_in_transaction_session_timeout_seconds +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_replication_commands +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_logical_replication_workers +pg_settings_max_parallel_workers +pg_settings_max_parallel_workers_per_gather +pg_settings_max_pred_locks_per_page +pg_settings_max_pred_locks_per_relation +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_sync_workers_per_subscription +pg_settings_max_wal_senders +pg_settings_max_wal_size_bytes +pg_settings_max_worker_processes 
+pg_settings_min_parallel_index_scan_size_bytes +pg_settings_min_parallel_table_scan_size_bytes +pg_settings_min_wal_size_bytes +pg_settings_old_snapshot_threshold_seconds +pg_settings_operator_precedence_warning +pg_settings_parallel_setup_cost +pg_settings_parallel_tuple_cost +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_replacement_sort_tuples +pg_settings_restart_after_crash +pg_settings_row_security +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_ssl +pg_settings_ssl_prefer_server_ciphers +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_syslog_sequence_numbers +pg_settings_syslog_split_messages +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_commit_timestamp +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_compression +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_retrieve_retry_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_wal_writer_flush_after_bytes +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted 
+pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +postgres_exporter_build_info +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes +process_virtual_memory_max_bytes +promhttp_metric_handler_requests_in_flight +promhttp_metric_handler_requests_total diff --git a/metriclists/.metrics.single.11.prom.unique b/metriclists/.metrics.single.11.prom.unique new file mode 100644 index 000000000..da7280ed8 --- /dev/null +++ b/metriclists/.metrics.single.11.prom.unique @@ -0,0 +1,297 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_backend_flush_after_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_flush_after_bytes +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_flush_after_bytes +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_directory_mode +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_gathermerge +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan 
+pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_parallel_append +pg_settings_enable_parallel_hash +pg_settings_enable_partition_pruning +pg_settings_enable_partitionwise_aggregate +pg_settings_enable_partitionwise_join +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_gin_pending_list_limit_bytes +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_idle_in_transaction_session_timeout_seconds +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_jit +pg_settings_jit_above_cost +pg_settings_jit_debugging_support +pg_settings_jit_dump_bitcode +pg_settings_jit_expressions +pg_settings_jit_inline_above_cost +pg_settings_jit_optimize_above_cost +pg_settings_jit_profiling_support +pg_settings_jit_tuple_deforming +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_replication_commands +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_logical_replication_workers +pg_settings_max_parallel_maintenance_workers +pg_settings_max_parallel_workers +pg_settings_max_parallel_workers_per_gather +pg_settings_max_pred_locks_per_page +pg_settings_max_pred_locks_per_relation +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_sync_workers_per_subscription +pg_settings_max_wal_senders +pg_settings_max_wal_size_bytes +pg_settings_max_worker_processes +pg_settings_min_parallel_index_scan_size_bytes +pg_settings_min_parallel_table_scan_size_bytes +pg_settings_min_wal_size_bytes +pg_settings_old_snapshot_threshold_seconds +pg_settings_operator_precedence_warning +pg_settings_parallel_leader_participation +pg_settings_parallel_setup_cost +pg_settings_parallel_tuple_cost +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_restart_after_crash +pg_settings_row_security +pg_settings_segment_size_bytes 
+pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_ssl +pg_settings_ssl_passphrase_command_supports_reload +pg_settings_ssl_prefer_server_ciphers +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_syslog_sequence_numbers +pg_settings_syslog_split_messages +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size_bytes +pg_settings_track_commit_timestamp +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cleanup_index_scale_factor +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_compression +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_retrieve_retry_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_wal_writer_flush_after_bytes +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +postgres_exporter_build_info +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes +process_virtual_memory_max_bytes +promhttp_metric_handler_requests_in_flight +promhttp_metric_handler_requests_total diff --git 
a/metriclists/.metrics.single.9.1.prom.unique b/metriclists/.metrics.single.9.1.prom.unique new file mode 100644 index 000000000..bf250e2ff --- /dev/null +++ b/metriclists/.metrics.single.9.1.prom.unique @@ -0,0 +1,227 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_mode +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_segments +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit 
+pg_settings_krb_caseins_users +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_replication_timeout_seconds +pg_settings_restart_after_crash +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_silent_mode +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_renegotiation_limit_bytes +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_counts +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_keep_segments +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_delay_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace 
+pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes diff --git a/metriclists/.metrics.single.9.2.prom.unique b/metriclists/.metrics.single.9.2.prom.unique new file mode 100644 index 000000000..f3ea218bb --- /dev/null +++ b/metriclists/.metrics.single.9.2.prom.unique @@ -0,0 +1,235 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_mode +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_segments +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error 
+pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_replication_timeout_seconds +pg_settings_restart_after_crash +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_renegotiation_limit_bytes +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_keep_segments +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_writer_delay_seconds +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync 
+pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes diff --git a/metriclists/.metrics.single.9.3.prom.unique b/metriclists/.metrics.single.9.3.prom.unique new file mode 100644 index 000000000..35d39a262 --- /dev/null +++ b/metriclists/.metrics.single.9.3.prom.unique @@ -0,0 +1,242 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_mode +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_segments +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_db_user_namespace 
+pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_restart_after_crash +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_renegotiation_limit_bytes +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only 
+pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_keep_segments +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes diff --git a/metriclists/.metrics.single.9.4.prom.unique b/metriclists/.metrics.single.9.4.prom.unique new file mode 100644 index 000000000..6238bb4b7 --- /dev/null +++ b/metriclists/.metrics.single.9.4.prom.unique @@ -0,0 +1,255 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_mode +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum 
+pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_segments +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes 
+pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_max_worker_processes +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_restart_after_crash +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_prefer_server_ciphers +pg_settings_ssl_renegotiation_limit_bytes +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +postgres_exporter_build_info +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds 
+process_virtual_memory_bytes +process_virtual_memory_max_bytes +promhttp_metric_handler_requests_in_flight +promhttp_metric_handler_requests_total diff --git a/metriclists/.metrics.single.9.5.prom.unique b/metriclists/.metrics.single.9.5.prom.unique new file mode 100644 index 000000000..6e690dacc --- /dev/null +++ b/metriclists/.metrics.single.9.5.prom.unique @@ -0,0 +1,261 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor +pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size 
+pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_gin_pending_list_limit_bytes +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_replication_commands +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_max_wal_size_bytes +pg_settings_max_worker_processes +pg_settings_min_wal_size_bytes +pg_settings_operator_precedence_warning +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_restart_after_crash +pg_settings_row_security +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_prefer_server_ciphers +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_commit_timestamp +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_compression +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds 
+pg_settings_wal_retrieve_retry_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted +pg_stat_database_tup_fetched +pg_stat_database_tup_inserted +pg_stat_database_tup_returned +pg_stat_database_tup_updated +pg_stat_database_xact_commit +pg_stat_database_xact_rollback +pg_static +pg_up +postgres_exporter_build_info +process_cpu_seconds_total +process_max_fds +process_open_fds +process_resident_memory_bytes +process_start_time_seconds +process_virtual_memory_bytes +process_virtual_memory_max_bytes +promhttp_metric_handler_requests_in_flight +promhttp_metric_handler_requests_total diff --git a/metriclists/.metrics.single.9.6.prom.unique b/metriclists/.metrics.single.9.6.prom.unique new file mode 100644 index 000000000..4a5ad9e0d --- /dev/null +++ b/metriclists/.metrics.single.9.6.prom.unique @@ -0,0 +1,274 @@ +go_gc_duration_seconds +go_gc_duration_seconds_count +go_gc_duration_seconds_sum +go_goroutines +go_info +go_memstats_alloc_bytes +go_memstats_alloc_bytes_total +go_memstats_buck_hash_sys_bytes +go_memstats_frees_total +go_memstats_gc_cpu_fraction +go_memstats_gc_sys_bytes +go_memstats_heap_alloc_bytes +go_memstats_heap_idle_bytes +go_memstats_heap_inuse_bytes +go_memstats_heap_objects +go_memstats_heap_released_bytes +go_memstats_heap_sys_bytes +go_memstats_last_gc_time_seconds +go_memstats_lookups_total +go_memstats_mallocs_total +go_memstats_mcache_inuse_bytes +go_memstats_mcache_sys_bytes +go_memstats_mspan_inuse_bytes +go_memstats_mspan_sys_bytes +go_memstats_next_gc_bytes +go_memstats_other_sys_bytes +go_memstats_stack_inuse_bytes +go_memstats_stack_sys_bytes +go_memstats_sys_bytes +go_threads +pg_exporter_last_scrape_duration_seconds +pg_exporter_last_scrape_error +pg_exporter_scrapes_total +pg_locks_count +pg_settings_allow_system_table_mods +pg_settings_archive_timeout_seconds +pg_settings_array_nulls +pg_settings_authentication_timeout_seconds +pg_settings_autovacuum +pg_settings_autovacuum_analyze_scale_factor +pg_settings_autovacuum_analyze_threshold +pg_settings_autovacuum_freeze_max_age +pg_settings_autovacuum_max_workers +pg_settings_autovacuum_multixact_freeze_max_age +pg_settings_autovacuum_naptime_seconds +pg_settings_autovacuum_vacuum_cost_delay_seconds +pg_settings_autovacuum_vacuum_cost_limit +pg_settings_autovacuum_vacuum_scale_factor 
+pg_settings_autovacuum_vacuum_threshold +pg_settings_autovacuum_work_mem_bytes +pg_settings_backend_flush_after_bytes +pg_settings_bgwriter_delay_seconds +pg_settings_bgwriter_flush_after_bytes +pg_settings_bgwriter_lru_maxpages +pg_settings_bgwriter_lru_multiplier +pg_settings_block_size +pg_settings_bonjour +pg_settings_check_function_bodies +pg_settings_checkpoint_completion_target +pg_settings_checkpoint_flush_after_bytes +pg_settings_checkpoint_timeout_seconds +pg_settings_checkpoint_warning_seconds +pg_settings_commit_delay +pg_settings_commit_siblings +pg_settings_cpu_index_tuple_cost +pg_settings_cpu_operator_cost +pg_settings_cpu_tuple_cost +pg_settings_cursor_tuple_fraction +pg_settings_data_checksums +pg_settings_data_sync_retry +pg_settings_db_user_namespace +pg_settings_deadlock_timeout_seconds +pg_settings_debug_assertions +pg_settings_debug_pretty_print +pg_settings_debug_print_parse +pg_settings_debug_print_plan +pg_settings_debug_print_rewritten +pg_settings_default_statistics_target +pg_settings_default_transaction_deferrable +pg_settings_default_transaction_read_only +pg_settings_default_with_oids +pg_settings_effective_cache_size_bytes +pg_settings_effective_io_concurrency +pg_settings_enable_bitmapscan +pg_settings_enable_hashagg +pg_settings_enable_hashjoin +pg_settings_enable_indexonlyscan +pg_settings_enable_indexscan +pg_settings_enable_material +pg_settings_enable_mergejoin +pg_settings_enable_nestloop +pg_settings_enable_seqscan +pg_settings_enable_sort +pg_settings_enable_tidscan +pg_settings_escape_string_warning +pg_settings_exit_on_error +pg_settings_extra_float_digits +pg_settings_from_collapse_limit +pg_settings_fsync +pg_settings_full_page_writes +pg_settings_geqo +pg_settings_geqo_effort +pg_settings_geqo_generations +pg_settings_geqo_pool_size +pg_settings_geqo_seed +pg_settings_geqo_selection_bias +pg_settings_geqo_threshold +pg_settings_gin_fuzzy_search_limit +pg_settings_gin_pending_list_limit_bytes +pg_settings_hot_standby +pg_settings_hot_standby_feedback +pg_settings_idle_in_transaction_session_timeout_seconds +pg_settings_ignore_checksum_failure +pg_settings_ignore_system_indexes +pg_settings_integer_datetimes +pg_settings_join_collapse_limit +pg_settings_krb_caseins_users +pg_settings_lock_timeout_seconds +pg_settings_lo_compat_privileges +pg_settings_log_autovacuum_min_duration_seconds +pg_settings_log_checkpoints +pg_settings_log_connections +pg_settings_log_disconnections +pg_settings_log_duration +pg_settings_log_executor_stats +pg_settings_log_file_mode +pg_settings_logging_collector +pg_settings_log_hostname +pg_settings_log_lock_waits +pg_settings_log_min_duration_statement_seconds +pg_settings_log_parser_stats +pg_settings_log_planner_stats +pg_settings_log_replication_commands +pg_settings_log_rotation_age_seconds +pg_settings_log_rotation_size_bytes +pg_settings_log_statement_stats +pg_settings_log_temp_files_bytes +pg_settings_log_truncate_on_rotation +pg_settings_maintenance_work_mem_bytes +pg_settings_max_connections +pg_settings_max_files_per_process +pg_settings_max_function_args +pg_settings_max_identifier_length +pg_settings_max_index_keys +pg_settings_max_locks_per_transaction +pg_settings_max_parallel_workers_per_gather +pg_settings_max_pred_locks_per_transaction +pg_settings_max_prepared_transactions +pg_settings_max_replication_slots +pg_settings_max_stack_depth_bytes +pg_settings_max_standby_archive_delay_seconds +pg_settings_max_standby_streaming_delay_seconds +pg_settings_max_wal_senders +pg_settings_max_wal_size_bytes 
+pg_settings_max_worker_processes +pg_settings_min_parallel_relation_size_bytes +pg_settings_min_wal_size_bytes +pg_settings_old_snapshot_threshold_seconds +pg_settings_operator_precedence_warning +pg_settings_parallel_setup_cost +pg_settings_parallel_tuple_cost +pg_settings_password_encryption +pg_settings_port +pg_settings_post_auth_delay_seconds +pg_settings_pre_auth_delay_seconds +pg_settings_quote_all_identifiers +pg_settings_random_page_cost +pg_settings_replacement_sort_tuples +pg_settings_restart_after_crash +pg_settings_row_security +pg_settings_segment_size_bytes +pg_settings_seq_page_cost +pg_settings_server_version_num +pg_settings_shared_buffers_bytes +pg_settings_sql_inheritance +pg_settings_ssl +pg_settings_ssl_prefer_server_ciphers +pg_settings_standard_conforming_strings +pg_settings_statement_timeout_seconds +pg_settings_superuser_reserved_connections +pg_settings_synchronize_seqscans +pg_settings_syslog_sequence_numbers +pg_settings_syslog_split_messages +pg_settings_tcp_keepalives_count +pg_settings_tcp_keepalives_idle_seconds +pg_settings_tcp_keepalives_interval_seconds +pg_settings_temp_buffers_bytes +pg_settings_temp_file_limit_bytes +pg_settings_trace_notify +pg_settings_trace_sort +pg_settings_track_activities +pg_settings_track_activity_query_size +pg_settings_track_commit_timestamp +pg_settings_track_counts +pg_settings_track_io_timing +pg_settings_transaction_deferrable +pg_settings_transaction_read_only +pg_settings_transform_null_equals +pg_settings_unix_socket_permissions +pg_settings_update_process_title +pg_settings_vacuum_cost_delay_seconds +pg_settings_vacuum_cost_limit +pg_settings_vacuum_cost_page_dirty +pg_settings_vacuum_cost_page_hit +pg_settings_vacuum_cost_page_miss +pg_settings_vacuum_defer_cleanup_age +pg_settings_vacuum_freeze_min_age +pg_settings_vacuum_freeze_table_age +pg_settings_vacuum_multixact_freeze_min_age +pg_settings_vacuum_multixact_freeze_table_age +pg_settings_wal_block_size +pg_settings_wal_buffers_bytes +pg_settings_wal_compression +pg_settings_wal_keep_segments +pg_settings_wal_log_hints +pg_settings_wal_receiver_status_interval_seconds +pg_settings_wal_receiver_timeout_seconds +pg_settings_wal_retrieve_retry_interval_seconds +pg_settings_wal_segment_size_bytes +pg_settings_wal_sender_timeout_seconds +pg_settings_wal_writer_delay_seconds +pg_settings_wal_writer_flush_after_bytes +pg_settings_work_mem_bytes +pg_settings_zero_damaged_pages +pg_stat_activity_count +pg_stat_activity_max_tx_duration +pg_stat_archiver_archived_count +pg_stat_archiver_failed_count +pg_stat_archiver_last_archive_age +pg_stat_bgwriter_buffers_alloc +pg_stat_bgwriter_buffers_backend +pg_stat_bgwriter_buffers_backend_fsync +pg_stat_bgwriter_buffers_checkpoint +pg_stat_bgwriter_buffers_clean +pg_stat_bgwriter_checkpoints_req +pg_stat_bgwriter_checkpoints_timed +pg_stat_bgwriter_checkpoint_sync_time +pg_stat_bgwriter_checkpoint_write_time +pg_stat_bgwriter_maxwritten_clean +pg_stat_bgwriter_stats_reset +pg_stat_database_blk_read_time +pg_stat_database_blks_hit +pg_stat_database_blks_read +pg_stat_database_blk_write_time +pg_stat_database_conflicts +pg_stat_database_conflicts_confl_bufferpin +pg_stat_database_conflicts_confl_deadlock +pg_stat_database_conflicts_confl_lock +pg_stat_database_conflicts_confl_snapshot +pg_stat_database_conflicts_confl_tablespace +pg_stat_database_deadlocks +pg_stat_database_numbackends +pg_stat_database_stats_reset +pg_stat_database_temp_bytes +pg_stat_database_temp_files +pg_stat_database_tup_deleted 
+pg_stat_database_tup_fetched
+pg_stat_database_tup_inserted
+pg_stat_database_tup_returned
+pg_stat_database_tup_updated
+pg_stat_database_xact_commit
+pg_stat_database_xact_rollback
+pg_static
+pg_up
+postgres_exporter_build_info
+process_cpu_seconds_total
+process_max_fds
+process_open_fds
+process_resident_memory_bytes
+process_start_time_seconds
+process_virtual_memory_bytes
+process_virtual_memory_max_bytes
+promhttp_metric_handler_requests_in_flight
+promhttp_metric_handler_requests_total
diff --git a/postgres-metrics-get-changes.sh b/postgres-metrics-get-changes.sh
deleted file mode 100755
index 37dbfb373..000000000
--- a/postgres-metrics-get-changes.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# Script to parse a text exposition format file into a unique list of metrics
-# output by the exporter and then build lists of added/removed metrics.
-
-old_src="$1"
-if [ ! -d "$old_src" ] ; then
-    mkdir -p "$old_src"
-fi
-
-function generate_add_removed() {
-    type="$1"
-    pg_version="$2"
-    old_version="$3"
-    new_version="$4"
-
-    if [ ! -e "$old_version" ] ; then
-        touch "$old_version"
-    fi
-
-    comm -23 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.removed"
-    comm -13 "$old_version" "$new_version" > ".metrics.${type}.${pg_version}.added"
-}
-
-for raw_prom in $(echo .*.prom) ; do
-    # Get the type and version
-    type=$(echo "$raw_prom" | cut -d'.' -f3)
-    pg_version=$(echo "$raw_prom" | cut -d'.' -f4- | sed 's/\.prom$//g')
-
-    unique_file="${raw_prom}.unique"
-    old_unique_file="$old_src/$unique_file"
-
-    # Strip, sort and deduplicate the label names
-    grep -v '#' "$raw_prom" | \
-        rev | cut -d' ' -f2- | \
-        rev | cut -d'{' -f1 | \
-        sort | \
-        uniq > "$unique_file"
-
-    generate_add_removed "$type" "$pg_version" "$old_unique_file" "$unique_file"
-done
diff --git a/postgres_exporter.rc b/postgres_exporter.rc
deleted file mode 100644
index b7a59a960..000000000
--- a/postgres_exporter.rc
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/sh
-
-# PROVIDE: postgres_exporter
-# REQUIRE: LOGIN
-# KEYWORD: shutdown
-#
-# rc-script for postgres_exporter
-#
-#
-# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
-# to enable this service:
-#
-# postgres_exporter_enable (bool):           Set to NO by default.
-#                                            Set it to YES to enable postgres_exporter.
-# postgres_exporter_user (string):           Set user that postgres_exporter will run under.
-#                                            Default is "nobody".
-# postgres_exporter_group (string):          Set group that postgres_exporter will run under.
-#                                            Default is "nobody".
-# postgres_exporter_args (string):           Set extra arguments to pass to postgres_exporter.
-#                                            Default is "".
-# postgres_exporter_listen_address (string): Set ip:port to listen on for web interface and telemetry.
-#                                            Defaults to ":9187"
-# postgres_exporter_pg_user (string):        Set the Postgres database user.
-#                                            Defaults to "postgres_exporter"
-# postgres_exporter_pg_pass (string):        Set the Postgres database password.
-#                                            Default is empty
-# postgres_exporter_pg_host (string):        Set the Postgres database server.
-#                                            Defaults to "localhost"
-# postgres_exporter_pg_port (string):        Set the Postgres database port.
-#                                            Defaults to "5432"
-
-# Add extra arguments via "postgres_exporter_args"
-# (see $ postgres_exporter --help)
-
-
-. 
/etc/rc.subr
-
-name=postgres_exporter
-rcvar=postgres_exporter_enable
-
-load_rc_config $name
-
-: ${postgres_exporter_enable:="NO"}
-: ${postgres_exporter_user:="nobody"}
-: ${postgres_exporter_group:="nobody"}
-: ${postgres_exporter_args:=""}
-: ${postgres_exporter_listen_address:=":9187"}
-: ${postgres_exporter_pg_user:="postgres_exporter"}
-: ${postgres_exporter_pg_pass:=""}
-: ${postgres_exporter_pg_host:="localhost"}
-: ${postgres_exporter_pg_port:="5432"}
-
-postgres_exporter_data_source_name="postgresql://${postgres_exporter_pg_user}:${postgres_exporter_pg_pass}@${postgres_exporter_pg_host}:${postgres_exporter_pg_port}/postgres?sslmode=disable"
-
-
-pidfile=/var/run/postgres_exporter.pid
-command="/usr/sbin/daemon"
-procname="/usr/local/bin/postgres_exporter"
-command_args="-f -p ${pidfile} -T ${name} \
-    /usr/bin/env DATA_SOURCE_NAME="${postgres_exporter_data_source_name}" ${procname} \
-    --web.listen-address=${postgres_exporter_listen_address} \
-    ${postgres_exporter_args}"
-
-start_precmd=postgres_exporter_startprecmd
-
-postgres_exporter_startprecmd()
-{
-    if [ ! -e ${pidfile} ]; then
-        install -o ${postgres_exporter_user} -g ${postgres_exporter_group} /dev/null ${pidfile};
-    fi
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/postgres_exporter_integration_test_script b/postgres_exporter_integration_test_script
deleted file mode 100755
index ebaf83d91..000000000
--- a/postgres_exporter_integration_test_script
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# This script wraps the integration test binary so it produces concatenated
-# test coverage output.
-
-test_binary=$1
-shift
-output_cov=$1
-shift
-
-echo "Test Binary: $test_binary" 1>&2
-echo "Coverage File: $output_cov" 1>&2
-
-echo "mode: count" > "$output_cov"
-
-test_cov=$(mktemp)
-"$test_binary" -test.coverprofile="$test_cov" "$@" || exit 1
-tail -n +2 "$test_cov" >> "$output_cov"
-rm -f "$test_cov"
diff --git a/postgres_mixin/.gitignore b/postgres_mixin/.gitignore
deleted file mode 100644
index 97bf5f5c8..000000000
--- a/postgres_mixin/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/alerts.yaml
-/rules.yaml
-dashboards_out
diff --git a/postgres_mixin/Makefile b/postgres_mixin/Makefile
deleted file mode 100644
index f2643c2b7..000000000
--- a/postgres_mixin/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s
-
-default: build
-
-all: fmt lint build clean
-
-fmt:
-	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
-		xargs -n 1 -- $(JSONNET_FMT) -i
-
-lint:
-	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
-		while read f; do \
-			$(JSONNET_FMT) "$$f" | diff -u "$$f" -; \
-		done
-
-	mixtool lint mixin.libsonnet
-
-build:
-	mixtool generate all mixin.libsonnet
-
-clean:
-	rm -rf dashboards_out alerts.yaml rules.yaml
diff --git a/postgres_mixin/README.md b/postgres_mixin/README.md
deleted file mode 100644
index e5f12edf6..000000000
--- a/postgres_mixin/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Postgres Mixin
-
-_This is a work in progress. We aim for it to become a good role model for alerts
-and dashboards eventually, but it is not quite there yet._
-
-The Postgres Mixin is a set of configurable, reusable, and extensible alerts and
-dashboards based on the metrics exported by the Postgres Exporter. The mixin creates
-recording and alerting rules for Prometheus and suitable dashboard descriptions
-for Grafana.
-
-To use them, you need to have `mixtool` and `jsonnetfmt` installed. 
If you
-have a working Go development environment, it's easiest to run the following:
-```bash
-$ go get github.com/monitoring-mixins/mixtool/cmd/mixtool
-$ go get github.com/google/go-jsonnet/cmd/jsonnetfmt
-```
-
-You can then build the Prometheus rules files `alerts.yaml` and
-`rules.yaml` and a directory `dashboards_out` with the JSON dashboard files
-for Grafana:
-```bash
-$ make build
-```
-
-For more advanced uses of mixins, see
-https://github.com/monitoring-mixins/docs.
diff --git a/postgres_mixin/alerts/alerts.libsonnet b/postgres_mixin/alerts/alerts.libsonnet
deleted file mode 100644
index 7f70d8a8d..000000000
--- a/postgres_mixin/alerts/alerts.libsonnet
+++ /dev/null
@@ -1 +0,0 @@
-(import 'postgres.libsonnet')
diff --git a/postgres_mixin/alerts/postgres.libsonnet b/postgres_mixin/alerts/postgres.libsonnet
deleted file mode 100644
index 4b0275df1..000000000
--- a/postgres_mixin/alerts/postgres.libsonnet
+++ /dev/null
@@ -1,127 +0,0 @@
-{
-  prometheusAlerts+:: {
-    groups+: [
-      {
-        name: 'PostgreSQL',
-        rules: [
-          {
-            alert: 'PostgreSQLMaxConnectionsReached',
-            annotations: {
-              description: '{{ $labels.instance }} is exceeding the currently configured maximum Postgres connection limit (current value: {{ $value }}). Services may be degraded - please take immediate action (you probably need to increase max_connections in the Docker image and re-deploy).',
-              summary: '{{ $labels.instance }} has maxed out Postgres connections.',
-            },
-            expr: |||
-              sum by (instance) (pg_stat_activity_count{%(postgresExporterSelector)s})
-              >=
-              sum by (instance) (pg_settings_max_connections{%(postgresExporterSelector)s})
-              -
-              sum by (instance) (pg_settings_superuser_reserved_connections{%(postgresExporterSelector)s})
-            ||| % $._config,
-            'for': '1m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-          {
-            alert: 'PostgreSQLHighConnections',
-            annotations: {
-              description: '{{ $labels.instance }} is exceeding 80% of the currently configured maximum Postgres connection limit (current value: {{ $value }}). Please check utilization graphs and confirm if this is normal service growth, abuse, or an otherwise temporary condition, or if new resources need to be provisioned (or the limits increased, which is most likely).',
-              summary: '{{ $labels.instance }} is over 80% of max Postgres connections.',
-            },
-            expr: |||
-              sum by (instance) (pg_stat_activity_count{%(postgresExporterSelector)s})
-              >
-              (
-                sum by (instance) (pg_settings_max_connections{%(postgresExporterSelector)s})
-                -
-                sum by (instance) (pg_settings_superuser_reserved_connections{%(postgresExporterSelector)s})
-              ) * 0.8
-            ||| % $._config,
-            'for': '10m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-          {
-            alert: 'PostgreSQLDown',
-            annotations: {
-              description: '{{ $labels.instance }} is rejecting query requests from the exporter, and thus probably not allowing DNS requests to work either. 
User services should not be affected provided at least 1 node is still alive.',
-              summary: 'PostgreSQL is not processing queries: {{ $labels.instance }}',
-            },
-            expr: 'pg_up{%(postgresExporterSelector)s} != 1' % $._config,
-            'for': '1m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-          {
-            alert: 'PostgreSQLSlowQueries',
-            annotations: {
-              description: 'PostgreSQL high number of slow queries on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}',
-              summary: 'PostgreSQL high number of slow queries on {{ $labels.cluster }} for database {{ $labels.datname }}',
-            },
-            expr: |||
-              avg by (datname) (
-                rate (
-                  pg_stat_activity_max_tx_duration{datname!~"template.*",%(postgresExporterSelector)s}[2m]
-                )
-              ) > 2 * 60
-            ||| % $._config,
-            'for': '2m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-          {
-            alert: 'PostgreSQLQPS',
-            annotations: {
-              description: 'PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}',
-              summary: 'PostgreSQL high number of queries per second on {{ $labels.cluster }} for database {{ $labels.datname }}',
-            },
-            expr: |||
-              avg by (datname) (
-                irate(
-                  pg_stat_database_xact_commit{datname!~"template.*",%(postgresExporterSelector)s}[5m]
-                )
-                +
-                irate(
-                  pg_stat_database_xact_rollback{datname!~"template.*",%(postgresExporterSelector)s}[5m]
-                )
-              ) > 10000
-            ||| % $._config,
-            'for': '5m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-          {
-            alert: 'PostgreSQLCacheHitRatio',
-            annotations: {
-              description: 'PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }} with a value of {{ $value }}',
-              summary: 'PostgreSQL low cache hit rate on {{ $labels.cluster }} for database {{ $labels.datname }}',
-            },
-            expr: |||
-              avg by (datname) (
-                rate(pg_stat_database_blks_hit{datname!~"template.*",%(postgresExporterSelector)s}[5m])
-                /
-                (
-                  rate(
-                    pg_stat_database_blks_hit{datname!~"template.*",%(postgresExporterSelector)s}[5m]
-                  )
-                  +
-                  rate(
-                    pg_stat_database_blks_read{datname!~"template.*",%(postgresExporterSelector)s}[5m]
-                  )
-                )
-              ) < 0.98
-            ||| % $._config,
-            'for': '5m',
-            labels: {
-              severity: 'warning',
-            },
-          },
-        ],
-      },
-    ],
-  },
-}
diff --git a/postgres_mixin/config.libsonnet b/postgres_mixin/config.libsonnet
deleted file mode 100644
index d7bd7ac1b..000000000
--- a/postgres_mixin/config.libsonnet
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  _config+:: {
-    postgresExporterSelector: '',
-  },
-}
diff --git a/postgres_mixin/dashboards/dashboards.libsonnet b/postgres_mixin/dashboards/dashboards.libsonnet
deleted file mode 100644
index d55f1ef5c..000000000
--- a/postgres_mixin/dashboards/dashboards.libsonnet
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  grafanaDashboards+:: {
-    'postgres-overview.json': (import 'postgres-overview.json'),
-  },
-}
diff --git a/postgres_mixin/dashboards/postgres-overview.json b/postgres_mixin/dashboards/postgres-overview.json
deleted file mode 100644
index 9bf41be6a..000000000
--- a/postgres_mixin/dashboards/postgres-overview.json
+++ /dev/null
@@ -1,1412 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "description": "Performance metrics for Postgres",
-  "editable": true,
-  "gnetId": 455,
-  "graphTooltip": 0,
-  "id": 1,
-  "iteration": 1603191461722,
-  "links": [],
-  "panels": [
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      
"dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 20, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 1, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.2.1", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "fetched", - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_tup_fetched{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "fetched", - "measurement": "postgresql", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "tup_fetched" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 120, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "fetched", - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_tup_returned{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "returned", - "measurement": "postgresql", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "tup_fetched" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 120, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "fetched", - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_tup_inserted{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "inserted", - "measurement": "postgresql", - "policy": "default", - "refId": "C", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "tup_fetched" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 120, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "fetched", - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_tup_updated{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - 
}, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "updated", - "measurement": "postgresql", - "policy": "default", - "refId": "D", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "tup_fetched" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 120, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "fetched", - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_tup_deleted{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "deleted", - "measurement": "postgresql", - "policy": "default", - "refId": "E", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "tup_fetched" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 120, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rows", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "$datasource", - "decimals": 0, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 20, - "y": 0 - }, - "height": "55px", - "id": 11, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "dsType": "prometheus", - "expr": "sum(irate(pg_stat_database_xact_commit{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])) + sum(irate(pg_stat_database_xact_rollback{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - 
"null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "measurement": "postgresql", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "xact_commit" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [ - "10s" - ], - "type": "non_negative_derivative" - } - ] - ], - "step": 1800, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - } - ], - "thresholds": "", - "title": "QPS", - "transparent": true, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 7 - }, - "hiddenSeries": false, - "id": 2, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideZero": true, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.2.1", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "Buffers Allocated", - "dsType": "prometheus", - "expr": "irate(pg_stat_bgwriter_buffers_alloc{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "buffers_alloc", - "measurement": "postgresql", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "buffers_alloc" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "Buffers Allocated", - "dsType": "prometheus", - "expr": "irate(pg_stat_bgwriter_buffers_backend_fsync{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "buffers_backend_fsync", - "measurement": "postgresql", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "buffers_alloc" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "Buffers Allocated", - "dsType": "prometheus", - "expr": "irate(pg_stat_bgwriter_buffers_backend{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - 
], - "intervalFactor": 2, - "legendFormat": "buffers_backend", - "measurement": "postgresql", - "policy": "default", - "refId": "C", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "buffers_alloc" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "Buffers Allocated", - "dsType": "prometheus", - "expr": "irate(pg_stat_bgwriter_buffers_clean{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "buffers_clean", - "measurement": "postgresql", - "policy": "default", - "refId": "D", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "buffers_alloc" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "Buffers Allocated", - "dsType": "prometheus", - "expr": "irate(pg_stat_bgwriter_buffers_checkpoint{job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "buffers_checkpoint", - "measurement": "postgresql", - "policy": "default", - "refId": "E", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "buffers_alloc" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Buffers", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 7 - }, - "hiddenSeries": false, - "id": 3, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.2.1", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": 
"conflicts", - "dsType": "prometheus", - "expr": "sum(rate(pg_stat_database_deadlocks{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "deadlocks", - "measurement": "postgresql", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "conflicts" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - }, - { - "alias": "deadlocks", - "dsType": "prometheus", - "expr": "sum(rate(pg_stat_database_conflicts{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "conflicts", - "measurement": "postgresql", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "deadlocks" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - }, - { - "params": [], - "type": "difference" - } - ] - ], - "step": 240, - "tags": [ - { - "key": "instance", - "operator": "=~", - "value": "/^$instance$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Conflicts/Deadlocks", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 14 - }, - "hiddenSeries": false, - "id": 12, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": true, - "pluginVersion": "7.2.1", - "pointradius": 1, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (datname) (rate(pg_stat_database_blks_hit{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])) / (sum by (datname)(rate(pg_stat_database_blks_hit{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])) + sum by (datname)(rate(pg_stat_database_blks_read{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}[$__rate_interval])))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{datname}} - cache hit 
rate", - "refId": "A", - "step": 240 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Cache hit ratio", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 14 - }, - "hiddenSeries": false, - "id": 13, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.2.1", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "pg_stat_database_numbackends{datname=~\"$db\",job=~\"$job\",instance=~\"$instance\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{datname}} - {{__name__}}", - "refId": "A", - "step": 240 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of active connections", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": false, - "schemaVersion": 26, - "style": "dark", - "tags": [ - "postgres" - ], - "templating": { - "list": [ - { - "hide": 0, - "includeAll": false, - "label": "Data Source", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": ".+", - "datasource": "$datasource", - "definition": "label_values(pg_up, job)", - "hide": 0, - "includeAll": true, - "label": "job", - "multi": true, - "name": "job", - "options": [], - "query": "label_values(pg_up, job)", - "refresh": 0, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "datasource": "$datasource", - "definition": "", - "hide": 0, - "includeAll": true, - "label": "instance", - "multi": true, - "name": "instance", - "options": [], - "query": "label_values(up{job=~\"$job\"},instance)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - 
"tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".+", - "datasource": "$datasource", - "definition": "label_values(pg_stat_database_tup_fetched{instance=~\"$instance\",datname!~\"template.*|postgres\"},datname)", - "hide": 0, - "includeAll": true, - "label": "db", - "multi": false, - "name": "db", - "options": [], - "query": "label_values(pg_stat_database_tup_fetched{instance=~\"$instance\",datname!~\"template.*|postgres\"},datname)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "Postgres Overview", - "uid": "wGgaPlciz", - "version": 5 -} diff --git a/postgres_mixin/go.mod b/postgres_mixin/go.mod deleted file mode 100644 index fa5e20579..000000000 --- a/postgres_mixin/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/wrouesnel/postgres_exporter/postgres_mixin - -go 1.15 diff --git a/postgres_mixin/mixin.libsonnet b/postgres_mixin/mixin.libsonnet deleted file mode 100644 index 119d2cdde..000000000 --- a/postgres_mixin/mixin.libsonnet +++ /dev/null @@ -1,3 +0,0 @@ -(import 'alerts/alerts.libsonnet') + -(import 'dashboards/dashboards.libsonnet') + -(import 'config.libsonnet') diff --git a/queries.yaml b/queries.yaml deleted file mode 100644 index 189ce0866..000000000 --- a/queries.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Adding queries to this file is deprecated -# Example queries have been transformed into collectors. \ No newline at end of file