diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 98099e295..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,104 +0,0 @@ ---- -version: 2.1 - -orbs: - prometheus: prometheus/prometheus@0.17.1 - -executors: - # This must match .promu.yml. - golang: - docker: - - image: cimg/go:1.21 - -jobs: - test: - executor: golang - - steps: - - prometheus/setup_environment - - run: GOHOSTARCH=386 GOARCH=386 make test - - run: make - - prometheus/store_artifact: - file: postgres_exporter - - integration: - docker: - - image: cimg/go:1.20 - - image: << parameters.postgres_image >> - environment: - POSTGRES_DB: circle_test - POSTGRES_USER: postgres - POSTGRES_PASSWORD: test - - parameters: - postgres_image: - type: string - - environment: - DATA_SOURCE_NAME: 'postgresql://postgres:test@localhost:5432/circle_test?sslmode=disable' - GOOPTS: '-v -tags integration' - - steps: - - checkout - - setup_remote_docker - - run: docker version - - run: make build - - run: make test - -workflows: - version: 2 - postgres_exporter: - jobs: - - test: - filters: - tags: - only: /.*/ - - integration: - matrix: - parameters: - postgres_image: - - circleci/postgres:11 - - circleci/postgres:12 - - circleci/postgres:13 - - cimg/postgres:14.9 - - cimg/postgres:15.4 - - cimg/postgres:16.0 - - prometheus/build: - name: build - parallelism: 3 - promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" - filters: - tags: - ignore: /^v.*/ - branches: - ignore: /^(main|master|release-.*|.*build-all.*)$/ - - prometheus/build: - name: build_all - parallelism: 12 - filters: - branches: - only: /^(main|master|release-.*|.*build-all.*)$/ - tags: - only: /^v.*/ - - prometheus/publish_master: - context: org-context - docker_hub_organization: prometheuscommunity - quay_io_organization: prometheuscommunity - requires: - - test - - build_all - filters: - branches: - only: master - - prometheus/publish_release: - context: org-context - docker_hub_organization: prometheuscommunity - quay_io_organization: prometheuscommunity - requires: - - test - - build_all - filters: - tags: - only: /^v.*/ - branches: - ignore: /.*/ diff --git a/.github/workflows/calculate-tag.yaml b/.github/workflows/calculate-tag.yaml new file mode 100644 index 000000000..40b9cd640 --- /dev/null +++ b/.github/workflows/calculate-tag.yaml @@ -0,0 +1,59 @@ +name: Calculate new tag + +on: + workflow_call: + outputs: + tag: + description: "The next semantic version tag based on commit messages."
+ value: ${{ jobs.calculate-tag.outputs.tag }} + inputs: + head_ref: + description: "Head ref to be used as pre-release suffix" + type: string + default: "${{ github.head_ref }}" + f3_tag: + description: "Additional tag to be prefixed to the latest upstream release tag" + type: string + default: "${{ github.sha }}" + +jobs: + calculate-tag: + runs-on: ubuntu-20.04 + permissions: read-all + outputs: + tag: "${{ steps.tag.outputs.tag }}" + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get the tag name + id: latest_tag + run: | + latest_tag="" + echo "latest_tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT + if [[ "${{ github.ref_type }}" = "tag" ]]; then + latest_tag="${GITHUB_REF#refs/tags/}" + else + latest_tag=$(git describe --tags --abbrev=1) + fi + echo "latest_tag=$latest_tag" >> $GITHUB_OUTPUT + - name: Calculate pre-release suffix + id: tag_suffix + run: | + SUFFIX="${{ inputs.head_ref }}" + if [[ "${{ github.event_name }}" = "push" && "${{ github.ref_type }}" = "branch" ]]; then + SUFFIX="release-$(date +%Y-%m-%d)" + fi + underscores_and_slashes_to_dashes_suffix="${SUFFIX//[\/_]/-}" + echo "tag_suffix=-${underscores_and_slashes_to_dashes_suffix}" >> $GITHUB_OUTPUT + + - name: Compute next tag + id: tag + run: | + latest_tag="${{ steps.latest_tag.outputs.latest_tag }}" + if [[ "${{ github.ref_type }}" = "tag" ]]; then + tag="${latest_tag}" + else + tag="${latest_tag}${{steps.tag_suffix.outputs.tag_suffix}}" + fi + echo "tag=$tag" >> $GITHUB_OUTPUT diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 000000000..1124007bb --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,82 @@ +name: CI +on: + push: + branches: + - master + tags: + - "v*.*.*" + pull_request: + +jobs: + calculate-tag: + uses: ./.github/workflows/calculate-tag.yaml + test: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0 + - name: Setup Golang + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Test + run: make test + build: + runs-on: ubuntu-latest + needs: [ calculate-tag, test ] + steps: + - name: Checkout Code + uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0 + - name: Setup Golang + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Set version + run: echo "${{ needs.calculate-tag.outputs.tag }}" | sed s/v//g > VERSION + - name: Build + run: make crossbuild + - name: Package + run: make crossbuild-tarballs + - name: Archive artifacts + uses: actions/upload-artifact@v4 + with: + name: tarballs + include-hidden-files: true + path: ./.tarballs/*.tar.gz + + commit-message: + runs-on: ubuntu-latest + outputs: + head-commit-message: ${{ steps.get_head_commit_message.outputs.headCommitMsg }} + steps: + - name: Checkout code + uses: actions/checkout@8f4b7f84864484a7bf31766abe9204da3cbe65b3 # v3.5.0 + with: + fetch-depth: 0 + ref: ${{github.event.after}} + - name: Print head git commit message + id: get_head_commit_message + run: | + msg="$(git show -s --format=%s)" + echo "Latest msg is $msg" + echo "headCommitMsg=$msg" >> $GITHUB_OUTPUT + + publish: + runs-on: ubuntu-latest + needs: [ calculate-tag, build, commit-message ] + permissions: write-all + if: contains(needs.commit-message.outputs.head-commit-message, 'pre-release') || startsWith(github.ref, 'refs/tags') + steps: + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + name: tarballs + - name: Create GH release + uses: 
softprops/action-gh-release@v1 + id: release + with: + generate_release_notes: true + tag_name: ${{ needs.calculate-tag.outputs.tag }} + prerelease: "${{ github.event_name == 'pull_request' }}" + files: | + *.tar.gz diff --git a/.gitignore b/.gitignore index e6ae827a2..d3d503d88 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,6 @@ /.metrics.*.added /.metrics.*.removed /tools/src +/assets /vendor +/build \ No newline at end of file diff --git a/.promu.yml b/.promu.yml index dbad0ba95..3b167a017 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,8 +1,7 @@ go: - # This must match .circle/config.yml. - version: 1.21 + version: 1.22 repository: - path: github.com/prometheus-community/postgres_exporter + path: github.com/form3tech-oss/postgres_exporter build: binaries: - name: postgres_exporter @@ -11,8 +10,11 @@ build: -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} - -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} +crossbuild: + platforms: + - linux/amd64 + - linux/arm64 tarball: files: - LICENSE diff --git a/Makefile b/Makefile index 114e3438f..f8792d1b1 100644 --- a/Makefile +++ b/Makefile @@ -2,8 +2,8 @@ all:: # Needs to be defined before including Makefile.common to auto-generate targets -DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le -DOCKER_REPO ?= prometheuscommunity +DOCKER_REPO ?= form3tech +CROSS_BUILD_PROMUOPTS := -p linux/arm64 #-p linux/amd64 -p windows/amd64 include Makefile.common diff --git a/Makefile.common b/Makefile.common index 062a28185..24cc34cea 100644 --- a/Makefile.common +++ b/Makefile.common @@ -78,7 +78,7 @@ ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) endif endif -PREFIX ?= $(shell pwd) +PREFIX ?= $(shell pwd)/build BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile @@ -199,10 +199,20 @@ common-build: promu @echo ">> building binaries" $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) +.PHONY: common-crossbuild +common-crossbuild: promu + @echo ">> building binaries" + $(PROMU) crossbuild -v + +.PHONY: common-crossbuild-tarballs +common-crossbuild-tarballs: promu + @echo ">> building tarballs" + $(PROMU) crossbuild -v "tarballs" + .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + $(PROMU) tarball --prefix="$(BIN_DIR)" $(BIN_DIR)/.bin .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) diff --git a/README.md b/README.md index 429058e6d..79790212b 100644 --- a/README.md +++ b/README.md @@ -255,6 +255,15 @@ The following environment variables configure the exporter: * `PG_EXPORTER_METRIC_PREFIX` A prefix to use for each of the default metrics exported by postgres-exporter. Default is `pg` + +* `PG_MAX_CONNECTIONS` + Maximum number of open connections used by the collector. Default is `10`. + +* `PG_MAX_IDLE_CONNECTIONS` + Maximum number of idle connections kept by the collector. Default is `5`. + +* `PG_COLLECTOR_TIMEOUT` + Timeout for a single collector scrape. Default is `10s`. Settings set by environment variables starting with `PG_` will be overwritten by the corresponding CLI flag if given.
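Note on the settings documented above: they map directly onto Go's `database/sql` connection pool and a per-scrape `context` deadline, which is what the `server.go`, `instance.go` and `collector/collector.go` hunks below wire up. The following standalone sketch is illustrative only and not part of the patch; the DSN and the hard-coded values stand in for the flag/environment defaults.

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq" // Postgres driver used by the exporter
)

func main() {
	// Hypothetical DSN for illustration; the exporter builds its own from the
	// DATA_SOURCE_* settings.
	dsn := "postgresql://postgres:password@localhost:5432/postgres?sslmode=disable"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// PG_MAX_CONNECTIONS / PG_MAX_IDLE_CONNECTIONS (defaults 10 and 5) are
	// applied to the database/sql pool, as the patched server.go does.
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(5)

	// PG_COLLECTOR_TIMEOUT (default 10s) bounds each scrape through a context
	// deadline, the same QueryContext pattern the collectors use.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var one int
	if err := db.QueryRowContext(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatalf("scrape query failed: %v", err)
	}
	log.Println("scrape query succeeded")
}
```

In the exporter itself the same values arrive through `--max-connections`, `--max-idle-connections` and `--collector-timeout` (or their `PG_*` environment variables) and are applied in `NewServer` and `newInstance`.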
diff --git a/VERSION b/VERSION deleted file mode 100644 index a55105169..000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.15.0 diff --git a/cmd/postgres_exporter/datasource.go b/cmd/postgres_exporter/datasource.go index 0b8cef04a..56f1b6465 100644 --- a/cmd/postgres_exporter/datasource.go +++ b/cmd/postgres_exporter/datasource.go @@ -20,6 +20,7 @@ import ( "regexp" "strings" + "github.com/form3tech-oss/go-vault-client/v4/pkg/vaultclient" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) @@ -149,6 +150,21 @@ func getDataSources() ([]string, error) { pass = os.Getenv("DATA_SOURCE_PASS") } + if len(user) == 0 || len(pass) == 0 { + secrets, err := loadSecrets() + if err != nil { + panic(err) + } + + if len(user) == 0 { + user = secrets["database-username"].(string) + } + + if len(pass) == 0 { + pass = secrets["database-password"].(string) + } + } + ui := url.UserPassword(user, pass).String() dataSrouceURIFile := os.Getenv("DATA_SOURCE_URI_FILE") if len(dataSrouceURIFile) != 0 { @@ -171,3 +187,46 @@ func getDataSources() ([]string, error) { return []string{dsn}, nil } + +func loadSecrets() (map[string]interface{}, error) { + result := make(map[string]interface{}) + vaultAuth, err := vaultclient.NewVaultAuth(vaultclient.NewDefaultConfig()) + if err != nil { + return nil, err + } + + client, err := vaultAuth.VaultClient() + if err != nil { + return nil, err + } + + secret, err := client.Logical().Read("/secret/application") + if err == nil && secret != nil { + for key, value := range secret.Data { + result[key] = value + } + } else { + level.Warn(logger).Log("msg", "error reading vault secrets from /secret/application", "err", err) + } + + secret, err = client.Logical().Read("/secret/postgres_exporter") + if err == nil && secret != nil { + for key, value := range secret.Data { + result[key] = value + } + } else { + level.Warn(logger).Log("msg", "error reading vault secrets from /secret/postgres_exporter", "err", err) + } + + dbCredsPath := os.Getenv("VAULT_DB_CREDENTIALS_PATH") + secret, err = client.Logical().Read(dbCredsPath) + if err == nil && secret != nil { + for key, value := range secret.Data { + result[key] = value + } + } else { + level.Warn(logger).Log("msg", "error reading vault secrets from "+dbCredsPath, "err", err) + } + + return result, nil +} diff --git a/cmd/postgres_exporter/main.go b/cmd/postgres_exporter/main.go index f4d454996..8f0b88e5b 100644 --- a/cmd/postgres_exporter/main.go +++ b/cmd/postgres_exporter/main.go @@ -14,16 +14,22 @@ package main import ( + "context" + "errors" "fmt" + "net" "net/http" "os" + "os/signal" "strings" + "syscall" + "time" "github.com/alecthomas/kingpin/v2" + "github.com/form3tech-oss/postgres_exporter/collector" + "github.com/form3tech-oss/postgres_exporter/config" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/prometheus-community/postgres_exporter/collector" - "github.com/prometheus-community/postgres_exporter/config" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promlog" @@ -46,10 +52,14 @@ var ( autoDiscoverDatabases = kingpin.Flag("auto-discover-databases", "Whether to discover the databases on a server dynamically. (DEPRECATED)").Default("false").Envar("PG_EXPORTER_AUTO_DISCOVER_DATABASES").Bool() queriesPath = kingpin.Flag("extend.query-path", "Path to custom queries to run. 
(DEPRECATED)").Default("").Envar("PG_EXPORTER_EXTEND_QUERY_PATH").String() onlyDumpMaps = kingpin.Flag("dumpmaps", "Do not run, simply dump the maps.").Bool() + onlyHealthCheck = kingpin.Flag("healthcheck", "Do not run, just return if up and running.").Bool() constantLabelsList = kingpin.Flag("constantLabels", "A list of label=value separated by comma(,). (DEPRECATED)").Default("").Envar("PG_EXPORTER_CONSTANT_LABELS").String() excludeDatabases = kingpin.Flag("exclude-databases", "A list of databases to remove when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_EXCLUDE_DATABASES").String() includeDatabases = kingpin.Flag("include-databases", "A list of databases to include when autoDiscoverDatabases is enabled (DEPRECATED)").Default("").Envar("PG_EXPORTER_INCLUDE_DATABASES").String() metricPrefix = kingpin.Flag("metric-prefix", "A metric prefix can be used to have non-default (not \"pg\") prefixes for each of the metrics").Default("pg").Envar("PG_EXPORTER_METRIC_PREFIX").String() + maxOpenConnections = kingpin.Flag("max-connections", "the maximum number of opened connections").Default("10").Envar("PG_MAX_CONNECTIONS").Int() + maxIdleConnections = kingpin.Flag("max-idle-connections", "the maximum number of idle connections").Default("5").Envar("PG_MAX_IDLE_CONNECTIONS").Int() + collectorTimeout = kingpin.Flag("collector-timeout", "the single collector scrape timeout").Default("10s").Envar("PG_COLLECTOR_TIMEOUT").Duration() logger = log.NewNopLogger() ) @@ -81,6 +91,18 @@ func main() { return } + if *onlyHealthCheck { + healthy, err := runHealthCheck(webConfig) + if err != nil { + level.Error(logger).Log("msg", "error running health check", "err", err) + } + if healthy { + level.Info(logger).Log("msg", "health ok") + os.Exit(0) + } + os.Exit(1) + } + if err := c.ReloadConfig(*configFile, logger); err != nil { // This is not fatal, but it means that auth must be provided for every dsn. level.Warn(logger).Log("msg", "Error loading config", "err", err) @@ -115,16 +137,19 @@ func main() { WithConstantLabels(*constantLabelsList), ExcludeDatabases(excludedDatabases), IncludeDatabases(*includeDatabases), + WithMaxOpenConnections(*maxOpenConnections), + WithMaxIdleConnections(*maxIdleConnections), + WithScrapeTimeout(*collectorTimeout), } exporter := NewExporter(dsns, opts...) - defer func() { - exporter.servers.Close() - }() - prometheus.MustRegister(version.NewCollector(exporterName)) + reg := prometheus.NewRegistry() - prometheus.MustRegister(exporter) + reg.MustRegister( + version.NewCollector(exporterName), + exporter, + ) // TODO(@sysadmind): Remove this with multi-target support. 
We are removing multiple DSN support dsn := "" @@ -132,19 +157,34 @@ func main() { dsn = dsns[0] } - pe, err := collector.NewPostgresCollector( + collOpts := []collector.Option{ + collector.WithConstantLabels(parseConstLabels(*constantLabelsList)), + collector.WithMaxIdleConnections(*maxIdleConnections), + collector.WithMaxOpenConnections(*maxOpenConnections), + collector.WithScrapeTimeout(*collectorTimeout), + } + + if *maxOpenConnections >= 0 { + collOpts = append(collOpts, collector.WithMaxOpenConnections(*maxOpenConnections)) + } + if *maxIdleConnections >= 0 { + collOpts = append(collOpts, collector.WithMaxIdleConnections(*maxIdleConnections)) + } + + pgColl, err := collector.NewPostgresCollector( logger, excludedDatabases, dsn, []string{}, + collOpts..., ) if err != nil { - level.Warn(logger).Log("msg", "Failed to create PostgresCollector", "err", err.Error()) + level.Error(logger).Log("msg", "Failed to create PostgresCollector", "err", err.Error()) } else { - prometheus.MustRegister(pe) + reg.MustRegister(pgColl) } - http.Handle(*metricsPath, promhttp.Handler()) + http.Handle(*metricsPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) if *metricsPath != "/" && *metricsPath != "" { landingConfig := web.LandingConfig{ @@ -166,11 +206,55 @@ func main() { http.Handle("/", landingPage) } - http.HandleFunc("/probe", handleProbe(logger, excludedDatabases)) - srv := &http.Server{} - if err := web.ListenAndServe(srv, webConfig, logger); err != nil { - level.Error(logger).Log("msg", "Error running HTTP server", "err", err) + srv.RegisterOnShutdown(func() { + level.Info(logger).Log("msg", "gracefully shutting down HTTP server") + exporter.servers.Close() + pgColl.Close() + }) + + go func() { + if err := web.ListenAndServe(srv, webConfig, logger); !errors.Is(err, http.ErrServerClosed) { + level.Error(logger).Log("msg", "running HTTP server", "err", err) + } + }() + + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + <-sigChan + + shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownRelease() + if err := srv.Shutdown(shutdownCtx); err != nil { + level.Error(logger).Log("msg", "during HTTP server shut down", "err", err) os.Exit(1) } + level.Info(logger).Log("msg", "HTTP server gracefully shut down") +} + +func runHealthCheck(webConfig *web.FlagConfig) (bool, error) { + if len(*webConfig.WebListenAddresses) == 0 { + return false, errors.New("no listen addresses to run the request to") + } + addr := (*webConfig.WebListenAddresses)[0] + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false, err + } + if host == "" { + host = "localhost" + } + url := fmt.Sprintf("http://%s:%s/", host, port) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, err + } + + defer resp.Body.Close() + return resp.StatusCode == 200, nil } diff --git a/cmd/postgres_exporter/namespace.go b/cmd/postgres_exporter/namespace.go index 41674007d..66fd37333 100644 --- a/cmd/postgres_exporter/namespace.go +++ b/cmd/postgres_exporter/namespace.go @@ -14,6 +14,7 @@ package main import ( + "context" "database/sql" "errors" "fmt" @@ -27,7 +28,7 @@ import ( // Query within a namespace mapping and emit metrics. Returns fatal errors if // the scrape fails, and a slice of errors if they were non-fatal. 
-func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { +func queryNamespaceMappingWithContext(ctx context.Context, server *Server, namespace string, mapping MetricMapNamespace) ([]prometheus.Metric, []error, error) { // Check for a query override for this namespace query, found := server.queryOverrides[namespace] @@ -45,19 +46,19 @@ func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNa if !found { // I've no idea how to avoid this properly at the moment, but this is // an admin tool so you're not injecting SQL right? - rows, err = server.db.Query(fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas + rows, err = server.db.QueryContext(ctx, fmt.Sprintf("SELECT * FROM %s;", namespace)) // nolint: gas } else { - rows, err = server.db.Query(query) + rows, err = server.db.QueryContext(ctx, query) } if err != nil { - return []prometheus.Metric{}, []error{}, fmt.Errorf("Error running query on database %q: %s %v", server, namespace, err) + return []prometheus.Metric{}, []error{}, fmt.Errorf("error running query on database %q: %s %v", server, namespace, err) } defer rows.Close() // nolint: errcheck var columnNames []string columnNames, err = rows.Columns() if err != nil { - return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err)) + return []prometheus.Metric{}, []error{}, errors.New(fmt.Sprintln("error retrieving column list for: ", namespace, err)) } // Make a lookup map for the column indices @@ -183,17 +184,17 @@ func queryNamespaceMapping(server *Server, namespace string, mapping MetricMapNa // Iterate through all the namespace mappings in the exporter and run their // queries. -func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[string]error { +func queryNamespaceMappings(ctx context.Context, ch chan<- prometheus.Metric, server *Server) map[string]error { // Return a map of namespace -> errors namespaceErrors := make(map[string]error) scrapeStart := time.Now() for namespace, mapping := range server.metricMap { - level.Debug(logger).Log("msg", "Querying namespace", "namespace", namespace) + level.Debug(logger).Log("msg", "querying namespace", "namespace", namespace) if mapping.master && !server.master { - level.Debug(logger).Log("msg", "Query skipped...") + level.Debug(logger).Log("msg", "query skipped...", "namespace", namespace) continue } @@ -202,7 +203,7 @@ func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[str serVersion, _ := semver.Parse(server.lastMapVersion.String()) runServerRange, _ := semver.ParseRange(server.runonserver) if !runServerRange(serVersion) { - level.Debug(logger).Log("msg", "Query skipped for this database version", "version", server.lastMapVersion.String(), "target_version", server.runonserver) + level.Debug(logger).Log("msg", "query skipped for this database version", "version", server.lastMapVersion.String(), "target_version", server.runonserver) continue } } @@ -225,20 +226,21 @@ func queryNamespaceMappings(ch chan<- prometheus.Metric, server *Server) map[str var nonFatalErrors []error var err error if scrapeMetric { - metrics, nonFatalErrors, err = queryNamespaceMapping(server, namespace, mapping) - } else { + metrics, nonFatalErrors, err = queryNamespaceMappingWithContext(ctx, server, namespace, mapping) + } else { + level.Debug(logger).Log("msg", "found cached metrics", "namespace", namespace) metrics = cachedMetric.metrics } // Serious error 
- a namespace disappeared if err != nil { namespaceErrors[namespace] = err - level.Info(logger).Log("err", err) + level.Error(logger).Log("err", err) } // Non-serious errors - likely version or parsing problems. if len(nonFatalErrors) > 0 { for _, err := range nonFatalErrors { - level.Info(logger).Log("err", err) + level.Error(logger).Log("err", err) } } diff --git a/cmd/postgres_exporter/pg_setting.go b/cmd/postgres_exporter/pg_setting.go index f162c354a..6c9fa4234 100644 --- a/cmd/postgres_exporter/pg_setting.go +++ b/cmd/postgres_exporter/pg_setting.go @@ -129,10 +129,10 @@ func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { return case "ms", "s", "min", "h", "d": unit = "seconds" - case "B", "kB", "MB", "GB", "TB", "1kB", "2kB", "4kB", "8kB", "16kB", "32kB", "64kB", "16MB", "32MB", "64MB": + case "B", "kB", "MB", "GB", "TB", "4kB", "8kB", "16kB", "32kB", "64kB", "16MB", "32MB", "64MB": unit = "bytes" default: - err = fmt.Errorf("Unknown unit for runtime variable: %q", s.unit) + err = fmt.Errorf("unknown unit for runtime variable: %q", s.unit) return } @@ -158,10 +158,6 @@ func (s *pgSetting) normaliseUnit() (val float64, unit string, err error) { val *= math.Pow(2, 30) case "TB": val *= math.Pow(2, 40) - case "1kB": - val *= math.Pow(2, 10) - case "2kB": - val *= math.Pow(2, 11) case "4kB": val *= math.Pow(2, 12) case "8kB": diff --git a/cmd/postgres_exporter/pg_setting_test.go b/cmd/postgres_exporter/pg_setting_test.go index 0e010444d..6923da630 100644 --- a/cmd/postgres_exporter/pg_setting_test.go +++ b/cmd/postgres_exporter/pg_setting_test.go @@ -214,7 +214,7 @@ var fixtures = []fixture{ n: normalised{ val: 10, unit: "", - err: `Unknown unit for runtime variable: "nonexistent"`, + err: `unknown unit for runtime variable: "nonexistent"`, }, }, } @@ -240,7 +240,7 @@ func (s *PgSettingSuite) TestNormaliseUnit(c *C) { func (s *PgSettingSuite) TestMetric(c *C) { defer func() { if r := recover(); r != nil { - if r.(error).Error() != `Unknown unit for runtime variable: "nonexistent"` { + if r.(error).Error() != `unknown unit for runtime variable: "nonexistent"` { panic(r) } } diff --git a/cmd/postgres_exporter/postgres_exporter.go b/cmd/postgres_exporter/postgres_exporter.go index fa34eecc5..d4549d0e8 100644 --- a/cmd/postgres_exporter/postgres_exporter.go +++ b/cmd/postgres_exporter/postgres_exporter.go @@ -415,11 +415,16 @@ type Exporter struct { userQueriesPath string constantLabels prometheus.Labels duration prometheus.Gauge - error prometheus.Gauge + errors prometheus.Gauge psqlUp prometheus.Gauge userQueriesError *prometheus.GaugeVec totalScrapes prometheus.Counter + // Connection settings + maxOpenConnections int + maxIdleConnections int + scrapeTimeout time.Duration + // servers are used to allow re-using the DB connection between scrapes. // servers contains metrics map and query overrides. servers *Servers @@ -472,6 +477,24 @@ func WithUserQueriesPath(p string) ExporterOpt { } } +func WithMaxOpenConnections(n int) ExporterOpt { + return func(e *Exporter) { + e.maxOpenConnections = n + } +} + +func WithMaxIdleConnections(n int) ExporterOpt { + return func(e *Exporter) { + e.maxIdleConnections = n + } +} + +func WithScrapeTimeout(d time.Duration) ExporterOpt { + return func(e *Exporter) { + e.scrapeTimeout = d + } +} + // WithConstantLabels configures constant labels. 
func WithConstantLabels(s string) ExporterOpt { return func(e *Exporter) { @@ -517,7 +540,12 @@ func NewExporter(dsn []string, opts ...ExporterOpt) *Exporter { } e.setupInternalMetrics() - e.servers = NewServers(ServerWithLabels(e.constantLabels)) + e.servers = NewServers( + ServerWithLabels(e.constantLabels), + ServerWithMaxIdleConnections(*maxIdleConnections), + ServerWithMaxOpenConnections(*maxOpenConnections), + ServerWithScrapeTimeout(e.scrapeTimeout), + ) return e } @@ -537,7 +565,7 @@ func (e *Exporter) setupInternalMetrics() { Help: "Total number of times PostgreSQL was scraped for metrics.", ConstLabels: e.constantLabels, }) - e.error = prometheus.NewGauge(prometheus.GaugeOpts{ + e.errors = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: exporter, Name: "last_scrape_error", @@ -569,7 +597,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { ch <- e.duration ch <- e.totalScrapes - ch <- e.error + ch <- e.errors ch <- e.psqlUp e.userQueriesError.Collect(ch) } @@ -676,6 +704,7 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) { var connectionErrorsCount int for _, dsn := range dsns { + level.Debug(logger).Log("msg", "start scrape", "dsn", dsn) if err := e.scrapeDSN(ch, dsn); err != nil { errorsCount++ @@ -696,8 +725,8 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) { switch errorsCount { case 0: - e.error.Set(0) + e.errors.Set(0) default: - e.error.Set(1) + e.errors.Set(1) } } diff --git a/cmd/postgres_exporter/postgres_exporter_test.go b/cmd/postgres_exporter/postgres_exporter_test.go index 0f36febf4..2a528b939 100644 --- a/cmd/postgres_exporter/postgres_exporter_test.go +++ b/cmd/postgres_exporter/postgres_exporter_test.go @@ -229,6 +229,10 @@ func (s *FunctionalSuite) TestParseFingerprint(c *C) { url: "postgresql://userDsn:passwordDsn%3D@localhost:55432/?sslmode=disabled", fingerprint: "localhost:55432", }, + { + url: "postgresql://userDsn:passwordDsn%3D@localhost:55432/foo?sslmode=disabled&options=-c%20statement_timeout%3D3min%20-c%20statement_timeout%3D1min", + fingerprint: "localhost:55432", + }, { url: "port=1234", fingerprint: "localhost:1234", diff --git a/cmd/postgres_exporter/probe.go b/cmd/postgres_exporter/probe.go deleted file mode 100644 index 5945e07b8..000000000 --- a/cmd/postgres_exporter/probe.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "net/http" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus-community/postgres_exporter/collector" - "github.com/prometheus-community/postgres_exporter/config" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -func handleProbe(logger log.Logger, excludeDatabases []string) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - conf := c.GetConfig() - params := r.URL.Query() - target := params.Get("target") - if target == "" { - http.Error(w, "target is required", http.StatusBadRequest) - return - } - var authModule config.AuthModule - authModuleName := params.Get("auth_module") - if authModuleName == "" { - level.Info(logger).Log("msg", "no auth_module specified, using default") - } else { - var ok bool - authModule, ok = conf.AuthModules[authModuleName] - if !ok { - http.Error(w, fmt.Sprintf("auth_module %s not found", authModuleName), http.StatusBadRequest) - return - } - if authModule.UserPass.Username == "" || authModule.UserPass.Password == "" { - http.Error(w, fmt.Sprintf("auth_module %s has no username or password", authModuleName), http.StatusBadRequest) - return - } - } - - dsn, err := authModule.ConfigureTarget(target) - if err != nil { - level.Error(logger).Log("msg", "failed to configure target", "err", err) - http.Error(w, fmt.Sprintf("could not configure dsn for target: %v", err), http.StatusBadRequest) - return - } - - // TODO(@sysadmind): Timeout - - tl := log.With(logger, "target", target) - - registry := prometheus.NewRegistry() - - opts := []ExporterOpt{ - DisableDefaultMetrics(*disableDefaultMetrics), - DisableSettingsMetrics(*disableSettingsMetrics), - AutoDiscoverDatabases(*autoDiscoverDatabases), - WithUserQueriesPath(*queriesPath), - WithConstantLabels(*constantLabelsList), - ExcludeDatabases(excludeDatabases), - IncludeDatabases(*includeDatabases), - } - - dsns := []string{dsn.GetConnectionString()} - exporter := NewExporter(dsns, opts...) - defer func() { - exporter.servers.Close() - }() - registry.MustRegister(exporter) - - // Run the probe - pc, err := collector.NewProbeCollector(tl, excludeDatabases, registry, dsn) - if err != nil { - level.Error(logger).Log("msg", "Error creating probe collector", "err", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Cleanup underlying connections to prevent connection leaks - defer pc.Close() - - // TODO(@sysadmind): Remove the registry.MustRegister() call below and instead handle the collection here. That will allow - // for the passing of context, handling of timeouts, and more control over the collection. - // The current NewProbeCollector() implementation relies on the MustNewConstMetric() call to create the metrics which is not - // ideal to use without the registry.MustRegister() call. 
- _ = ctx - - registry.MustRegister(pc) - - // TODO check success, etc - h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{}) - h.ServeHTTP(w, r) - } -} diff --git a/cmd/postgres_exporter/server.go b/cmd/postgres_exporter/server.go index bcfee6812..e62c76453 100644 --- a/cmd/postgres_exporter/server.go +++ b/cmd/postgres_exporter/server.go @@ -14,6 +14,7 @@ package main import ( + "context" "database/sql" "fmt" "sync" @@ -43,6 +44,11 @@ type Server struct { // Currently cached metrics metricCache map[string]cachedMetrics cacheMtx sync.Mutex + + // Connection settings and timeout + maxOpenConnections int + maxIdleConnections int + scrapeTimeout time.Duration } // ServerOpt configures a server. @@ -57,6 +63,24 @@ func ServerWithLabels(labels prometheus.Labels) ServerOpt { } } +func ServerWithMaxOpenConnections(n int) ServerOpt { + return func(e *Server) { + e.maxOpenConnections = n + } +} + +func ServerWithMaxIdleConnections(n int) ServerOpt { + return func(e *Server) { + e.maxIdleConnections = n + } +} + +func ServerWithScrapeTimeout(d time.Duration) ServerOpt { + return func(e *Server) { + e.scrapeTimeout = d + } +} + // NewServer establishes a new connection using DSN. func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { fingerprint, err := parseFingerprint(dsn) @@ -68,10 +92,8 @@ func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { if err != nil { return nil, err } - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - level.Info(logger).Log("msg", "Established new database connection", "fingerprint", fingerprint) + level.Info(logger).Log("msg", "setting up new server", "fingerprint", fingerprint) s := &Server{ db: db, @@ -86,6 +108,9 @@ func NewServer(dsn string, opts ...ServerOpt) (*Server, error) { opt(s) } + s.db.SetMaxOpenConns(s.maxOpenConnections) + s.db.SetMaxIdleConns(s.maxIdleConnections) + return s, nil } @@ -123,7 +148,10 @@ func (s *Server) Scrape(ch chan<- prometheus.Metric, disableSettingsMetrics bool } } - errMap := queryNamespaceMappings(ch, s) + ctx, cancel := context.WithTimeout(context.TODO(), s.scrapeTimeout) + defer cancel() + + errMap := queryNamespaceMappings(ctx, ch, s) if len(errMap) > 0 { err = fmt.Errorf("queryNamespaceMappings returned %d errors", len(errMap)) } @@ -163,6 +191,7 @@ func (s *Servers) GetServer(dsn string) (*Server, error) { if !ok { server, err = NewServer(dsn, s.opts...) 
if err != nil { + level.Error(logger).Log("msg", "failed create NewServer", "server", server, "err", err) time.Sleep(time.Duration(errCount) * time.Second) continue } @@ -182,9 +211,14 @@ func (s *Servers) GetServer(dsn string) (*Server, error) { func (s *Servers) Close() { s.m.Lock() defer s.m.Unlock() + if len(s.servers) == 0 { + level.Debug(logger).Log("msg", "no servers to close connection for") + return + } for _, server := range s.servers { + level.Info(logger).Log("msg", "closing server", "server", server) if err := server.Close(); err != nil { - level.Error(logger).Log("msg", "Failed to close connection", "server", server, "err", err) + level.Error(logger).Log("msg", "failed to close connection", "server", server, "err", err) } } } diff --git a/cmd/postgres_exporter/util.go b/cmd/postgres_exporter/util.go index 3a125f1d3..5310bb880 100644 --- a/cmd/postgres_exporter/util.go +++ b/cmd/postgres_exporter/util.go @@ -17,6 +17,7 @@ import ( "fmt" "math" "net/url" + "regexp" "strconv" "strings" "time" @@ -177,7 +178,12 @@ func parseFingerprint(url string) (string, error) { dsn = url } - pairs := strings.Split(dsn, " ") + re, _ := regexp.Compile(`[a-z_]+=(?:(?:'[^']+')|(?:[^\s]+))`) + pairs := re.FindAllString(dsn, -1) + if pairs == nil { + return "", fmt.Errorf("malformed dsn %q", dsn) + } + kv := make(map[string]string, len(pairs)) for _, pair := range pairs { splitted := strings.SplitN(pair, "=", 2) diff --git a/collector/collector.go b/collector/collector.go index 121129871..cc79c4c3a 100644 --- a/collector/collector.go +++ b/collector/collector.go @@ -40,6 +40,9 @@ const ( defaultEnabled = true defaultDisabled = false + + defaultMaxOpenConnections = 10 + defaultIdleConnections = 5 ) var ( @@ -64,6 +67,7 @@ type Collector interface { type collectorConfig struct { logger log.Logger excludeDatabases []string + constantLabels prometheus.Labels } func registerCollector(name string, isDefaultEnabled bool, createFunc func(collectorConfig) (Collector, error)) { @@ -91,15 +95,54 @@ type PostgresCollector struct { Collectors map[string]Collector logger log.Logger - instance *instance + instance *instance + constantLabels prometheus.Labels + maxOpenConnections int + maxIdleConnections int + scrapeTimeout time.Duration } type Option func(*PostgresCollector) error +// WithConstantLabels configures constant labels. +func WithConstantLabels(l prometheus.Labels) Option { + return func(c *PostgresCollector) error { + c.constantLabels = l + return nil + } +} + +// WithMaxOpenConnections configures the max number of open connections kept in the underlying pool. +func WithMaxOpenConnections(v int) Option { + return func(c *PostgresCollector) error { + c.maxOpenConnections = v + return nil + } +} + +// WithMaxIdleConnections configures the max number of idle connections kept in the underlying pool. +func WithMaxIdleConnections(v int) Option { + return func(c *PostgresCollector) error { + c.maxIdleConnections = v + return nil + } +} + +// WithScrapeTimeout configures the timeout for a single collector scrape. +func WithScrapeTimeout(t time.Duration) Option { + return func(c *PostgresCollector) error { + c.scrapeTimeout = t + return nil + } +} + // NewPostgresCollector creates a new PostgresCollector. 
func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn string, filters []string, options ...Option) (*PostgresCollector, error) { p := &PostgresCollector{ - logger: logger, + logger: logger, + scrapeTimeout: 5 * time.Second, + maxOpenConnections: defaultMaxOpenConnections, + maxIdleConnections: defaultIdleConnections, } // Apply options to customize the collector for _, o := range options { @@ -124,6 +167,7 @@ func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn stri initiatedCollectorsMtx.Lock() defer initiatedCollectorsMtx.Unlock() for key, enabled := range collectorState { + level.Debug(logger).Log("msg", "collector state", "name", key, "enabled", enabled) if !*enabled || (len(f) > 0 && !f[key]) { continue } @@ -133,6 +177,7 @@ func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn stri collector, err := factories[key](collectorConfig{ logger: log.With(logger, "collector", key), excludeDatabases: excludeDatabases, + constantLabels: p.constantLabels, }) if err != nil { return nil, err @@ -148,8 +193,18 @@ func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn stri return nil, errors.New("empty dsn") } - instance, err := newInstance(dsn) + instanceConf := &instanceConfiguration{ + dbMaxOpenConns: p.maxOpenConnections, + dbMaxIdleConns: p.maxIdleConnections, + } + instance, err := newInstance(dsn, instanceConf) + if err != nil { + return nil, err + } + + err = instance.setup() if err != nil { + level.Error(p.logger).Log("msg", "setting up connection to database", "err", err) return nil, err } p.instance = instance @@ -157,6 +212,12 @@ func NewPostgresCollector(logger log.Logger, excludeDatabases []string, dsn stri return p, nil } +// Close closes the underlying collector instance +func (p PostgresCollector) Close() error { + level.Debug(p.logger).Log("msg", "closing collector", "instance", p.instance) + return p.instance.Close() +} + // Describe implements the prometheus.Collector interface. func (p PostgresCollector) Describe(ch chan<- *prometheus.Desc) { ch <- scrapeDurationDesc @@ -165,45 +226,40 @@ func (p PostgresCollector) Describe(ch chan<- *prometheus.Desc) { // Collect implements the prometheus.Collector interface. func (p PostgresCollector) Collect(ch chan<- prometheus.Metric) { - ctx := context.TODO() - - // copy the instance so that concurrent scrapes have independent instances - inst := p.instance.copy() - - // Set up the database connection for the collector. 
- err := inst.setup() - if err != nil { - level.Error(p.logger).Log("msg", "Error opening connection to database", "err", err) - return - } - defer inst.Close() + ctx := context.Background() wg := sync.WaitGroup{} wg.Add(len(p.Collectors)) for name, c := range p.Collectors { go func(name string, c Collector) { - execute(ctx, name, c, inst, ch, p.logger) - wg.Done() + execute(ctx, p.scrapeTimeout, name, c, p.instance, ch, p.logger, &wg) }(name, c) } wg.Wait() } -func execute(ctx context.Context, name string, c Collector, instance *instance, ch chan<- prometheus.Metric, logger log.Logger) { +func execute(ctx context.Context, timeout time.Duration, name string, c Collector, instance *instance, ch chan<- prometheus.Metric, logger log.Logger, wg *sync.WaitGroup) { + defer wg.Done() begin := time.Now() - err := c.Update(ctx, instance, ch) + + scrapeCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + err := c.Update(scrapeCtx, instance, ch) duration := time.Since(begin) var success float64 if err != nil { + success = 0 if IsNoDataError(err) { level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) + } else if scrapeCtx.Err() == context.DeadlineExceeded { + level.Error(logger).Log("msg", "collector timedout", "name", name, "duration_seconds", duration.Seconds(), "err", err) } else { level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) } - success = 0 } else { - level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) + level.Info(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) success = 1 } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) diff --git a/collector/instance.go b/collector/instance.go index a365697d6..69fc0b7a0 100644 --- a/collector/instance.go +++ b/collector/instance.go @@ -25,11 +25,18 @@ type instance struct { dsn string db *sql.DB version semver.Version + conf *instanceConfiguration } -func newInstance(dsn string) (*instance, error) { +type instanceConfiguration struct { + dbMaxOpenConns int + dbMaxIdleConns int +} + +func newInstance(dsn string, conf *instanceConfiguration) (*instance, error) { i := &instance{ - dsn: dsn, + dsn: dsn, + conf: conf, } // "Create" a database handle to verify the DSN provided is valid. @@ -43,28 +50,21 @@ func newInstance(dsn string) (*instance, error) { return i, nil } -// copy returns a copy of the instance. 
-func (i *instance) copy() *instance { - return &instance{ - dsn: i.dsn, - } -} - func (i *instance) setup() error { db, err := sql.Open("postgres", i.dsn) if err != nil { return err } - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - i.db = db + db.SetMaxOpenConns(i.conf.dbMaxOpenConns) + db.SetMaxIdleConns(i.conf.dbMaxIdleConns) - version, err := queryVersion(i.db) + version, err := queryVersion(db) if err != nil { return fmt.Errorf("error querying postgresql version: %w", err) } else { i.version = version } + i.db = db return nil } diff --git a/collector/pg_database.go b/collector/pg_database.go index 30c4c8af0..dea685cdc 100644 --- a/collector/pg_database.go +++ b/collector/pg_database.go @@ -30,6 +30,8 @@ func init() { type PGDatabaseCollector struct { log log.Logger excludedDatabases []string + pgDatabaseSizeDesc *prometheus.Desc + pgDatabaseConnectionLimitsDesc *prometheus.Desc } func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { @@ -40,29 +42,28 @@ func NewPGDatabaseCollector(config collectorConfig) (Collector, error) { return &PGDatabaseCollector{ log: config.logger, excludedDatabases: exclude, + pgDatabaseSizeDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + databaseSubsystem, + "size_bytes", + ), + "Disk space used by the database", + []string{"datname"}, config.constantLabels, + ), + pgDatabaseConnectionLimitsDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + databaseSubsystem, + "connection_limit", + ), + "Connection limit set for the database", + []string{"datname"}, config.constantLabels, + ), }, nil } var ( - pgDatabaseSizeDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - databaseSubsystem, - "size_bytes", - ), - "Disk space used by the database", - []string{"datname"}, nil, - ) - pgDatabaseConnectionLimitsDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - databaseSubsystem, - "connection_limit", - ), - "Connection limit set for the database", - []string{"datname"}, nil, - ) - pgDatabaseQuery = "SELECT pg_database.datname, pg_database.datconnlimit FROM pg_database;" pgDatabaseSizeQuery = "SELECT pg_database_size($1)" ) @@ -113,7 +114,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, instance *instance, ch connLimitMetric = float64(connLimit.Int64) } ch <- prometheus.MustNewConstMetric( - pgDatabaseConnectionLimitsDesc, + c.pgDatabaseConnectionLimitsDesc, prometheus.GaugeValue, connLimitMetric, database, ) } @@ -131,7 +132,7 @@ func (c PGDatabaseCollector) Update(ctx context.Context, instance *instance, ch sizeMetric = size.Float64 } ch <- prometheus.MustNewConstMetric( - pgDatabaseSizeDesc, + c.pgDatabaseSizeDesc, prometheus.GaugeValue, sizeMetric, datname, ) diff --git a/collector/pg_database_test.go b/collector/pg_database_test.go index fe94166e9..c9650aa25 100644 --- a/collector/pg_database_test.go +++ b/collector/pg_database_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -40,7 +41,10 @@ func TestPGDatabaseCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGDatabaseCollector{} + c, _ := NewPGDatabaseCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) } @@ -81,7 
+85,10 @@ func TestPGDatabaseCollectorNullMetric(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGDatabaseCollector{} + c, _ := NewPGDatabaseCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGDatabaseCollector.Update: %s", err) } diff --git a/collector/pg_database_wraparound.go b/collector/pg_database_wraparound.go index d46270637..ec20c610e 100644 --- a/collector/pg_database_wraparound.go +++ b/collector/pg_database_wraparound.go @@ -29,27 +29,30 @@ func init() { } type PGDatabaseWraparoundCollector struct { - log log.Logger + log log.Logger + databaseWraparoundAgeDatfrozenxid *prometheus.Desc + databaseWraparoundAgeDatminmxid *prometheus.Desc } func NewPGDatabaseWraparoundCollector(config collectorConfig) (Collector, error) { - return &PGDatabaseWraparoundCollector{log: config.logger}, nil + return &PGDatabaseWraparoundCollector{ + log: config.logger, + databaseWraparoundAgeDatfrozenxid: prometheus.NewDesc( + prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datfrozenxid_seconds"), + "Age of the oldest transaction ID that has not been frozen.", + []string{"datname"}, + config.constantLabels, + ), + databaseWraparoundAgeDatminmxid: prometheus.NewDesc( + prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datminmxid_seconds"), + "Age of the oldest multi-transaction ID that has been replaced with a transaction ID.", + []string{"datname"}, + config.constantLabels, + ), + }, nil } var ( - databaseWraparoundAgeDatfrozenxid = prometheus.NewDesc( - prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datfrozenxid_seconds"), - "Age of the oldest transaction ID that has not been frozen.", - []string{"datname"}, - prometheus.Labels{}, - ) - databaseWraparoundAgeDatminmxid = prometheus.NewDesc( - prometheus.BuildFQName(namespace, databaseWraparoundSubsystem, "age_datminmxid_seconds"), - "Age of the oldest multi-transaction ID that has been replaced with a transaction ID.", - []string{"datname"}, - prometheus.Labels{}, - ) - databaseWraparoundQuery = ` SELECT datname, @@ -96,14 +99,14 @@ func (c *PGDatabaseWraparoundCollector) Update(ctx context.Context, instance *in ageDatfrozenxidMetric := ageDatfrozenxid.Float64 ch <- prometheus.MustNewConstMetric( - databaseWraparoundAgeDatfrozenxid, + c.databaseWraparoundAgeDatfrozenxid, prometheus.GaugeValue, ageDatfrozenxidMetric, datname.String, ) ageDatminmxidMetric := ageDatminmxid.Float64 ch <- prometheus.MustNewConstMetric( - databaseWraparoundAgeDatminmxid, + c.databaseWraparoundAgeDatminmxid, prometheus.GaugeValue, ageDatminmxidMetric, datname.String, ) diff --git a/collector/pg_database_wraparound_test.go b/collector/pg_database_wraparound_test.go index d0a74c362..fac9f853c 100644 --- a/collector/pg_database_wraparound_test.go +++ b/collector/pg_database_wraparound_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -42,7 +43,10 @@ func TestPGDatabaseWraparoundCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGDatabaseWraparoundCollector{} + c, _ := NewPGDatabaseWraparoundCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := 
c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGDatabaseWraparoundCollector.Update: %s", err) diff --git a/collector/pg_locks.go b/collector/pg_locks.go index d2c77ccd5..efc998811 100644 --- a/collector/pg_locks.go +++ b/collector/pg_locks.go @@ -29,24 +29,27 @@ func init() { type PGLocksCollector struct { log log.Logger + pgLocksDesc *prometheus.Desc } func NewPGLocksCollector(config collectorConfig) (Collector, error) { return &PGLocksCollector{ log: config.logger, + pgLocksDesc : prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + locksSubsystem, + "count", + ), + "Number of locks", + []string{"datname", "mode"}, + config.constantLabels, + ), }, nil } var ( - pgLocksDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - locksSubsystem, - "count", - ), - "Number of locks", - []string{"datname", "mode"}, nil, - ) + pgLocksQuery = ` SELECT @@ -117,7 +120,7 @@ func (c PGLocksCollector) Update(ctx context.Context, instance *instance, ch cha } ch <- prometheus.MustNewConstMetric( - pgLocksDesc, + c.pgLocksDesc, prometheus.GaugeValue, countMetric, datname.String, mode.String, ) diff --git a/collector/pg_locks_test.go b/collector/pg_locks_test.go index 99597ea2d..5455fa286 100644 --- a/collector/pg_locks_test.go +++ b/collector/pg_locks_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -39,7 +40,10 @@ func TestPGLocksCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGLocksCollector{} + c, _ := NewPGLocksCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGLocksCollector.Update: %s", err) } diff --git a/collector/pg_long_running_transactions.go b/collector/pg_long_running_transactions.go index ffd89d5f0..419865409 100644 --- a/collector/pg_long_running_transactions.go +++ b/collector/pg_long_running_transactions.go @@ -28,27 +28,29 @@ func init() { type PGLongRunningTransactionsCollector struct { log log.Logger + longRunningTransactionsCount *prometheus.Desc + longRunningTransactionsAgeInSeconds *prometheus.Desc } func NewPGLongRunningTransactionsCollector(config collectorConfig) (Collector, error) { - return &PGLongRunningTransactionsCollector{log: config.logger}, nil + return &PGLongRunningTransactionsCollector{ + log: config.logger, + longRunningTransactionsCount : prometheus.NewDesc( + "pg_long_running_transactions", + "Current number of long running transactions", + []string{}, + config.constantLabels, + ), + longRunningTransactionsAgeInSeconds : prometheus.NewDesc( + prometheus.BuildFQName(namespace, longRunningTransactionsSubsystem, "oldest_timestamp_seconds"), + "The current maximum transaction age in seconds", + []string{}, + config.constantLabels, + ), + }, nil } var ( - longRunningTransactionsCount = prometheus.NewDesc( - "pg_long_running_transactions", - "Current number of long running transactions", - []string{}, - prometheus.Labels{}, - ) - - longRunningTransactionsAgeInSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, longRunningTransactionsSubsystem, "oldest_timestamp_seconds"), - "The current maximum transaction age in seconds", - []string{}, - prometheus.Labels{}, - ) - longRunningTransactionsQuery = ` SELECT COUNT(*) as transactions, 
@@ -58,7 +60,7 @@ var ( ` ) -func (PGLongRunningTransactionsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGLongRunningTransactionsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() rows, err := db.QueryContext(ctx, longRunningTransactionsQuery) @@ -76,12 +78,12 @@ func (PGLongRunningTransactionsCollector) Update(ctx context.Context, instance * } ch <- prometheus.MustNewConstMetric( - longRunningTransactionsCount, + c.longRunningTransactionsCount, prometheus.GaugeValue, transactions, ) ch <- prometheus.MustNewConstMetric( - longRunningTransactionsAgeInSeconds, + c.longRunningTransactionsAgeInSeconds, prometheus.GaugeValue, ageInSeconds, ) diff --git a/collector/pg_long_running_transactions_test.go b/collector/pg_long_running_transactions_test.go index eedda7c65..01ead7f0c 100644 --- a/collector/pg_long_running_transactions_test.go +++ b/collector/pg_long_running_transactions_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -41,7 +42,10 @@ func TestPGLongRunningTransactionsCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGLongRunningTransactionsCollector{} + c, _ := NewPGLongRunningTransactionsCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGLongRunningTransactionsCollector.Update: %s", err) diff --git a/collector/pg_postmaster.go b/collector/pg_postmaster.go index b81e4f905..49fcea17f 100644 --- a/collector/pg_postmaster.go +++ b/collector/pg_postmaster.go @@ -27,22 +27,26 @@ func init() { } type PGPostmasterCollector struct { + pgPostMasterStartTimeSeconds *prometheus.Desc } -func NewPGPostmasterCollector(collectorConfig) (Collector, error) { - return &PGPostmasterCollector{}, nil +func NewPGPostmasterCollector(config collectorConfig) (Collector, error) { + return &PGPostmasterCollector{ + pgPostMasterStartTimeSeconds : prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + postmasterSubsystem, + "start_time_seconds", + ), + "Time at which postmaster started", + []string{}, + config.constantLabels, + ), + }, nil } var ( - pgPostMasterStartTimeSeconds = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - postmasterSubsystem, - "start_time_seconds", - ), - "Time at which postmaster started", - []string{}, nil, - ) + pgPostmasterQuery = "SELECT extract(epoch from pg_postmaster_start_time) from pg_postmaster_start_time();" ) @@ -62,7 +66,7 @@ func (c *PGPostmasterCollector) Update(ctx context.Context, instance *instance, startTimeSecondsMetric = startTimeSeconds.Float64 } ch <- prometheus.MustNewConstMetric( - pgPostMasterStartTimeSeconds, + c.pgPostMasterStartTimeSeconds, prometheus.GaugeValue, startTimeSecondsMetric, ) return nil diff --git a/collector/pg_postmaster_test.go b/collector/pg_postmaster_test.go index 8405b4225..dbe31a4d4 100644 --- a/collector/pg_postmaster_test.go +++ b/collector/pg_postmaster_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -37,7 +38,10 @@ func 
TestPgPostmasterCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGPostmasterCollector{} + c, _ := NewPGPostmasterCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) @@ -73,7 +77,10 @@ func TestPgPostmasterCollectorNullTime(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGPostmasterCollector{} + c, _ := NewPGPostmasterCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) diff --git a/collector/pg_process_idle.go b/collector/pg_process_idle.go index c401ab56f..5b226506d 100644 --- a/collector/pg_process_idle.go +++ b/collector/pg_process_idle.go @@ -28,23 +28,25 @@ func init() { } type PGProcessIdleCollector struct { - log log.Logger + log log.Logger + pgProcessIdleSeconds *prometheus.Desc } const processIdleSubsystem = "process_idle" func NewPGProcessIdleCollector(config collectorConfig) (Collector, error) { - return &PGProcessIdleCollector{log: config.logger}, nil + return &PGProcessIdleCollector{ + log: config.logger, + pgProcessIdleSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), + "Idle time of server processes", + []string{"state", "application_name"}, + config.constantLabels, + ), + }, nil } -var pgProcessIdleSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, processIdleSubsystem, "seconds"), - "Idle time of server processes", - []string{"state", "application_name"}, - prometheus.Labels{}, -) - -func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() row := db.QueryRowContext(ctx, `WITH @@ -124,7 +126,7 @@ func (PGProcessIdleCollector) Update(ctx context.Context, instance *instance, ch secondsSumMetric = secondsSum.Float64 } ch <- prometheus.MustNewConstHistogram( - pgProcessIdleSeconds, + c.pgProcessIdleSeconds, secondsCountMetric, secondsSumMetric, buckets, stateLabel, applicationNameLabel, ) diff --git a/collector/pg_replication.go b/collector/pg_replication.go index 6067cc9b1..9cbce9050 100644 --- a/collector/pg_replication.go +++ b/collector/pg_replication.go @@ -26,33 +26,36 @@ func init() { } type PGReplicationCollector struct { + pgReplicationLag *prometheus.Desc + pgReplicationIsReplica *prometheus.Desc } -func NewPGReplicationCollector(collectorConfig) (Collector, error) { - return &PGReplicationCollector{}, nil -} - -var ( - pgReplicationLag = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSubsystem, - "lag_seconds", +func NewPGReplicationCollector(config collectorConfig) (Collector, error) { + return &PGReplicationCollector{ + pgReplicationLag : prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSubsystem, + "lag_seconds", + ), + "Replication lag behind master in seconds", + []string{}, + config.constantLabels, ), - "Replication lag behind master in seconds", - []string{}, nil, - ) - pgReplicationIsReplica = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSubsystem, - "is_replica", + pgReplicationIsReplica : 
prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSubsystem, + "is_replica", + ), + "Indicates if the server is a replica", + []string{}, + config.constantLabels, ), - "Indicates if the server is a replica", - []string{}, nil, - ) + }, nil +} - pgReplicationQuery = `SELECT +var pgReplicationQuery = `SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 WHEN pg_last_wal_receive_lsn () = pg_last_wal_replay_lsn () THEN 0 @@ -62,7 +65,6 @@ var ( WHEN pg_is_in_recovery() THEN 1 ELSE 0 END as is_replica` -) func (c *PGReplicationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() @@ -77,11 +79,11 @@ func (c *PGReplicationCollector) Update(ctx context.Context, instance *instance, return err } ch <- prometheus.MustNewConstMetric( - pgReplicationLag, + c.pgReplicationLag, prometheus.GaugeValue, lag, ) ch <- prometheus.MustNewConstMetric( - pgReplicationIsReplica, + c.pgReplicationIsReplica, prometheus.GaugeValue, float64(isReplica), ) return nil diff --git a/collector/pg_replication_slot.go b/collector/pg_replication_slot.go index 1d29f8498..78489b23c 100644 --- a/collector/pg_replication_slot.go +++ b/collector/pg_replication_slot.go @@ -28,60 +28,59 @@ func init() { } type PGReplicationSlotCollector struct { - log log.Logger + log log.Logger + pgReplicationSlotCurrentWalDesc *prometheus.Desc + pgReplicationSlotCurrentFlushDesc *prometheus.Desc + pgReplicationSlotIsActiveDesc *prometheus.Desc + pgReplicationSlotWalStatus *prometheus.Desc } func NewPGReplicationSlotCollector(config collectorConfig) (Collector, error) { - return &PGReplicationSlotCollector{log: config.logger}, nil -} - -var ( - pgReplicationSlotCurrentWalDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_current_wal_lsn", + return &PGReplicationSlotCollector{log: config.logger, + pgReplicationSlotCurrentWalDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSlotSubsystem, + "slot_current_wal_lsn", + ), + "current wal lsn value", + []string{"slot_name", "slot_type"}, + config.constantLabels, ), - "current wal lsn value", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotCurrentFlushDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_confirmed_flush_lsn", + pgReplicationSlotCurrentFlushDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSlotSubsystem, + "slot_confirmed_flush_lsn", + ), + "last lsn confirmed flushed to the replication slot", + []string{"slot_name", "slot_type"}, + config.constantLabels, ), - "last lsn confirmed flushed to the replication slot", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotIsActiveDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "slot_is_active", + pgReplicationSlotIsActiveDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSlotSubsystem, + "slot_is_active", + ), + "whether the replication slot is active or not", + []string{"slot_name", "slot_type"}, + config.constantLabels, ), - "whether the replication slot is active or not", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotSafeWal = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "safe_wal_size_bytes", + pgReplicationSlotWalStatus: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + replicationSlotSubsystem, + "wal_status", + ), + "availability 
of WAL files claimed by this slot", + []string{"slot_name", "slot_type", "wal_status"}, + config.constantLabels, ), - "number of bytes that can be written to WAL such that this slot is not in danger of getting in state lost", - []string{"slot_name", "slot_type"}, nil, - ) - pgReplicationSlotWalStatus = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - replicationSlotSubsystem, - "wal_status", - ), - "availability of WAL files claimed by this slot", - []string{"slot_name", "slot_type", "wal_status"}, nil, - ) + }, nil +} +var ( pgReplicationSlotQuery = `SELECT slot_name, slot_type, @@ -92,12 +91,11 @@ var ( END AS current_wal_lsn, COALESCE(confirmed_flush_lsn, '0/0') - '0/0' AS confirmed_flush_lsn, active, - safe_wal_size, wal_status FROM pg_replication_slots;` ) -func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGReplicationSlotCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() rows, err := db.QueryContext(ctx, pgReplicationSlotQuery) @@ -112,9 +110,8 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance var walLSN sql.NullFloat64 var flushLSN sql.NullFloat64 var isActive sql.NullBool - var safeWalSize sql.NullInt64 var walStatus sql.NullString - if err := rows.Scan(&slotName, &slotType, &walLSN, &flushLSN, &isActive, &safeWalSize, &walStatus); err != nil { + if err := rows.Scan(&slotName, &slotType, &walLSN, &flushLSN, &isActive, &walStatus); err != nil { return err } @@ -136,7 +133,7 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance walLSNMetric = walLSN.Float64 } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentWalDesc, + c.pgReplicationSlotCurrentWalDesc, prometheus.GaugeValue, walLSNMetric, slotNameLabel, slotTypeLabel, ) if isActive.Valid && isActive.Bool { @@ -145,25 +142,18 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance flushLSNMetric = flushLSN.Float64 } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotCurrentFlushDesc, + c.pgReplicationSlotCurrentFlushDesc, prometheus.GaugeValue, flushLSNMetric, slotNameLabel, slotTypeLabel, ) } ch <- prometheus.MustNewConstMetric( - pgReplicationSlotIsActiveDesc, + c.pgReplicationSlotIsActiveDesc, prometheus.GaugeValue, isActiveValue, slotNameLabel, slotTypeLabel, ) - if safeWalSize.Valid { - ch <- prometheus.MustNewConstMetric( - pgReplicationSlotSafeWal, - prometheus.GaugeValue, float64(safeWalSize.Int64), slotNameLabel, slotTypeLabel, - ) - } - if walStatus.Valid { ch <- prometheus.MustNewConstMetric( - pgReplicationSlotWalStatus, + c.pgReplicationSlotWalStatus, prometheus.GaugeValue, 1, slotNameLabel, slotTypeLabel, walStatus.String, ) } diff --git a/collector/pg_replication_slot_test.go b/collector/pg_replication_slot_test.go index 174743ac3..930b72a09 100644 --- a/collector/pg_replication_slot_test.go +++ b/collector/pg_replication_slot_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -31,15 +32,18 @@ func TestPgReplicationSlotCollectorActive(t *testing.T) { inst := &instance{db: db} - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} + columns := []string{"slot_name", "slot_type", 
"current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"} rows := sqlmock.NewRows(columns). - AddRow("test_slot", "physical", 5, 3, true, 323906992, "reserved") + AddRow("test_slot", "physical", 5, 3, true, "reserved") mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGReplicationSlotCollector{} + c, _ := NewPGReplicationSlotCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGPostmasterCollector.Update: %s", err) @@ -50,7 +54,6 @@ func TestPgReplicationSlotCollectorActive(t *testing.T) { {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 5, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 3, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 1, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 323906992, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "reserved"}, value: 1, metricType: dto.MetricType_GAUGE}, } @@ -74,15 +77,18 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) { inst := &instance{db: db} - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} + columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"} rows := sqlmock.NewRows(columns). - AddRow("test_slot", "physical", 6, 12, false, -4000, "extended") + AddRow("test_slot", "physical", 6, 12, false, "extended") mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGReplicationSlotCollector{} + c, _ := NewPGReplicationSlotCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) @@ -92,7 +98,6 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) { expected := []MetricResult{ {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 6, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 0, metricType: dto.MetricType_GAUGE}, - {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: -4000, metricType: dto.MetricType_GAUGE}, {labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "extended"}, value: 1, metricType: dto.MetricType_GAUGE}, } @@ -117,15 +122,18 @@ func TestPgReplicationSlotCollectorActiveNil(t *testing.T) { inst := &instance{db: db} - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} + columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"} rows := sqlmock.NewRows(columns). 
- AddRow("test_slot", "physical", 6, 12, nil, nil, "lost") + AddRow("test_slot", "physical", 6, 12, nil, "lost") mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGReplicationSlotCollector{} + c, _ := NewPGReplicationSlotCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) @@ -158,15 +166,18 @@ func TestPgReplicationSlotCollectorTestNilValues(t *testing.T) { inst := &instance{db: db} - columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"} + columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"} rows := sqlmock.NewRows(columns). - AddRow(nil, nil, nil, nil, true, nil, nil) + AddRow(nil, nil, nil, nil, true, nil) mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGReplicationSlotCollector{} + c, _ := NewPGReplicationSlotCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGReplicationSlotCollector.Update: %s", err) diff --git a/collector/pg_replication_test.go b/collector/pg_replication_test.go index b6df698e3..3d598298b 100644 --- a/collector/pg_replication_test.go +++ b/collector/pg_replication_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -39,7 +40,10 @@ func TestPgReplicationCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGReplicationCollector{} + c, _ := NewPGReplicationCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGReplicationCollector.Update: %s", err) diff --git a/collector/pg_roles.go b/collector/pg_roles.go index 609c34c33..e24672c29 100644 --- a/collector/pg_roles.go +++ b/collector/pg_roles.go @@ -29,31 +29,30 @@ func init() { type PGRolesCollector struct { log log.Logger + pgRolesConnectionLimitsDesc *prometheus.Desc } func NewPGRolesCollector(config collectorConfig) (Collector, error) { return &PGRolesCollector{ log: config.logger, + pgRolesConnectionLimitsDesc: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + rolesSubsystem, + "connection_limit", + ), + "Connection limit set for the role", + []string{"rolname"}, + config.constantLabels, + ), }, nil } -var ( - pgRolesConnectionLimitsDesc = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - rolesSubsystem, - "connection_limit", - ), - "Connection limit set for the role", - []string{"rolname"}, nil, - ) - - pgRolesConnectionLimitsQuery = "SELECT pg_roles.rolname, pg_roles.rolconnlimit FROM pg_roles" -) +var pgRolesConnectionLimitsQuery = "SELECT pg_roles.rolname, pg_roles.rolconnlimit FROM pg_roles" // Update implements Collector and exposes roles connection limits. // It is called by the Prometheus registry when collecting metrics. 
-func (c PGRolesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGRolesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() // Query the list of databases rows, err := db.QueryContext(ctx, @@ -82,7 +81,7 @@ func (c PGRolesCollector) Update(ctx context.Context, instance *instance, ch cha connLimitMetric := float64(connLimit.Int64) ch <- prometheus.MustNewConstMetric( - pgRolesConnectionLimitsDesc, + c.pgRolesConnectionLimitsDesc, prometheus.GaugeValue, connLimitMetric, rolnameLabel, ) } diff --git a/collector/pg_roles_test.go b/collector/pg_roles_test.go index 182a120f9..36e04c893 100644 --- a/collector/pg_roles_test.go +++ b/collector/pg_roles_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -37,7 +38,10 @@ func TestPGRolesCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGRolesCollector{} + c, _ := NewPGRolesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGRolesCollector.Update: %s", err) } diff --git a/collector/pg_stat_activity_autovacuum.go b/collector/pg_stat_activity_autovacuum.go index 6cf8cdcec..384b1a40e 100644 --- a/collector/pg_stat_activity_autovacuum.go +++ b/collector/pg_stat_activity_autovacuum.go @@ -27,21 +27,23 @@ func init() { } type PGStatActivityAutovacuumCollector struct { - log log.Logger + log log.Logger + statActivityAutovacuumAgeInSeconds *prometheus.Desc } func NewPGStatActivityAutovacuumCollector(config collectorConfig) (Collector, error) { - return &PGStatActivityAutovacuumCollector{log: config.logger}, nil + return &PGStatActivityAutovacuumCollector{ + log: config.logger, + statActivityAutovacuumAgeInSeconds: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statActivityAutovacuumSubsystem, "timestamp_seconds"), + "Start timestamp of the vacuum process in seconds", + []string{"relname"}, + config.constantLabels, + ), + }, nil } var ( - statActivityAutovacuumAgeInSeconds = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statActivityAutovacuumSubsystem, "timestamp_seconds"), - "Start timestamp of the vacuum process in seconds", - []string{"relname"}, - prometheus.Labels{}, - ) - statActivityAutovacuumQuery = ` SELECT SPLIT_PART(query, '.', 2) AS relname, @@ -53,7 +55,7 @@ var ( ` ) -func (PGStatActivityAutovacuumCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGStatActivityAutovacuumCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() rows, err := db.QueryContext(ctx, statActivityAutovacuumQuery) @@ -72,7 +74,7 @@ func (PGStatActivityAutovacuumCollector) Update(ctx context.Context, instance *i } ch <- prometheus.MustNewConstMetric( - statActivityAutovacuumAgeInSeconds, + c.statActivityAutovacuumAgeInSeconds, prometheus.GaugeValue, ageInSeconds, relname, ) diff --git a/collector/pg_stat_activity_autovacuum_test.go b/collector/pg_stat_activity_autovacuum_test.go index a6fcdbcad..61c11b246 100644 --- a/collector/pg_stat_activity_autovacuum_test.go +++ b/collector/pg_stat_activity_autovacuum_test.go @@ -17,6 +17,7 @@ import ( 
"testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -41,7 +42,10 @@ func TestPGStatActivityAutovacuumCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatActivityAutovacuumCollector{} + c, _ := NewPGStatActivityAutovacuumCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatActivityAutovacuumCollector.Update: %s", err) diff --git a/collector/pg_stat_bgwriter.go b/collector/pg_stat_bgwriter.go index ec446d58c..d9abe5b08 100644 --- a/collector/pg_stat_bgwriter.go +++ b/collector/pg_stat_bgwriter.go @@ -27,80 +27,91 @@ func init() { } type PGStatBGWriterCollector struct { + statBGWriterCheckpointsTimedDesc *prometheus.Desc + statBGWriterCheckpointsReqDesc *prometheus.Desc + statBGWriterCheckpointsReqTimeDesc *prometheus.Desc + statBGWriterCheckpointsSyncTimeDesc *prometheus.Desc + statBGWriterBuffersCheckpointDesc *prometheus.Desc + statBGWriterBuffersCleanDesc *prometheus.Desc + statBGWriterMaxwrittenCleanDesc *prometheus.Desc + statBGWriterBuffersBackendDesc *prometheus.Desc + statBGWriterBuffersBackendFsyncDesc *prometheus.Desc + statBGWriterBuffersAllocDesc *prometheus.Desc + statBGWriterStatsResetDesc *prometheus.Desc } -func NewPGStatBGWriterCollector(collectorConfig) (Collector, error) { - return &PGStatBGWriterCollector{}, nil +func NewPGStatBGWriterCollector(config collectorConfig) (Collector, error) { + return &PGStatBGWriterCollector{ + statBGWriterCheckpointsTimedDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), + "Number of scheduled checkpoints that have been performed", + []string{}, + config.constantLabels, + ), + statBGWriterCheckpointsReqDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), + "Number of requested checkpoints that have been performed", + []string{}, + config.constantLabels, + ), + statBGWriterCheckpointsReqTimeDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), + "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", + []string{}, + config.constantLabels, + ), + statBGWriterCheckpointsSyncTimeDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), + "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", + []string{}, + config.constantLabels, + ), + statBGWriterBuffersCheckpointDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), + "Number of buffers written during checkpoints", + []string{}, + config.constantLabels, + ), + statBGWriterBuffersCleanDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), + "Number of buffers written by the background writer", + []string{}, + config.constantLabels, + ), + statBGWriterMaxwrittenCleanDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), + "Number of times the background writer stopped a cleaning scan because it had written too many 
buffers", + []string{}, + config.constantLabels, + ), + statBGWriterBuffersBackendDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), + "Number of buffers written directly by a backend", + []string{}, + config.constantLabels, + ), + statBGWriterBuffersBackendFsyncDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), + "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", + []string{}, + config.constantLabels, + ), + statBGWriterBuffersAllocDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), + "Number of buffers allocated", + []string{}, + config.constantLabels, + ), + statBGWriterStatsResetDesc: prometheus.NewDesc( + prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), + "Time at which these statistics were last reset", + []string{}, + config.constantLabels, + ), + }, nil } var ( - statBGWriterCheckpointsTimedDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_timed_total"), - "Number of scheduled checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoints_req_total"), - "Number of requested checkpoints that have been performed", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsReqTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_write_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterCheckpointsSyncTimeDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "checkpoint_sync_time_total"), - "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCheckpointDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_checkpoint_total"), - "Number of buffers written during checkpoints", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_clean_total"), - "Number of buffers written by the background writer", - []string{}, - prometheus.Labels{}, - ) - statBGWriterMaxwrittenCleanDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "maxwritten_clean_total"), - "Number of times the background writer stopped a cleaning scan because it had written too many buffers", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_total"), - "Number of buffers written directly by a backend", - []string{}, - prometheus.Labels{}, - ) - statBGWriterBuffersBackendFsyncDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_backend_fsync_total"), - "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", - []string{}, - prometheus.Labels{}, - ) - 
statBGWriterBuffersAllocDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "buffers_alloc_total"), - "Number of buffers allocated", - []string{}, - prometheus.Labels{}, - ) - statBGWriterStatsResetDesc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, bgWriterSubsystem, "stats_reset_total"), - "Time at which these statistics were last reset", - []string{}, - prometheus.Labels{}, - ) - statBGWriterQuery = `SELECT checkpoints_timed ,checkpoints_req @@ -116,7 +127,7 @@ var ( FROM pg_stat_bgwriter;` ) -func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() row := db.QueryRowContext(ctx, statBGWriterQuery) @@ -135,7 +146,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c cptMetric = float64(cpt.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsTimedDesc, + c.statBGWriterCheckpointsTimedDesc, prometheus.CounterValue, cptMetric, ) @@ -144,7 +155,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c cprMetric = float64(cpr.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqDesc, + c.statBGWriterCheckpointsReqDesc, prometheus.CounterValue, cprMetric, ) @@ -153,7 +164,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c cpwtMetric = float64(cpwt.Float64) } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsReqTimeDesc, + c.statBGWriterCheckpointsReqTimeDesc, prometheus.CounterValue, cpwtMetric, ) @@ -162,7 +173,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c cpstMetric = float64(cpst.Float64) } ch <- prometheus.MustNewConstMetric( - statBGWriterCheckpointsSyncTimeDesc, + c.statBGWriterCheckpointsSyncTimeDesc, prometheus.CounterValue, cpstMetric, ) @@ -171,7 +182,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c bcpMetric = float64(bcp.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCheckpointDesc, + c.statBGWriterBuffersCheckpointDesc, prometheus.CounterValue, bcpMetric, ) @@ -180,7 +191,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c bcMetric = float64(bc.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersCleanDesc, + c.statBGWriterBuffersCleanDesc, prometheus.CounterValue, bcMetric, ) @@ -189,7 +200,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c mwcMetric = float64(mwc.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterMaxwrittenCleanDesc, + c.statBGWriterMaxwrittenCleanDesc, prometheus.CounterValue, mwcMetric, ) @@ -198,7 +209,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c bbMetric = float64(bb.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendDesc, + c.statBGWriterBuffersBackendDesc, prometheus.CounterValue, bbMetric, ) @@ -207,7 +218,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c bbfMetric = float64(bbf.Int64) } ch <- prometheus.MustNewConstMetric( - statBGWriterBuffersBackendFsyncDesc, + c.statBGWriterBuffersBackendFsyncDesc, prometheus.CounterValue, bbfMetric, ) @@ -216,7 +227,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c baMetric = float64(ba.Int64) } ch <- 
prometheus.MustNewConstMetric( - statBGWriterBuffersAllocDesc, + c.statBGWriterBuffersAllocDesc, prometheus.CounterValue, baMetric, ) @@ -225,7 +236,7 @@ func (PGStatBGWriterCollector) Update(ctx context.Context, instance *instance, c srMetric = float64(sr.Time.Unix()) } ch <- prometheus.MustNewConstMetric( - statBGWriterStatsResetDesc, + c.statBGWriterStatsResetDesc, prometheus.CounterValue, srMetric, ) diff --git a/collector/pg_stat_bgwriter_test.go b/collector/pg_stat_bgwriter_test.go index 1c2cf98de..d950a4210 100644 --- a/collector/pg_stat_bgwriter_test.go +++ b/collector/pg_stat_bgwriter_test.go @@ -18,6 +18,7 @@ import ( "time" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -57,7 +58,10 @@ func TestPGStatBGWriterCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatBGWriterCollector{} + c, _ := NewPGStatBGWriterCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) @@ -118,7 +122,10 @@ func TestPGStatBGWriterCollectorNullValues(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatBGWriterCollector{} + c, _ := NewPGStatBGWriterCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatBGWriterCollector.Update: %s", err) diff --git a/collector/pg_stat_database.go b/collector/pg_stat_database.go index ea7075303..1b1a6be78 100644 --- a/collector/pg_stat_database.go +++ b/collector/pg_stat_database.go @@ -29,193 +29,201 @@ func init() { } type PGStatDatabaseCollector struct { - log log.Logger + log log.Logger + statDatabaseNumbackends *prometheus.Desc + statDatabaseXactCommit *prometheus.Desc + statDatabaseXactRollback *prometheus.Desc + statDatabaseBlksRead *prometheus.Desc + statDatabaseBlksHit *prometheus.Desc + statDatabaseTupReturned *prometheus.Desc + statDatabaseTupFetched *prometheus.Desc + statDatabaseTupInserted *prometheus.Desc + statDatabaseTupUpdated *prometheus.Desc + statDatabaseTupDeleted *prometheus.Desc + statDatabaseConflicts *prometheus.Desc + statDatabaseTempFiles *prometheus.Desc + statDatabaseTempBytes *prometheus.Desc + statDatabaseDeadlocks *prometheus.Desc + statDatabaseBlkReadTime *prometheus.Desc + statDatabaseBlkWriteTime *prometheus.Desc + statDatabaseStatsReset *prometheus.Desc } func NewPGStatDatabaseCollector(config collectorConfig) (Collector, error) { - return &PGStatDatabaseCollector{log: config.logger}, nil -} - -var ( - statDatabaseNumbackends = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "numbackends", + return &PGStatDatabaseCollector{ + log: config.logger, + statDatabaseNumbackends: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "numbackends", + ), + "Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of backends currently connected to this database. 
This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseXactCommit = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "xact_commit", + statDatabaseXactCommit: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "xact_commit", + ), + "Number of transactions in this database that have been committed", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of transactions in this database that have been committed", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseXactRollback = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "xact_rollback", + statDatabaseXactRollback: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "xact_rollback", + ), + "Number of transactions in this database that have been rolled back", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of transactions in this database that have been rolled back", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlksRead = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blks_read", + statDatabaseBlksRead: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "blks_read", + ), + "Number of disk blocks read in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of disk blocks read in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlksHit = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blks_hit", + statDatabaseBlksHit: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "blks_hit", + ), + "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupReturned = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_returned", + statDatabaseTupReturned: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "tup_returned", + ), + "Number of rows returned by queries in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of rows returned by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupFetched = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_fetched", + statDatabaseTupFetched: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "tup_fetched", + ), + "Number of rows fetched by queries in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of rows fetched by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupInserted = prometheus.NewDesc( 
- prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_inserted", + statDatabaseTupInserted: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "tup_inserted", + ), + "Number of rows inserted by queries in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of rows inserted by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupUpdated = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_updated", + statDatabaseTupUpdated: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "tup_updated", + ), + "Number of rows updated by queries in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of rows updated by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTupDeleted = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "tup_deleted", + statDatabaseTupDeleted: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "tup_deleted", + ), + "Number of rows deleted by queries in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of rows deleted by queries in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseConflicts = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "conflicts", + statDatabaseConflicts: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "conflicts", + ), + "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTempFiles = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "temp_files", + statDatabaseTempFiles: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "temp_files", + ), + "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseTempBytes = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "temp_bytes", + statDatabaseTempBytes: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "temp_bytes", + ), + "Total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", + []string{"datid", "datname"}, + config.constantLabels, ), - "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseDeadlocks = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "deadlocks", + statDatabaseDeadlocks: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "deadlocks", + ), + "Number of deadlocks detected in this database", + []string{"datid", "datname"}, + config.constantLabels, ), - "Number of deadlocks detected in this database", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlkReadTime = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "blk_read_time", + statDatabaseBlkReadTime: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "blk_read_time", + ), + "Time spent reading data file blocks by backends in this database, in milliseconds", + []string{"datid", "datname"}, + config.constantLabels, ), - "Time spent reading data file blocks by backends in this database, in milliseconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseBlkWriteTime = prometheus.NewDesc( - prometheus.BuildFQName( + statDatabaseBlkWriteTime: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + statDatabaseSubsystem, + "blk_write_time", + ), + "Time spent writing data file blocks by backends in this database, in milliseconds", + []string{"datid", "datname"}, + config.constantLabels, + ), + statDatabaseStatsReset: prometheus.NewDesc(prometheus.BuildFQName( namespace, statDatabaseSubsystem, - "blk_write_time", + "stats_reset"), + "Time at which these statistics were last reset", + []string{"datid", "datname"}, + config.constantLabels, ), - "Time spent writing data file blocks by backends in this database, in milliseconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseStatsReset = prometheus.NewDesc(prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "stats_reset", - ), - "Time at which these statistics were last reset", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) - statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName( - namespace, - statDatabaseSubsystem, - "active_time_seconds_total", - ), - "Time spent executing SQL statements in this database, in seconds", - []string{"datid", "datname"}, - prometheus.Labels{}, - ) + }, nil +} +var ( statDatabaseQuery = ` SELECT datid @@ -236,7 +244,6 @@ var ( ,deadlocks ,blk_read_time ,blk_write_time - ,active_time ,stats_reset FROM pg_stat_database; ` @@ -254,7 +261,7 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance for rows.Next() { var datid, datname sql.NullString - var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime, activeTime sql.NullFloat64 + var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64 var statsReset 
sql.NullTime err := rows.Scan( @@ -276,7 +283,6 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance &deadlocks, &blkReadTime, &blkWriteTime, - &activeTime, &statsReset, ) if err != nil { @@ -355,10 +361,6 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_write_time") continue } - if !activeTime.Valid { - level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no active_time") - continue - } statsResetMetric := 0.0 if !statsReset.Valid { @@ -371,126 +373,119 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance labels := []string{datid.String, datname.String} ch <- prometheus.MustNewConstMetric( - statDatabaseNumbackends, + c.statDatabaseNumbackends, prometheus.GaugeValue, numBackends.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseXactCommit, + c.statDatabaseXactCommit, prometheus.CounterValue, xactCommit.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseXactRollback, + c.statDatabaseXactRollback, prometheus.CounterValue, xactRollback.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseBlksRead, + c.statDatabaseBlksRead, prometheus.CounterValue, blksRead.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseBlksHit, + c.statDatabaseBlksHit, prometheus.CounterValue, blksHit.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTupReturned, + c.statDatabaseTupReturned, prometheus.CounterValue, tupReturned.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTupFetched, + c.statDatabaseTupFetched, prometheus.CounterValue, tupFetched.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTupInserted, + c.statDatabaseTupInserted, prometheus.CounterValue, tupInserted.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTupUpdated, + c.statDatabaseTupUpdated, prometheus.CounterValue, tupUpdated.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTupDeleted, + c.statDatabaseTupDeleted, prometheus.CounterValue, tupDeleted.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseConflicts, + c.statDatabaseConflicts, prometheus.CounterValue, conflicts.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTempFiles, + c.statDatabaseTempFiles, prometheus.CounterValue, tempFiles.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseTempBytes, + c.statDatabaseTempBytes, prometheus.CounterValue, tempBytes.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseDeadlocks, + c.statDatabaseDeadlocks, prometheus.CounterValue, deadlocks.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseBlkReadTime, + c.statDatabaseBlkReadTime, prometheus.CounterValue, blkReadTime.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseBlkWriteTime, + c.statDatabaseBlkWriteTime, prometheus.CounterValue, blkWriteTime.Float64, labels..., ) ch <- prometheus.MustNewConstMetric( - statDatabaseActiveTime, - prometheus.CounterValue, - activeTime.Float64/1000.0, - labels..., - ) - - ch <- prometheus.MustNewConstMetric( - statDatabaseStatsReset, + c.statDatabaseStatsReset, prometheus.CounterValue, statsResetMetric, labels..., diff --git a/collector/pg_stat_database_test.go b/collector/pg_stat_database_test.go index 2d4c25cf4..61cfe5f18 100644 --- 
a/collector/pg_stat_database_test.go +++ b/collector/pg_stat_database_test.go @@ -52,7 +52,6 @@ func TestPGStatDatabaseCollector(t *testing.T) { "deadlocks", "blk_read_time", "blk_write_time", - "active_time", "stats_reset", } @@ -81,7 +80,6 @@ func TestPGStatDatabaseCollector(t *testing.T) { 925, 16, 823, - 33, srT) mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) @@ -89,9 +87,10 @@ func TestPGStatDatabaseCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatDatabaseCollector{ - log: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), - } + c, _ := NewPGStatDatabaseCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) @@ -115,7 +114,6 @@ func TestPGStatDatabaseCollector(t *testing.T) { {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.033}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, } @@ -162,7 +160,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { "deadlocks", "blk_read_time", "blk_write_time", - "active_time", "stats_reset", } @@ -186,7 +183,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { 925, 16, 823, - 32, srT). AddRow( "pid", @@ -207,16 +203,16 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { 925, 16, 823, - 32, srT) mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatDatabaseCollector{ - log: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), - } + c, _ := NewPGStatDatabaseCollector(collectorConfig{ + logger: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) @@ -240,7 +236,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) { {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.032}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, } @@ -282,7 +277,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { "deadlocks", "blk_read_time", "blk_write_time", - "active_time", "stats_reset", } @@ -311,7 +305,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { 925, 16, 823, - 14, srT). AddRow( nil, @@ -333,7 +326,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { nil, nil, nil, - nil, ). 
AddRow( "pid", @@ -354,16 +346,16 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { 926, 17, 824, - 15, srT) mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatDatabaseCollector{ - log: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), - } + c, _ := NewPGStatDatabaseCollector(collectorConfig{ + logger: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) @@ -387,7 +379,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.014}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 355}, @@ -406,7 +397,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) { {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 926}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 17}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 824}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.015}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842}, } @@ -449,7 +439,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { "deadlocks", "blk_read_time", "blk_write_time", - "active_time", "stats_reset", } @@ -473,7 +462,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { 925, 16, 823, - 7, nil) mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows) @@ -481,9 +469,10 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatDatabaseCollector{ - log: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), - } + c, _ := NewPGStatDatabaseCollector(collectorConfig{ + logger: log.With(log.NewNopLogger(), "collector", "pg_stat_database"), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatDatabaseCollector.Update: %s", err) @@ -507,7 +496,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) { {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823}, - {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.007}, {labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0}, } diff 
--git a/collector/pg_stat_statements.go b/collector/pg_stat_statements.go index c03e78b92..b024bab03 100644 --- a/collector/pg_stat_statements.go +++ b/collector/pg_stat_statements.go @@ -32,45 +32,51 @@ func init() { } type PGStatStatementsCollector struct { - log log.Logger + log log.Logger + statStatementsCallsTotal *prometheus.Desc + statStatementsSecondsTotal *prometheus.Desc + statStatementsRowsTotal *prometheus.Desc + statStatementsBlockReadSecondsTotal *prometheus.Desc + statStatementsBlockWriteSecondsTotal *prometheus.Desc } func NewPGStatStatementsCollector(config collectorConfig) (Collector, error) { - return &PGStatStatementsCollector{log: config.logger}, nil + return &PGStatStatementsCollector{ + log: config.logger, + statStatementsCallsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), + "Number of times executed", + []string{"user", "datname", "queryid"}, + config.constantLabels, + ), + statStatementsSecondsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), + "Total time spent in the statement, in seconds", + []string{"user", "datname", "queryid"}, + config.constantLabels, + ), + statStatementsRowsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), + "Total number of rows retrieved or affected by the statement", + []string{"user", "datname", "queryid"}, + config.constantLabels, + ), + statStatementsBlockReadSecondsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), + "Total time the statement spent reading blocks, in seconds", + []string{"user", "datname", "queryid"}, + config.constantLabels, + ), + statStatementsBlockWriteSecondsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), + "Total time the statement spent writing blocks, in seconds", + []string{"user", "datname", "queryid"}, + config.constantLabels, + ), + }, nil } var ( - statSTatementsCallsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "calls_total"), - "Number of times executed", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "seconds_total"), - "Total time spent in the statement, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsRowsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "rows_total"), - "Total number of rows retrieved or affected by the statement", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsBlockReadSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_read_seconds_total"), - "Total time the statement spent reading blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - statStatementsBlockWriteSecondsTotal = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statStatementsSubsystem, "block_write_seconds_total"), - "Total time the statement spent writing blocks, in seconds", - []string{"user", "datname", "queryid"}, - prometheus.Labels{}, - ) - pgStatStatementsQuery = `SELECT pg_get_userbyid(userid) as user, pg_database.datname, @@ -114,7 +120,7 @@ var ( LIMIT 100;` ) -func (PGStatStatementsCollector) Update(ctx context.Context, 
instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGStatStatementsCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { query := pgStatStatementsQuery if instance.version.GE(semver.MustParse("13.0.0")) { query = pgStatStatementsNewQuery @@ -154,7 +160,7 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance, callsTotalMetric = float64(callsTotal.Int64) } ch <- prometheus.MustNewConstMetric( - statSTatementsCallsTotal, + c.statStatementsCallsTotal, prometheus.CounterValue, callsTotalMetric, userLabel, datnameLabel, queryidLabel, @@ -165,7 +171,7 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance, secondsTotalMetric = secondsTotal.Float64 } ch <- prometheus.MustNewConstMetric( - statStatementsSecondsTotal, + c.statStatementsSecondsTotal, prometheus.CounterValue, secondsTotalMetric, userLabel, datnameLabel, queryidLabel, @@ -176,7 +182,7 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance, rowsTotalMetric = float64(rowsTotal.Int64) } ch <- prometheus.MustNewConstMetric( - statStatementsRowsTotal, + c.statStatementsRowsTotal, prometheus.CounterValue, rowsTotalMetric, userLabel, datnameLabel, queryidLabel, @@ -187,7 +193,7 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance, blockReadSecondsTotalMetric = blockReadSecondsTotal.Float64 } ch <- prometheus.MustNewConstMetric( - statStatementsBlockReadSecondsTotal, + c.statStatementsBlockReadSecondsTotal, prometheus.CounterValue, blockReadSecondsTotalMetric, userLabel, datnameLabel, queryidLabel, @@ -198,7 +204,7 @@ func (PGStatStatementsCollector) Update(ctx context.Context, instance *instance, blockWriteSecondsTotalMetric = blockWriteSecondsTotal.Float64 } ch <- prometheus.MustNewConstMetric( - statStatementsBlockWriteSecondsTotal, + c.statStatementsBlockWriteSecondsTotal, prometheus.CounterValue, blockWriteSecondsTotalMetric, userLabel, datnameLabel, queryidLabel, diff --git a/collector/pg_stat_statements_summary.go b/collector/pg_stat_statements_summary.go new file mode 100644 index 000000000..26ce8ff81 --- /dev/null +++ b/collector/pg_stat_statements_summary.go @@ -0,0 +1,121 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
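Before the new file below: pg_stat_statements_summary.go registers a collector that is disabled by default (defaultDisabled) and aggregates pg_stat_statements per database, summing calls and total_exec_time (reported in seconds) for statements above the 10th percentile of total_exec_time. The standalone sketch below is one way to preview what that aggregation returns before enabling the collector; it is not part of the patch, the lib/pq driver is assumed only because the exporter already depends on it, and the connection string is a placeholder.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed driver; any Postgres driver registered with database/sql works
)

// Same aggregation the new collector runs: per-database calls and execution
// time, restricted to statements above the 10th percentile of total_exec_time.
const summaryQuery = `SELECT
	pg_database.datname,
	SUM(pg_stat_statements.calls) AS calls_total,
	SUM(pg_stat_statements.total_exec_time) / 1000.0 AS seconds_total
FROM pg_stat_statements
JOIN pg_database ON pg_database.oid = pg_stat_statements.dbid
WHERE total_exec_time > (
	SELECT percentile_cont(0.1) WITHIN GROUP (ORDER BY total_exec_time)
	FROM pg_stat_statements
)
GROUP BY pg_database.datname;`

func main() {
	// Placeholder DSN; point it at an instance with pg_stat_statements installed.
	dsn := "postgres://user:pass@localhost:5432/postgres?sslmode=disable"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(summaryQuery)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var datname string
		var calls int64
		var seconds float64
		if err := rows.Scan(&datname, &calls, &seconds); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s calls=%d seconds=%.3f\n", datname, calls, seconds)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}

If the rows look reasonable, the collector exports the same numbers, assuming the package's usual pg namespace prefix, as pg_stat_statements_summary_calls_total and pg_stat_statements_summary_seconds_total, labelled by datname.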
+ +package collector + +import ( + "context" + "database/sql" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +const statStatementsSummarySubsystem = "stat_statements_summary" + +func init() { + registerCollector(statStatementsSummarySubsystem, defaultDisabled, NewPGStatStatementsSummaryCollector) +} + +type PGStatStatementsSummaryCollector struct { + log log.Logger + statStatementsSummaryCallsTotal *prometheus.Desc + statStatementsSummarySecondsTotal *prometheus.Desc +} + +func NewPGStatStatementsSummaryCollector(config collectorConfig) (Collector, error) { + return &PGStatStatementsSummaryCollector{ + log: config.logger, + statStatementsSummaryCallsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSummarySubsystem, "calls_total"), + "Number of times executed", + []string{"datname"}, + config.constantLabels, + ), + statStatementsSummarySecondsTotal: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statStatementsSummarySubsystem, "seconds_total"), + "Total time spent in the statement, in seconds", + []string{"datname"}, + config.constantLabels, + ), + }, nil +} + +var ( + pgStatStatementsSummaryQuery = `SELECT + pg_database.datname, + SUM(pg_stat_statements.calls) as calls_total, + SUM(pg_stat_statements.total_exec_time) / 1000.0 as seconds_total + FROM pg_stat_statements + JOIN pg_database + ON pg_database.oid = pg_stat_statements.dbid + WHERE + total_exec_time > ( + SELECT percentile_cont(0.1) + WITHIN GROUP (ORDER BY total_exec_time) + FROM pg_stat_statements + ) + GROUP BY pg_database.datname;` +) + +func (c *PGStatStatementsSummaryCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { + query := pgStatStatementsSummaryQuery + + db := instance.getDB() + rows, err := db.QueryContext(ctx, query) + + if err != nil { + return err + } + defer rows.Close() + for rows.Next() { + var datname sql.NullString + var callsTotal sql.NullInt64 + var secondsTotal sql.NullFloat64 + + if err := rows.Scan(&datname, &callsTotal, &secondsTotal); err != nil { + return err + } + + datnameLabel := "unknown" + if datname.Valid { + datnameLabel = datname.String + } + + callsTotalMetric := 0.0 + if callsTotal.Valid { + callsTotalMetric = float64(callsTotal.Int64) + } + ch <- prometheus.MustNewConstMetric( + c.statStatementsSummaryCallsTotal, + prometheus.CounterValue, + callsTotalMetric, + datnameLabel, + ) + + secondsTotalMetric := 0.0 + if secondsTotal.Valid { + secondsTotalMetric = secondsTotal.Float64 + } + ch <- prometheus.MustNewConstMetric( + c.statStatementsSummarySecondsTotal, + prometheus.CounterValue, + secondsTotalMetric, + datnameLabel, + ) + } + if err := rows.Err(); err != nil { + return err + } + return nil +} diff --git a/collector/pg_stat_statements_summary_test.go b/collector/pg_stat_statements_summary_test.go new file mode 100644 index 000000000..f1c90761a --- /dev/null +++ b/collector/pg_stat_statements_summary_test.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package collector + +import ( + "context" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/blang/semver/v4" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/smartystreets/goconvey/convey" +) + +func TestPGStateStatementsSummaryCollector(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("Error opening a stub db connection: %s", err) + } + defer db.Close() + + inst := &instance{db: db, version: semver.MustParse("13.3.7")} + + columns := []string{"datname", "calls_total", "seconds_total"} + rows := sqlmock.NewRows(columns). + AddRow("postgres", 5, 0.4) + mock.ExpectQuery(sanitizeQuery(pgStatStatementsSummaryQuery)).WillReturnRows(rows) + + ch := make(chan prometheus.Metric) + go func() { + defer close(ch) + c, _ := NewPGStatStatementsSummaryCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) + + if err := c.Update(context.Background(), inst, ch); err != nil { + t.Errorf("Error calling PGStatStatementsSummaryCollector.Update: %s", err) + } + }() + + expected := []MetricResult{ + {labels: labelMap{"datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 5}, + {labels: labelMap{"datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.4}, + } + + convey.Convey("Metrics comparison", t, func() { + for _, expect := range expected { + m := readMetric(<-ch) + convey.So(expect, convey.ShouldResemble, m) + } + }) + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled exceptions: %s", err) + } +} diff --git a/collector/pg_stat_statements_test.go b/collector/pg_stat_statements_test.go index 08aba34c2..f4fdd44c8 100644 --- a/collector/pg_stat_statements_test.go +++ b/collector/pg_stat_statements_test.go @@ -18,6 +18,7 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/blang/semver/v4" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -40,7 +41,10 @@ func TestPGStateStatementsCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatStatementsCollector{} + c, _ := NewPGStatStatementsCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) @@ -83,7 +87,10 @@ func TestPGStateStatementsCollectorNull(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatStatementsCollector{} + c, _ := NewPGStatStatementsCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) @@ -126,7 +133,10 @@ func TestPGStateStatementsCollectorNewPG(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatStatementsCollector{} + c, _ := NewPGStatStatementsCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatStatementsCollector.Update: %s", err) diff --git 
a/collector/pg_stat_user_tables.go b/collector/pg_stat_user_tables.go index af3822ca8..a9b110aa4 100644 --- a/collector/pg_stat_user_tables.go +++ b/collector/pg_stat_user_tables.go @@ -28,136 +28,156 @@ func init() { } type PGStatUserTablesCollector struct { - log log.Logger + log log.Logger + statUserTablesSeqScan *prometheus.Desc + statUserTablesSeqTupRead *prometheus.Desc + statUserTablesIdxScan *prometheus.Desc + statUserTablesIdxTupFetch *prometheus.Desc + statUserTablesNTupIns *prometheus.Desc + statUserTablesNTupUpd *prometheus.Desc + statUserTablesNTupDel *prometheus.Desc + statUserTablesNTupHotUpd *prometheus.Desc + statUserTablesNLiveTup *prometheus.Desc + statUserTablesNDeadTup *prometheus.Desc + statUserTablesNModSinceAnalyze *prometheus.Desc + statUserTablesLastVacuum *prometheus.Desc + statUserTablesLastAutovacuum *prometheus.Desc + statUserTablesLastAnalyze *prometheus.Desc + statUserTablesLastAutoanalyze *prometheus.Desc + statUserTablesVacuumCount *prometheus.Desc + statUserTablesAutovacuumCount *prometheus.Desc + statUserTablesAnalyzeCount *prometheus.Desc + statUserTablesAutoanalyzeCount *prometheus.Desc + statUserTablesTotalSize *prometheus.Desc } func NewPGStatUserTablesCollector(config collectorConfig) (Collector, error) { - return &PGStatUserTablesCollector{log: config.logger}, nil + return &PGStatUserTablesCollector{ + log: config.logger, + statUserTablesSeqScan: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), + "Number of sequential scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesSeqTupRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), + "Number of live rows fetched by sequential scans", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesIdxScan: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), + "Number of index scans initiated on this table", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesIdxTupFetch: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), + "Number of live rows fetched by index scans", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNTupIns: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), + "Number of rows inserted", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNTupUpd: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), + "Number of rows updated", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNTupDel: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), + "Number of rows deleted", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNTupHotUpd: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), + "Number of rows HOT updated (i.e., with no separate index update required)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNLiveTup: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), + "Estimated number of live rows", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + 
statUserTablesNDeadTup: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), + "Estimated number of dead rows", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesNModSinceAnalyze: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), + "Estimated number of rows changed since last analyze", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesLastVacuum: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), + "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesLastAutovacuum: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), + "Last time at which this table was vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesLastAnalyze: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), + "Last time at which this table was manually analyzed", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesLastAutoanalyze: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), + "Last time at which this table was analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesVacuumCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), + "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesAutovacuumCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), + "Number of times this table has been vacuumed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesAnalyzeCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), + "Number of times this table has been manually analyzed", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesAutoanalyzeCount: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), + "Number of times this table has been analyzed by the autovacuum daemon", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statUserTablesTotalSize: prometheus.NewDesc( + prometheus.BuildFQName(namespace, userTableSubsystem, "size_bytes"), + "Total disk space used by this table, in bytes, including all indexes and TOAST data", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + }, nil } -var ( - statUserTablesSeqScan = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_scan"), - "Number of sequential scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesSeqTupRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "seq_tup_read"), - "Number of live rows fetched by sequential scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesIdxScan = 
prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_scan"), - "Number of index scans initiated on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesIdxTupFetch = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "idx_tup_fetch"), - "Number of live rows fetched by index scans", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupIns = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_ins"), - "Number of rows inserted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_upd"), - "Number of rows updated", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupDel = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_del"), - "Number of rows deleted", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNTupHotUpd = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_tup_hot_upd"), - "Number of rows HOT updated (i.e., with no separate index update required)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNLiveTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_live_tup"), - "Estimated number of live rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNDeadTup = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_dead_tup"), - "Estimated number of dead rows", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesNModSinceAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "n_mod_since_analyze"), - "Estimated number of rows changed since last analyze", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastVacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_vacuum"), - "Last time at which this table was manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAutovacuum = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autovacuum"), - "Last time at which this table was vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAnalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_analyze"), - "Last time at which this table was manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesLastAutoanalyze = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "last_autoanalyze"), - "Last time at which this table was analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesVacuumCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "vacuum_count"), - "Number of times this table has been manually vacuumed (not counting VACUUM FULL)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAutovacuumCount = prometheus.NewDesc( - 
prometheus.BuildFQName(namespace, userTableSubsystem, "autovacuum_count"), - "Number of times this table has been vacuumed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAnalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "analyze_count"), - "Number of times this table has been manually analyzed", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesAutoanalyzeCount = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "autoanalyze_count"), - "Number of times this table has been analyzed by the autovacuum daemon", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statUserTablesTotalSize = prometheus.NewDesc( - prometheus.BuildFQName(namespace, userTableSubsystem, "size_bytes"), - "Total disk space used by this table, in bytes, including all indexes and TOAST data", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - - statUserTablesQuery = `SELECT +var statUserTablesQuery = `SELECT current_database() datname, schemaname, relname, @@ -183,7 +203,6 @@ var ( pg_total_relation_size(relid) as total_size FROM pg_stat_user_tables` -) func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() @@ -223,7 +242,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan seqScanMetric = float64(seqScan.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesSeqScan, + c.statUserTablesSeqScan, prometheus.CounterValue, seqScanMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -234,7 +253,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan seqTupReadMetric = float64(seqTupRead.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesSeqTupRead, + c.statUserTablesSeqTupRead, prometheus.CounterValue, seqTupReadMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -245,7 +264,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan idxScanMetric = float64(idxScan.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesIdxScan, + c.statUserTablesIdxScan, prometheus.CounterValue, idxScanMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -256,7 +275,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan idxTupFetchMetric = float64(idxTupFetch.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesIdxTupFetch, + c.statUserTablesIdxTupFetch, prometheus.CounterValue, idxTupFetchMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -267,7 +286,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nTupInsMetric = float64(nTupIns.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNTupIns, + c.statUserTablesNTupIns, prometheus.CounterValue, nTupInsMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -278,7 +297,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nTupUpdMetric = float64(nTupUpd.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNTupUpd, + c.statUserTablesNTupUpd, prometheus.CounterValue, nTupUpdMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -289,7 +308,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nTupDelMetric = float64(nTupDel.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNTupDel, + 
c.statUserTablesNTupDel, prometheus.CounterValue, nTupDelMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -300,7 +319,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nTupHotUpdMetric = float64(nTupHotUpd.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNTupHotUpd, + c.statUserTablesNTupHotUpd, prometheus.CounterValue, nTupHotUpdMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -311,7 +330,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nLiveTupMetric = float64(nLiveTup.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNLiveTup, + c.statUserTablesNLiveTup, prometheus.GaugeValue, nLiveTupMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -322,7 +341,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nDeadTupMetric = float64(nDeadTup.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNDeadTup, + c.statUserTablesNDeadTup, prometheus.GaugeValue, nDeadTupMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -333,7 +352,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan nModSinceAnalyzeMetric = float64(nModSinceAnalyze.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesNModSinceAnalyze, + c.statUserTablesNModSinceAnalyze, prometheus.GaugeValue, nModSinceAnalyzeMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -344,7 +363,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan lastVacuumMetric = float64(lastVacuum.Time.Unix()) } ch <- prometheus.MustNewConstMetric( - statUserTablesLastVacuum, + c.statUserTablesLastVacuum, prometheus.GaugeValue, lastVacuumMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -355,7 +374,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan lastAutovacuumMetric = float64(lastAutovacuum.Time.Unix()) } ch <- prometheus.MustNewConstMetric( - statUserTablesLastAutovacuum, + c.statUserTablesLastAutovacuum, prometheus.GaugeValue, lastAutovacuumMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -366,7 +385,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan lastAnalyzeMetric = float64(lastAnalyze.Time.Unix()) } ch <- prometheus.MustNewConstMetric( - statUserTablesLastAnalyze, + c.statUserTablesLastAnalyze, prometheus.GaugeValue, lastAnalyzeMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -377,7 +396,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan lastAutoanalyzeMetric = float64(lastAutoanalyze.Time.Unix()) } ch <- prometheus.MustNewConstMetric( - statUserTablesLastAutoanalyze, + c.statUserTablesLastAutoanalyze, prometheus.GaugeValue, lastAutoanalyzeMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -388,7 +407,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan vacuumCountMetric = float64(vacuumCount.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesVacuumCount, + c.statUserTablesVacuumCount, prometheus.CounterValue, vacuumCountMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -399,7 +418,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan autovacuumCountMetric = float64(autovacuumCount.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesAutovacuumCount, + c.statUserTablesAutovacuumCount, prometheus.CounterValue, autovacuumCountMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -410,7 +429,7 @@ func (c 
*PGStatUserTablesCollector) Update(ctx context.Context, instance *instan analyzeCountMetric = float64(analyzeCount.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesAnalyzeCount, + c.statUserTablesAnalyzeCount, prometheus.CounterValue, analyzeCountMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -421,7 +440,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan autoanalyzeCountMetric = float64(autoanalyzeCount.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesAutoanalyzeCount, + c.statUserTablesAutoanalyzeCount, prometheus.CounterValue, autoanalyzeCountMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -432,7 +451,7 @@ func (c *PGStatUserTablesCollector) Update(ctx context.Context, instance *instan totalSizeMetric = float64(totalSize.Int64) } ch <- prometheus.MustNewConstMetric( - statUserTablesTotalSize, + c.statUserTablesTotalSize, prometheus.GaugeValue, totalSizeMetric, datnameLabel, schemanameLabel, relnameLabel, diff --git a/collector/pg_stat_user_tables_test.go b/collector/pg_stat_user_tables_test.go index 5e82335c3..58c552d19 100644 --- a/collector/pg_stat_user_tables_test.go +++ b/collector/pg_stat_user_tables_test.go @@ -18,6 +18,7 @@ import ( "time" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -101,7 +102,10 @@ func TestPGStatUserTablesCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatUserTablesCollector{} + c, _ := NewPGStatUserTablesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err) @@ -202,7 +206,10 @@ func TestPGStatUserTablesCollectorNullValues(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatUserTablesCollector{} + c, _ := NewPGStatUserTablesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatUserTablesCollector.Update: %s", err) diff --git a/collector/pg_stat_walreceiver.go b/collector/pg_stat_walreceiver.go index db533ab55..7f56c0197 100644 --- a/collector/pg_stat_walreceiver.go +++ b/collector/pg_stat_walreceiver.go @@ -27,72 +27,82 @@ func init() { } type PGStatWalReceiverCollector struct { - log log.Logger + log log.Logger + statWalReceiverReceiveStartLsn *prometheus.Desc + statWalReceiverReceiveStartTli *prometheus.Desc + statWalReceiverFlushedLSN *prometheus.Desc + statWalReceiverReceivedTli *prometheus.Desc + statWalReceiverLastMsgSendTime *prometheus.Desc + statWalReceiverLastMsgReceiptTime *prometheus.Desc + statWalReceiverLatestEndLsn *prometheus.Desc + statWalReceiverLatestEndTime *prometheus.Desc + statWalReceiverUpstreamNode *prometheus.Desc } const statWalReceiverSubsystem = "stat_wal_receiver" func NewPGStatWalReceiverCollector(config collectorConfig) (Collector, error) { - return &PGStatWalReceiverCollector{log: config.logger}, nil + return &PGStatWalReceiverCollector{ + log: config.logger, + statWalReceiverReceiveStartLsn: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_lsn"), + "First write-ahead log location used when WAL receiver is started represented as a decimal", + 
labelCats, + config.constantLabels, + ), + statWalReceiverReceiveStartTli: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_tli"), + "First timeline number used when WAL receiver is started", + labelCats, + config.constantLabels, + ), + statWalReceiverFlushedLSN: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "flushed_lsn"), + "Last write-ahead log location already received and flushed to disk, the initial value of this field being the first log location used when WAL receiver is started represented as a decimal", + labelCats, + config.constantLabels, + ), + statWalReceiverReceivedTli: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "received_tli"), + "Timeline number of last write-ahead log location received and flushed to disk", + labelCats, + config.constantLabels, + ), + statWalReceiverLastMsgSendTime: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_send_time"), + "Send time of last message received from origin WAL sender", + labelCats, + config.constantLabels, + ), + statWalReceiverLastMsgReceiptTime: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_receipt_time"), + "Receipt time of last message received from origin WAL sender", + labelCats, + config.constantLabels, + ), + statWalReceiverLatestEndLsn: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_lsn"), + "Last write-ahead log location reported to origin WAL sender as integer", + labelCats, + config.constantLabels, + ), + statWalReceiverLatestEndTime: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_time"), + "Time of last write-ahead log location reported to origin WAL sender", + labelCats, + config.constantLabels, + ), + statWalReceiverUpstreamNode: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "upstream_node"), + "Node ID of the upstream node", + labelCats, + config.constantLabels, + ), + }, nil } var ( - labelCats = []string{"upstream_host", "slot_name", "status"} - statWalReceiverReceiveStartLsn = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_lsn"), - "First write-ahead log location used when WAL receiver is started represented as a decimal", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverReceiveStartTli = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "receive_start_tli"), - "First timeline number used when WAL receiver is started", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverFlushedLSN = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "flushed_lsn"), - "Last write-ahead log location already received and flushed to disk, the initial value of this field being the first log location used when WAL receiver is started represented as a decimal", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverReceivedTli = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "received_tli"), - "Timeline number of last write-ahead log location received and flushed to disk", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLastMsgSendTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_send_time"), - "Send time of last message received from origin WAL sender", - labelCats, - prometheus.Labels{}, - 
) - statWalReceiverLastMsgReceiptTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "last_msg_receipt_time"), - "Send time of last message received from origin WAL sender", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLatestEndLsn = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_lsn"), - "Last write-ahead log location reported to origin WAL sender as integer", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverLatestEndTime = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "latest_end_time"), - "Time of last write-ahead log location reported to origin WAL sender", - labelCats, - prometheus.Labels{}, - ) - statWalReceiverUpstreamNode = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statWalReceiverSubsystem, "upstream_node"), - "Node ID of the upstream node", - labelCats, - prometheus.Labels{}, - ) - + labelCats = []string{"upstream_host", "slot_name", "status"} pgStatWalColumnQuery = ` SELECT column_name @@ -209,57 +219,57 @@ func (c *PGStatWalReceiverCollector) Update(ctx context.Context, instance *insta continue } ch <- prometheus.MustNewConstMetric( - statWalReceiverReceiveStartLsn, + c.statWalReceiverReceiveStartLsn, prometheus.CounterValue, float64(receiveStartLsn.Int64), labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverReceiveStartTli, + c.statWalReceiverReceiveStartTli, prometheus.GaugeValue, float64(receiveStartTli.Int64), labels...) if hasFlushedLSN { ch <- prometheus.MustNewConstMetric( - statWalReceiverFlushedLSN, + c.statWalReceiverFlushedLSN, prometheus.CounterValue, float64(flushedLsn.Int64), labels...) } ch <- prometheus.MustNewConstMetric( - statWalReceiverReceivedTli, + c.statWalReceiverReceivedTli, prometheus.GaugeValue, float64(receivedTli.Int64), labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverLastMsgSendTime, + c.statWalReceiverLastMsgSendTime, prometheus.CounterValue, float64(lastMsgSendTime.Float64), labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverLastMsgReceiptTime, + c.statWalReceiverLastMsgReceiptTime, prometheus.CounterValue, float64(lastMsgReceiptTime.Float64), labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverLatestEndLsn, + c.statWalReceiverLatestEndLsn, prometheus.CounterValue, float64(latestEndLsn.Int64), labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverLatestEndTime, + c.statWalReceiverLatestEndTime, prometheus.CounterValue, latestEndTime.Float64, labels...) ch <- prometheus.MustNewConstMetric( - statWalReceiverUpstreamNode, + c.statWalReceiverUpstreamNode, prometheus.GaugeValue, float64(upstreamNode.Int64), labels...) 
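The change repeated across the collector files in this diff follows a single pattern: the package-level prometheus.Desc variables are removed, each descriptor becomes a field on the collector struct and is built inside the New…Collector constructor so that config.constantLabels reaches every metric the collector emits, and Update switches to a pointer receiver that reads the descriptors from the struct. A minimal sketch of that shape, assuming the package's existing Collector interface, collectorConfig, namespace constant and instance type as used throughout this diff (the collector, subsystem and metric names below are illustrative, not taken from the patch):

// Illustrative sketch only, not part of the patch. exampleCollector, the
// "example" subsystem and rows_total are made-up names; Collector,
// collectorConfig, namespace and instance are the package's existing types.
package collector

import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
)

type exampleCollector struct {
	log       log.Logger
	rowsTotal *prometheus.Desc
}

func newExampleCollector(config collectorConfig) (Collector, error) {
	return &exampleCollector{
		log: config.logger,
		// Descriptors are built per collector instance so the exporter's
		// constant labels are attached to every metric it emits.
		rowsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "example", "rows_total"),
			"Illustrative help text",
			[]string{"datname"},
			config.constantLabels,
		),
	}, nil
}

func (c *exampleCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error {
	// Emit against the struct-held descriptor instead of a package-level var.
	ch <- prometheus.MustNewConstMetric(c.rowsTotal, prometheus.CounterValue, 1, "postgres")
	return nil
}

The matching *_test.go changes are the other half of the same pattern: tests now obtain collectors through the New…Collector constructors with log.NewNopLogger() and an empty prometheus.Labels{}, instead of instantiating bare structs such as PGStatWalReceiverCollector{}.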
diff --git a/collector/pg_stat_walreceiver_test.go b/collector/pg_stat_walreceiver_test.go index c81c9ecae..988201615 100644 --- a/collector/pg_stat_walreceiver_test.go +++ b/collector/pg_stat_walreceiver_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -64,13 +65,13 @@ func TestPGStatWalReceiverCollectorWithFlushedLSN(t *testing.T) { "foo", "bar", "stopping", - int64(1200668684563608), + 1200668684563608, 1687321285, - int64(1200668684563609), + 1200668684563609, 1687321280, 1687321275, 1687321276, - int64(1200668684563610), + 1200668684563610, 1687321277, 5, ) @@ -80,7 +81,10 @@ func TestPGStatWalReceiverCollectorWithFlushedLSN(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatWalReceiverCollector{} + c, _ := NewPGStatWalReceiverCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PgStatWalReceiverCollector.Update: %s", err) @@ -143,12 +147,12 @@ func TestPGStatWalReceiverCollectorWithNoFlushedLSN(t *testing.T) { "foo", "bar", "starting", - int64(1200668684563608), + 1200668684563608, 1687321285, 1687321280, 1687321275, 1687321276, - int64(1200668684563610), + 1200668684563610, 1687321277, 5, ) @@ -157,7 +161,10 @@ func TestPGStatWalReceiverCollectorWithNoFlushedLSN(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatWalReceiverCollector{} + c, _ := NewPGStatWalReceiverCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PgStatWalReceiverCollector.Update: %s", err) diff --git a/collector/pg_statio_user_indexes.go b/collector/pg_statio_user_indexes.go index b5516338d..1dfc864a9 100644 --- a/collector/pg_statio_user_indexes.go +++ b/collector/pg_statio_user_indexes.go @@ -25,30 +25,32 @@ func init() { } type PGStatioUserIndexesCollector struct { - log log.Logger + log log.Logger + statioUserIndexesIdxBlksRead *prometheus.Desc + statioUserIndexesIdxBlksHit *prometheus.Desc } const statioUserIndexesSubsystem = "statio_user_indexes" func NewPGStatioUserIndexesCollector(config collectorConfig) (Collector, error) { - return &PGStatioUserIndexesCollector{log: config.logger}, nil + return &PGStatioUserIndexesCollector{ + log: config.logger, + statioUserIndexesIdxBlksRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_read_total"), + "Number of disk blocks read from this index", + []string{"schemaname", "relname", "indexrelname"}, + config.constantLabels, + ), + statioUserIndexesIdxBlksHit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_hit_total"), + "Number of buffer hits in this index", + []string{"schemaname", "relname", "indexrelname"}, + config.constantLabels, + ), + }, nil } -var ( - statioUserIndexesIdxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserIndexesSubsystem, "idx_blks_read_total"), - "Number of disk blocks read from this index", - []string{"schemaname", "relname", "indexrelname"}, - prometheus.Labels{}, - ) - statioUserIndexesIdxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, 
statioUserIndexesSubsystem, "idx_blks_hit_total"), - "Number of buffer hits in this index", - []string{"schemaname", "relname", "indexrelname"}, - prometheus.Labels{}, - ) - - statioUserIndexesQuery = ` +var statioUserIndexesQuery = ` SELECT schemaname, relname, @@ -57,8 +59,6 @@ var ( idx_blks_hit FROM pg_statio_user_indexes ` -) - func (c *PGStatioUserIndexesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() rows, err := db.QueryContext(ctx, @@ -94,7 +94,7 @@ func (c *PGStatioUserIndexesCollector) Update(ctx context.Context, instance *ins idxBlksReadMetric = idxBlksRead.Float64 } ch <- prometheus.MustNewConstMetric( - statioUserIndexesIdxBlksRead, + c.statioUserIndexesIdxBlksRead, prometheus.CounterValue, idxBlksReadMetric, labels..., @@ -105,7 +105,7 @@ func (c *PGStatioUserIndexesCollector) Update(ctx context.Context, instance *ins idxBlksHitMetric = idxBlksHit.Float64 } ch <- prometheus.MustNewConstMetric( - statioUserIndexesIdxBlksHit, + c.statioUserIndexesIdxBlksHit, prometheus.CounterValue, idxBlksHitMetric, labels..., diff --git a/collector/pg_statio_user_indexes_test.go b/collector/pg_statio_user_indexes_test.go index 174012162..2e16fd2ae 100644 --- a/collector/pg_statio_user_indexes_test.go +++ b/collector/pg_statio_user_indexes_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -44,7 +45,10 @@ func TestPgStatioUserIndexesCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatioUserIndexesCollector{} + c, _ := NewPGStatioUserIndexesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) @@ -87,7 +91,10 @@ func TestPgStatioUserIndexesCollectorNull(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatioUserIndexesCollector{} + c, _ := NewPGStatioUserIndexesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatioUserIndexesCollector.Update: %s", err) diff --git a/collector/pg_statio_user_tables.go b/collector/pg_statio_user_tables.go index 4315fda0a..3dea0a3c3 100644 --- a/collector/pg_statio_user_tables.go +++ b/collector/pg_statio_user_tables.go @@ -28,64 +28,72 @@ func init() { } type PGStatIOUserTablesCollector struct { - log log.Logger + log log.Logger + statioUserTablesHeapBlksRead *prometheus.Desc + statioUserTablesHeapBlksHit *prometheus.Desc + statioUserTablesIdxBlksRead *prometheus.Desc + statioUserTablesIdxBlksHit *prometheus.Desc + statioUserTablesToastBlksRead *prometheus.Desc + statioUserTablesToastBlksHit *prometheus.Desc + statioUserTablesTidxBlksRead *prometheus.Desc + statioUserTablesTidxBlksHit *prometheus.Desc } func NewPGStatIOUserTablesCollector(config collectorConfig) (Collector, error) { - return &PGStatIOUserTablesCollector{log: config.logger}, nil + return &PGStatIOUserTablesCollector{ + log: config.logger, + statioUserTablesHeapBlksRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), + "Number of disk blocks read from this table", + 
[]string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesHeapBlksHit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), + "Number of buffer hits in this table", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesIdxBlksRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), + "Number of disk blocks read from all indexes on this table", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesIdxBlksHit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), + "Number of buffer hits in all indexes on this table", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesToastBlksRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), + "Number of disk blocks read from this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesToastBlksHit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), + "Number of buffer hits in this table's TOAST table (if any)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesTidxBlksRead: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), + "Number of disk blocks read from this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + statioUserTablesTidxBlksHit: prometheus.NewDesc( + prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), + "Number of buffer hits in this table's TOAST table indexes (if any)", + []string{"datname", "schemaname", "relname"}, + config.constantLabels, + ), + }, nil } -var ( - statioUserTablesHeapBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_read"), - "Number of disk blocks read from this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesHeapBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "heap_blocks_hit"), - "Number of buffer hits in this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesIdxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_read"), - "Number of disk blocks read from all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesIdxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "idx_blocks_hit"), - "Number of buffer hits in all indexes on this table", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesToastBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_read"), - "Number of disk blocks read from this table's TOAST table (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesToastBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "toast_blocks_hit"), - "Number of buffer hits in this table's TOAST table (if any)", - 
[]string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesTidxBlksRead = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_read"), - "Number of disk blocks read from this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - statioUserTablesTidxBlksHit = prometheus.NewDesc( - prometheus.BuildFQName(namespace, statioUserTableSubsystem, "tidx_blocks_hit"), - "Number of buffer hits in this table's TOAST table indexes (if any)", - []string{"datname", "schemaname", "relname"}, - prometheus.Labels{}, - ) - - statioUserTablesQuery = `SELECT +var statioUserTablesQuery = `SELECT current_database() datname, schemaname, relname, @@ -98,9 +106,8 @@ var ( tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables` -) -func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() rows, err := db.QueryContext(ctx, statioUserTablesQuery) @@ -135,7 +142,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc heapBlksReadMetric = float64(heapBlksRead.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesHeapBlksRead, + c.statioUserTablesHeapBlksRead, prometheus.CounterValue, heapBlksReadMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -146,7 +153,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc heapBlksHitMetric = float64(heapBlksHit.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesHeapBlksHit, + c.statioUserTablesHeapBlksHit, prometheus.CounterValue, heapBlksHitMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -157,7 +164,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc idxBlksReadMetric = float64(idxBlksRead.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesIdxBlksRead, + c.statioUserTablesIdxBlksRead, prometheus.CounterValue, idxBlksReadMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -168,7 +175,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc idxBlksHitMetric = float64(idxBlksHit.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesIdxBlksHit, + c.statioUserTablesIdxBlksHit, prometheus.CounterValue, idxBlksHitMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -179,7 +186,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc toastBlksReadMetric = float64(toastBlksRead.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesToastBlksRead, + c.statioUserTablesToastBlksRead, prometheus.CounterValue, toastBlksReadMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -190,7 +197,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc toastBlksHitMetric = float64(toastBlksHit.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesToastBlksHit, + c.statioUserTablesToastBlksHit, prometheus.CounterValue, toastBlksHitMetric, datnameLabel, schemanameLabel, relnameLabel, @@ -201,7 +208,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc tidxBlksReadMetric = float64(tidxBlksRead.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesTidxBlksRead, + c.statioUserTablesTidxBlksRead, prometheus.CounterValue, tidxBlksReadMetric, datnameLabel, 
schemanameLabel, relnameLabel, @@ -212,7 +219,7 @@ func (PGStatIOUserTablesCollector) Update(ctx context.Context, instance *instanc tidxBlksHitMetric = float64(tidxBlksHit.Int64) } ch <- prometheus.MustNewConstMetric( - statioUserTablesTidxBlksHit, + c.statioUserTablesTidxBlksHit, prometheus.CounterValue, tidxBlksHitMetric, datnameLabel, schemanameLabel, relnameLabel, diff --git a/collector/pg_statio_user_tables_test.go b/collector/pg_statio_user_tables_test.go index c7304a38c..35f1f4855 100644 --- a/collector/pg_statio_user_tables_test.go +++ b/collector/pg_statio_user_tables_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -60,7 +61,10 @@ func TestPGStatIOUserTablesCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatIOUserTablesCollector{} + c, _ := NewPGStatIOUserTablesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err) @@ -127,7 +131,10 @@ func TestPGStatIOUserTablesCollectorNullValues(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGStatIOUserTablesCollector{} + c, _ := NewPGStatIOUserTablesCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGStatIOUserTablesCollector.Update: %s", err) diff --git a/collector/pg_wal.go b/collector/pg_wal.go index afa8fcef6..01528dda3 100644 --- a/collector/pg_wal.go +++ b/collector/pg_wal.go @@ -26,41 +26,43 @@ func init() { } type PGWALCollector struct { + pgWALSegments *prometheus.Desc + pgWALSize *prometheus.Desc } func NewPGWALCollector(config collectorConfig) (Collector, error) { - return &PGWALCollector{}, nil -} - -var ( - pgWALSegments = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - walSubsystem, - "segments", + return &PGWALCollector{ + pgWALSegments: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + walSubsystem, + "segments", + ), + "Number of WAL segments", + []string{}, + config.constantLabels, ), - "Number of WAL segments", - []string{}, nil, - ) - pgWALSize = prometheus.NewDesc( - prometheus.BuildFQName( - namespace, - walSubsystem, - "size_bytes", + pgWALSize: prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + walSubsystem, + "size_bytes", + ), + "Total size of WAL segments", + []string{}, + config.constantLabels, ), - "Total size of WAL segments", - []string{}, nil, - ) + }, nil +} - pgWALQuery = ` +var pgWALQuery = ` SELECT COUNT(*) AS segments, SUM(size) AS size FROM pg_ls_waldir() WHERE name ~ '^[0-9A-F]{24}$'` -) -func (c PGWALCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGWALCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() row := db.QueryRowContext(ctx, pgWALQuery, @@ -73,11 +75,11 @@ func (c PGWALCollector) Update(ctx context.Context, instance *instance, ch chan< return err } ch <- prometheus.MustNewConstMetric( - pgWALSegments, + c.pgWALSegments, prometheus.GaugeValue, float64(segments), ) ch <- prometheus.MustNewConstMetric( - pgWALSize, + c.pgWALSize, 
prometheus.GaugeValue, float64(size), ) return nil diff --git a/collector/pg_wal_test.go b/collector/pg_wal_test.go index 745105a13..992835939 100644 --- a/collector/pg_wal_test.go +++ b/collector/pg_wal_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -39,7 +40,10 @@ func TestPgWALCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGWALCollector{} + c, _ := NewPGWALCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGWALCollector.Update: %s", err) diff --git a/collector/pg_xlog_location.go b/collector/pg_xlog_location.go index 237204f7d..0a02acf3b 100644 --- a/collector/pg_xlog_location.go +++ b/collector/pg_xlog_location.go @@ -30,29 +30,29 @@ func init() { type PGXlogLocationCollector struct { log log.Logger + xlogLocationBytes *prometheus.Desc } func NewPGXlogLocationCollector(config collectorConfig) (Collector, error) { - return &PGXlogLocationCollector{log: config.logger}, nil + return &PGXlogLocationCollector{ + log: config.logger, + xlogLocationBytes: prometheus.NewDesc( + prometheus.BuildFQName(namespace, xlogLocationSubsystem, "bytes"), + "Postgres LSN (log sequence number) being generated on primary or replayed on replica (truncated to low 52 bits)", + []string{}, + config.constantLabels, + ), + }, nil } -var ( - xlogLocationBytes = prometheus.NewDesc( - prometheus.BuildFQName(namespace, xlogLocationSubsystem, "bytes"), - "Postgres LSN (log sequence number) being generated on primary or replayed on replica (truncated to low 52 bits)", - []string{}, - prometheus.Labels{}, - ) - - xlogLocationQuery = ` +var xlogLocationQuery = ` SELECT CASE WHEN pg_is_in_recovery() THEN (pg_last_xlog_replay_location() - '0/0') % (2^52)::bigint ELSE (pg_current_xlog_location() - '0/0') % (2^52)::bigint END AS bytes ` -) -func (c PGXlogLocationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { +func (c *PGXlogLocationCollector) Update(ctx context.Context, instance *instance, ch chan<- prometheus.Metric) error { db := instance.getDB() // xlog was renmaed to WAL in PostgreSQL 10 @@ -79,7 +79,7 @@ func (c PGXlogLocationCollector) Update(ctx context.Context, instance *instance, } ch <- prometheus.MustNewConstMetric( - xlogLocationBytes, + c.xlogLocationBytes, prometheus.GaugeValue, bytes, ) diff --git a/collector/pg_xlog_location_test.go b/collector/pg_xlog_location_test.go index 561a7df94..04dba4384 100644 --- a/collector/pg_xlog_location_test.go +++ b/collector/pg_xlog_location_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/smartystreets/goconvey/convey" @@ -40,7 +41,10 @@ func TestPGXlogLocationCollector(t *testing.T) { ch := make(chan prometheus.Metric) go func() { defer close(ch) - c := PGXlogLocationCollector{} + c, _ := NewPGXlogLocationCollector(collectorConfig{ + logger: log.NewNopLogger(), + constantLabels: prometheus.Labels{}, + }) if err := c.Update(context.Background(), inst, ch); err != nil { t.Errorf("Error calling PGXlogLocationCollector.Update: %s", err) diff --git a/collector/probe.go b/collector/probe.go 
deleted file mode 100644 index 4c0f0419b..000000000 --- a/collector/probe.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collector - -import ( - "context" - "sync" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus-community/postgres_exporter/config" - "github.com/prometheus/client_golang/prometheus" -) - -type ProbeCollector struct { - registry *prometheus.Registry - collectors map[string]Collector - logger log.Logger - instance *instance -} - -func NewProbeCollector(logger log.Logger, excludeDatabases []string, registry *prometheus.Registry, dsn config.DSN) (*ProbeCollector, error) { - collectors := make(map[string]Collector) - initiatedCollectorsMtx.Lock() - defer initiatedCollectorsMtx.Unlock() - for key, enabled := range collectorState { - // TODO: Handle filters - // if !*enabled || (len(f) > 0 && !f[key]) { - // continue - // } - if !*enabled { - continue - } - if collector, ok := initiatedCollectors[key]; ok { - collectors[key] = collector - } else { - collector, err := factories[key]( - collectorConfig{ - logger: log.With(logger, "collector", key), - excludeDatabases: excludeDatabases, - }) - if err != nil { - return nil, err - } - collectors[key] = collector - initiatedCollectors[key] = collector - } - } - - instance, err := newInstance(dsn.GetConnectionString()) - if err != nil { - return nil, err - } - - return &ProbeCollector{ - registry: registry, - collectors: collectors, - logger: logger, - instance: instance, - }, nil -} - -func (pc *ProbeCollector) Describe(ch chan<- *prometheus.Desc) { -} - -func (pc *ProbeCollector) Collect(ch chan<- prometheus.Metric) { - // Set up the database connection for the collector. 
- err := pc.instance.setup() - if err != nil { - level.Error(pc.logger).Log("msg", "Error opening connection to database", "err", err) - return - } - defer pc.instance.Close() - - wg := sync.WaitGroup{} - wg.Add(len(pc.collectors)) - for name, c := range pc.collectors { - go func(name string, c Collector) { - execute(context.TODO(), name, c, pc.instance, ch, pc.logger) - wg.Done() - }(name, c) - } - wg.Wait() -} - -func (pc *ProbeCollector) Close() error { - return pc.instance.Close() -} diff --git a/gh-assets-clone.sh b/gh-assets-clone.sh index 506485e06..7e1915989 100755 --- a/gh-assets-clone.sh +++ b/gh-assets-clone.sh @@ -14,5 +14,5 @@ ASSETS_DIR=".assets-branch" # Clone the assets branch with the correct credentials git clone --single-branch -b "$GIT_ASSETS_BRANCH" \ - "https://$%7BGIT_API_KEY%7D@github.com/$%7BTRAVIS_REPO_SLUG%7D.git" "$ASSETS_DIR" || exit 1 + "https://github.com/$%7BTRAVIS_REPO_SLUG%7D.git" "$ASSETS_DIR" || exit 1 diff --git a/go.mod b/go.mod index e937de61d..3ea2cd1bc 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,12 @@ -module github.com/prometheus-community/postgres_exporter +module github.com/form3tech-oss/postgres_exporter -go 1.19 +go 1.22 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/blang/semver/v4 v4.0.0 + github.com/form3tech-oss/go-vault-client/v4 v4.3.0 github.com/go-kit/log v0.2.1 github.com/lib/pq v1.10.9 github.com/prometheus/client_golang v1.19.0 @@ -20,20 +21,45 @@ require ( require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/aws/aws-sdk-go v1.45.24 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fatih/color v1.14.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hashicorp/vault/api v1.10.0 // indirect + github.com/hashicorp/vault/sdk v0.1.14-0.20200817232951-d7307fcdfed7 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/ryanuber/go-glob 
v1.0.0 // indirect github.com/smarty/assertions v1.15.0 // indirect + github.com/stretchr/testify v1.8.3 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/net v0.23.0 // indirect @@ -41,6 +67,7 @@ require ( golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/go.sum b/go.sum index 687657eaf..71507cba7 100644 --- a/go.sum +++ b/go.sum @@ -1,38 +1,212 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo= +github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/form3tech-oss/go-vault-client/v4 v4.3.0 h1:5P8Hz3MPBl9CyJ1TmpGg9awCWPOx7fgrZrBXek/Kvgk= +github.com/form3tech-oss/go-vault-client/v4 v4.3.0/go.mod h1:0OhP57jdnU6fNgYzkqVIM6UapvteLKlKfCoJstmaj94= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod 
h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= +github.com/hashicorp/vault/api v1.0.5-0.20200817232951-d7307fcdfed7/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= +github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.1.14-0.20200817232951-d7307fcdfed7 h1:mEp9Iz4tIvgyr67QqSQPgSF6ZVhhV7AET/G8mHb1jV8= +github.com/hashicorp/vault/sdk v0.1.14-0.20200817232951-d7307fcdfed7/go.mod h1:+S2qzS1Tex9JgbHxb/Jv7CdZyKydxqg09G/qVvyVmUc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -42,63 +216,286 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= 
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/goconvey v1.8.1 
h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=