
Commit 2463050

chore (auth): added basic auth from node_exporter

feature (pgq): added views for a special monitoring user.

The Postgres role has to be created as:

    CREATE ROLE monit LOGIN NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE;
    COMMENT ON ROLE monit IS 'Monitoring role';
    ALTER ROLE monit SET search_path = public, monit;

The schema monit also has to be created and owned by the role monit:

    CREATE SCHEMA monit AUTHORIZATION monit;

The views have to be defined as:

    CREATE OR REPLACE VIEW monit.pgq_consumer AS
        SELECT get_consumer_info.consumer_name AS name,
               date_part('epoch'::text, get_consumer_info.lag) AS lag
        FROM pgq.get_consumer_info() get_consumer_info(queue_name,
            consumer_name, lag, last_seen, last_tick, current_batch,
            next_tick, pending_events);
1 parent c931b83 commit 2463050
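For a quick sanity check of the role, schema, and view described above, a minimal sketch in Go: it assumes a local server, the monit role from this message, and the lib/pq driver this exporter already uses; the DSN is a placeholder, so adjust it for your environment.

    package main

    import (
    	"database/sql"
    	"fmt"
    	"log"

    	_ "github.com/lib/pq" // Postgres driver, registered via side effect
    )

    func main() {
    	// Hypothetical DSN: adjust host, database, and auth for your setup.
    	db, err := sql.Open("postgres", "postgres://monit@localhost/postgres?sslmode=disable")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	// Read the monitoring view directly; search_path on the role would
    	// also let the bare name pgq_consumer resolve.
    	rows, err := db.Query("SELECT name, lag FROM monit.pgq_consumer")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rows.Close()

    	for rows.Next() {
    		var name string
    		var lag float64
    		if err := rows.Scan(&name, &lag); err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("consumer=%s lag=%.1fs\n", name, lag)
    	}
    }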

1 file changed: +88 −59 lines changed

postgres_exporter.go

Lines changed: 88 additions & 59 deletions
@@ -27,6 +27,8 @@ var (
 		"web.telemetry-path", "/metrics",
 		"Path under which to expose metrics.",
 	)
+	authUser = flag.String("auth.user", "", "Username for basic auth.")
+	authPass = flag.String("auth.pass", "", "Password for basic auth.")
 )
 
 // Metric name parts.
@@ -35,7 +37,7 @@ const (
 	namespace = "pg"
 	// Subsystems.
 	exporter = "exporter"
-
+
 )
 
 // landingPage contains the HTML served at '/'.
@@ -84,66 +86,65 @@ type MetricMap struct {
 
 // Metric descriptors for dynamically created metrics.
 var metricMaps = map[string]map[string]ColumnMapping {
+	"pgq_queue" : map[string]ColumnMapping {
+		"name" : { LABEL, "Queue name", nil },
+		"lag" : { COUNTER, "Queue lag in seconds", nil },
+	},
+	"pgq_consumer" : map[string]ColumnMapping {
+		"name" : { LABEL, "Consumer name", nil },
+		"lag" : { COUNTER, "Consumer lag in seconds", nil },
+	},
 	"pg_stat_bgwriter" : map[string]ColumnMapping {
-		"checkpoints_timed" : { COUNTER, "Number of scheduled checkpoints that have been performed", nil },
-		"checkpoints_req" : { COUNTER, "Number of requested checkpoints that have been performed", nil },
-		"checkpoint_write_time" : { COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil },
-		"checkpoint_sync_time" : { COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil },
-		"buffers_checkpoint" : { COUNTER, "Number of buffers written during checkpoints", nil },
-		"buffers_clean" : { COUNTER, "Number of buffers written by the background writer", nil },
-		"maxwritten_clean" : { COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil },
-		"buffers_backend" : { COUNTER, "Number of buffers written directly by a backend", nil },
-		"buffers_backend_fsync" : { COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil },
-		"buffers_alloc" : { COUNTER, "Number of buffers allocated", nil },
+		"checkpoints_timed" : { COUNTER, "Number of scheduled checkpoints that have been performed", nil },
+		"checkpoints_req" : { COUNTER, "Number of requested checkpoints that have been performed", nil },
+		"checkpoint_write_time" : { COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds", nil },
+		"checkpoint_sync_time" : { COUNTER, "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds", nil },
+		"buffers_checkpoint" : { COUNTER, "Number of buffers written during checkpoints", nil },
+		"buffers_clean" : { COUNTER, "Number of buffers written by the background writer", nil },
+		"maxwritten_clean" : { COUNTER, "Number of times the background writer stopped a cleaning scan because it had written too many buffers", nil },
+		"buffers_backend" : { COUNTER, "Number of buffers written directly by a backend", nil },
+		"buffers_backend_fsync" : { COUNTER, "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)", nil },
+		"buffers_alloc" : { COUNTER, "Number of buffers allocated", nil },
 		"stats_reset" : { COUNTER, "Time at which these statistics were last reset", nil },
 	},
-	"pg_stat_database" : map[string]ColumnMapping {
-		"datid" : { LABEL, "OID of a database", nil },
-		"datname" : { LABEL, "Name of this database", nil },
-		"numbackends" : { GAUGE, "Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil },
-		"xact_commit" : { COUNTER, "Number of transactions in this database that have been committed", nil },
-		"xact_rollback" : { COUNTER, "Number of transactions in this database that have been rolled back", nil },
-		"blks_read" : { COUNTER, "Number of disk blocks read in this database", nil },
-		"blks_hit" : { COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil },
-		"tup_returned" : { COUNTER, "Number of rows returned by queries in this database", nil },
-		"tup_fetched" : { COUNTER, "Number of rows fetched by queries in this database", nil },
-		"tup_inserted" : { COUNTER, "Number of rows inserted by queries in this database", nil },
-		"tup_updated" : { COUNTER, "Number of rows updated by queries in this database", nil },
-		"tup_deleted" : { COUNTER, "Number of rows deleted by queries in this database", nil },
-		"conflicts" : { COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil },
-		"temp_files" : { COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil },
-		"temp_bytes" : { COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil },
-		"deadlocks" : { COUNTER, "Number of deadlocks detected in this database", nil },
-		"blk_read_time" : { COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil },
-		"blk_write_time" : { COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil },
-		"stats_reset" : { COUNTER, "Time at which these statistics were last reset", nil },
-	},
-	"pg_stat_database_conflicts" : map[string]ColumnMapping {
-		"datid" : { LABEL, "OID of a database", nil },
-		"datname" : { LABEL, "Name of this database", nil },
-		"confl_tablespace" : { COUNTER, "Number of queries in this database that have been canceled due to dropped tablespaces", nil },
-		"confl_lock" : { COUNTER, "Number of queries in this database that have been canceled due to lock timeouts", nil },
-		"confl_snapshot" : { COUNTER, "Number of queries in this database that have been canceled due to old snapshots", nil },
-		"confl_bufferpin" : { COUNTER, "Number of queries in this database that have been canceled due to pinned buffers", nil },
-		"confl_deadlock" : { COUNTER, "Number of queries in this database that have been canceled due to deadlocks", nil },
+	"pg_stat_database" : map[string]ColumnMapping {
+		"datid" : { LABEL, "OID of a database", nil },
+		"datname" : { LABEL, "Name of this database", nil },
+		"numbackends" : { GAUGE, "Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.", nil },
+		"xact_commit" : { COUNTER, "Number of transactions in this database that have been committed", nil },
+		"xact_rollback" : { COUNTER, "Number of transactions in this database that have been rolled back", nil },
+		"blks_read" : { COUNTER, "Number of disk blocks read in this database", nil },
+		"blks_hit" : { COUNTER, "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)", nil },
+		"tup_returned" : { COUNTER, "Number of rows returned by queries in this database", nil },
+		"tup_fetched" : { COUNTER, "Number of rows fetched by queries in this database", nil },
+		"tup_inserted" : { COUNTER, "Number of rows inserted by queries in this database", nil },
+		"tup_updated" : { COUNTER, "Number of rows updated by queries in this database", nil },
+		"tup_deleted" : { COUNTER, "Number of rows deleted by queries in this database", nil },
+		"conflicts" : { COUNTER, "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)", nil },
+		"temp_files" : { COUNTER, "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.", nil },
+		"temp_bytes" : { COUNTER, "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.", nil },
+		"deadlocks" : { COUNTER, "Number of deadlocks detected in this database", nil },
+		"blk_read_time" : { COUNTER, "Time spent reading data file blocks by backends in this database, in milliseconds", nil },
+		"blk_write_time" : { COUNTER, "Time spent writing data file blocks by backends in this database, in milliseconds", nil },
+		"stats_reset" : { COUNTER, "Time at which these statistics were last reset", nil },
 	},
 }
 
 // Turn the MetricMap column mapping into a prometheus descriptor mapping.
 func makeDescMap(metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace {
 	var metricMap = make(map[string]MetricMapNamespace)
-
+
 	for namespace, mappings := range metricMaps {
 		thisMap := make(map[string]MetricMap)
-
+
 		// Get the constant labels
 		var constLabels []string
 		for columnName, columnMapping := range mappings {
 			if columnMapping.usage == LABEL {
-				constLabels = append(constLabels, columnName)
+				constLabels = append(constLabels, columnName)
 			}
 		}
-
+
 		for columnName, columnMapping := range mappings {
 			switch columnMapping.usage {
 			case DISCARD, LABEL:
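Assuming known columns get descriptors named <map key>_<column> — the same scheme the NewDesc call for unknown columns uses later in this diff — and LABEL columns become label names, the two maps added above should surface series along these lines (values illustrative):

    pgq_queue_lag{name="ticker"} 0.42
    pgq_consumer_lag{name="worker"} 1.7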
@@ -168,10 +169,10 @@ func makeDescMap(metricMaps map[string]map[string]ColumnMapping) map[string]Metr
 				}
 			}
 		}
-
+
 		metricMap[namespace] = MetricMapNamespace{ constLabels, thisMap }
 	}
-
+
 	return metricMap
 }
 
@@ -213,15 +214,15 @@ func dbToString(t interface{}) (string, bool) {
 	}
 }
 
-// Exporter collects MySQL metrics. It implements prometheus.Collector.
+// Exporter collects metrics. It implements prometheus.Collector.
 type Exporter struct {
 	dsn string
 	duration, error prometheus.Gauge
 	totalScrapes prometheus.Counter
 	metricMap map[string]MetricMapNamespace
 }
 
-// NewExporter returns a new MySQL exporter for the provided DSN.
+// NewExporter returns a new exporter for the provided DSN.
 func NewExporter(dsn string) *Exporter {
 	return &Exporter{
 		dsn: dsn,
@@ -317,42 +318,42 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
 			return
 		}
 		defer rows.Close()
-
+
 		var columnNames []string
 		columnNames, err = rows.Columns()
 		if err != nil {
 			log.Println("Error retrieving column list for: ", namespace, err)
 			e.error.Set(1)
 			return
 		}
-
+
 		// Make a lookup map for the column indices
 		var columnIdx = make(map[string]int, len(columnNames))
 		for i, n := range columnNames {
 			columnIdx[n] = i
 		}
-
+
 		var columnData = make([]interface{}, len(columnNames))
 		var scanArgs = make([]interface{}, len(columnNames))
 		for i := range columnData {
 			scanArgs[i] = &columnData[i]
 		}
-
+
 		for rows.Next() {
 			err = rows.Scan(scanArgs...)
 			if err != nil {
 				log.Println("Error retrieving rows:", namespace, err)
 				e.error.Set(1)
 				return
 			}
-
+
 			// Get the label values for this row
 			var labels = make([]string, len(mapping.labels))
 			for idx, columnName := range mapping.labels {
 
 				labels[idx], _ = dbToString(columnData[columnIdx[columnName]])
 			}
-
+
 			// Loop over column names, and match to scan data. Unknown columns
 			// will be filled with an untyped metric number *if* they can be
 			// converted to float64s. NULLs are allowed and treated as NaN.
@@ -369,30 +370,47 @@ func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
 					log.Errorln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])
 					continue
 				}
-
+
 				// Generate the metric
 				ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
 			} else {
 				// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
 				desc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), fmt.Sprintf("Unknown metric from %s", namespace), nil, nil)
-
+
 				// Its not an error to fail here, since the values are
 				// unexpected anyway.
 				value, ok := dbToFloat64(columnData[idx])
 				if ! ok {
 					log.Warnln("Unparseable column type - discarding: ", namespace, columnName, err)
 					continue
 				}
-
+
 				ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...)
 			}
 		}
-
+
 		}
 	}()
 	}
 }
 
+type basicAuthHandler struct {
+	handler  http.HandlerFunc
+	user     string
+	password string
+}
+
+func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	user, password, ok := r.BasicAuth()
+	if !ok || password != h.password || user != h.user {
+		w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"")
+		http.Error(w, "Invalid username or password", http.StatusUnauthorized)
+		return
+	}
+	h.handler(w, r)
+	return
+}
+
 func main() {
 	flag.Parse()
 
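Not part of the commit: a small sketch of how the new basicAuthHandler behaves, exercised with net/http/httptest; it assumes it lives in the same package as the handler above, and the user/password values are placeholders.

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )

    func main() {
    	auth := &basicAuthHandler{
    		// Stub in place of the real prometheus.Handler().
    		handler: func(w http.ResponseWriter, r *http.Request) {
    			w.Write([]byte("ok"))
    		},
    		user:     "scraper",
    		password: "secret",
    	}

    	// Without credentials: expect 401 plus a WWW-Authenticate challenge.
    	rec := httptest.NewRecorder()
    	auth.ServeHTTP(rec, httptest.NewRequest("GET", "/metrics", nil))
    	fmt.Println(rec.Code, rec.Header().Get("WWW-Authenticate"))

    	// With matching credentials: the wrapped handler runs.
    	req := httptest.NewRequest("GET", "/metrics", nil)
    	req.SetBasicAuth("scraper", "secret")
    	rec = httptest.NewRecorder()
    	auth.ServeHTTP(rec, req)
    	fmt.Println(rec.Code, rec.Body.String())
    }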
@@ -404,7 +422,18 @@ func main() {
 	exporter := NewExporter(dsn)
 	prometheus.MustRegister(exporter)
 
-	http.Handle(*metricPath, prometheus.Handler())
+	handler := prometheus.Handler()
+	if *authUser != "" || *authPass != "" {
+		if *authUser == "" || *authPass == "" {
+			log.Fatal("You need to specify -auth.user and -auth.pass to enable basic auth")
+		}
+		handler = &basicAuthHandler{
+			handler:  prometheus.Handler().ServeHTTP,
+			user:     *authUser,
+			password: *authPass,
+		}
+	}
+	http.Handle(*metricPath, handler)
 	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
 		w.Write(landingPage)
 	})
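As the main() change shows, both flags must be given together, e.g. ./postgres_exporter -auth.user=scraper -auth.pass=secret; the exporter aborts at startup if only one is set. A hedged client-side sketch for scraping the protected endpoint (the listen address and credentials are placeholders; in Prometheus itself this corresponds to a basic_auth block in the scrape config):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"
    	"net/http"
    )

    func main() {
    	// Placeholder address: use whatever -web.listen-address the exporter runs on.
    	req, err := http.NewRequest("GET", "http://localhost:9113/metrics", nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	req.SetBasicAuth("scraper", "secret")

    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer resp.Body.Close()

    	body, err := ioutil.ReadAll(resp.Body)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("status=%d bytes=%d\n", resp.StatusCode, len(body))
    }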
