// Query the SHOW variables from the query map
// TODO: make this more functional
func queryShowVariables(ch chan<- prometheus.Metric, db *sql.DB, variableMap map[string]MetricMapNamespace) []error {
	log.Debugln("Querying SHOW variables")
	nonFatalErrors := []error{}

	for namespace, mapping := range variableMap {
		for columnName, columnMapping := range mapping.columnMappings {
			// Check for a discard request on this value
			if columnMapping.discard {
				continue
			}

			// Use SHOW to get the value
			row := db.QueryRow(fmt.Sprintf("SHOW %s;", columnName))

			var val interface{}
			err := row.Scan(&val)
			if err != nil {
				nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Error scanning runtime variable:", columnName, err)))
				continue
			}

			fval, ok := columnMapping.conversion(val)
			if !ok {
				nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column:", namespace, columnName, val)))
				continue
			}

			ch <- prometheus.MustNewConstMetric(columnMapping.desc, columnMapping.vtype, fval)
		}
	}

	return nonFatalErrors
}
// ScrapeClientStat collects from `information_schema.client_statistics`.
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
	var varName, varVal string
	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
	if err != nil {
		log.Debugln("Detailed client stats are not available.")
		return nil
	}
	if varVal == "OFF" {
		log.Debugf("MySQL @@%s is OFF.", varName)
		return nil
	}

	informationSchemaClientStatisticsRows, err := db.Query(clientStatQuery)
	if err != nil {
		return err
	}
	defer informationSchemaClientStatisticsRows.Close()

	// The client column is assumed to be column[0], while all other data is assumed to be coercible to float64.
	// Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics
	// (clientStatScanArgs is laid out as [ &client, &clientStatData[0], &clientStatData[1] ... &clientStatData[n] ]).
	// To map metrics to names we therefore always range over columnNames[1:].
	columnNames, err := informationSchemaClientStatisticsRows.Columns()
	if err != nil {
		return err
	}

	var (
		client             string                                // Holds the client name, which should be in column 0.
		clientStatData     = make([]float64, len(columnNames)-1) // 1 less because of the client column.
		clientStatScanArgs = make([]interface{}, len(columnNames))
	)

	clientStatScanArgs[0] = &client
	for i := range clientStatData {
		clientStatScanArgs[i+1] = &clientStatData[i]
	}

	for informationSchemaClientStatisticsRows.Next() {
		if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil {
			return err
		}

		// Loop over column names, and match to scan data. Unknown columns
		// will be filled with an untyped metric number. We assume that,
		// other than the client column, we'll only get numbers.
		for idx, columnName := range columnNames[1:] {
			if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok {
				ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, clientStatData[idx], client)
			} else {
				// Unknown metric. Report as untyped.
				desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"client"}, nil)
				ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, clientStatData[idx], client)
			}
		}
	}
	return nil
}
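// A hedged, standalone sketch of the scan-argument layout described above:
// column 0 holds the client name, so clientStatData[i] lines up with
// columnNames[i+1]. The column names below are illustrative, not a claim
// about the actual table schema; only fmt is needed to run it.
func exampleClientStatScanLayout() {
	columnNames := []string{"CLIENT", "TOTAL_CONNECTIONS", "BYTES_RECEIVED"}

	var client string
	data := make([]float64, len(columnNames)-1) // one less for the client column
	scanArgs := make([]interface{}, len(columnNames))
	scanArgs[0] = &client
	for i := range data {
		scanArgs[i+1] = &data[i]
	}

	// rows.Scan(scanArgs...) would populate these; simulate one row here.
	client, data[0], data[1] = "app", 42, 1024

	// Metrics are read off by ranging over columnNames[1:], so idx lines up
	// with data[idx] without further index arithmetic.
	for idx, columnName := range columnNames[1:] {
		fmt.Printf("%s{client=%q} = %v\n", columnName, client, data[idx])
	}
}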
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	log.Debugln("Collecting metrics")
	e.scrape(ch)

	ch <- e.duration
	ch <- e.totalScrapes
	ch <- e.totalErrors
	ch <- e.scrapeError
	ch <- e.up
}
// ScrapeTableStat collects from `information_schema.table_statistics`.
func ScrapeTableStat(db *sql.DB, ch chan<- prometheus.Metric) error {
	var varName, varVal string
	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
	if err != nil {
		log.Debugln("Detailed table stats are not available.")
		return nil
	}
	if varVal == "OFF" {
		log.Debugf("MySQL @@%s is OFF.", varName)
		return nil
	}

	informationSchemaTableStatisticsRows, err := db.Query(tableStatQuery)
	if err != nil {
		return err
	}
	defer informationSchemaTableStatisticsRows.Close()

	var (
		tableSchema         string
		tableName           string
		rowsRead            uint64
		rowsChanged         uint64
		rowsChangedXIndexes uint64
	)

	for informationSchemaTableStatisticsRows.Next() {
		err = informationSchemaTableStatisticsRows.Scan(
			&tableSchema,
			&tableName,
			&rowsRead,
			&rowsChanged,
			&rowsChangedXIndexes,
		)
		if err != nil {
			return err
		}
		ch <- prometheus.MustNewConstMetric(
			infoSchemaTableStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead),
			tableSchema, tableName,
		)
		ch <- prometheus.MustNewConstMetric(
			infoSchemaTableStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged),
			tableSchema, tableName,
		)
		ch <- prometheus.MustNewConstMetric(
			infoSchemaTableStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes),
			tableSchema, tableName,
		)
	}
	return nil
}
// ScrapeQueryResponseTime collects from `information_schema.query_response_time`.
func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error {
	var queryStats uint8
	err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
	if err != nil {
		log.Debugln("Query response time distribution is not present.")
		return nil
	}
	if queryStats == 0 {
		log.Debugln("query_response_time_stats is OFF.")
		return nil
	}

	for i, query := range queryResponseTimeQueries {
		err := processQueryResponseTimeTable(db, ch, query, i)
		// The first query should not fail if query_response_time_stats is ON,
		// unlike the other two: the read/write tables exist only on Percona
		// Server 5.6/5.7.
		if i == 0 && err != nil {
			return err
		}
	}
	return nil
}
// Implements the real polling functionality, but returns the connection object
// so embedding types can reuse it.
func (s *BasicService) doPoll() net.Conn {
	log.Debugln("Dialing basic service", s.Host().Hostname, s.Port(), s.Name())
	conn, err := s.dialAndScrape()
	if err != nil {
		log.Infoln("Error", s.Host().Hostname, s.Port(), s.Name(), err)
		s.portOpen = FAILED
	} else {
		log.Infoln("Success", s.Host().Hostname, s.Port(), s.Name())
		s.portOpen = SUCCESS
	}

	return conn
}
// Stop all background processing.
func (tm *TargetManager) Stop() {
	log.Infoln("Stopping target manager...")

	tm.mtx.Lock()
	// Cancel the base context; this will cause all target providers to shut
	// down and all in-flight scrapes to abort immediately.
	// Started inserts will be finished before terminating.
	tm.cancel()
	tm.mtx.Unlock()

	// Wait for all scrape inserts to complete.
	tm.wg.Wait()

	log.Debugln("Target manager stopped")
}
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	log.Debugln("Describing metrics")
	metricCh := make(chan prometheus.Metric)
	doneCh := make(chan struct{})

	go func() {
		for m := range metricCh {
			ch <- m.Desc()
		}
		close(doneCh)
	}()

	e.Collect(metricCh)
	close(metricCh)
	<-doneCh
}
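// Usage sketch for the Collector pair above (the NewExporter constructor name
// and the port are assumptions for illustration). Registration is what
// triggers Describe: the default registry calls Describe once at MustRegister
// time to check descriptors, and Collect on every scrape of the endpoint.
//
//	exporter := NewExporter(dsn)      // hypothetical constructor
//	prometheus.MustRegister(exporter) // invokes Describe, which drains Collect
//	http.Handle("/metrics", prometheus.Handler())
//	log.Fatal(http.ListenAndServe(":9187", nil))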
// Send a list of notifications to the configured alert manager.
func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error {
	n.mtx.RLock()
	defer n.mtx.RUnlock()

	alerts := make([]map[string]interface{}, 0, len(reqs))
	for _, req := range reqs {
		for ln, lv := range n.externalLabels {
			if _, ok := req.Labels[ln]; !ok {
				req.Labels[ln] = lv
			}
		}
		alerts = append(alerts, map[string]interface{}{
			"summary":     req.Summary,
			"description": req.Description,
			"runbook":     req.Runbook,
			"labels":      req.Labels,
			"payload": map[string]interface{}{
				"value":        req.Value,
				"activeSince":  req.ActiveSince,
				"generatorURL": req.GeneratorURL,
				"alertingRule": req.RuleString,
			},
		})
	}
	buf, err := json.Marshal(alerts)
	if err != nil {
		return err
	}
	log.Debugln("Sending notifications to alertmanager:", string(buf))
	resp, err := n.httpClient.Post(
		n.alertmanagerURL+alertmanagerAPIEventsPath,
		contentTypeJSON,
		bytes.NewBuffer(buf),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Drain the response body so the connection can be reused.
	_, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	// Treat any non-2xx response from the alertmanager as an error.
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("unexpected status code %d from alertmanager", resp.StatusCode)
	}
	return nil
}
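// For reference, the request body assembled above: each NotificationReq
// marshals to one entry of a JSON array of this shape (all values here are
// illustrative), POSTed to alertmanagerURL + alertmanagerAPIEventsPath with
// the contentTypeJSON content type:
//
//	[{
//	  "summary": "Instance down",
//	  "description": "Instance has been down for more than 5 minutes",
//	  "runbook": "https://example.org/runbook",
//	  "labels": {"alertname": "InstanceDown", "monitor": "codelab"},
//	  "payload": {
//	    "value": "0",
//	    "activeSince": "2015-06-01T12:00:00Z",
//	    "generatorURL": "http://prometheus.example.org/graph",
//	    "alertingRule": "ALERT InstanceDown ..."
//	  }
//	}]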
// Iterate through all the namespace mappings in the exporter and run their
// queries.
func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap map[string]MetricMapNamespace, queryOverrides map[string]string) map[string]error {
	// Return a map of namespace -> errors
	namespaceErrors := make(map[string]error)

	for namespace, mapping := range metricMap {
		log.Debugln("Querying namespace:", namespace)
		nonFatalErrors, err := queryNamespaceMapping(ch, db, namespace, mapping, queryOverrides)
		// Serious error - a namespace disappeared
		if err != nil {
			namespaceErrors[namespace] = err
			log.Infoln(err)
		}
		// Non-serious errors - likely version or parsing problems.
		if len(nonFatalErrors) > 0 {
			for _, err := range nonFatalErrors {
				log.Infoln(err.Error())
			}
		}
	}

	return namespaceErrors
}
func parseMycnf(config interface{}) (string, error) {
	var dsn string
	cfg, err := ini.Load(config)
	if err != nil {
		return dsn, fmt.Errorf("failed reading ini file: %s", err)
	}
	user := cfg.Section("client").Key("user").String()
	password := cfg.Section("client").Key("password").String()
	if (user == "") || (password == "") {
		return dsn, fmt.Errorf("no user or password specified under [client] in %s", config)
	}
	host := cfg.Section("client").Key("host").MustString("localhost")
	port := cfg.Section("client").Key("port").MustUint(3306)
	socket := cfg.Section("client").Key("socket").String()
	if socket != "" {
		dsn = fmt.Sprintf("%s:%s@unix(%s)/", user, password, socket)
	} else {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, host, port)
	}
	log.Debugln(dsn)
	return dsn, nil
}
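// parseMycnf can be exercised without a file on disk: go-ini's ini.Load
// accepts raw []byte as well as a path, which is why the parameter is typed
// interface{}. A minimal sketch (credentials are illustrative):
//
//	dsn, err := parseMycnf([]byte("[client]\nuser = exporter\npassword = secret\n"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	// dsn is now "exporter:secret@tcp(localhost:3306)/"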
// Check and update the exporter's query maps if the version has changed.
func (e *Exporter) checkMapVersions(ch chan<- prometheus.Metric, db *sql.DB) error {
	log.Debugln("Querying Postgres Version")
	versionRow := db.QueryRow("SELECT version();")
	var versionString string
	err := versionRow.Scan(&versionString)
	if err != nil {
		return errors.New(fmt.Sprintln("Error scanning version string:", err))
	}
	semanticVersion, err := parseVersion(versionString)
	if err != nil {
		return errors.New(fmt.Sprintln("Error parsing version string:", err))
	}

	// Check if semantic version changed and recalculate maps if needed.
	if semanticVersion.NE(e.lastMapVersion) || e.variableMap == nil || e.metricMap == nil {
		log.Infoln("Semantic Version Changed:", e.lastMapVersion.String(), "->", semanticVersion.String())
		e.mappingMtx.Lock()

		e.variableMap = makeDescMap(semanticVersion, variableMaps)
		e.metricMap = makeDescMap(semanticVersion, metricMaps)
		e.queryOverrides = makeQueryOverrideMap(semanticVersion, queryOverrides)
		e.lastMapVersion = semanticVersion

		if e.userQueriesPath != "" {
			if err := addQueries(e.userQueriesPath, semanticVersion, e.metricMap, e.queryOverrides); err != nil {
				log.Errorln("Failed to reload user queries:", e.userQueriesPath, err)
			}
		}

		e.mappingMtx.Unlock()
	}

	// Output the version as a special metric
	versionDesc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, staticLabelName),
		"Version string as reported by postgres", []string{"version", "short_version"}, nil)

	ch <- prometheus.MustNewConstMetric(versionDesc,
		prometheus.UntypedValue, 1, versionString, semanticVersion.String())
	return nil
}
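// The version metric emitted above renders like this on the metrics endpoint.
// The actual metric name depends on the package-level namespace and
// staticLabelName constants, which are not shown here, so the shape below is
// illustrative only:
//
//	<namespace>_<staticLabelName>{version="PostgreSQL 9.6.5 on x86_64-pc-linux-gnu ...", short_version="9.6.5"} 1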
func marathonConnect(uri *url.URL) error {
	config := marathon.NewDefaultConfig()
	config.URL = uri.String()

	if uri.User != nil {
		if passwd, ok := uri.User.Password(); ok {
			config.HTTPBasicPassword = passwd
			config.HTTPBasicAuthUser = uri.User.Username()
		}
	}
	config.HTTPClient = &http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout: 10 * time.Second,
			}).Dial,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}

	log.Debugln("Connecting to Marathon")
	client, err := marathon.NewClient(config)
	if err != nil {
		return err
	}

	info, err := client.Info()
	if err != nil {
		return err
	}
	log.Debugf("Connected to Marathon! Name=%s, Version=%s\n", info.Name, info.Version)
	return nil
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace, queryOverrides map[string]string) ([]error, error) {
	// Check for a query override for this namespace
	query, found := queryOverrides[namespace]
	if !found {
		// No query override - do a simple * search.
		query = fmt.Sprintf("SELECT * FROM %s;", namespace)
	}

	// Was this query disabled (i.e. nothing sensible can be queried on this
	// version of PostgreSQL)?
	if query == "" {
		// Return success (no pertinent data)
		return []error{}, nil
	}

	// Don't fail on a bad scrape of one metric
	rows, err := db.Query(query)
	if err != nil {
		return []error{}, errors.New(fmt.Sprintln("Error running query on database:", namespace, err))
	}
	defer rows.Close()

	var columnNames []string
	columnNames, err = rows.Columns()
	if err != nil {
		return []error{}, errors.New(fmt.Sprintln("Error retrieving column list for:", namespace, err))
	}

	// Make a lookup map for the column indices
	var columnIdx = make(map[string]int, len(columnNames))
	for i, n := range columnNames {
		columnIdx[n] = i
	}

	var columnData = make([]interface{}, len(columnNames))
	var scanArgs = make([]interface{}, len(columnNames))
	for i := range columnData {
		scanArgs[i] = &columnData[i]
	}

	nonfatalErrors := []error{}

	for rows.Next() {
		err = rows.Scan(scanArgs...)
		if err != nil {
			return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
		}

		// Get the label values for this row
		var labels = make([]string, len(mapping.labels))
		for idx, columnName := range mapping.labels {
			labels[idx], _ = dbToString(columnData[columnIdx[columnName]])
		}

		// Loop over column names, and match to scan data. Unknown columns
		// will be filled with an untyped metric number *if* they can be
		// converted to float64s. NULLs are allowed and treated as NaN.
		for idx, columnName := range columnNames {
			if metricMapping, ok := mapping.columnMappings[columnName]; ok {
				// Is this a metric we should emit, or a label/discarded column?
				if metricMapping.discard {
					continue
				}

				value, ok := dbToFloat64(columnData[idx])
				if !ok {
					nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column:", namespace, columnName, columnData[idx])))
					continue
				}

				// Generate the metric
				ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
			} else {
				// Unknown metric. Report as untyped if scan to float64 works, else note an error too.
				// The descriptor must declare the same label names we pass below.
				desc := prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), fmt.Sprintf("Unknown metric from %s", namespace), mapping.labels, nil)

				// It's not an error to fail here, since the values are
				// unexpected anyway.
				value, ok := dbToFloat64(columnData[idx])
				if !ok {
					nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unparseable column type - discarding:", namespace, columnName, columnData[idx])))
					continue
				}

				log.Debugln(columnName, labels)
				ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, value, labels...)
			}
		}
	}
	return nonfatalErrors, nil
}
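// A hedged example of the queryOverrides map consumed above: keys are
// namespaces, values replace the default "SELECT * FROM <namespace>;", and an
// empty string disables the namespace entirely, as the early return shows.
// The SQL below is illustrative, not the exporter's shipped override set:
//
//	queryOverrides := map[string]string{
//		"pg_stat_activity": "SELECT datname, count(*) AS count FROM pg_stat_activity GROUP BY datname",
//		"pg_legacy_view":   "", // disabled: nothing sensible on this version
//	}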
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(pgVersion semver.Version, metricMaps map[string]map[string]ColumnMapping) map[string]MetricMapNamespace {
	var metricMap = make(map[string]MetricMapNamespace)

	for namespace, mappings := range metricMaps {
		thisMap := make(map[string]MetricMap)

		// Get the constant labels
		var constLabels []string
		for columnName, columnMapping := range mappings {
			if columnMapping.usage == LABEL {
				constLabels = append(constLabels, columnName)
			}
		}

		for columnName, columnMapping := range mappings {
			// Shadow the loop variables so the conversion closures below
			// capture this iteration's values rather than the final ones.
			columnName, columnMapping := columnName, columnMapping

			// Check column version compatibility for the current map.
			// Force to discard if not compatible.
			if columnMapping.supportedVersions != nil {
				if !columnMapping.supportedVersions(pgVersion) {
					// It's very useful to be able to see what columns are being
					// rejected.
					log.Debugln(columnName, "is being forced to discard due to version incompatibility.")
					thisMap[columnName] = MetricMap{
						discard: true,
						conversion: func(in interface{}) (float64, bool) {
							return math.NaN(), true
						},
					}
					continue
				}
			}

			// Determine how to convert the column based on its usage.
			switch columnMapping.usage {
			case DISCARD, LABEL:
				thisMap[columnName] = MetricMap{
					discard: true,
					conversion: func(in interface{}) (float64, bool) {
						return math.NaN(), true
					},
				}
			case COUNTER:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.CounterValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
					conversion: func(in interface{}) (float64, bool) {
						return dbToFloat64(in)
					},
				}
			case GAUGE:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
					conversion: func(in interface{}) (float64, bool) {
						return dbToFloat64(in)
					},
				}
			case MAPPEDMETRIC:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
					conversion: func(in interface{}) (float64, bool) {
						text, ok := in.(string)
						if !ok {
							return math.NaN(), false
						}

						val, ok := columnMapping.mapping[text]
						if !ok {
							return math.NaN(), false
						}
						return val, true
					},
				}
			case DURATION:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s_milliseconds", namespace, columnName), columnMapping.description, constLabels, nil),
					conversion: func(in interface{}) (float64, bool) {
						var durationString string
						switch t := in.(type) {
						case []byte:
							durationString = string(t)
						case string:
							durationString = t
						default:
							log.Errorln("DURATION conversion metric was not a string")
							return math.NaN(), false
						}

						if durationString == "-1" {
							return math.NaN(), false
						}

						d, err := time.ParseDuration(durationString)
						if err != nil {
							log.Errorln("Failed converting result to metric:", columnName, in, err)
							return math.NaN(), false
						}
						return float64(d / time.Millisecond), true
					},
				}
			}
		}

		metricMap[namespace] = MetricMapNamespace{constLabels, thisMap}
	}

	return metricMap
}
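// A hedged sketch of the input makeDescMap consumes. The field names match
// those the function reads (usage, description, mapping, supportedVersions);
// the namespace, columns, and version are illustrative:
//
//	metricMaps := map[string]map[string]ColumnMapping{
//		"pg_stat_database": {
//			"datname":     {usage: LABEL, description: "Name of the database"},
//			"xact_commit": {usage: COUNTER, description: "Transactions committed"},
//			"state":       {usage: MAPPEDMETRIC, description: "Connection state", mapping: map[string]float64{"active": 1, "idle": 2}},
//		},
//	}
//	descMap := makeDescMap(semver.MustParse("9.6.0"), metricMaps)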
// Add queries to the metricMaps and queryOverrides maps. Added queries do not
// respect version requirements, because it is assumed that the user knows
// what they are doing with their version of postgres.
//
// This function modifies metricMap and queryOverrideMap to contain the new
// queries.
// TODO: test code for all this.
// TODO: use a proper struct type system
// TODO: the YAML this supports is "non-standard" - we should move away from it.
func addQueries(queriesPath string, pgVersion semver.Version, exporterMap map[string]MetricMapNamespace, queryOverrideMap map[string]string) error {
	var extra map[string]interface{}

	content, err := ioutil.ReadFile(queriesPath)
	if err != nil {
		return err
	}

	err = yaml.Unmarshal(content, &extra)
	if err != nil {
		return err
	}

	// Stores the loaded map representation
	metricMaps := make(map[string]map[string]ColumnMapping)
	newQueryOverrides := make(map[string]string)

	for metric, specs := range extra {
		log.Debugln("New user metric namespace from YAML:", metric)
		for key, value := range specs.(map[interface{}]interface{}) {
			switch key.(string) {
			case "query":
				query := value.(string)
				newQueryOverrides[metric] = query
			case "metrics":
				for _, c := range value.([]interface{}) {
					column := c.(map[interface{}]interface{})

					for n, a := range column {
						var columnMapping ColumnMapping

						// Fetch the metric map we want to work on.
						metricMap, ok := metricMaps[metric]
						if !ok {
							// Namespace for metric not found - add it.
							metricMap = make(map[string]ColumnMapping)
							metricMaps[metric] = metricMap
						}

						// Get name.
						name := n.(string)

						for attrKey, attrVal := range a.(map[interface{}]interface{}) {
							switch attrKey.(string) {
							case "usage":
								usage, err := stringToColumnUsage(attrVal.(string))
								if err != nil {
									return err
								}
								columnMapping.usage = usage
							case "description":
								columnMapping.description = attrVal.(string)
							}
						}

						// TODO: we should support this
						columnMapping.mapping = nil
						// Should we support this for users?
						columnMapping.supportedVersions = nil

						metricMap[name] = columnMapping
					}
				}
			}
		}
	}

	// Convert the loaded metric map into exporter representation
	partialExporterMap := makeDescMap(pgVersion, metricMaps)

	// Merge the two maps (which are now quite flattened)
	for k, v := range partialExporterMap {
		_, found := exporterMap[k]
		if found {
			log.Debugln("Overriding metric", k, "from user YAML file.")
		} else {
			log.Debugln("Adding new metric", k, "from user YAML file.")
		}
		exporterMap[k] = v
	}

	// Merge the query override map
	for k, v := range newQueryOverrides {
		_, found := queryOverrideMap[k]
		if found {
			log.Debugln("Overriding query override", k, "from user YAML file.")
		} else {
			log.Debugln("Adding new query override", k, "from user YAML file.")
		}
		queryOverrideMap[k] = v
	}

	return nil
}
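// The YAML shape addQueries expects, reconstructed from the parsing code
// above: a top-level namespace carrying a "query" string and a "metrics"
// list of single-key column maps, each with "usage" and "description".
// The namespace and query below are illustrative:
//
//	pg_replication:
//	  query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) AS lag"
//	  metrics:
//	    - lag:
//	        usage: "GAUGE"
//	        description: "Replication lag behind master in seconds"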
// ScrapeQueryResponseTime collects from `information_schema.query_response_time`.
func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error {
	var queryStats uint8
	err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
	if err != nil {
		log.Debugln("Query response time distribution is not present.")
		return nil
	}
	if queryStats == 0 {
		log.Debugln("query_response_time_stats is OFF.")
		return nil
	}

	queryDistributionRows, err := db.Query(queryResponseTimeQuery)
	if err != nil {
		return err
	}
	defer queryDistributionRows.Close()

	var (
		length       string
		count        uint64
		total        string
		histogramCnt uint64
		histogramSum float64
		countBuckets = map[float64]uint64{}
	)

	for queryDistributionRows.Next() {
		err = queryDistributionRows.Scan(
			&length,
			&count,
			&total,
		)
		if err != nil {
			return err
		}

		// Parse errors are deliberately ignored: the "TOO LONG" row carries
		// no numeric time, so both values parse to zero for it.
		bucketTime, _ := strconv.ParseFloat(strings.TrimSpace(length), 64)
		totalTime, _ := strconv.ParseFloat(strings.TrimSpace(total), 64)

		histogramCnt += count
		histogramSum += totalTime

		// Special case for the "TOO LONG" row: only its count field is usable,
		// so it contributes to the overall count (the implicit +Inf bucket)
		// but is not emitted as a finite bucket or per-bucket metric.
		if bucketTime == 0 {
			continue
		}
		countBuckets[bucketTime] = histogramCnt

		// No histogram of query total times, because they are floats.
		ch <- prometheus.MustNewConstMetric(
			infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum,
			fmt.Sprintf("%v", bucketTime),
		)
	}
	ch <- prometheus.MustNewConstMetric(
		infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum,
		"+Inf",
	)

	// Create a histogram with the query counts.
	ch <- prometheus.MustNewConstHistogram(
		infoSchemaQueryResponseTimeCountDesc, histogramCnt, histogramSum, countBuckets,
	)
	return nil
}
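// A worked example of the bucket accumulation above, with three illustrative
// rows in (length, count, total) order. Counts are kept cumulative because
// MustNewConstHistogram expects cumulative bucket counts:
//
//	("0.000001", 2, "0.000002") -> histogramCnt=2, countBuckets[1e-06]=2
//	("0.000010", 3, "0.000015") -> histogramCnt=5, countBuckets[1e-05]=5
//	("TOO LONG", 1, "TOO LONG") -> both ParseFloats fail, bucketTime=0: the
//	    count still reaches histogramCnt (the implicit +Inf bucket), but no
//	    finite bucket or per-bucket total metric is emitted.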
func main() {
	rand.Seed(time.Now().Unix())
	flag.Parse()

	// This is only used when we're running in -dev mode with bindata
	rootDir, _ = osext.ExecutableFolder()
	rootDir = path.Join(rootDir, "web")

	// Parse configuration
	cfg, err := config.LoadFromFile(*configFile)
	if err != nil {
		log.Fatalln("Error loading config", err)
	}

	// Templates
	amberTmpl, err := Asset("templates/index.amber")
	if err != nil {
		log.Fatalln("Could not load index template:", err)
	}
	tmpl := amber.MustCompile(string(amberTmpl), amber.Options{})

	// Setup the web UI
	router := httprouter.New()
	router.Handler("GET", *metricsPath, prometheus.Handler()) // Prometheus

	// Static asset handling
	router.GET("/static/*filepath", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
		reqpath := ps.ByName("filepath")
		realpath := path.Join("static", reqpath)
		b, err := Asset(realpath)
		if err != nil {
			log.Debugln("Could not find asset:", err)
			return
		}
		w.Write(b)
	})

	var monitoredHosts []*pollers.Host

	router.GET("/", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		data := struct {
			Cfg   *config.Config
			Hosts *[]*pollers.Host
		}{
			Cfg:   cfg,
			Hosts: &monitoredHosts,
		}
		err := tmpl.Execute(w, &data)
		if err != nil {
			log.Errorln("Error rendering template", err)
		}
	})

	// Initialize the host pollers
	monitoredHosts = make([]*pollers.Host, len(cfg.Hosts))

	// We don't allow duplicate hosts, but also don't want to panic just due
	// to a typo, so keep track and skip duplicates here.
	seenHosts := make(map[string]bool)

	realidx := 0
	for _, hostCfg := range cfg.Hosts {
		log.Debugln("Setting up poller for:", hostCfg.Hostname)
		if *skipPing {
			hostCfg.PingDisable = true
		}
		if _, ok := seenHosts[hostCfg.Hostname]; ok {
			log.Warnln("Discarding repeat configuration of same hostname", hostCfg.Hostname)
			continue
		}
		host := pollers.NewHost(hostCfg)
		monitoredHosts[realidx] = host
		prometheus.MustRegister(host)

		seenHosts[hostCfg.Hostname] = true
		realidx++
	}

	// Trim monitoredHosts to the number we actually used
	monitoredHosts = monitoredHosts[0:realidx]

	// This is the dispatcher. It is responsible for invoking the doPoll method
	// of hosts.
	connectionLimiter := pollers.NewLimiter(*maxConnections)
	hostQueue := make(chan *pollers.Host)

	// Start the host dispatcher
	go func() {
		for host := range hostQueue {
			go host.Poll(connectionLimiter, hostQueue)
		}
	}()

	// Do the initial host dispatch
	go func() {
		log.Debugln("Starting polling for hosts")
		for _, host := range monitoredHosts {
			hostQueue <- host
		}
	}()

	var handler http.Handler

	// If basic auth is requested, enable it for the interface.
	if cfg.BasicAuthUsername != "" && cfg.BasicAuthPassword != "" {
		basicauth := httpauth.SimpleBasicAuth(cfg.BasicAuthUsername, cfg.BasicAuthPassword)
		handler = basicauth(router)
	} else {
		handler = router
	}

	// If TLS certificates are specified, use TLS
	if cfg.TLSCertificatePath != "" && cfg.TLSKeyPath != "" {
		log.Infof("Listening on (TLS-enabled) %s", *listenAddress)
		err = http.ListenAndServeTLS(*listenAddress, cfg.TLSCertificatePath, cfg.TLSKeyPath, handler)
	} else {
		log.Infof("Listening on %s", *listenAddress)
		err = http.ListenAndServe(*listenAddress, handler)
	}

	if err != nil {
		log.Fatal(err)
	}
}