func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { resp, err := client.Get(url) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) } format := expfmt.ResponseFormat(resp.Header) decoder := expfmt.NewDecoder(resp.Body, format) options := &expfmt.DecodeOptions{ Timestamp: model.Now(), } sampleDecoder := &expfmt.SampleDecoder{ Dec: decoder, Opts: options, } for { var samples model.Vector err := sampleDecoder.Decode(&samples) if err == io.EOF { break } else if err != nil { return fmt.Errorf("error getting processing samples for %s: %s", url, err) } for _, sample := range samples { tags := make(map[string]string) for key, value := range sample.Metric { if key == model.MetricNameLabel { continue } tags[string(key)] = string(value) } acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]), float64(sample.Value), tags) } } return nil }
func (s *Statsd) Gather(acc telegraf.Accumulator) error { s.Lock() defer s.Unlock() for _, metric := range s.timings { fields := make(map[string]interface{}) fields["mean"] = metric.stats.Mean() fields["stddev"] = metric.stats.Stddev() fields["upper"] = metric.stats.Upper() fields["lower"] = metric.stats.Lower() fields["count"] = metric.stats.Count() for _, percentile := range s.Percentiles { name := fmt.Sprintf("%v_percentile", percentile) fields[name] = metric.stats.Percentile(percentile) } acc.AddFields(metric.name, fields, metric.tags) } if s.DeleteTimings { s.timings = make(map[string]cachedtimings) } for _, metric := range s.gauges { acc.Add(metric.name, metric.value, metric.tags) } if s.DeleteGauges { s.gauges = make(map[string]cachedgauge) } for _, metric := range s.counters { acc.Add(metric.name, metric.value, metric.tags) } if s.DeleteCounters { s.counters = make(map[string]cachedcounter) } for _, metric := range s.sets { acc.Add(metric.name, int64(len(metric.set)), metric.tags) } if s.DeleteSets { s.sets = make(map[string]cachedset) } return nil }
// gatherTableSchema can be used to gather stats on each schema func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumulator) error { var ( dbList []string servtag string ) servtag, err := parseDSN(serv) if err != nil { servtag = "localhost" } // if the list of databases if empty, then get all databases if len(m.TableSchemaDatabases) == 0 { rows, err := db.Query(dbListQuery) if err != nil { return err } defer rows.Close() var database string for rows.Next() { err = rows.Scan(&database) if err != nil { return err } dbList = append(dbList, database) } } else { dbList = m.TableSchemaDatabases } for _, database := range dbList { rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) if err != nil { return err } defer rows.Close() var ( tableSchema string tableName string tableType string engine string version float64 rowFormat string tableRows float64 dataLength float64 indexLength float64 dataFree float64 createOptions string ) for rows.Next() { err = rows.Scan( &tableSchema, &tableName, &tableType, &engine, &version, &rowFormat, &tableRows, &dataLength, &indexLength, &dataFree, &createOptions, ) if err != nil { return err } tags := map[string]string{"server": servtag} tags["schema"] = tableSchema tags["table"] = tableName acc.Add(newNamespace("info_schema", "table_rows"), tableRows, tags) dlTags := copyTags(tags) dlTags["component"] = "data_length" acc.Add(newNamespace("info_schema", "table_size", "data_length"), dataLength, dlTags) ilTags := copyTags(tags) ilTags["component"] = "index_length" acc.Add(newNamespace("info_schema", "table_size", "index_length"), indexLength, ilTags) dfTags := copyTags(tags) dfTags["component"] = "data_free" acc.Add(newNamespace("info_schema", "table_size", "data_free"), dataFree, dfTags) versionTags := copyTags(tags) versionTags["type"] = tableType versionTags["engine"] = engine versionTags["row_format"] = rowFormat versionTags["create_options"] = createOptions acc.Add(newNamespace("info_schema", 
"table_version"), version, versionTags) } } return nil }
// add forwards a metric to the accumulator, silently skipping
// negative values.
func add(acc telegraf.Accumulator, name string, val float64, tags map[string]string) {
	if val < 0 {
		return
	}
	acc.Add(name, val, tags)
}
func (s *Statsd) Gather(acc telegraf.Accumulator) error { s.Lock() defer s.Unlock() for _, metric := range s.timings { acc.Add(metric.name+"_mean", metric.stats.Mean(), metric.tags) acc.Add(metric.name+"_stddev", metric.stats.Stddev(), metric.tags) acc.Add(metric.name+"_upper", metric.stats.Upper(), metric.tags) acc.Add(metric.name+"_lower", metric.stats.Lower(), metric.tags) acc.Add(metric.name+"_count", metric.stats.Count(), metric.tags) for _, percentile := range s.Percentiles { name := fmt.Sprintf("%s_percentile_%v", metric.name, percentile) acc.Add(name, metric.stats.Percentile(percentile), metric.tags) } } if s.DeleteTimings { s.timings = make(map[string]cachedtimings) } for _, metric := range s.gauges { acc.Add(metric.name, metric.value, metric.tags) } if s.DeleteGauges { s.gauges = make(map[string]cachedgauge) } for _, metric := range s.counters { acc.Add(metric.name, metric.value, metric.tags) } if s.DeleteCounters { s.counters = make(map[string]cachedcounter) } for _, metric := range s.sets { acc.Add(metric.name, int64(len(metric.set)), metric.tags) } if s.DeleteSets { s.sets = make(map[string]cachedset) } return nil }