Example #1
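// buildGauges converts each metric field into a Librato Gauge, skipping values that fail
// verification; when SourceTag is set, the gauge source is read from the matching tag.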
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
	gauges := []*Gauge{}
	for fieldName, value := range m.Fields() {
		gauge := &Gauge{
			Name:        l.buildGaugeName(m, fieldName),
			MeasureTime: m.Time().Unix(),
		}
		if !gauge.verifyValue(value) {
			continue
		}
		if err := gauge.setValue(value); err != nil {
			return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
				err.Error())
		}
		if l.SourceTag != "" {
			if source, ok := m.Tags()[l.SourceTag]; ok {
				gauge.Source = source
			} else {
				return gauges,
					fmt.Errorf("undeterminable Source type from Field, %s\n",
						l.SourceTag)
			}
		}
		gauges = append(gauges, gauge)
	}
	if l.Debug {
		fmt.Printf("[DEBUG] Built gauges: %v\n", gauges)
	}
	return gauges, nil
}
Example #2
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	if ro.Config.Filter.IsActive {
		if !ro.Config.Filter.ShouldMetricPass(metric) {
			return
		}
	}

	// Filter any tagexclude/taginclude parameters before adding metric
	if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
		// In order to filter out tags, we need to create a new metric, since
		// metrics are immutable once created.
		tags := metric.Tags()
		fields := metric.Fields()
		t := metric.Time()
		name := metric.Name()
		ro.Config.Filter.FilterTags(tags)
		// error is not possible if creating from another metric, so ignore.
		metric, _ = telegraf.NewMetric(name, tags, fields, t)
	}

	ro.metrics.Add(metric)
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
		err := ro.write(batch)
		if err != nil {
			ro.failMetrics.Add(batch...)
		}
	}
}
Example #3
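// buildEvents creates one Riemann event per metric field, using the "host" tag (or the local
// hostname) as the event host; string values become the event state, all other values the metric.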
func buildEvents(p telegraf.Metric, s string) []*raidman.Event {
	events := []*raidman.Event{}
	for fieldName, value := range p.Fields() {
		host, ok := p.Tags()["host"]
		if !ok {
			hostname, err := os.Hostname()
			if err != nil {
				host = "unknown"
			} else {
				host = hostname
			}
		}

		event := &raidman.Event{
			Host:    host,
			Service: serviceName(s, p.Name(), p.Tags(), fieldName),
		}

		switch value.(type) {
		case string:
			event.State = value.(string)
		default:
			event.Metric = value
		}

		events = append(events, event)
	}

	return events
}
Example #4
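// Add atomically accumulates every int64 field of the incoming metric into the aggregator's sum.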
func (t *TestAggregator) Add(in telegraf.Metric) {
	for _, v := range in.Fields() {
		if vi, ok := v.(int64); ok {
			atomic.AddInt64(&t.sum, vi)
		}
	}
}
Example #5
// parser is launched as a goroutine to watch the l.lines channel.
// when a line is available, parser parses it and adds the metric(s) to the
// accumulator.
func (l *LogParserPlugin) parser() {
	defer l.wg.Done()

	var m telegraf.Metric
	var err error
	var line string
	for {
		select {
		case <-l.done:
			return
		case line = <-l.lines:
			if line == "" || line == "\n" {
				continue
			}
		}

		for _, parser := range l.parsers {
			m, err = parser.ParseLine(line)
			if err == nil {
				if m != nil {
					l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
				}
			} else {
				log.Printf("Malformed log line in [%s], Error: %s\n", line, err)
			}
		}
	}
}
Example #6
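// Serialize renders each metric field as a Graphite line "<tags>.<name>[.<field>] <value> <timestamp>",
// replacing dots in names with underscores and prepending s.Prefix when set.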
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}
	// Get name
	name := metric.Name()
	// Convert UnixNano to Unix timestamps
	timestamp := metric.UnixNano() / 1000000000
	tag_str := buildTags(metric)

	for field_name, value := range metric.Fields() {
		// Convert value
		value_str := fmt.Sprintf("%#v", value)
		// Write graphite metric
		var graphitePoint string
		if name == field_name {
			graphitePoint = fmt.Sprintf("%s.%s %s %d",
				tag_str,
				strings.Replace(name, ".", "_", -1),
				value_str,
				timestamp)
		} else {
			graphitePoint = fmt.Sprintf("%s.%s.%s %s %d",
				tag_str,
				strings.Replace(name, ".", "_", -1),
				strings.Replace(field_name, ".", "_", -1),
				value_str,
				timestamp)
		}
		if s.Prefix != "" {
			graphitePoint = fmt.Sprintf("%s.%s", s.Prefix, graphitePoint)
		}
		out = append(out, graphitePoint)
	}
	return out, nil
}
Example #7
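// serialize encodes the metric as a single JSON message, taking the host from the "host" tag
// (or the local hostname) and prefixing every field key with an underscore.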
func serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}

	m := make(map[string]interface{})
	m["version"] = "1.1"
	m["timestamp"] = metric.UnixNano() / 1000000000
	m["short_message"] = " "
	m["name"] = metric.Name()

	if host, ok := metric.Tags()["host"]; ok {
		m["host"] = host
	} else {
		host, err := os.Hostname()
		if err != nil {
			return []string{}, err
		}
		m["host"] = host
	}

	for key, value := range metric.Fields() {
		nkey := fmt.Sprintf("_%s", key)
		m[nkey] = value
	}

	serialized, err := ejson.Marshal(m)
	if err != nil {
		return []string{}, err
	}
	out = append(out, string(serialized))

	return out, nil
}
Example #8
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	if ro.Config.Filter.IsActive {
		if !ro.Config.Filter.ShouldMetricPass(metric) {
			return
		}
	}
	ro.Lock()
	defer ro.Unlock()

	// Filter any tagexclude/taginclude parameters before adding metric
	if len(ro.Config.Filter.TagExclude) != 0 || len(ro.Config.Filter.TagInclude) != 0 {
		// In order to filter out tags, we need to create a new metric, since
		// metrics are immutable once created.
		tags := metric.Tags()
		fields := metric.Fields()
		t := metric.Time()
		name := metric.Name()
		ro.Config.Filter.FilterTags(tags)
		// error is not possible if creating from another metric, so ignore.
		metric, _ = telegraf.NewMetric(name, tags, fields, t)
	}

	if len(ro.metrics) < ro.MetricBufferLimit {
		ro.metrics = append(ro.metrics, metric)
	} else {
		if ro.FlushBufferWhenFull {
			ro.metrics = append(ro.metrics, metric)
			tmpmetrics := make([]telegraf.Metric, len(ro.metrics))
			copy(tmpmetrics, ro.metrics)
			ro.metrics = make([]telegraf.Metric, 0)
			err := ro.write(tmpmetrics)
			if err != nil {
				log.Printf("ERROR writing full metric buffer to output %s, %s",
					ro.Name, err)
				if len(ro.tmpmetrics) == FULL_METRIC_BUFFERS_LIMIT {
					ro.mapI = 0
					// overwrite one
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				} else {
					ro.tmpmetrics[ro.mapI] = tmpmetrics
					ro.mapI++
				}
			}
		} else {
			if ro.overwriteI == 0 {
				log.Printf("WARNING: overwriting cached metrics, you may want to " +
					"increase the metric_buffer_limit setting in your [agent] " +
					"config if you do not wish to overwrite metrics.\n")
			}
			if ro.overwriteI == len(ro.metrics) {
				ro.overwriteI = 0
			}
			ro.metrics[ro.overwriteI] = metric
			ro.overwriteI++
		}
	}
}
Example #9
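// buildMetrics creates one Point per metric field, storing the metric's Unix timestamp as the
// point's first element.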
func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
	ms := make(map[string]Point)
	for k, v := range m.Fields() {
		var p Point
		if err := p.setValue(v); err != nil {
			return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
		}
		p[0] = float64(m.Time().Unix())
		ms[k] = p
	}
	return ms, nil
}
Example #10
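// copyMetric returns a new metric with copies of the original's tags and fields and the same
// name and timestamp.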
func copyMetric(m telegraf.Metric) telegraf.Metric {
	t := time.Time(m.Time())

	tags := make(map[string]string)
	fields := make(map[string]interface{})
	for k, v := range m.Tags() {
		tags[k] = v
	}
	for k, v := range m.Fields() {
		fields[k] = v
	}

	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
	return out
}
Example #11
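// Serialize encodes the metric's name, tags, fields, and Unix timestamp as a single JSON document.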
func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}

	m := make(map[string]interface{})
	m["tags"] = metric.Tags()
	m["fields"] = metric.Fields()
	m["name"] = metric.Name()
	m["timestamp"] = metric.UnixNano() / 1000000000
	serialized, err := ejson.Marshal(m)
	if err != nil {
		return []string{}, err
	}
	out = append(out, string(serialized))

	return out, nil
}
Example #12
// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
	if r.Config.Filter.IsActive() {
		// check if the aggregator should apply this metric
		name := in.Name()
		fields := in.Fields()
		tags := in.Tags()
		t := in.Time()
		if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
			// aggregator should not apply this metric
			return false
		}

		in, _ = telegraf.NewMetric(name, tags, fields, t)
	}

	r.metrics <- in
	return r.Config.DropOriginal
}
Example #13
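// Serialize renders each metric field as a Graphite line "<bucket> <value> <timestamp>", building
// the bucket name per field with SerializeBucketName.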
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}

	// Convert UnixNano to Unix timestamps
	timestamp := metric.UnixNano() / 1000000000

	for field_name, value := range metric.Fields() {
		// Convert value
		value_str := fmt.Sprintf("%#v", value)
		// Write graphite metric
		graphitePoint := fmt.Sprintf("%s %s %d",
			s.SerializeBucketName(metric, field_name),
			value_str,
			timestamp)
		out = append(out, graphitePoint)
	}
	return out, nil
}
Example #14
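// buildGauges converts each metric field into a Librato Gauge, deriving the gauge source from the
// metric's tags via the configured Graphite template and sanitizing source and name with reUnacceptedChar.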
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {

	gauges := []*Gauge{}
	if m.Time().Unix() == 0 {
		return gauges, fmt.Errorf(
			"Measure time must not be zero\n <%s> \n",
			m.String())
	}
	metricSource := graphite.InsertField(
		graphite.SerializeBucketName("", m.Tags(), l.Template, ""),
		"value")
	if metricSource == "" {
		return gauges,
			fmt.Errorf("undeterminable Source type from Field, %s\n",
				l.Template)
	}
	for fieldName, value := range m.Fields() {

		metricName := m.Name()
		if fieldName != "value" {
			metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName)
		}

		gauge := &Gauge{
			Source:      reUnacceptedChar.ReplaceAllString(metricSource, "-"),
			Name:        reUnacceptedChar.ReplaceAllString(metricName, "-"),
			MeasureTime: m.Time().Unix(),
		}
		if !verifyValue(value) {
			continue
		}
		if err := gauge.setValue(value); err != nil {
			return gauges, fmt.Errorf(
				"unable to extract value from Fields, %s\n",
				err.Error())
		}
		gauges = append(gauges, gauge)
	}
	if l.Debug {
		fmt.Printf("[DEBUG] Built gauges: %v\n", gauges)
	}
	return gauges, nil
}
Example #15
// Make a MetricDatum for each field in a Point. Only fields with values that can be
// converted to float64 are supported. Non-supported fields are skipped.
func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
	datums := make([]*cloudwatch.MetricDatum, len(point.Fields()))
	i := 0

	var value float64

	for k, v := range point.Fields() {
		switch t := v.(type) {
		case int:
			value = float64(t)
		case int32:
			value = float64(t)
		case int64:
			value = float64(t)
		case float64:
			value = t
		case bool:
			if t {
				value = 1
			} else {
				value = 0
			}
		case time.Time:
			value = float64(t.Unix())
		default:
			// Skip unsupported type.
			datums = datums[:len(datums)-1]
			continue
		}

		datums[i] = &cloudwatch.MetricDatum{
			MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
			Value:      aws.Float64(value),
			Dimensions: BuildDimensions(point.Tags()),
			Timestamp:  aws.Time(point.Time()),
		}

		i += 1
	}

	return datums
}
Example #16
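// Add folds the metric's numeric fields into the per-series cache, tracking a running minimum and
// maximum for every field.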
func (m *MinMax) Add(in telegraf.Metric) {
	id := in.HashID()
	if _, ok := m.cache[id]; !ok {
		// hit an uncached metric, create caches for first time:
		a := aggregate{
			name:   in.Name(),
			tags:   in.Tags(),
			fields: make(map[string]minmax),
		}
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				a.fields[k] = minmax{
					min: fv,
					max: fv,
				}
			}
		}
		m.cache[id] = a
	} else {
		for k, v := range in.Fields() {
			if fv, ok := convert(v); ok {
				if _, ok := m.cache[id].fields[k]; !ok {
					// hit an uncached field of a cached metric
					m.cache[id].fields[k] = minmax{
						min: fv,
						max: fv,
					}
					continue
				}
				if fv < m.cache[id].fields[k].min {
					tmp := m.cache[id].fields[k]
					tmp.min = fv
					m.cache[id].fields[k] = tmp
				} else if fv > m.cache[id].fields[k].max {
					tmp := m.cache[id].fields[k]
					tmp.max = fv
					m.cache[id].fields[k] = tmp
				}
			}
		}
	}
}
Example #17
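// Serialize renders each metric field as a Graphite line, inserting the field name into the bucket
// built from the metric name and tags.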
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}

	// Convert UnixNano to Unix timestamps
	timestamp := metric.UnixNano() / 1000000000

	bucket := s.SerializeBucketName(metric.Name(), metric.Tags())

	for fieldName, value := range metric.Fields() {
		// Convert value to string
		valueS := fmt.Sprintf("%#v", value)
		point := fmt.Sprintf("%s %s %d",
			// insert "field" section of template
			InsertField(bucket, fieldName),
			valueS,
			timestamp)
		out = append(out, point)
	}
	return out, nil
}
Example #18
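// buildMetrics converts each metric field into an OpenTSDB MetricLine named "<prefix><name>_<field>",
// skipping fields whose values cannot be converted.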
func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine {
	ret := []*MetricLine{}
	for fieldName, value := range m.Fields() {
		metric := &MetricLine{
			Metric:    fmt.Sprintf("%s%s_%s", prefix, m.Name(), fieldName),
			Timestamp: now.Unix(),
		}

		metricValue, buildError := buildValue(value)
		if buildError != nil {
			fmt.Printf("OpenTSDB: %s\n", buildError.Error())
			continue
		}
		metric.Value = metricValue
		tagsSlice := buildTags(m.Tags())
		metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
		ret = append(ret, metric)
	}
	return ret
}
Example #19
// this is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming msgs, and add to the accumulator.
func (t *Tail) receiver(tailer *tail.Tail) {
	defer t.wg.Done()

	var m telegraf.Metric
	var err error
	var line *tail.Line
	for line = range tailer.Lines {
		if line.Err != nil {
			log.Printf("ERROR tailing file %s, Error: %s\n",
				tailer.Filename, err)
			continue
		}
		m, err = t.parser.ParseLine(line.Text)
		if err == nil {
			t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
		} else {
			log.Printf("Malformed log line in %s: [%s], Error: %s\n",
				tailer.Filename, line.Text, err)
		}
	}
}
Example #20
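// buildGauges converts each metric field into a Librato Gauge named "<metric>_<field>"; when
// SourceTag is set, the gauge source is read from the matching tag.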
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
	gauges := []*Gauge{}
	for fieldName, value := range m.Fields() {
		gauge := &Gauge{
			Name:        m.Name() + "_" + fieldName,
			MeasureTime: m.Time().Unix(),
		}
		if err := gauge.setValue(value); err != nil {
			return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
				err.Error())
		}
		if l.SourceTag != "" {
			if source, ok := m.Tags()[l.SourceTag]; ok {
				gauge.Source = source
			} else {
				return gauges,
					fmt.Errorf("undeterminable Source type from Field, %s\n",
						l.SourceTag)
			}
		}
		gauges = append(gauges, gauge)
	}
	return gauges, nil
}
Example #21
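// buildEvents creates one Riemann event per metric field, using the "host" tag (or the local
// hostname) as the event host.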
func buildEvents(p telegraf.Metric) []*raidman.Event {
	events := []*raidman.Event{}
	for fieldName, value := range p.Fields() {
		host, ok := p.Tags()["host"]
		if !ok {
			hostname, err := os.Hostname()
			if err != nil {
				host = "unknown"
			} else {
				host = hostname
			}
		}

		event := &raidman.Event{
			Host:    host,
			Service: p.Name() + "_" + fieldName,
			Metric:  value,
		}
		events = append(events, event)
	}

	return events
}
Example #22
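// Serialize renders each metric field as a sanitized Graphite line built from the template-derived
// bucket name, the field value, and the metric's Unix timestamp.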
func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]string, error) {
	out := []string{}

	// Convert UnixNano to Unix timestamps
	timestamp := metric.UnixNano() / 1000000000

	bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix)
	if bucket == "" {
		return out, nil
	}

	for fieldName, value := range metric.Fields() {
		// Convert value to string
		valueS := fmt.Sprintf("%#v", value)
		point := fmt.Sprintf("%s %s %d",
			// insert "field" section of template
			sanitizedChars.Replace(InsertField(bucket, fieldName)),
			sanitizedChars.Replace(valueS),
			timestamp)
		out = append(out, point)
	}
	return out, nil
}