Example #1
0
// Gather runs the configured command and adds its parsed output to acc.
// The output is interpreted according to e.DataFormat: "" or "json"
// flattens a JSON document into fields on a single "exec" measurement,
// while "influx" parses line-protocol metrics. Any other value is an error.
func (e *Exec) Gather(acc telegraf.Accumulator) error {
	out, err := e.runner.Run(e)
	if err != nil {
		return err
	}

	switch e.DataFormat {
	case "", "json":
		var jsonOut interface{}
		if err := json.Unmarshal(out, &jsonOut); err != nil {
			return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
				e.Command, err)
		}

		f := internal.JSONFlattener{}
		if err := f.FlattenJSON("", jsonOut); err != nil {
			return err
		}
		acc.AddFields("exec", f.Fields, nil)
	case "influx":
		// One timestamp is shared by every metric in this batch.
		now := time.Now()
		metrics, err := telegraf.ParseMetrics(out)
		// Add whatever parsed successfully before reporting any parse error.
		for _, metric := range metrics {
			acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), now)
		}
		return err
	default:
		// Error strings are lowercase without trailing punctuation per Go
		// convention (they are usually wrapped by callers).
		return fmt.Errorf("unsupported data format %q; must be either json or influx",
			e.DataFormat)
	}
	return nil
}
Example #2
0
// parser() reads all incoming messages from the consumer, and parses them into
// influxdb metric points.
// parser() reads all incoming messages from the consumer, and parses them into
// influxdb metric points. It runs until k.done is closed.
func (k *Kafka) parser() {
	for {
		select {
		case <-k.done:
			return
		case err := <-k.errs:
			log.Printf("Kafka Consumer Error: %s\n", err.Error())
		case msg := <-k.in:
			metrics, parseErr := telegraf.ParseMetrics(msg.Value)
			if parseErr != nil {
				// Log and fall through: any metrics that did parse are
				// still forwarded below.
				log.Printf("Could not parse kafka message: %s, error: %s",
					string(msg.Value), parseErr.Error())
			}

			// Non-blocking send: drop the metric rather than stall the
			// consumer when the buffer is full.
			for _, m := range metrics {
				select {
				case k.metricC <- m:
				default:
					log.Printf("Kafka Consumer buffer is full, dropping a metric." +
						" You may want to increase the point_buffer setting")
				}
			}

			if !k.doNotCommitMsgs {
				// TODO(cam) this locking can be removed if this PR gets merged:
				// https://github.com/wvanbergen/kafka/pull/84
				k.Lock()
				k.Consumer.CommitUpto(msg)
				k.Unlock()
			}
		}
	}
}
Example #3
0
// Parse converts a buffer of line-protocol data into telegraf metrics.
// On a parse error it returns nil metrics and the error (any partially
// parsed metrics are discarded).
func (p *InfluxParser) Parse(buf []byte) ([]telegraf.Metric, error) {
	parsed, parseErr := telegraf.ParseMetrics(buf)
	if parseErr != nil {
		return nil, parseErr
	}
	return parsed, nil
}