// add records a single named stat value on the accumulator, tagged with the
// document's tags and timestamped with the StatLine's collection time.
func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) {
	fields := map[string]interface{}{
		"value": val,
	}
	acc.AddFields(key, fields, d.Tags, d.StatLine.Time)
}
func (k *Kafka) Gather(acc plugins.Accumulator) error { k.Lock() defer k.Unlock() npoints := len(k.pointChan) for i := 0; i < npoints; i++ { point := <-k.pointChan acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) } return nil }
// gatherQueues collects per-queue statistics from the RabbitMQ management
// API (/api/queues) and emits one "queue" measurement for every queue that
// passes the shouldGatherQueue filter. The outcome (nil on success, or the
// request error) is reported on errChan so callers can run several gatherers
// concurrently and collect their results.
func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
	// Gather information about queues
	queues := make([]Queue, 0)
	err := r.requestJSON(serv, "/api/queues", &queues)
	if err != nil {
		errChan <- err
		return
	}

	for _, queue := range queues {
		// Honour the server's queue include/exclude configuration.
		if !shouldGatherQueue(queue, serv) {
			continue
		}
		tags := map[string]string{
			"url":         serv.URL,
			"queue":       queue.Name,
			"vhost":       queue.Vhost,
			"node":        queue.Node,
			"durable":     strconv.FormatBool(queue.Durable),
			"auto_delete": strconv.FormatBool(queue.AutoDelete),
		}

		acc.AddFields(
			"queue",
			map[string]interface{}{
				// common information
				"consumers":            queue.Consumers,
				"consumer_utilisation": queue.ConsumerUtilisation,
				"memory":               queue.Memory,
				// messages information
				"messages":                  queue.Messages,
				"messages_ready":            queue.MessagesReady,
				"messages_unack":            queue.MessagesUnacknowledged,
				"messages_ack":              queue.MessageStats.Ack,
				"messages_ack_rate":         queue.MessageStats.AckDetails.Rate,
				"messages_deliver":          queue.MessageStats.Deliver,
				"messages_deliver_rate":     queue.MessageStats.DeliverDetails.Rate,
				"messages_deliver_get":      queue.MessageStats.DeliverGet,
				"messages_deliver_get_rate": queue.MessageStats.DeliverGetDetails.Rate,
				"messages_publish":          queue.MessageStats.Publish,
				"messages_publish_rate":     queue.MessageStats.PublishDetails.Rate,
				"messages_redeliver":        queue.MessageStats.Redeliver,
				"messages_redeliver_rate":   queue.MessageStats.RedeliverDetails.Rate,
			},
			tags,
		)
	}

	// Signal successful completion to the caller.
	errChan <- nil
}
func (s *Trig) Gather(acc plugins.Accumulator) error { sinner := math.Sin((s.x*math.Pi)/5.0) * s.Amplitude cosinner := math.Cos((s.x*math.Pi)/5.0) * s.Amplitude fields := make(map[string]interface{}) fields["sine"] = sinner fields["cosine"] = cosinner tags := make(map[string]string) s.x += 1.0 acc.AddFields("trig", fields, tags) return nil }
func (j *Jolokia) Gather(acc plugins.Accumulator) error { context := j.Context //"/jolokia/read" servers := j.Servers metrics := j.Metrics tags := j.Tags if tags == nil { tags = map[string]string{} } for _, server := range servers { for _, metric := range metrics { measurement := metric.Name jmxPath := metric.Jmx tags["server"] = server.Name tags["port"] = server.Port tags["host"] = server.Host // Prepare URL requestUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context + jmxPath) if err != nil { return err } if server.Username != "" || server.Password != "" { requestUrl.User = url.UserPassword(server.Username, server.Password) } out, _ := j.getAttr(requestUrl) if values, ok := out["value"]; ok { switch values.(type) { case map[string]interface{}: acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags) case interface{}: acc.Add(measurement, values.(interface{}), tags) } } else { fmt.Printf("Missing key 'value' in '%s' output response\n", requestUrl.String()) } } } return nil }
func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error { clusterStats := &clusterHealth{} if err := e.gatherData(url, clusterStats); err != nil { return err } measurementTime := time.Now() clusterFields := map[string]interface{}{ "status": clusterStats.Status, "timed_out": clusterStats.TimedOut, "number_of_nodes": clusterStats.NumberOfNodes, "number_of_data_nodes": clusterStats.NumberOfDataNodes, "active_primary_shards": clusterStats.ActivePrimaryShards, "active_shards": clusterStats.ActiveShards, "relocating_shards": clusterStats.RelocatingShards, "initializing_shards": clusterStats.InitializingShards, "unassigned_shards": clusterStats.UnassignedShards, } acc.AddFields( "cluster_health", clusterFields, map[string]string{"name": clusterStats.ClusterName}, measurementTime, ) for name, health := range clusterStats.Indices { indexFields := map[string]interface{}{ "status": health.Status, "number_of_shards": health.NumberOfShards, "number_of_replicas": health.NumberOfReplicas, "active_primary_shards": health.ActivePrimaryShards, "active_shards": health.ActiveShards, "relocating_shards": health.RelocatingShards, "initializing_shards": health.InitializingShards, "unassigned_shards": health.UnassignedShards, } acc.AddFields( "indices", indexFields, map[string]string{"index": name}, measurementTime, ) } return nil }
func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte) error { timeout := time.After(1 * time.Second) for { select { case batch := <-metricConsumer: var points []models.Point var err error if points, err = models.ParsePoints(batch); err != nil { return err } for _, point := range points { acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) } case <-timeout: return nil } } }
func (j *Jolokia) Gather(acc plugins.Accumulator) error { context := j.Context //"/jolokia/read" servers := j.Servers metrics := j.Metrics var tags = map[string]string{ "group": "application_server", } for _, server := range servers { for _, metric := range metrics { measurement := metric.Name jmxPath := metric.Jmx tags["server"] = server.Name tags["port"] = server.Port tags["host"] = server.Host url := "http://" + server.Host + ":" + server.Port + context + jmxPath //fmt.Println(url) out, _ := getAttr(url) if values, ok := out["value"]; ok { switch values.(type) { case map[string]interface{}: acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags) case interface{}: acc.Add(measurement, values.(interface{}), tags) } } else { fmt.Println("Missing key value") } } } return nil }
// Gathers data from a particular URL // Parameters: // acc : The telegraf Accumulator to use // url : endpoint to send request to // // Returns: // error: Any error that may have occurred func (i *InfluxDB) gatherURL( acc plugins.Accumulator, url string, ) error { resp, err := http.Get(url) if err != nil { return err } defer resp.Body.Close() // It would be nice to be able to decode into a map[string]point, but // we'll get a decoder error like: // `json: cannot unmarshal array into Go value of type influxdb.point` // if any of the values aren't objects. // To avoid that error, we decode by hand. dec := json.NewDecoder(resp.Body) // Parse beginning of object if t, err := dec.Token(); err != nil { return err } else if t != json.Delim('{') { return errors.New("document root must be a JSON object") } // Loop through rest of object for { // Nothing left in this object, we're done if !dec.More() { break } // Read in a string key. We don't do anything with the top-level keys, so it's discarded. _, err := dec.Token() if err != nil { return err } // Attempt to parse a whole object into a point. // It might be a non-object, like a string or array. // If we fail to decode it into a point, ignore it and move on. var p point if err := dec.Decode(&p); err != nil { continue } // If the object was a point, but was not fully initialized, ignore it and move on. if p.Name == "" || p.Tags == nil || p.Values == nil || len(p.Values) == 0 { continue } // Add a tag to indicate the source of the data. p.Tags["url"] = url acc.AddFields( p.Name, p.Values, p.Tags, ) } return nil }