Example #1
// Process pool data in Twemproxy stats
func (t *Twemproxy) processPool(
	acc inputs.Accumulator,
	tags map[string]string,
	data map[string]interface{},
) {
	serverTags := make(map[string]map[string]string)

	fields := make(map[string]interface{})
	for key, value := range data {
		switch key {
		case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof":
			if val, ok := value.(float64); ok {
				fields[key] = val
			}
		default:
			if data, ok := value.(map[string]interface{}); ok {
				if _, ok := serverTags[key]; !ok {
					serverTags[key] = copyTags(tags)
					serverTags[key]["server"] = key
				}
				t.processServer(acc, serverTags[key], data)
			}
		}
	}
	acc.AddFields("twemproxy_pool", fields, tags)
}
Example #2
// Process Twemproxy server stats
func (t *Twemproxy) processStat(
	acc inputs.Accumulator,
	tags map[string]string,
	data map[string]interface{},
) {
	if source, ok := data["source"]; ok {
		if val, ok := source.(string); ok {
			tags["source"] = val
		}
	}

	fields := make(map[string]interface{})
	metrics := []string{"total_connections", "curr_connections", "timestamp"}
	for _, m := range metrics {
		if value, ok := data[m]; ok {
			if val, ok := value.(float64); ok {
				fields[m] = val
			}
		}
	}
	acc.AddFields("twemproxy", fields, tags)

	for _, pool := range t.Pools {
		if poolStat, ok := data[pool]; ok {
			if data, ok := poolStat.(map[string]interface{}); ok {
				poolTags := copyTags(tags)
				poolTags["pool"] = pool
				t.processPool(acc, poolTags, data)
			}
		}
	}
}
Example #3
func gatherOverview(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
	overview := &OverviewResponse{}

	err := r.requestJSON("/api/overview", &overview)
	if err != nil {
		errChan <- err
		return
	}

	if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil {
		errChan <- fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue")
		return
	}

	tags := map[string]string{"url": r.URL}
	if r.Name != "" {
		tags["name"] = r.Name
	}
	fields := map[string]interface{}{
		"messages":           overview.QueueTotals.Messages,
		"messages_ready":     overview.QueueTotals.MessagesReady,
		"messages_unacked":   overview.QueueTotals.MessagesUnacknowledged,
		"channels":           overview.ObjectTotals.Channels,
		"connections":        overview.ObjectTotals.Connections,
		"consumers":          overview.ObjectTotals.Consumers,
		"exchanges":          overview.ObjectTotals.Exchanges,
		"queues":             overview.ObjectTotals.Queues,
		"messages_acked":     overview.MessageStats.Ack,
		"messages_delivered": overview.MessageStats.Deliver,
		"messages_published": overview.MessageStats.Publish,
	}
	acc.AddFields("rabbitmq_overview", fields, tags)

	errChan <- nil
}
Example #4
func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
	tags := getTags(bdev)
	metrics, err := filepath.Glob(bdev + "/stats_total/*")
	if err != nil {
		return err
	}
	if len(metrics) == 0 {
		return errors.New("can't read any stats file")
	}
	file, err := ioutil.ReadFile(bdev + "/dirty_data")
	if err != nil {
		return err
	}
	rawValue := strings.TrimSpace(string(file))
	value := prettyToBytes(rawValue)

	fields := make(map[string]interface{})
	fields["dirty_data"] = value

	for _, path := range metrics {
		key := filepath.Base(path)
		file, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		rawValue := strings.TrimSpace(string(file))
		if key == "bypassed" {
			value := prettyToBytes(rawValue)
			fields[key] = value
		} else {
			value, _ := strconv.ParseUint(rawValue, 10, 64)
			fields[key] = value
		}
	}
	acc.AddFields("bcache", fields, tags)
	return nil
}
Example #5
func readAerospikeStats(
	stats map[string]string,
	acc inputs.Accumulator,
	host string,
	namespace string,
) {
	fields := make(map[string]interface{})
	tags := map[string]string{
		"aerospike_host": host,
		"namespace":      "_service",
	}

	if namespace != "" {
		tags["namespace"] = namespace
	}
	for key, value := range stats {
		// We are going to ignore all string based keys
		val, err := strconv.ParseInt(value, 10, 64)
		if err == nil {
			if strings.Contains(key, "-") {
				key = strings.Replace(key, "-", "_", -1)
			}
			fields[key] = val
		}
	}
	acc.AddFields("aerospike", fields, tags)
}
Example #6
func gatherPoolStats(pool poolInfo, acc inputs.Accumulator) error {
	lines, err := internal.ReadLines(pool.ioFilename)
	if err != nil {
		return err
	}

	if len(lines) != 3 {
		return fmt.Errorf("unexpected number of lines in %s: %d", pool.ioFilename, len(lines))
	}

	keys := strings.Fields(lines[1])
	values := strings.Fields(lines[2])

	keyCount := len(keys)

	if keyCount != len(values) {
		return fmt.Errorf("Key and value count don't match Keys:%v Values:%v", keys, values)
	}

	tag := map[string]string{"pool": pool.name}
	fields := make(map[string]interface{})
	for i := 0; i < keyCount; i++ {
		value, err := strconv.ParseInt(values[i], 10, 64)
		if err != nil {
			return err
		}
		fields[keys[i]] = value
	}
	acc.AddFields("zfs_pool", fields, tag)

	return nil
}
Example #7
func (d *MongodbData) flush(acc inputs.Accumulator) {
	acc.AddFields(
		"mongodb",
		d.Fields,
		d.Tags,
		d.StatLine.Time,
	)
	d.Fields = make(map[string]interface{})
}
Example #8
func (k *Kafka) Gather(acc inputs.Accumulator) error {
	k.Lock()
	defer k.Unlock()
	npoints := len(k.pointChan)
	for i := 0; i < npoints; i++ {
		point := <-k.pointChan
		acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
	}
	return nil
}
Example #9
// Import HTTP stat data into Telegraf system
func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) {
	stats := make(poolStat)
	var currentPool string

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		statLine := scanner.Text()
		keyvalue := strings.Split(statLine, ":")

		if len(keyvalue) < 2 {
			continue
		}
		fieldName := strings.Trim(keyvalue[0], " ")
		// We start to gather data for a new pool here
		if fieldName == PF_POOL {
			currentPool = strings.Trim(keyvalue[1], " ")
			stats[currentPool] = make(metric)
			continue
		}

		// Start to parse metric for current pool
		switch fieldName {
		case PF_ACCEPTED_CONN,
			PF_LISTEN_QUEUE,
			PF_MAX_LISTEN_QUEUE,
			PF_LISTEN_QUEUE_LEN,
			PF_IDLE_PROCESSES,
			PF_ACTIVE_PROCESSES,
			PF_TOTAL_PROCESSES,
			PF_MAX_ACTIVE_PROCESSES,
			PF_MAX_CHILDREN_REACHED,
			PF_SLOW_REQUESTS:
			fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64)
			if err == nil {
				stats[currentPool][fieldName] = fieldValue
			}
		}
	}

	// Finally, we push the pool metric
	for pool := range stats {
		tags := map[string]string{
			"url":  host,
			"pool": pool,
		}
		fields := make(map[string]interface{})
		for k, v := range stats[pool] {
			fields[strings.Replace(k, " ", "_", -1)] = v
		}
		acc.AddFields("phpfpm", fields, tags)
	}

	return stats, nil
}
Example #10
func (e *Engine) AddEngineStats(
	keys []string,
	acc inputs.Accumulator,
	tags map[string]string,
) {
	engine := reflect.ValueOf(e).Elem()
	fields := make(map[string]interface{})
	for _, key := range keys {
		fields[key] = engine.FieldByName(engineStats[key]).Interface()
	}
	acc.AddFields("rethinkdb_engine", fields, tags)
}
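Note: the FieldByName lookup above assumes a package-level engineStats map from metric keys to exported Engine field names. The following is a standalone sketch of that reflection pattern using hypothetical stand-in types (engine, statFields), not code from the plugin.

package main

import (
	"fmt"
	"reflect"
)

// Hypothetical stand-ins for the plugin's Engine struct and engineStats map.
type engine struct {
	ClientConns int64
}

var statFields = map[string]string{"client_connections": "ClientConns"}

func main() {
	e := &engine{ClientConns: 7}
	v := reflect.ValueOf(e).Elem()
	for key, fieldName := range statFields {
		// Resolve the exported struct field by name and read its value,
		// the same way AddEngineStats fills its fields map.
		fmt.Println(key, v.FieldByName(fieldName).Interface()) // client_connections 7
	}
}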
Example #11
func (p *Ping) Gather(acc inputs.Accumulator) error {

	var wg sync.WaitGroup
	errorChannel := make(chan error, len(p.Urls)*2)

	// Spin off a go routine for each url to ping
	for _, url := range p.Urls {
		wg.Add(1)
		go func(url string, acc inputs.Accumulator) {
			defer wg.Done()
			args := p.args(url)
			out, err := p.pingHost(args...)
			if err != nil {
				// Combine go err + stderr output
				errorChannel <- errors.New(
					strings.TrimSpace(out) + ", " + err.Error())
			}
			tags := map[string]string{"url": url}
			trans, rec, avg, err := processPingOutput(out)
			if err != nil {
				// fatal error
				errorChannel <- err
				return
			}
			// Calculate packet loss percentage
			loss := float64(trans-rec) / float64(trans) * 100.0
			fields := map[string]interface{}{
				"packets_transmitted": trans,
				"packets_received":    rec,
				"percent_packet_loss": loss,
				"average_response_ms": avg,
			}
			acc.AddFields("ping", fields, tags)
		}(url, acc)
	}

	wg.Wait()
	close(errorChannel)

	// Get all errors and return them as one giant error
	errorStrings := []string{}
	for err := range errorChannel {
		errorStrings = append(errorStrings, err.Error())
	}

	if len(errorStrings) == 0 {
		return nil
	}
	return errors.New(strings.Join(errorStrings, "\n"))
}
Example #12
func (s *Storage) AddStats(acc inputs.Accumulator, tags map[string]string) {
	fields := map[string]interface{}{
		"cache_bytes_in_use":            s.Cache.BytesInUse,
		"disk_read_bytes_per_sec":       s.Disk.ReadBytesPerSec,
		"disk_read_bytes_total":         s.Disk.ReadBytesTotal,
		"disk_written_bytes_per_sec":    s.Disk.WriteBytesPerSec,
		"disk_written_bytes_total":      s.Disk.WriteBytesTotal,
		"disk_usage_data_bytes":         s.Disk.SpaceUsage.Data,
		"disk_usage_garbage_bytes":      s.Disk.SpaceUsage.Garbage,
		"disk_usage_metadata_bytes":     s.Disk.SpaceUsage.Metadata,
		"disk_usage_preallocated_bytes": s.Disk.SpaceUsage.Prealloc,
	}
	acc.AddFields("rethinkdb", fields, tags)
}
Example #13
func gatherQueues(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
	// Gather information about queues
	queues := make([]Queue, 0)
	err := r.requestJSON("/api/queues", &queues)
	if err != nil {
		errChan <- err
		return
	}

	for _, queue := range queues {
		if !r.shouldGatherQueue(queue) {
			continue
		}
		tags := map[string]string{
			"url":         r.URL,
			"queue":       queue.Name,
			"vhost":       queue.Vhost,
			"node":        queue.Node,
			"durable":     strconv.FormatBool(queue.Durable),
			"auto_delete": strconv.FormatBool(queue.AutoDelete),
		}

		acc.AddFields(
			"rabbitmq_queue",
			map[string]interface{}{
				// common information
				"consumers":            queue.Consumers,
				"consumer_utilisation": queue.ConsumerUtilisation,
				"memory":               queue.Memory,
				// messages information
				"messages":                  queue.Messages,
				"messages_ready":            queue.MessagesReady,
				"messages_unack":            queue.MessagesUnacknowledged,
				"messages_ack":              queue.MessageStats.Ack,
				"messages_ack_rate":         queue.MessageStats.AckDetails.Rate,
				"messages_deliver":          queue.MessageStats.Deliver,
				"messages_deliver_rate":     queue.MessageStats.DeliverDetails.Rate,
				"messages_deliver_get":      queue.MessageStats.DeliverGet,
				"messages_deliver_get_rate": queue.MessageStats.DeliverGetDetails.Rate,
				"messages_publish":          queue.MessageStats.Publish,
				"messages_publish_rate":     queue.MessageStats.PublishDetails.Rate,
				"messages_redeliver":        queue.MessageStats.Redeliver,
				"messages_redeliver_rate":   queue.MessageStats.RedeliverDetails.Rate,
			},
			tags,
		)
	}

	errChan <- nil
}
Example #14
// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc inputs.Accumulator) error {
	l.allFields = make(map[string]map[string]interface{})

	if len(l.Ost_procfiles) == 0 {
		// read/write bytes are in obdfilter/<ost_name>/stats
		err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats",
			wanted_ost_fields, acc)
		if err != nil {
			return err
		}
		// cache counters are in osd-ldiskfs/<ost_name>/stats
		err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats",
			wanted_ost_fields, acc)
		if err != nil {
			return err
		}
	}

	if len(l.Mds_procfiles) == 0 {
		// Metadata server stats
		err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats",
			wanted_mds_fields, acc)
		if err != nil {
			return err
		}
	}

	for _, procfile := range l.Ost_procfiles {
		err := l.GetLustreProcStats(procfile, wanted_ost_fields, acc)
		if err != nil {
			return err
		}
	}
	for _, procfile := range l.Mds_procfiles {
		err := l.GetLustreProcStats(procfile, wanted_mds_fields, acc)
		if err != nil {
			return err
		}
	}

	for name, fields := range l.allFields {
		tags := map[string]string{
			"name": name,
		}
		acc.AddFields("lustre2", fields, tags)
	}

	return nil
}
Example #15
func (s *Trig) Gather(acc inputs.Accumulator) error {
	sinner := math.Sin((s.x*math.Pi)/5.0) * s.Amplitude
	cosinner := math.Cos((s.x*math.Pi)/5.0) * s.Amplitude

	fields := make(map[string]interface{})
	fields["sine"] = sinner
	fields["cosine"] = cosinner

	tags := make(map[string]string)

	s.x += 1.0
	acc.AddFields("trig", fields, tags)

	return nil
}
Example #16
func (z *Zookeeper) gatherServer(address string, acc inputs.Accumulator) error {
	_, _, err := net.SplitHostPort(address)
	if err != nil {
		address = address + ":2181"
	}

	c, err := net.DialTimeout("tcp", address, defaultTimeout)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return err
	}
	defer c.Close()

	fmt.Fprintf(c, "%s\n", "mntr")
	rdr := bufio.NewReader(c)
	scanner := bufio.NewScanner(rdr)

	service := strings.Split(address, ":")
	if len(service) != 2 {
		return fmt.Errorf("Invalid service address: %s", address)
	}
	tags := map[string]string{"server": service[0], "port": service[1]}

	fields := make(map[string]interface{})
	// each mntr line looks like "zk_<name><whitespace><value>"
	re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
	for scanner.Scan() {
		line := scanner.Text()

		parts := re.FindStringSubmatch(line)

		if len(parts) != 3 {
			return fmt.Errorf("unexpected line in mntr response: %q", line)
		}

		measurement := strings.TrimPrefix(parts[1], "zk_")
		sValue := string(parts[2])

		iVal, err := strconv.ParseInt(sValue, 10, 64)
		if err == nil {
			fields[measurement] = iVal
		} else {
			fields[measurement] = sValue
		}
	}
	acc.AddFields("zookeeper", fields, tags)

	return nil
}
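For reference, the regex above splits each "mntr" response line into a metric name and value. A minimal standalone sketch (the sample line is an assumed typical mntr line, not taken from the source):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
	// Assumed sample of one mntr response line.
	line := "zk_avg_latency\t0"
	parts := re.FindStringSubmatch(line)
	fmt.Println(parts[1], parts[2]) // avg_latency 0
}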
Example #17
// Process backend server(redis/memcached) stats
func (t *Twemproxy) processServer(
	acc inputs.Accumulator,
	tags map[string]string,
	data map[string]interface{},
) {
	fields := make(map[string]interface{})
	for key, value := range data {
		switch key {
		default:
			if val, ok := value.(float64); ok {
				fields[key] = val
			}
		}
	}
	acc.AddFields("twemproxy_pool_server", fields, tags)
}
Example #18
func (j *Jolokia) Gather(acc inputs.Accumulator) error {
	context := j.Context //"/jolokia/read"
	servers := j.Servers
	metrics := j.Metrics
	tags := make(map[string]string)

	for _, server := range servers {
		tags["server"] = server.Name
		tags["port"] = server.Port
		tags["host"] = server.Host
		fields := make(map[string]interface{})
		for _, metric := range metrics {

			measurement := metric.Name
			jmxPath := metric.Jmx

			// Prepare URL
			requestUrl, err := url.Parse("http://" + server.Host + ":" +
				server.Port + context + jmxPath)
			if err != nil {
				return err
			}
			if server.Username != "" || server.Password != "" {
				requestUrl.User = url.UserPassword(server.Username, server.Password)
			}

			out, _ := j.getAttr(requestUrl)

			if values, ok := out["value"]; ok {
				switch t := values.(type) {
				case map[string]interface{}:
					for k, v := range t {
						fields[measurement+"_"+k] = v
					}
				case interface{}:
					fields[measurement] = t
				}
			} else {
				fmt.Printf("Missing key 'value' in '%s' output response\n",
					requestUrl.String())
			}
		}
		acc.AddFields("jolokia", fields, tags)
	}

	return nil
}
Example #19
func (z *Zfs) Gather(acc inputs.Accumulator) error {
	kstatMetrics := z.KstatMetrics
	if len(kstatMetrics) == 0 {
		kstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"}
	}

	kstatPath := z.KstatPath
	if len(kstatPath) == 0 {
		kstatPath = "/proc/spl/kstat/zfs"
	}

	pools := getPools(kstatPath)
	tags := getTags(pools)

	if z.PoolMetrics {
		for _, pool := range pools {
			err := gatherPoolStats(pool, acc)
			if err != nil {
				return err
			}
		}
	}

	fields := make(map[string]interface{})
	for _, metric := range kstatMetrics {
		lines, err := internal.ReadLines(kstatPath + "/" + metric)
		if err != nil {
			return err
		}
		for i, line := range lines {
			if i == 0 || i == 1 {
				continue
			}
			if len(line) < 1 {
				continue
			}
			rawData := strings.Split(line, " ")
			key := metric + "_" + rawData[0]
			rawValue := rawData[len(rawData)-1]
			value, _ := strconv.ParseInt(rawValue, 10, 64)
			fields[key] = value
		}
	}
	acc.AddFields("zfs", fields, tags)
	return nil
}
Example #20
func (s *SwapStats) Gather(acc inputs.Accumulator) error {
	swap, err := s.ps.SwapStat()
	if err != nil {
		return fmt.Errorf("error getting swap memory info: %s", err)
	}

	fields := map[string]interface{}{
		"total":        swap.Total,
		"used":         swap.Used,
		"free":         swap.Free,
		"used_percent": swap.UsedPercent,
		"in":           swap.Sin,
		"out":          swap.Sout,
	}
	acc.AddFields("swap", fields, nil)

	return nil
}
Example #21
func (s *DiskIOStats) Gather(acc inputs.Accumulator) error {
	diskio, err := s.ps.DiskIO()
	if err != nil {
		return fmt.Errorf("error getting disk io info: %s", err)
	}

	var restrictDevices bool
	devices := make(map[string]bool)
	if len(s.Devices) != 0 {
		restrictDevices = true
		for _, dev := range s.Devices {
			devices[dev] = true
		}
	}

	for _, io := range diskio {
		_, member := devices[io.Name]
		if restrictDevices && !member {
			continue
		}
		tags := map[string]string{}
		tags["name"] = io.Name
		if !s.SkipSerialNumber {
			if len(io.SerialNumber) != 0 {
				tags["serial"] = io.SerialNumber
			} else {
				tags["serial"] = "unknown"
			}
		}

		fields := map[string]interface{}{
			"reads":       io.ReadCount,
			"writes":      io.WriteCount,
			"read_bytes":  io.ReadBytes,
			"write_bytes": io.WriteBytes,
			"read_time":   io.ReadTime,
			"write_time":  io.WriteTime,
			"io_time":     io.IoTime,
		}
		acc.AddFields("diskio", fields, tags)
	}

	return nil
}
Example #22
func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
	nodeStats := &struct {
		ClusterName string           `json:"cluster_name"`
		Nodes       map[string]*node `json:"nodes"`
	}{}
	if err := e.gatherData(url, nodeStats); err != nil {
		return err
	}
	for id, n := range nodeStats.Nodes {
		tags := map[string]string{
			"node_id":      id,
			"node_host":    n.Host,
			"node_name":    n.Name,
			"cluster_name": nodeStats.ClusterName,
		}

		for k, v := range n.Attributes {
			tags["node_attribute_"+k] = v
		}

		stats := map[string]interface{}{
			"indices":     n.Indices,
			"os":          n.OS,
			"process":     n.Process,
			"jvm":         n.JVM,
			"thread_pool": n.ThreadPool,
			"fs":          n.FS,
			"transport":   n.Transport,
			"http":        n.HTTP,
			"breakers":    n.Breakers,
		}

		now := time.Now()
		for p, s := range stats {
			f := internal.JSONFlattener{}
			err := f.FlattenJSON("", s)
			if err != nil {
				return err
			}
			acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
		}
	}
	return nil
}
Example #23
func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error {
	clusterStats := &clusterHealth{}
	if err := e.gatherData(url, clusterStats); err != nil {
		return err
	}
	measurementTime := time.Now()
	clusterFields := map[string]interface{}{
		"status":                clusterStats.Status,
		"timed_out":             clusterStats.TimedOut,
		"number_of_nodes":       clusterStats.NumberOfNodes,
		"number_of_data_nodes":  clusterStats.NumberOfDataNodes,
		"active_primary_shards": clusterStats.ActivePrimaryShards,
		"active_shards":         clusterStats.ActiveShards,
		"relocating_shards":     clusterStats.RelocatingShards,
		"initializing_shards":   clusterStats.InitializingShards,
		"unassigned_shards":     clusterStats.UnassignedShards,
	}
	acc.AddFields(
		"elasticsearch_cluster_health",
		clusterFields,
		map[string]string{"name": clusterStats.ClusterName},
		measurementTime,
	)

	for name, health := range clusterStats.Indices {
		indexFields := map[string]interface{}{
			"status":                health.Status,
			"number_of_shards":      health.NumberOfShards,
			"number_of_replicas":    health.NumberOfReplicas,
			"active_primary_shards": health.ActivePrimaryShards,
			"active_shards":         health.ActiveShards,
			"relocating_shards":     health.RelocatingShards,
			"initializing_shards":   health.InitializingShards,
			"unassigned_shards":     health.UnassignedShards,
		}
		acc.AddFields(
			"elasticsearch_indices",
			indexFields,
			map[string]string{"index": name},
			measurementTime,
		)
	}
	return nil
}
Example #24
func structPrinter(s *State, acc inputs.Accumulator, tags map[string]string) {
	e := reflect.ValueOf(s).Elem()

	fields := make(map[string]interface{})
	for tLevelFNum := 0; tLevelFNum < e.NumField(); tLevelFNum++ {
		name := e.Type().Field(tLevelFNum).Name
		nameNumField := e.FieldByName(name).NumField()

		for sLevelFNum := 0; sLevelFNum < nameNumField; sLevelFNum++ {
			sName := e.FieldByName(name).Type().Field(sLevelFNum).Name
			sValue := e.FieldByName(name).Field(sLevelFNum).Interface()

			lname := strings.ToLower(name)
			lsName := strings.ToLower(sName)
			fields[fmt.Sprintf("%s_%s", lname, lsName)] = sValue
		}
	}
	acc.AddFields("puppetagent", fields, tags)
}
Example #25
func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error {
	var columnVars []interface{}
	var dbname bytes.Buffer

	// this is where we'll store the column name with its *interface{}
	columnMap := make(map[string]*interface{})

	for _, column := range p.OrderedColumns {
		columnMap[column] = new(interface{})
	}

	// populate the array of interface{} with the pointers in the right order
	for i := 0; i < len(columnMap); i++ {
		columnVars = append(columnVars, columnMap[p.OrderedColumns[i]])
	}

	// deconstruct array of variables and send to Scan
	err := row.Scan(columnVars...)

	if err != nil {
		return err
	}

	// extract the database name from the column map
	dbnameChars := (*columnMap["datname"]).([]uint8)
	dbname.Write(dbnameChars)

	tags := map[string]string{"server": p.Address, "db": dbname.String()}

	fields := make(map[string]interface{})
	for col, val := range columnMap {
		_, ignore := ignoredColumns[col]
		if !ignore {
			fields[col] = *val
		}
	}
	acc.AddFields("postgresql", fields, tags)

	return nil
}
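The column-map pattern above (one *interface{} per ordered column, handed to Scan) can be hard to follow. Below is a standalone sketch with a hypothetical fakeRow scanner and illustrative column names, showing how the pointers end up holding the scanned values; it is not the plugin's actual scanner.

package main

import "fmt"

// fakeRow is a hypothetical stand-in for the scanner interface used above.
type fakeRow struct{ values []interface{} }

func (r fakeRow) Scan(dest ...interface{}) error {
	for i, d := range dest {
		*(d.(*interface{})) = r.values[i]
	}
	return nil
}

func main() {
	columns := []string{"datname", "numbackends"} // illustrative ordered columns
	columnMap := make(map[string]*interface{})
	var columnVars []interface{}
	for _, c := range columns {
		columnMap[c] = new(interface{})
		columnVars = append(columnVars, columnMap[c])
	}
	row := fakeRow{values: []interface{}{[]uint8("mydb"), int64(3)}}
	if err := row.Scan(columnVars...); err != nil {
		panic(err)
	}
	fmt.Println(string((*columnMap["datname"]).([]uint8)), *columnMap["numbackends"]) // mydb 3
}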
Example #26
func (s *MemStats) Gather(acc inputs.Accumulator) error {
	vm, err := s.ps.VMStat()
	if err != nil {
		return fmt.Errorf("error getting virtual memory info: %s", err)
	}

	fields := map[string]interface{}{
		"total":             vm.Total,
		"available":         vm.Available,
		"used":              vm.Used,
		"free":              vm.Free,
		"cached":            vm.Cached,
		"buffered":          vm.Buffers,
		"used_percent":      100 * float64(vm.Used) / float64(vm.Total),
		"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
	}
	acc.AddFields("mem", fields, nil)

	return nil
}
Example #27
// Parse the special Keyspace line at end of redis stats
// This is a special line that looks something like:
//     db0:keys=2,expires=0,avg_ttl=0
// And there is one for each db on the redis instance
func gatherKeyspaceLine(
	name string,
	line string,
	acc inputs.Accumulator,
	tags map[string]string,
) {
	if strings.Contains(line, "keys=") {
		fields := make(map[string]interface{})
		tags["database"] = name
		dbparts := strings.Split(line, ",")
		for _, dbp := range dbparts {
			kv := strings.Split(dbp, "=")
			ival, err := strconv.ParseUint(kv[1], 10, 64)
			if err == nil {
				fields[kv[0]] = ival
			}
		}
		acc.AddFields("redis_keyspace", fields, tags)
	}
}
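For reference, a standalone sketch of how the value portion of a keyspace line breaks down under the parsing above (the input string is illustrative, with "db0" assumed to have been captured separately as the database tag):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "keys=2,expires=0,avg_ttl=0" // value portion of "db0:keys=2,expires=0,avg_ttl=0"
	fields := make(map[string]interface{})
	for _, dbp := range strings.Split(line, ",") {
		kv := strings.Split(dbp, "=")
		if ival, err := strconv.ParseUint(kv[1], 10, 64); err == nil {
			fields[kv[0]] = ival
		}
	}
	fmt.Println(fields) // map[avg_ttl:0 expires:0 keys:2]
}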
Example #28
// Gathers data from a particular server
// Parameters:
//     acc      : The telegraf Accumulator to use
//     serverURL: endpoint to send request to
//
// Returns:
//     error: Any error that may have occurred
func (h *HttpJson) gatherServer(
	acc inputs.Accumulator,
	serverURL string,
) error {
	resp, err := h.sendRequest(serverURL)
	if err != nil {
		return err
	}

	var jsonOut map[string]interface{}
	if err = json.Unmarshal([]byte(resp), &jsonOut); err != nil {
		return errors.New("Error decoding JSON response")
	}

	tags := map[string]string{
		"server": serverURL,
	}

	for _, tag := range h.TagKeys {
		switch v := jsonOut[tag].(type) {
		case string:
			tags[tag] = v
		}
		delete(jsonOut, tag)
	}

	f := internal.JSONFlattener{}
	err = f.FlattenJSON("", jsonOut)
	if err != nil {
		return err
	}

	var msrmnt_name string
	if h.Name == "" {
		msrmnt_name = "httpjson"
	} else {
		msrmnt_name = "httpjson_" + h.Name
	}
	acc.AddFields(msrmnt_name, f.Fields, tags)
	return nil
}
Example #29
func (s *NetStats) Gather(acc inputs.Accumulator) error {
	netconns, err := s.ps.NetConnections()
	if err != nil {
		return fmt.Errorf("error getting net connections info: %s", err)
	}
	counts := make(map[string]int)
	counts["UDP"] = 0

	// TODO: add family to tags or else
	tags := map[string]string{}
	for _, netcon := range netconns {
		if netcon.Type == syscall.SOCK_DGRAM {
			counts["UDP"] += 1
			continue // UDP has no status
		}
		counts[netcon.Status]++
	}

	fields := map[string]interface{}{
		"tcp_established": counts["ESTABLISHED"],
		"tcp_syn_sent":    counts["SYN_SENT"],
		"tcp_syn_recv":    counts["SYN_RECV"],
		"tcp_fin_wait1":   counts["FIN_WAIT1"],
		"tcp_fin_wait2":   counts["FIN_WAIT2"],
		"tcp_time_wait":   counts["TIME_WAIT"],
		"tcp_close":       counts["CLOSE"],
		"tcp_close_wait":  counts["CLOSE_WAIT"],
		"tcp_last_ack":    counts["LAST_ACK"],
		"tcp_listen":      counts["LISTEN"],
		"tcp_closing":     counts["CLOSING"],
		"tcp_none":        counts["NONE"],
		"udp_socket":      counts["UDP"],
	}
	acc.AddFields("netstat", fields, tags)

	return nil
}
Example #30
func (_ *SystemStats) Gather(acc inputs.Accumulator) error {
	loadavg, err := load.LoadAvg()
	if err != nil {
		return err
	}

	hostinfo, err := host.HostInfo()
	if err != nil {
		return err
	}

	fields := map[string]interface{}{
		"load1":         loadavg.Load1,
		"load5":         loadavg.Load5,
		"load15":        loadavg.Load15,
		"uptime":        hostinfo.Uptime,
		"uptime_format": format_uptime(hostinfo.Uptime),
	}
	acc.AddFields("system", fields, nil)

	return nil
}