Example no. 1
func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) {
	for _, w := range s.Workers {
		fields := map[string]interface{}{
			"requests":       w.Requests,
			"accepting":      w.Accepting,
			"delta_request":  w.DeltaRequests,
			"exceptions":     w.Exceptions,
			"harakiri_count": w.HarakiriCount,
			"signals":        w.Signals,
			"signal_queue":   w.SignalQueue,
			"status":         w.Status,
			"rss":            w.Rss,
			"vsz":            w.Vsz,
			"running_time":   w.RunningTime,
			"last_spawn":     w.LastSpawn,
			"respawn_count":  w.RespawnCount,
			"tx":             w.Tx,
			"avg_rt":         w.AvgRt,
		}
		tags := map[string]string{
			"worker_id": strconv.Itoa(w.WorkerId),
			"url":       s.Url,
			"pid":       strconv.Itoa(w.Pid),
		}

		acc.AddFields("uwsgi_workers", fields, tags)
	}
}
Example no. 2
func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, topTags map[string]string, walk bool) error {
	rt, err := t.Build(gs, walk)
	if err != nil {
		return err
	}

	for _, tr := range rt.Rows {
		if !walk {
			// top-level table. Add tags to topTags.
			for k, v := range tr.Tags {
				topTags[k] = v
			}
		} else {
			// real table. Inherit any specified tags.
			for _, k := range t.InheritTags {
				if v, ok := topTags[k]; ok {
					tr.Tags[k] = v
				}
			}
		}
		if _, ok := tr.Tags["agent_host"]; !ok {
			tr.Tags["agent_host"] = gs.Host()
		}
		acc.AddFields(rt.Name, tr.Fields, tr.Tags, rt.Time)
	}

	return nil
}
Example no. 3
func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
	tags := getTags(bdev)
	metrics, err := filepath.Glob(bdev + "/stats_total/*")
	if err != nil {
		return err
	}
	if len(metrics) == 0 {
		return errors.New("can't read any stats file")
	}
	file, err := ioutil.ReadFile(bdev + "/dirty_data")
	if err != nil {
		return err
	}
	rawValue := strings.TrimSpace(string(file))
	value := prettyToBytes(rawValue)

	fields := make(map[string]interface{})
	fields["dirty_data"] = value

	for _, path := range metrics {
		key := filepath.Base(path)
		file, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		rawValue := strings.TrimSpace(string(file))
		if key == "bypassed" {
			value := prettyToBytes(rawValue)
			fields[key] = value
		} else {
			value, _ := strconv.ParseUint(rawValue, 10, 64)
			fields[key] = value
		}
	}
	acc.AddFields("bcache", fields, tags)
	return nil
}
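The function above relies on a prettyToBytes helper that is not shown. Below is a minimal, hypothetical sketch of such a conversion, assuming bcache's human-readable sysfs values use single-letter binary suffixes; the suffix table and the name prettyToBytesSketch are assumptions, not the plugin's actual code.

package main

import (
	"fmt"
	"strconv"
)

// prettyToBytesSketch is a hypothetical stand-in for the prettyToBytes helper
// used above: it converts values such as "1.3M" or "512" into bytes, assuming
// single-letter binary suffixes.
func prettyToBytesSketch(v string) uint64 {
	multipliers := map[string]uint64{
		"k": 1 << 10,
		"M": 1 << 20,
		"G": 1 << 30,
		"T": 1 << 40,
	}
	factor := uint64(1)
	if len(v) > 0 {
		if m, ok := multipliers[v[len(v)-1:]]; ok {
			factor = m
			v = v[:len(v)-1]
		}
	}
	f, err := strconv.ParseFloat(v, 64)
	if err != nil {
		return 0
	}
	return uint64(f * float64(factor))
}

func main() {
	fmt.Println(prettyToBytesSketch("1.3M")) // 1363148
}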
Example no. 4
// gatherServer gathers data from a particular server.
// Parameters:
//     acc      : the telegraf Accumulator to use
//     serverURL: endpoint to send the request to
//
// Returns:
//     error: any error that may have occurred
func (h *GrayLog) gatherServer(
	acc telegraf.Accumulator,
	serverURL string,
) error {
	resp, _, err := h.sendRequest(serverURL)
	if err != nil {
		return err
	}
	requestURL, err := url.Parse(serverURL)
	if err != nil {
		return err
	}
	host, port, _ := net.SplitHostPort(requestURL.Host)
	var dat ResponseMetrics
	if err := json.Unmarshal([]byte(resp), &dat); err != nil {
		return err
	}
	for _, m_item := range dat.Metrics {
		fields := make(map[string]interface{})
		tags := map[string]string{
			"server": host,
			"port":   port,
			"name":   m_item.Name,
			"type":   m_item.Type,
		}
		h.flatten(m_item.Fields, fields, "")
		acc.AddFields(m_item.FullName, fields, tags)
	}
	return nil
}
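The call h.flatten(m_item.Fields, fields, "") flattens nested metric fields into dot-joined keys before they are added to the accumulator. The standalone sketch below shows one plausible way such a flattener could work; the real method is defined on GrayLog and may differ.

package main

import "fmt"

// flattenSketch is a hypothetical version of the flatten helper used above:
// it walks a nested map and writes leaf values into out under dot-joined keys.
func flattenSketch(in map[string]interface{}, out map[string]interface{}, prefix string) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		if nested, ok := v.(map[string]interface{}); ok {
			flattenSketch(nested, out, key)
			continue
		}
		out[key] = v
	}
}

func main() {
	out := make(map[string]interface{})
	flattenSketch(map[string]interface{}{
		"time": map[string]interface{}{"min": 1.0, "max": 5.0},
		"rate": map[string]interface{}{"total": 42.0},
	}, out, "")
	fmt.Println(out) // map[rate.total:42 time.max:5 time.min:1]
}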
Example no. 5
func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
	collectDate := time.Now()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", acceptHeader)
	var token []byte
	var resp *http.Response

	tlsCfg, err := internal.GetTLSConfig(
		p.SSLCert, p.SSLKey, p.SSLCA, p.InsecureSkipVerify)
	if err != nil {
		return err
	}

	var rt http.RoundTripper = &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   5 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout:   5 * time.Second,
		TLSClientConfig:       tlsCfg,
		ResponseHeaderTimeout: 3 * time.Second,
		DisableKeepAlives:     true,
	}

	if p.BearerToken != "" {
		token, err = ioutil.ReadFile(p.BearerToken)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+string(token))
	}

	resp, err = rt.RoundTrip(req)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("error reading body: %s", err)
	}

	metrics, err := Parse(body, resp.Header)
	if err != nil {
		return fmt.Errorf("error reading metrics for %s: %s",
			url, err)
	}
	// Add (or not) collected metrics
	for _, metric := range metrics {
		tags := metric.Tags()
		tags["url"] = url
		acc.AddFields(metric.Name(), metric.Fields(), tags, collectDate)
	}

	return nil
}
Example no. 6
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
	lines := strings.Split(data, "\n")
	if len(lines) < 3 {
		return nil
	}
	mchain := chainNameRe.FindStringSubmatch(lines[0])
	if mchain == nil {
		return errParse
	}
	if !fieldsHeaderRe.MatchString(lines[1]) {
		return errParse
	}
	for _, line := range lines[2:] {
		mv := valuesRe.FindAllStringSubmatch(line, -1)
		// best effort: if the line does not match or the rule is not commented, skip it
		if len(mv) == 0 || len(mv[0]) != 5 || mv[0][4] == "" {
			continue
		}
		tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": mv[0][4]}
		fields := make(map[string]interface{})
		// since a parse error is already caught by the regexp,
		// we never hit the error case here => no error check (but a test should still cover it)
		fields["pkts"], _ = strconv.ParseUint(mv[0][1], 10, 64)
		fields["bytes"], _ = strconv.ParseUint(mv[0][2], 10, 64)
		acc.AddFields(measurement, fields, tags)
	}
	return nil
}
Example no. 7
func (z *Zookeeper) gatherServer(address string, acc telegraf.Accumulator) error {
	var zookeeper_state string
	_, _, err := net.SplitHostPort(address)
	if err != nil {
		address = address + ":2181"
	}

	c, err := net.DialTimeout("tcp", address, defaultTimeout)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return err
	}
	defer c.Close()

	// Set a deadline so the read cannot block indefinitely
	c.SetDeadline(time.Now().Add(defaultTimeout))

	fmt.Fprintf(c, "%s\n", "mntr")
	rdr := bufio.NewReader(c)
	scanner := bufio.NewScanner(rdr)

	service := strings.Split(address, ":")
	if len(service) != 2 {
		return fmt.Errorf("Invalid service address: %s", address)
	}

	re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)

	fields := make(map[string]interface{})
	for scanner.Scan() {
		line := scanner.Text()

		parts := re.FindStringSubmatch(line)

		if len(parts) != 3 {
			return fmt.Errorf("unexpected line in mntr response: %q", line)
		}

		measurement := strings.TrimPrefix(parts[1], "zk_")
		if measurement == "server_state" {
			zookeeper_state = parts[2]
		} else {
			sValue := string(parts[2])

			iVal, err := strconv.ParseInt(sValue, 10, 64)
			if err == nil {
				fields[measurement] = iVal
			} else {
				fields[measurement] = sValue
			}
		}
	}
	tags := map[string]string{
		"server": service[0],
		"port":   service[1],
		"state":  zookeeper_state,
	}
	acc.AddFields("zookeeper", fields, tags)

	return nil
}
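Each line of the mntr response has the form "zk_<name><whitespace><value>". The standalone snippet below walks a few illustrative lines through the same regexp to show how the measurement name and value are extracted; the sample lines are fabricated.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
	// Illustrative mntr output; a real server emits many more lines.
	sample := "zk_avg_latency\t0\nzk_server_state\tleader\nzk_znode_count\t4"
	for _, line := range strings.Split(sample, "\n") {
		parts := re.FindStringSubmatch(line)
		if len(parts) != 3 {
			continue
		}
		fmt.Printf("measurement=%s value=%s\n", parts[1], parts[2])
	}
	// measurement=avg_latency value=0
	// measurement=server_state value=leader
	// measurement=znode_count value=4
}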
Example no. 8
func (s *MemStats) Gather(acc telegraf.Accumulator) error {
	vm, err := s.ps.VMStat()
	if err != nil {
		return fmt.Errorf("error getting virtual memory info: %s", err)
	}

	fields := map[string]interface{}{
		"total":             vm.Total,
		"available":         vm.Available,
		"used":              vm.Used,
		"free":              vm.Free,
		"cached":            vm.Cached,
		"buffered":          vm.Buffers,
		"active":            vm.Active,
		"inactive":          vm.Inactive,
		"used_percent":      100 * float64(vm.Used) / float64(vm.Total),
		"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
		"dirty":             vm.Dirty,
		"writeback":         vm.Writeback,
		"writebackTmp":      vm.WritebackTmp,
	}
	acc.AddFields("mem", fields, nil)

	return nil
}
Example no. 9
func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error {
	data, err := k.getProcVmstat()
	if err != nil {
		return err
	}

	fields := make(map[string]interface{})

	dataFields := bytes.Fields(data)
	for i, field := range dataFields {

		// dataFields is a slice of {"stat1_name", "stat1_value", "stat2_name",
		// "stat2_value", ...}.
		// We only want the even-numbered indexes, as those contain the stat names.
		if i%2 == 0 {
			// Convert the stat value into an integer.
			m, err := strconv.Atoi(string(dataFields[i+1]))
			if err != nil {
				return err
			}

			fields[string(field)] = int64(m)
		}
	}

	acc.AddFields("kernel_vmstat", fields, map[string]string{})
	return nil
}
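/proc/vmstat is a sequence of whitespace-separated "name value" pairs, which is why every even index of bytes.Fields holds a stat name and the next element its value. A small standalone demo on a fabricated excerpt:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

func main() {
	// Fabricated excerpt in the same "name value" layout as /proc/vmstat.
	data := []byte("nr_free_pages 78589\nnr_dirty 40\npgpgin 123456\n")

	fields := make(map[string]interface{})
	parts := bytes.Fields(data)
	for i := 0; i+1 < len(parts); i += 2 {
		v, err := strconv.ParseInt(string(parts[i+1]), 10, 64)
		if err != nil {
			continue
		}
		fields[string(parts[i])] = v
	}
	fmt.Println(fields) // map[nr_dirty:40 nr_free_pages:78589 pgpgin:123456]
}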
Example no. 10
// gatherBinaryLogs collects the size and count of all binary log files.
// The binlog metrics require binary logging to be enabled in the MySQL server configuration.
func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulator) error {
	// run query
	rows, err := db.Query(binaryLogsQuery)
	if err != nil {
		return err
	}
	defer rows.Close()

	// parse DSN and save host as a tag
	servtag := getDSNTag(serv)
	tags := map[string]string{"server": servtag}
	var (
		size     uint64 = 0
		count    uint64 = 0
		fileSize uint64
		fileName string
	)

	// iterate over rows and count the size and count of files
	for rows.Next() {
		if err := rows.Scan(&fileName, &fileSize); err != nil {
			return err
		}
		size += fileSize
		count++
	}
	fields := map[string]interface{}{
		"binary_size_bytes":  size,
		"binary_files_count": count,
	}
	acc.AddFields("mysql", fields, tags)
	return nil
}
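getDSNTag, which derives the server tag, is not shown here. The sketch below is a rough, hypothetical stand-in that assumes a Go-MySQL-driver style DSN of the form user:pass@tcp(host:port)/db; the real helper most likely delegates to the driver's DSN parser.

package main

import (
	"fmt"
	"strings"
)

// getDSNTagSketch is a hypothetical stand-in for getDSNTag: it pulls host:port
// out of a "user:pass@tcp(host:port)/db" style DSN and falls back to a default
// when nothing matches.
func getDSNTagSketch(dsn string) string {
	start := strings.Index(dsn, "(")
	end := strings.Index(dsn, ")")
	if start >= 0 && end > start+1 {
		return dsn[start+1 : end]
	}
	return "127.0.0.1:3306"
}

func main() {
	fmt.Println(getDSNTagSketch("telegraf:secret@tcp(db1.example.com:3306)/"))
	// db1.example.com:3306
}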
Example no. 11
func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
	sockets, err := findSockets(c)
	if err != nil {
		return fmt.Errorf("failed to find sockets at path '%s': %v", c.SocketDir, err)
	}

	for _, s := range sockets {
		dump, err := perfDump(c.CephBinary, s)
		if err != nil {
			log.Printf("E! error reading from socket '%s': %v", s.socket, err)
			continue
		}
		data, err := parseDump(dump)
		if err != nil {
			log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
			continue
		}
		for tag, metrics := range *data {
			acc.AddFields(measurement,
				map[string]interface{}(metrics),
				map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
		}
	}
	return nil
}
Example no. 12
func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic, channel string) {
	tags := map[string]string{
		"server_host":       host,
		"server_version":    version,
		"topic":             topic,
		"channel":           channel,
		"client_name":       c.Name,
		"client_id":         c.ID,
		"client_hostname":   c.Hostname,
		"client_version":    c.Version,
		"client_address":    c.RemoteAddress,
		"client_user_agent": c.UserAgent,
		"client_tls":        strconv.FormatBool(c.TLS),
		"client_snappy":     strconv.FormatBool(c.Snappy),
		"client_deflate":    strconv.FormatBool(c.Deflate),
	}

	fields := map[string]interface{}{
		"ready_count":    c.ReadyCount,
		"inflight_count": c.InFlightCount,
		"message_count":  c.MessageCount,
		"finish_count":   c.FinishCount,
		"requeue_count":  c.RequeueCount,
	}
	acc.AddFields("nsq_client", fields, tags)
}
Example no. 13
func (z *Zfs) Gather(acc telegraf.Accumulator) error {
	kstatMetrics := z.KstatMetrics
	if len(kstatMetrics) == 0 {
		kstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"}
	}

	tags := map[string]string{}
	poolNames, err := z.gatherPoolStats(acc)
	if err != nil {
		return err
	}
	tags["pools"] = poolNames

	fields := make(map[string]interface{})
	for _, metric := range kstatMetrics {
		stdout, err := z.sysctl(metric)
		if err != nil {
			return err
		}
		for _, line := range stdout {
			rawData := strings.Split(line, ": ")
			key := metric + "_" + strings.Split(rawData[0], ".")[4]
			value, _ := strconv.ParseInt(rawData[1], 10, 64)
			fields[key] = value
		}
	}
	acc.AddFields("zfs", fields, tags)
	return nil
}
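On FreeBSD the sysctl output consumed above looks like "kstat.zfs.misc.<metric>.<stat>: <value>", so the field key is the metric name joined with the fifth dot-separated component. A tiny standalone illustration with a fabricated line:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	metric := "arcstats"
	// Fabricated sysctl line in the usual "kstat.zfs.misc.<metric>.<stat>: <value>" shape.
	line := "kstat.zfs.misc.arcstats.hits: 5968"

	rawData := strings.Split(line, ": ")
	key := metric + "_" + strings.Split(rawData[0], ".")[4]
	value, _ := strconv.ParseInt(rawData[1], 10, 64)
	fmt.Println(key, value) // arcstats_hits 5968
}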
Example no. 14
func gatherPoolStats(pool poolInfo, acc telegraf.Accumulator) error {
	lines, err := internal.ReadLines(pool.ioFilename)
	if err != nil {
		return err
	}

	if len(lines) != 3 {
		return fmt.Errorf("unexpected number of lines in %s: %d", pool.ioFilename, len(lines))
	}

	keys := strings.Fields(lines[1])
	values := strings.Fields(lines[2])

	keyCount := len(keys)

	if keyCount != len(values) {
		return fmt.Errorf("Key and value count don't match Keys:%v Values:%v", keys, values)
	}

	tag := map[string]string{"pool": pool.name}
	fields := make(map[string]interface{})
	for i := 0; i < keyCount; i++ {
		value, err := strconv.ParseInt(values[i], 10, 64)
		if err != nil {
			return err
		}
		fields[keys[i]] = value
	}
	acc.AddFields("zfs_pool", fields, tag)

	return nil
}
Example no. 15
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
	pgmap, ok := data["pgmap"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
	}
	fields := make(map[string]interface{})
	for key, value := range pgmap {
		switch value.(type) {
		case []interface{}:
			if key != "pgs_by_state" {
				continue
			}
			for _, state := range value.([]interface{}) {
				state_map, ok := state.(map[string]interface{})
				if !ok {
					return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
				}
				state_name, ok := state_map["state_name"].(string)
				if !ok {
					return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
				}
				state_count, ok := state_map["count"].(float64)
				if !ok {
					return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
				}
				fields[state_name] = state_count
			}
		}
	}
	acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
	return nil
}
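The function expects the status JSON to carry a pgmap object whose pgs_by_state entry is an array of {state_name, count} objects. The standalone snippet below decodes a fabricated excerpt of that shape to show what the type assertions above operate on.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Fabricated excerpt of the JSON shape decodeStatusPgmapState expects.
	raw := `{"pgmap": {"pgs_by_state": [
		{"state_name": "active+clean", "count": 2048},
		{"state_name": "active+degraded", "count": 16}
	]}}`

	var data map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &data); err != nil {
		panic(err)
	}

	pgmap := data["pgmap"].(map[string]interface{})
	for _, state := range pgmap["pgs_by_state"].([]interface{}) {
		m := state.(map[string]interface{})
		fmt.Printf("%s = %v\n", m["state_name"], m["count"])
	}
	// active+clean = 2048
	// active+degraded = 16
}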
Example no. 16
func (e *Exec) Gather(acc telegraf.Accumulator) error {
	out, err := e.runner.Run(e)
	if err != nil {
		return err
	}

	switch e.DataFormat {
	case "", "json":
		var jsonOut interface{}
		err = json.Unmarshal(out, &jsonOut)
		if err != nil {
			return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
				e.Command, err)
		}

		f := internal.JSONFlattener{}
		err = f.FlattenJSON("", jsonOut)
		if err != nil {
			return err
		}
		acc.AddFields("exec", f.Fields, nil)
	case "influx":
		now := time.Now()
		metrics, err := telegraf.ParseMetrics(out)
		for _, metric := range metrics {
			acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), now)
		}
		return err
	default:
		return fmt.Errorf("Unsupported data format: %s. Must be either json "+
			"or influx.", e.DataFormat)
	}
	return nil
}
Example no. 17
func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
	tags := map[string]string{
		"node_name": summaryMetrics.Node.NodeName,
	}
	fields := make(map[string]interface{})
	fields["cpu_usage_nanocores"] = summaryMetrics.Node.CPU.UsageNanoCores
	fields["cpu_usage_core_nanoseconds"] = summaryMetrics.Node.CPU.UsageCoreNanoSeconds
	fields["memory_available_bytes"] = summaryMetrics.Node.Memory.AvailableBytes
	fields["memory_usage_bytes"] = summaryMetrics.Node.Memory.UsageBytes
	fields["memory_working_set_bytes"] = summaryMetrics.Node.Memory.WorkingSetBytes
	fields["memory_rss_bytes"] = summaryMetrics.Node.Memory.RSSBytes
	fields["memory_page_faults"] = summaryMetrics.Node.Memory.PageFaults
	fields["memory_major_page_faults"] = summaryMetrics.Node.Memory.MajorPageFaults
	fields["network_rx_bytes"] = summaryMetrics.Node.Network.RXBytes
	fields["network_rx_errors"] = summaryMetrics.Node.Network.RXErrors
	fields["network_tx_bytes"] = summaryMetrics.Node.Network.TXBytes
	fields["network_tx_errors"] = summaryMetrics.Node.Network.TXErrors
	fields["fs_available_bytes"] = summaryMetrics.Node.FileSystem.AvailableBytes
	fields["fs_capacity_bytes"] = summaryMetrics.Node.FileSystem.CapacityBytes
	fields["fs_used_bytes"] = summaryMetrics.Node.FileSystem.UsedBytes
	fields["runtime_image_fs_available_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.AvailableBytes
	fields["runtime_image_fs_capacity_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.CapacityBytes
	fields["runtime_image_fs_used_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.UsedBytes
	acc.AddFields("kubernetes_node", fields, tags)
}
Example no. 18
func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
	fields := make(map[string]interface{})

	list := make(chan pathInfo)
	go g.generateFiles(dir, list)

	for file := range list {
		if file.err != nil {
			return file.err
		}

		raw, err := ioutil.ReadFile(file.path)
		if err != nil {
			return err
		}
		if len(raw) == 0 {
			continue
		}

		fd := fileData{data: raw, path: file.path}
		if err := fd.parse(fields); err != nil {
			return err
		}
	}
	fields["path"] = dir

	acc.AddFields(metricName, fields, nil)

	return nil
}
Example no. 19
func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
	loadavg, err := load.Avg()
	if err != nil {
		return err
	}

	hostinfo, err := host.Info()
	if err != nil {
		return err
	}

	users, err := host.Users()
	if err != nil {
		return err
	}

	fields := map[string]interface{}{
		"load1":         loadavg.Load1,
		"load5":         loadavg.Load5,
		"load15":        loadavg.Load15,
		"uptime":        hostinfo.Uptime,
		"n_users":       len(users),
		"uptime_format": format_uptime(hostinfo.Uptime),
		"n_cpus":        runtime.NumCPU(),
	}
	acc.AddFields("system", fields, nil)

	return nil
}
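format_uptime is not shown here; the sketch below is a hypothetical stand-in that renders the uptime seconds in a classic uptime-like "N days, H:MM" form. The exact formatting of the real helper is an assumption.

package main

import (
	"fmt"
	"time"
)

// formatUptimeSketch is a hypothetical stand-in for format_uptime: it turns an
// uptime in seconds into a "N days, H:MM" style string.
func formatUptimeSketch(uptime uint64) string {
	d := time.Duration(uptime) * time.Second
	days := int(d.Hours()) / 24
	hours := int(d.Hours()) % 24
	minutes := int(d.Minutes()) % 60

	if days == 0 {
		return fmt.Sprintf("%d:%02d", hours, minutes)
	}
	return fmt.Sprintf("%d days, %d:%02d", days, hours, minutes)
}

func main() {
	fmt.Println(formatUptimeSketch(93784)) // 1 days, 2:03
}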
Example no. 20
func (p *Processes) Gather(acc telegraf.Accumulator) error {
	// Get an empty map of metric fields
	fields := getEmptyFields()

	// Decide if we will use 'ps' to get stats (use procfs otherwise)
	usePS := true
	if runtime.GOOS == "linux" {
		usePS = false
	}
	if p.forcePS {
		usePS = true
	} else if p.forceProc {
		usePS = false
	}

	// Gather stats from 'ps' or procfs
	if usePS {
		if err := p.gatherFromPS(fields); err != nil {
			return err
		}
	} else {
		if err := p.gatherFromProc(fields); err != nil {
			return err
		}
	}

	acc.AddFields("processes", fields, nil)
	return nil
}
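getEmptyFields pre-populates the counters so that process states which never occur are still reported as zero. The sketch below is a hypothetical version; the exact key set is an assumption based on common process states, not necessarily the plugin's list.

package main

import "fmt"

// getEmptyFieldsSketch is a hypothetical version of getEmptyFields: it returns
// a zeroed counter per process state so absent states still report 0.
// The key set is illustrative.
func getEmptyFieldsSketch() map[string]interface{} {
	return map[string]interface{}{
		"blocked":  int64(0),
		"zombies":  int64(0),
		"stopped":  int64(0),
		"running":  int64(0),
		"sleeping": int64(0),
		"total":    int64(0),
	}
}

func main() {
	fields := getEmptyFieldsSketch()
	fields["running"] = fields["running"].(int64) + 1
	fmt.Println(fields)
}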
Example no. 21
// gatherInfoSchemaAutoIncStatuses can be used to get auto incremented values of the column
func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error {
	rows, err := db.Query(infoSchemaAutoIncQuery)
	if err != nil {
		return err
	}
	defer rows.Close()

	var (
		schema, table, column string
		incValue, maxInt      uint64
	)

	servtag, err := parseDSN(serv)
	if err != nil {
		servtag = "localhost"
	}

	for rows.Next() {
		if err := rows.Scan(&schema, &table, &column, &incValue, &maxInt); err != nil {
			return err
		}
		tags := map[string]string{
			"server": servtag,
			"schema": schema,
			"table":  table,
			"column": column,
		}
		fields := make(map[string]interface{})
		fields["auto_increment_column"] = incValue
		fields["auto_increment_column_max"] = maxInt

		acc.AddFields("mysql_info_schema", fields, tags)
	}
	return nil
}
Example no. 22
// gatherPerfEventWaits can be used to get total time and number of event waits
func (m *Mysql) gatherPerfEventWaits(db *sql.DB, serv string, acc telegraf.Accumulator) error {
	rows, err := db.Query(perfEventWaitsQuery)
	if err != nil {
		return err
	}
	defer rows.Close()

	var (
		event               string
		starCount, timeWait float64
	)

	servtag, err := parseDSN(serv)
	if err != nil {
		servtag = "localhost"
	}
	tags := map[string]string{
		"server": servtag,
	}
	for rows.Next() {
		if err := rows.Scan(&event, &starCount, &timeWait); err != nil {
			return err
		}
		tags["event_name"] = event
		fields := map[string]interface{}{
			"events_waits_total":         starCount,
			"events_waits_seconds_total": timeWait / picoSeconds,
		}

		acc.AddFields("mysql_perf_schema", fields, tags)
	}
	return nil
}
Example no. 23
func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) {
	overview := &OverviewResponse{}

	err := r.requestJSON("/api/overview", &overview)
	if err != nil {
		errChan <- err
		return
	}

	if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil {
		errChan <- fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue")
		return
	}

	tags := map[string]string{"url": r.URL}
	if r.Name != "" {
		tags["name"] = r.Name
	}
	fields := map[string]interface{}{
		"messages":           overview.QueueTotals.Messages,
		"messages_ready":     overview.QueueTotals.MessagesReady,
		"messages_unacked":   overview.QueueTotals.MessagesUnacknowledged,
		"channels":           overview.ObjectTotals.Channels,
		"connections":        overview.ObjectTotals.Connections,
		"consumers":          overview.ObjectTotals.Consumers,
		"exchanges":          overview.ObjectTotals.Exchanges,
		"queues":             overview.ObjectTotals.Queues,
		"messages_acked":     overview.MessageStats.Ack,
		"messages_delivered": overview.MessageStats.Deliver,
		"messages_published": overview.MessageStats.Publish,
	}
	acc.AddFields("rabbitmq_overview", fields, tags)

	errChan <- nil
}
Example no. 24
// Gather gets all metric fields and tags and returns any errors it encounters
func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error {
	// Set default values
	if h.ResponseTimeout < 1 {
		h.ResponseTimeout = 5
	}
	// Check send and expected string
	if h.Method == "" {
		h.Method = "GET"
	}
	if h.Address == "" {
		h.Address = "http://localhost"
	}
	addr, err := url.Parse(h.Address)
	if err != nil {
		return err
	}
	if addr.Scheme != "http" && addr.Scheme != "https" {
		return errors.New("Only http and https are supported")
	}
	// Prepare data
	tags := map[string]string{"server": h.Address, "method": h.Method}
	var fields map[string]interface{}
	// Gather data
	fields, err = h.HTTPGather()
	if err != nil {
		return err
	}
	// Add metrics
	acc.AddFields("http_response", fields, tags)
	return nil
}
Example no. 25
func readAerospikeStats(
	stats map[string]string,
	acc telegraf.Accumulator,
	host string,
	namespace string,
) {
	fields := make(map[string]interface{})
	tags := map[string]string{
		"aerospike_host": host,
		"namespace":      "_service",
	}

	if namespace != "" {
		tags["namespace"] = namespace
	}
	for key, value := range stats {
		// We are going to ignore all string based keys
		val, err := strconv.ParseInt(value, 10, 64)
		if err == nil {
			if strings.Contains(key, "-") {
				key = strings.Replace(key, "-", "_", -1)
			}
			fields[key] = val
		}
	}
	acc.AddFields("aerospike", fields, tags)
}
Example no. 26
func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
	disks, err := gohddtemp.Fetch(h.Address)

	if err != nil {
		return err
	}

	for _, disk := range disks {
		for _, chosenDevice := range h.Devices {
			if chosenDevice == "*" || chosenDevice == disk.DeviceName {
				tags := map[string]string{
					"device": disk.DeviceName,
					"model":  disk.Model,
					"unit":   disk.Unit,
					"status": disk.Status,
				}

				fields := map[string]interface{}{
					disk.DeviceName: disk.Temperature,
				}

				acc.AddFields("hddtemp", fields, tags)
			}
		}
	}

	return nil
}
Example no. 27
// Process pool data in Twemproxy stats
func (t *Twemproxy) processPool(
	acc telegraf.Accumulator,
	tags map[string]string,
	data map[string]interface{},
) {
	serverTags := make(map[string]map[string]string)

	fields := make(map[string]interface{})
	for key, value := range data {
		switch key {
		case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof":
			if val, ok := value.(float64); ok {
				fields[key] = val
			}
		default:
			if data, ok := value.(map[string]interface{}); ok {
				if _, ok := serverTags[key]; !ok {
					serverTags[key] = copyTags(tags)
					serverTags[key]["server"] = key
				}
				t.processServer(acc, serverTags[key], data)
			}
		}
	}
	acc.AddFields("twemproxy_pool", fields, tags)
}
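copyTags duplicates the tag map so that per-server tags added later do not mutate the pool-level tags passed in. A minimal sketch of such a helper, under the assumption that a shallow copy is all that is needed:

package main

import "fmt"

// copyTagsSketch is a hypothetical version of copyTags: it returns a shallow
// copy so callers can add server- or pool-specific tags without mutating the
// original map.
func copyTagsSketch(tags map[string]string) map[string]string {
	out := make(map[string]string, len(tags))
	for k, v := range tags {
		out[k] = v
	}
	return out
}

func main() {
	base := map[string]string{"twemproxy": "127.0.0.1:22222", "pool": "alpha"}
	serverTags := copyTagsSketch(base)
	serverTags["server"] = "10.0.0.5:6379"
	fmt.Println(base)       // unchanged
	fmt.Println(serverTags) // includes the extra "server" tag
}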
Example no. 28
// Process Twemproxy server stats
func (t *Twemproxy) processStat(
	acc telegraf.Accumulator,
	tags map[string]string,
	data map[string]interface{},
) {
	if source, ok := data["source"]; ok {
		if val, ok := source.(string); ok {
			tags["source"] = val
		}
	}

	fields := make(map[string]interface{})
	metrics := []string{"total_connections", "curr_connections", "timestamp"}
	for _, m := range metrics {
		if value, ok := data[m]; ok {
			if val, ok := value.(float64); ok {
				fields[m] = val
			}
		}
	}
	acc.AddFields("twemproxy", fields, tags)

	for _, pool := range t.Pools {
		if poolStat, ok := data[pool]; ok {
			if data, ok := poolStat.(map[string]interface{}); ok {
				poolTags := copyTags(tags)
				poolTags["pool"] = pool
				t.processPool(acc, poolTags, data)
			}
		}
	}
}
Example no. 29
func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error {
	response, err := http.Get(host)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	var stats Stats
	decoder := json.NewDecoder(response.Body)
	if err := decoder.Decode(&stats); err != nil {
		return err
	}

	fields := map[string]interface{}{}

	// CouchDB meta stats:
	c.MapCopy(fields, c.generateFields("couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses))
	c.MapCopy(fields, c.generateFields("couchdb_database_writes", stats.Couchdb.DatabaseWrites))
	c.MapCopy(fields, c.generateFields("couchdb_open_databases", stats.Couchdb.OpenDatabases))
	c.MapCopy(fields, c.generateFields("couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits))
	c.MapCopy(fields, c.generateFields("couchdb_request_time", stats.Couchdb.RequestTime))
	c.MapCopy(fields, c.generateFields("couchdb_database_reads", stats.Couchdb.DatabaseReads))
	c.MapCopy(fields, c.generateFields("couchdb_open_os_files", stats.Couchdb.OpenOsFiles))

	// http request methods stats:
	c.MapCopy(fields, c.generateFields("httpd_request_methods_put", stats.HttpdRequestMethods.Put))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_get", stats.HttpdRequestMethods.Get))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_copy", stats.HttpdRequestMethods.Copy))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_delete", stats.HttpdRequestMethods.Delete))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_post", stats.HttpdRequestMethods.Post))
	c.MapCopy(fields, c.generateFields("httpd_request_methods_head", stats.HttpdRequestMethods.Head))

	// status code stats:
	c.MapCopy(fields, c.generateFields("httpd_status_codes_200", stats.HttpdStatusCodes.Status200))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_201", stats.HttpdStatusCodes.Status201))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_202", stats.HttpdStatusCodes.Status202))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_301", stats.HttpdStatusCodes.Status301))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_304", stats.HttpdStatusCodes.Status304))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_400", stats.HttpdStatusCodes.Status400))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_401", stats.HttpdStatusCodes.Status401))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_403", stats.HttpdStatusCodes.Status403))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_404", stats.HttpdStatusCodes.Status404))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_405", stats.HttpdStatusCodes.Status405))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_409", stats.HttpdStatusCodes.Status409))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_412", stats.HttpdStatusCodes.Status412))
	c.MapCopy(fields, c.generateFields("httpd_status_codes_500", stats.HttpdStatusCodes.Status500))

	// httpd stats:
	c.MapCopy(fields, c.generateFields("httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges))
	c.MapCopy(fields, c.generateFields("httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads))
	c.MapCopy(fields, c.generateFields("httpd_requests", stats.Httpd.Requests))
	c.MapCopy(fields, c.generateFields("httpd_bulk_requests", stats.Httpd.BulkRequests))
	c.MapCopy(fields, c.generateFields("httpd_view_reads", stats.Httpd.ViewReads))

	tags := map[string]string{
		"server": host,
	}
	accumulator.AddFields("couchdb", fields, tags)
	return nil
}
Example no. 30
func (rb *RollbarWebhooks) Gather(acc telegraf.Accumulator) error {
	rb.Lock()
	defer rb.Unlock()
	for _, event := range rb.events {
		acc.AddFields("rollbar_webhooks", event.Fields(), event.Tags(), time.Now())
	}
	rb.events = make([]Event, 0)
	return nil
}