Example No. 1
func fastlyReflectAdd(md *opentsdb.MultiDataPoint, prefix, suffix string, st interface{}, timeStamp int64, ts opentsdb.TagSet) {
	t := reflect.TypeOf(st)
	valueOf := reflect.ValueOf(st)
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := valueOf.Field(i).Interface()
		var (
			jsonTag   = field.Tag.Get("json")
			metricTag = field.Tag.Get("metric")
			rateTag   = field.Tag.Get("rate")
			unitTag   = field.Tag.Get("unit")
			divTag    = field.Tag.Get("div")
			descTag   = field.Tag.Get("desc")
			exclude   = field.Tag.Get("exclude") != ""
		)
		if exclude || descTag == "" {
			continue
		}
		metricName := jsonTag
		if metricTag != "" {
			metricName = metricTag
		}
		if metricName == "" {
			slog.Errorf("Unable to determine metric name for field %s. Skipping.", field.Name)
			continue
		}
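		// A non-empty "div" tag means the value is divided by 60 below, and
		// fastlyDivDesc is appended to the description to note the conversion.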
		shouldDiv := divTag != ""
		if shouldDiv {
			descTag = fmt.Sprintf("%v %v", descTag, fastlyDivDesc)
		}
		fullMetric := fmt.Sprintf("%v.%v%v", prefix, metricName, suffix)
		switch value := value.(type) {
		case int64, float64:
			var v float64
			if f, found := value.(float64); found {
				v = f
			} else {
				v = float64(value.(int64))
			}
			if shouldDiv {
				v /= 60.0
			}
			AddTS(md, fullMetric, timeStamp, v, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), descTag)
		case string:
			// Floats in strings, I know not why, precision perhaps?
			// err ignored since we expect non-number strings in the struct
			if f, err := strconv.ParseFloat(value, 64); err == nil {
				if shouldDiv {
					f /= 60.0
				}
				AddTS(md, fullMetric, timeStamp, f, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), descTag)
			}
		default:
			// Pass since there is no need to recurse
		}
	}
}
Example No. 2
// structProcessor.add() takes in a metric name prefix, an arbitrary struct, and a tagset.
// The processor recurses through the struct and builds metrics. The field tags direct how
// the field should be processed, as well as the metadata for the resulting metric.
//
// The field tags used are described as follows:
//
// version: typically set to '1' or '2'.
//	This is compared against the elastic cluster version. If the version from the tag
//	does not match the version in production, the metric will not be sent for this field.
//
// exclude:
//	If this tag is set to 'true', a metric will not be sent for this field.
//
// rate: one of 'gauge', 'counter', 'rate'
//	This tag dictates the metadata.RateType we send.
//
// unit: 'bytes', 'pages', etc
//	This tag dictates the metadata.Unit we send.
//
// metric:
//	This is the metric name which will be sent. If not present, the 'json'
//	tag is sent as the metric name.
//
// Special handling:
//
// Metrics having the json tag suffix of '_in_millis' are automagically
// divided by 1000 and sent as seconds. The suffix is stripped from the name.
//
// Metrics having the json tag suffix of '_in_bytes' are automatically sent as
// gauge bytes. The suffix is stripped from the metric name.
func (s *structProcessor) add(prefix string, st interface{}, ts opentsdb.TagSet) {
	t := reflect.TypeOf(st)
	valueOf := reflect.ValueOf(st)
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := valueOf.Field(i).Interface()
		if field.Tag.Get("exclude") == "true" {
			continue
		}
		var (
			jsonTag    = field.Tag.Get("json")
			metricTag  = field.Tag.Get("metric")
			versionTag = field.Tag.Get("version")
			rateTag    = field.Tag.Get("rate")
			unitTag    = field.Tag.Get("unit")
		)
		metricName := jsonTag
		if metricTag != "" {
			metricName = metricTag
		}
		if metricName == "" {
			slog.Errorf("Unable to determine metric name for field %s. Skipping.", field.Name)
			continue
		}
		if versionTag == "" || strings.HasPrefix(s.elasticVersion, versionTag) {
			switch value := value.(type) {
			case int, float64: // Number types in our structs are only ints and float64s.
				// Turn all millisecond metrics into seconds
				if strings.HasSuffix(metricName, "_in_millis") {
					switch value.(type) {
					case int:
						value = float64(value.(int)) / 1000
					case float64:
						value = value.(float64) / 1000
					}
					unitTag = "seconds"
					metricName = strings.TrimSuffix(metricName, "_in_millis")
				}
				// Set rate and unit for all "_in_bytes" metrics, and strip the "_in_bytes"
				if strings.HasSuffix(metricName, "_in_bytes") {
					if rateTag == "" {
						rateTag = "gauge"
					}
					unitTag = "bytes"
					metricName = strings.TrimSuffix(metricName, "_in_bytes")
				}
				Add(s.md, prefix+"."+metricName, value, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), field.Tag.Get("desc"))
			case string:
				// The json data has a lot of strings, and we don't care about em.
			default:
				// If we hit another struct, recurse
				if reflect.ValueOf(value).Kind() == reflect.Struct {
					s.add(prefix+"."+metricName, value, ts)
				} else {
					slog.Errorf("Field %s for metric %s is non-numeric type. Cannot record as a metric.\n", field.Name, prefix+"."+metricName)
				}
			}
		}
	}
}
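
As a rough illustration of the tag conventions described above, the sketch below shows a hypothetical stats struct that structProcessor.add could walk. The struct, field, and metric names are invented for the example; only the tag keys (json, metric, version, rate, unit, exclude, desc) and the suffix handling come from the code.

type exampleNodeStats struct {
	// Plain numeric field: sent as <prefix>.docs_count with the given rate, unit, and desc.
	DocsCount int `json:"docs_count" rate:"gauge" unit:"documents" desc:"Documents on this node."`

	// "_in_millis" suffix: value divided by 1000, unit forced to seconds, name becomes <prefix>.query_time.
	QueryTimeInMillis int `json:"query_time_in_millis" rate:"counter" desc:"Cumulative query time."`

	// "_in_bytes" suffix: sent as gauge bytes under <prefix>.store_size.
	StoreSizeInBytes int `json:"store_size_in_bytes" desc:"Store size."`

	// version tag: only sent when the cluster version string starts with "2".
	SegmentsMemory int `json:"segments_memory" version:"2" rate:"gauge" unit:"bytes" desc:"Segment memory (2.x only)."`

	// metric tag overrides the json name: sent as <prefix>.open_contexts.
	OpenContexts int `json:"open" metric:"open_contexts" rate:"gauge" unit:"contexts" desc:"Open search contexts."`

	// exclude tag: never sent.
	Ignored string `json:"ignored" exclude:"true"`

	// Nested structs are recursed into, with their metric name appended to the prefix.
	Nested exampleNestedStats `json:"nested"`
}

type exampleNestedStats struct {
	FreeInBytes int `json:"free_in_bytes" desc:"Free bytes."`
}

With a prefix of "elastic", the first field would come out as the metric elastic.docs_count.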
Example No. 3
func c_fastly_billing(c fastlyClient) (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	now := time.Now().UTC()
	year := now.Format("2006")
	month := now.Format("01")
	b, err := c.GetBilling(year, month)
	if err != nil {
		return md, err
	}
	Add(&md, fastlyBillingPrefix+"bandwidth", b.Total.Bandwidth, nil, metadata.Gauge, metadata.Unit(b.Total.BandwidthUnits), fastlyBillingBandwidthDesc)
	Add(&md, fastlyBillingPrefix+"bandwidth_cost", b.Total.BandwidthCost, nil, metadata.Gauge, metadata.USD, fastlyBillingBandwidthCostDesc)
	Add(&md, fastlyBillingPrefix+"requests", b.Total.Requests, nil, metadata.Gauge, metadata.Request, fastlyBillingRequestsDesc)
	Add(&md, fastlyBillingPrefix+"requests_cost", b.Total.RequestsCost, nil, metadata.Gauge, metadata.USD, fastlyBillingRequestsCostDesc)
	Add(&md, fastlyBillingPrefix+"incurred_cost", b.Total.IncurredCost, nil, metadata.Gauge, metadata.USD, fastlyBillingIncurredCostDesc)
	Add(&md, fastlyBillingPrefix+"overage", b.Total.Overage, nil, metadata.Gauge, metadata.Unit("unknown"), fastlyBillingOverageDesc)
	Add(&md, fastlyBillingPrefix+"extras_cost", b.Total.ExtrasCost, nil, metadata.Gauge, metadata.USD, fastlyBillingExtrasCostDesc)
	Add(&md, fastlyBillingPrefix+"cost_before_discount", b.Total.CostBeforeDiscount, nil, metadata.Gauge, metadata.USD, fastlyBillingBeforeDiscountDesc)
	Add(&md, fastlyBillingPrefix+"discount", b.Total.Discount, nil, metadata.Gauge, metadata.Pct, fastlyBillingDiscountDesc)
	Add(&md, fastlyBillingPrefix+"cost", b.Total.Cost, nil, metadata.Gauge, metadata.USD, fastlyBillingCostDesc)

	return md, nil
}
Example No. 4
func c_windows_processes() (opentsdb.MultiDataPoint, error) {
	var dst []Win32_PerfRawData_PerfProc_Process
	var q = wmi.CreateQuery(&dst, `WHERE Name <> '_Total'`)
	err := queryWmi(q, &dst)
	if err != nil {
		return nil, err
	}

	var svc_dst []Win32_Service
	var svc_q = wmi.CreateQuery(&svc_dst, "")
	err = queryWmi(svc_q, &svc_dst)
	if err != nil {
		return nil, err
	}

	var iis_dst []WorkerProcess
	iis_q := wmi.CreateQuery(&iis_dst, "")
	err = queryWmiNamespace(iis_q, &iis_dst, "root\\WebAdministration")
	if err != nil {
		// Don't return on this error since the namespace might not exist.
		iis_dst = nil
	}

	var numberOfLogicalProcessors uint64
	var core_dst []Win32_ComputerSystem
	var core_q = wmi.CreateQuery(&core_dst, "")
	err = queryWmi(core_q, &core_dst)
	if err != nil {
		return nil, err
	}
	for _, y := range core_dst {
		numberOfLogicalProcessors = uint64(y.NumberOfLogicalProcessors)
	}
	if numberOfLogicalProcessors == 0 {
		return nil, fmt.Errorf("invalid result: numberOfLogicalProcessors=%v", numberOfLogicalProcessors)
	}

	var md opentsdb.MultiDataPoint
	var svc_dst_started []Win32_Service
	for _, svc := range svc_dst {
		if util.NameMatches(svc.Name, regexesProcesses) {
			if svc.Started {
				svc_dst_started = append(svc_dst_started, svc)
			}
			tags := opentsdb.TagSet{"name": svc.Name}
			Add(&md, "win.service.started", util.Btoi(svc.Started), tags, metadata.Gauge, metadata.Bool, descWinServiceStarted)
			Add(&md, "win.service.status", util.Btoi(svc.Status != "OK"), tags, metadata.Gauge, metadata.Ok, descWinServiceStatus)
			Add(&md, "win.service.checkpoint", svc.CheckPoint, tags, metadata.Gauge, metadata.None, descWinServiceCheckPoint)
			Add(&md, "win.service.wait_hint", svc.WaitHint, tags, metadata.Gauge, metadata.MilliSecond, descWinServiceWaitHint)
			Add(&md, osServiceRunning, util.Btoi(svc.Started), tags, metadata.Gauge, metadata.Bool, osServiceRunningDesc)
		}
	}

	totalCPUByName := make(map[string]uint64)
	totalVirtualMemByName := make(map[string]uint64)
	totalPrivateWSMemByName := make(map[string]uint64)
	countByName := make(map[string]int)

	for _, v := range dst {
		var name string
		service_match := false
		iis_match := false

		process_match := util.NameMatches(v.Name, regexesProcesses)

		id := "0"

		if process_match {
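			// Perf process instances are named like "w3wp#2" for duplicates; split off the
			// "#N" suffix so name is "w3wp" and id is "2" (a bare name keeps id "0").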
			raw_name := strings.Split(v.Name, "#")
			name = raw_name[0]
			if len(raw_name) == 2 {
				id = raw_name[1]
			}
			// If you have a hash sign in your process name you don't deserve monitoring ;-)
			if len(raw_name) > 2 {
				continue
			}
		}

		// A Service match could "overwrite" a process match, but that is probably what we would want
		for _, svc := range svc_dst_started {
			// It is possible the pid has gone and been reused, but I think that is unlikely,
			// and I'm not aware of an atomic join we could do anyway
			if svc.ProcessId != 0 && svc.ProcessId == v.IDProcess {
				id = "0"
				service_match = true
				name = svc.Name
				break
			}
		}

		for _, a_pool := range iis_dst {
			if a_pool.ProcessId == v.IDProcess {
				id = "0"
				iis_match = true
				name = strings.Join([]string{"iis", a_pool.AppPoolName}, "_")
				break
			}
		}

		if v.IDProcess == uint32(os.Getpid()) {
			TotalScollectorMemoryMB = v.WorkingSetPrivate / 1024 / 1024
		}

		if !(service_match || process_match || iis_match) {
			continue
		}

		// Use timestamp from WMI to fix issues with CPU metrics
		ts := TSys100NStoEpoch(v.Timestamp_Sys100NS)
		tags := opentsdb.TagSet{"name": name, "id": id}
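		// The Percent*Time raw counters are cumulative CPU time in 100ns ticks; they are
		// normalized by NS100_Seconds and the logical processor count before being sent as counters.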
		AddTS(&md, "win.proc.cpu", ts, v.PercentPrivilegedTime/NS100_Seconds/numberOfLogicalProcessors, opentsdb.TagSet{"type": "privileged"}.Merge(tags), metadata.Counter, metadata.Pct, descWinProcCPU_priv)
		AddTS(&md, "win.proc.cpu", ts, v.PercentUserTime/NS100_Seconds/numberOfLogicalProcessors, opentsdb.TagSet{"type": "user"}.Merge(tags), metadata.Counter, metadata.Pct, descWinProcCPU_user)
		totalCPUByName[name] += v.PercentUserTime / NS100_Seconds / numberOfLogicalProcessors
		AddTS(&md, "win.proc.cpu_total", ts, v.PercentProcessorTime/NS100_Seconds/numberOfLogicalProcessors, tags, metadata.Counter, metadata.Pct, descWinProcCPU_total)
		if v.Frequency_Object != 0 {
			Add(&md, "win.proc.elapsed_time", (v.Timestamp_Object-v.ElapsedTime)/v.Frequency_Object, tags, metadata.Gauge, metadata.Second, descWinProcElapsed_time)
		}
		Add(&md, "win.proc.handle_count", v.HandleCount, tags, metadata.Gauge, metadata.Count, descWinProcHandle_count)
		Add(&md, "win.proc.io_bytes", v.IOOtherBytesPersec, opentsdb.TagSet{"type": "other"}.Merge(tags), metadata.Counter, metadata.BytesPerSecond, descWinProcIo_bytes_other)
		Add(&md, "win.proc.io_bytes", v.IOReadBytesPersec, opentsdb.TagSet{"type": "read"}.Merge(tags), metadata.Counter, metadata.BytesPerSecond, descWinProcIo_bytes_read)
		Add(&md, "win.proc.io_bytes", v.IOWriteBytesPersec, opentsdb.TagSet{"type": "write"}.Merge(tags), metadata.Counter, metadata.BytesPerSecond, descWinProcIo_bytes_write)
		Add(&md, "win.proc.io_operations", v.IOOtherOperationsPersec, opentsdb.TagSet{"type": "other"}.Merge(tags), metadata.Counter, metadata.Operation, descWinProcIo_operations)
		Add(&md, "win.proc.io_operations", v.IOReadOperationsPersec, opentsdb.TagSet{"type": "read"}.Merge(tags), metadata.Counter, metadata.Operation, descWinProcIo_operations_read)
		Add(&md, "win.proc.io_operations", v.IOWriteOperationsPersec, opentsdb.TagSet{"type": "write"}.Merge(tags), metadata.Counter, metadata.Operation, descWinProcIo_operations_write)
		Add(&md, "win.proc.mem.page_faults", v.PageFaultsPersec, tags, metadata.Counter, metadata.PerSecond, descWinProcMemPage_faults)
		Add(&md, "win.proc.mem.pagefile_bytes", v.PageFileBytes, tags, metadata.Gauge, metadata.Bytes, descWinProcMemPagefile_bytes)
		Add(&md, "win.proc.mem.pagefile_bytes_peak", v.PageFileBytesPeak, tags, metadata.Gauge, metadata.Bytes, descWinProcMemPagefile_bytes_peak)
		Add(&md, "win.proc.mem.pool_nonpaged_bytes", v.PoolNonpagedBytes, tags, metadata.Gauge, metadata.Bytes, descWinProcMemPool_nonpaged_bytes)
		Add(&md, "win.proc.mem.pool_paged_bytes", v.PoolPagedBytes, tags, metadata.Gauge, metadata.Bytes, descWinProcMemPool_paged_bytes)
		Add(&md, "win.proc.mem.vm.bytes", v.VirtualBytes, tags, metadata.Gauge, metadata.Bytes, descWinProcMemVmBytes)
		totalVirtualMemByName[name] += v.VirtualBytes
		Add(&md, "win.proc.mem.vm.bytes_peak", v.VirtualBytesPeak, tags, metadata.Gauge, metadata.Bytes, descWinProcMemVmBytes_peak)
		Add(&md, "win.proc.mem.working_set", v.WorkingSet, tags, metadata.Gauge, metadata.Bytes, descWinProcMemWorking_set)
		Add(&md, "win.proc.mem.working_set_peak", v.WorkingSetPeak, tags, metadata.Gauge, metadata.Bytes, descWinProcMemWorking_set_peak)
		Add(&md, "win.proc.mem.working_set_private", v.WorkingSetPrivate, tags, metadata.Gauge, metadata.Bytes, descWinProcMemWorking_set_private)
		totalPrivateWSMemByName[name] += v.WorkingSetPrivate
		Add(&md, "win.proc.priority_base", v.PriorityBase, tags, metadata.Gauge, metadata.None, descWinProcPriority_base)
		Add(&md, "win.proc.private_bytes", v.PrivateBytes, tags, metadata.Gauge, metadata.Bytes, descWinProcPrivate_bytes)
		Add(&md, "win.proc.thread_count", v.ThreadCount, tags, metadata.Gauge, metadata.Count, descWinProcthread_count)
		Add(&md, "win.proc.pid", v.IDProcess, tags, metadata.Gauge, metadata.Unit("PID"), osProcPID)
		countByName[name]++
	}
	for name, count := range countByName {
		if count < 1 {
			continue
		}
		Add(&md, osProcCount, count, opentsdb.TagSet{"name": name}, metadata.Gauge, metadata.Process, osProcCountDesc)
		if totalCPU, ok := totalCPUByName[name]; ok {
			Add(&md, osProcCPU, totalCPU, opentsdb.TagSet{"name": name}, metadata.Counter, metadata.Pct, osProcCPUDesc)
		}
		if totalVM, ok := totalVirtualMemByName[name]; ok {
			Add(&md, osProcMemVirtual, totalVM, opentsdb.TagSet{"name": name}, metadata.Gauge, metadata.Bytes, osProcMemVirtualDesc)
		}
		if totalPWS, ok := totalPrivateWSMemByName[name]; ok {
			Add(&md, osProcMemReal, totalPWS, opentsdb.TagSet{"name": name}, metadata.Gauge, metadata.Bytes, osProcMemRealDesc)
		}
	}
	return md, nil
}
Example No. 5
func c_elasticsearch(collectIndices bool) (opentsdb.MultiDataPoint, error) {
	var status ElasticStatus
	if err := esReq("/", "", &status); err != nil {
		return nil, err
	}
	var clusterStats ElasticClusterStats
	if err := esReq(esStatsURL(status.Version.Number), "", &clusterStats); err != nil {
		return nil, err
	}
	var clusterState ElasticClusterState
	if err := esReq("/_cluster/state/master_node", "", &clusterState); err != nil {
		return nil, err
	}
	var clusterHealth ElasticHealth
	if err := esReq("/_cluster/health", "level=indices", &clusterHealth); err != nil {
		return nil, err
	}
	var indexStats ElasticIndexStats
	if err := esReq("/_stats", "", &indexStats); err != nil {
		return nil, err
	}
	var md opentsdb.MultiDataPoint
	s := structProcessor{elasticVersion: status.Version.Number, md: &md}
	ts := opentsdb.TagSet{"cluster": clusterStats.ClusterName}
	isMaster := false
	// As we're pulling _local stats here, this will only process 1 node.
	for nodeID, nodeStats := range clusterStats.Nodes {
		isMaster = nodeID == clusterState.MasterNode
		if isMaster {
			s.add("elastic.health.cluster", clusterHealth, nil)
			if statusCode, ok := elasticStatusMap[clusterHealth.Status]; ok {
				Add(&md, "elastic.health.cluster.status", statusCode, ts, metadata.Gauge, metadata.StatusCode, "The current status of the cluster. Zero for green, one for yellow, two for red.")
			}
			indexStatusCount := map[string]int{
				"green":  0,
				"yellow": 0,
				"red":    0,
			}
			for _, index := range clusterHealth.Indices {
				indexStatusCount[index.Status] += 1
			}
			for status, count := range indexStatusCount {
				Add(&md, "elastic.health.cluster.index_status_count", count, opentsdb.TagSet{"status": status}.Merge(ts), metadata.Gauge, metadata.Unit("indices"), "Index counts by status.")
			}
		}
		s.add("elastic", nodeStats, ts)
		// These are index stats in aggregate for this node.
		s.add("elastic.indices.local", nodeStats.Indices, ts)
		s.add("elastic.jvm.gc", nodeStats.JVM.GC.Collectors.Old, opentsdb.TagSet{"gc": "old"}.Merge(ts))
		s.add("elastic.jvm.gc", nodeStats.JVM.GC.Collectors.Young, opentsdb.TagSet{"gc": "young"}.Merge(ts))
	}
	if collectIndices && isMaster {
		for k, index := range indexStats.Indices {
			if esSkipIndex(k) {
				continue
			}
			ts := opentsdb.TagSet{"index_name": k, "cluster": clusterStats.ClusterName}
			if indexHealth, ok := clusterHealth.Indices[k]; ok {
				s.add("elastic.health.indices", indexHealth, ts)
				if status, ok := elasticStatusMap[indexHealth.Status]; ok {
					Add(&md, "elastic.health.indices.status", status, ts, metadata.Gauge, metadata.StatusCode, "The current status of the index. Zero for green, one for yellow, two for red.")
				}
			}
			s.add("elastic.indices.cluster", index.Primaries, ts)
		}
	}
	return md, nil
}
Example No. 6
func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error) {
	md := opentsdb.MultiDataPoint{}
	baseOid := mib.BaseOid

	rateUnitTags := func(m conf.MIBMetric) (r metadata.RateType, u metadata.Unit, t opentsdb.TagSet, err error) {
		if r = metadata.RateType(m.RateType); r == "" {
			r = metadata.Gauge
		}
		if u = metadata.Unit(m.Unit); u == "" {
			u = metadata.None
		}
		if m.Tags == "" {
			t = make(opentsdb.TagSet)
		} else {
			t, err = opentsdb.ParseTags(m.Tags)
			if err != nil {
				return "", "", nil, err
			}
		}
		t["host"] = cfg.Host
		return
	}

	for _, metric := range mib.Metrics {
		rate, unit, tagset, err := rateUnitTags(metric)
		if err != nil {
			return md, err
		}

		v, err := snmp_oid(cfg.Host, cfg.Community, combineOids(metric.Oid, baseOid))
		if err != nil && metric.FallbackOid != "" {
			v, err = snmp_oid(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, baseOid))
		}
		if err != nil {
			return md, err
		}
		Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
	}

	for _, tree := range mib.Trees {
		treeOid := combineOids(tree.BaseOid, baseOid)
		tagCache := make(map[string]map[string]interface{}) // tag key to map of values
		for _, tag := range tree.Tags {
			if tag.Oid == "idx" {
				continue
			}
			vals, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(tag.Oid, treeOid))
			if err != nil {
				return md, err
			}
			tagCache[tag.Key] = vals
		}
		for _, metric := range tree.Metrics {
			rate, unit, tagset, err := rateUnitTags(metric)
			if err != nil {
				return md, err
			}
			nodes, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.Oid, treeOid))
			if err != nil && metric.FallbackOid != "" {
				nodes, err = snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, treeOid))
			}
			if err != nil {
				return md, err
			}
			// check all lengths
			for k, list := range tagCache {
				if len(list) != len(nodes) {
					return md, fmt.Errorf("snmp tree for tag key %s, and metric %s do not have same length", k, metric.Metric)
				}
			}
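			// Each node in the metric subtree is keyed by its OID index; the same index keys
			// the cached tag subtrees, and the special "idx" tag uses the index itself as the value.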
			for i, v := range nodes {
				for _, tag := range tree.Tags {
					var tagVal interface{}
					if tag.Oid == "idx" {
						tagVal = i
					} else {
						var ok bool
						tagVal, ok = tagCache[tag.Key][i]
						if !ok {
							return md, fmt.Errorf("tree for tag %s has no entry for metric %s index %s", tag.Key, metric.Metric, i)
						}
					}
					if byteSlice, ok := tagVal.([]byte); ok {
						tagVal = string(byteSlice)
					}
					tagset[tag.Key] = fmt.Sprint(tagVal)
				}
				Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
			}
		}
	}
	return md, nil
}
Example No. 7
			v, err := sqlplusValueConv(fields[2])
			if err != nil {
				return err
			}

			name := sqlplusMetricNameConv(fields[0])

			period, err := strconv.Atoi(fields[1])
			if err != nil {
				return err
			}
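			// period appears to be reported in hundredths of a second; convert to seconds
			// and round up to the nearest multiple of 5, e.g. 1497 -> 15, 6003 -> 60.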
			period = (period/100 + 4) / 5 * 5 // handle rounding error

			name = prefix + name + "_" + strconv.Itoa(period) + "s"

			Add(md, name, v, common, metadata.Gauge, metadata.Unit(fields[3]), fields[0])
			return nil
		},
	},
	{
		"select NAME || ',' || VALUE from v$sysstat where NAME not like '%this session%';\n",
		func(row string, md *opentsdb.MultiDataPoint, prefix string, common opentsdb.TagSet) error {
			fields := strings.Split(row, ",")
			if len(fields) != 2 {
				return sqlplusParserFieldCountErr
			}

			v, err := sqlplusValueConv(fields[1])
			if err != nil {
				return err
			}
Example No. 8
func linuxProcMonitor(w *WatchedProc, md *opentsdb.MultiDataPoint) error {
	var err error
	var processCount int
	var totalCPU int64
	var totalVirtualMem int64
	var totalRSSMem int64
	for proc, id := range w.Processes {
		pid := proc.Pid
		file_status, e := os.Stat("/proc/" + pid)
		if e != nil {
			w.Remove(proc)
			continue
		}
		processCount++
		stats_file, e := ioutil.ReadFile("/proc/" + pid + "/stat")
		if e != nil {
			w.Remove(proc)
			continue
		}
		io_file, e := ioutil.ReadFile("/proc/" + pid + "/io")
		if e != nil {
			w.Remove(proc)
			continue
		}
		limits, e := ioutil.ReadFile("/proc/" + pid + "/limits")
		if e != nil {
			w.Remove(proc)
			continue
		}
		fd_dir, e := os.Open("/proc/" + pid + "/fd")
		if e != nil {
			w.Remove(proc)
			continue
		}
		fds, e := fd_dir.Readdirnames(0)
		fd_dir.Close()
		if e != nil {
			w.Remove(proc)
			continue
		}
		stats := strings.Fields(string(stats_file))
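		// Fields of interest from /proc/<pid>/stat (0-indexed after splitting):
		// 9 minflt, 11 majflt, 13 utime, 14 stime, 22 vsize (bytes), 23 rss (pages).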
		if len(stats) < 24 {
			err = fmt.Errorf("stats too short")
			continue
		}
		var io []string
		for _, line := range strings.Split(string(io_file), "\n") {
			f := strings.Fields(line)
			if len(f) == 2 {
				io = append(io, f[1])
			}
		}
		if len(io) < 6 {
			err = fmt.Errorf("io too short")
			continue
		}
		tags := opentsdb.TagSet{"name": w.Name, "id": strconv.Itoa(id)}
		for _, line := range strings.Split(string(limits), "\n") {
			f := strings.Fields(line)
			if len(f) == 6 && strings.Join(f[0:3], " ") == "Max open files" {
				if f[3] != "unlimited" {
					Add(md, "linux.proc.num_fds_slim", f[3], tags, metadata.Gauge, metadata.Files, descLinuxSoftFileLimit)
					Add(md, "linux.proc.num_fds_hlim", f[4], tags, metadata.Gauge, metadata.Files, descLinuxHardFileLimit)
				}
			}
		}
		start_ts := file_status.ModTime().Unix()
		user, err := strconv.ParseInt(stats[13], 10, 64)
		if err != nil {
			return fmt.Errorf("failed to convert process user cpu: %v", err)
		}
		sys, err := strconv.ParseInt(stats[14], 10, 64)
		if err != nil {
			return fmt.Errorf("failed to convert process system cpu: %v", err)
		}
		totalCPU += user + sys
		Add(md, "linux.proc.cpu", stats[13], opentsdb.TagSet{"type": "user"}.Merge(tags), metadata.Counter, metadata.Pct, descLinuxProcCPUUser)
		Add(md, "linux.proc.cpu", stats[14], opentsdb.TagSet{"type": "system"}.Merge(tags), metadata.Counter, metadata.Pct, descLinuxProcCPUSystem)
		Add(md, "linux.proc.mem.fault", stats[9], opentsdb.TagSet{"type": "minflt"}.Merge(tags), metadata.Counter, metadata.Fault, descLinuxProcMemFaultMin)
		Add(md, "linux.proc.mem.fault", stats[11], opentsdb.TagSet{"type": "majflt"}.Merge(tags), metadata.Counter, metadata.Fault, descLinuxProcMemFaultMax)
		virtual, err := strconv.ParseInt(stats[22], 10, 64)
		if err != nil {
			return fmt.Errorf("failed to convert process virtual memory: %v", err)
		}
		totalVirtualMem += virtual
		rss, err := strconv.ParseInt(stats[23], 10, 64)
		if err != nil {
			return fmt.Errorf("failed to convert process rss memory: %v", err)
		}
		if pid == strconv.Itoa(os.Getpid()) {
			TotalScollectorMemoryMB = uint64(rss) * uint64(osPageSize) / 1024 / 1024
		}
		totalRSSMem += rss
		Add(md, "linux.proc.mem.virtual", stats[22], tags, metadata.Gauge, metadata.Bytes, descLinuxProcMemVirtual)
		Add(md, "linux.proc.mem.rss", stats[23], tags, metadata.Gauge, metadata.Page, descLinuxProcMemRss)
		Add(md, "linux.proc.mem.rss_bytes", rss*int64(osPageSize), tags, metadata.Gauge, metadata.Bytes, descLinuxProcMemRssBytes)
		Add(md, "linux.proc.char_io", io[0], opentsdb.TagSet{"type": "read"}.Merge(tags), metadata.Counter, metadata.Bytes, descLinuxProcCharIoRead)
		Add(md, "linux.proc.char_io", io[1], opentsdb.TagSet{"type": "write"}.Merge(tags), metadata.Counter, metadata.Bytes, descLinuxProcCharIoWrite)
		Add(md, "linux.proc.syscall", io[2], opentsdb.TagSet{"type": "read"}.Merge(tags), metadata.Counter, metadata.Syscall, descLinuxProcSyscallRead)
		Add(md, "linux.proc.syscall", io[3], opentsdb.TagSet{"type": "write"}.Merge(tags), metadata.Counter, metadata.Syscall, descLinuxProcSyscallWrite)
		Add(md, "linux.proc.io_bytes", io[4], opentsdb.TagSet{"type": "read"}.Merge(tags), metadata.Counter, metadata.Bytes, descLinuxProcIoBytesRead)
		Add(md, "linux.proc.io_bytes", io[5], opentsdb.TagSet{"type": "write"}.Merge(tags), metadata.Counter, metadata.Bytes, descLinuxProcIoBytesWrite)
		Add(md, "linux.proc.num_fds", len(fds), tags, metadata.Gauge, metadata.Files, descLinuxProcFd)
		Add(md, "linux.proc.start_time", start_ts, tags, metadata.Gauge, metadata.Timestamp, descLinuxProcStartTS)
		Add(md, "linux.proc.uptime", now()-start_ts, tags, metadata.Gauge, metadata.Second, descLinuxProcUptime)
		Add(md, "linux.proc.pid", pid, tags, metadata.Gauge, metadata.Unit("PID"), osProcPID)
	}
	coreCount, err := linuxCoreCount()
	if err != nil {
		return fmt.Errorf("failed to get core count: %v", err)
	}
	tsName := opentsdb.TagSet{"name": w.Name}
	if processCount > 0 {
		Add(md, osProcCPU, float64(totalCPU)/float64(coreCount), tsName, metadata.Counter, metadata.Pct, osProcCPUDesc)
		Add(md, osProcMemReal, totalRSSMem*int64(os.Getpagesize()), tsName, metadata.Gauge, metadata.Bytes, osProcMemRealDesc)
		Add(md, osProcMemVirtual, totalVirtualMem, tsName, metadata.Gauge, metadata.Bytes, osProcMemVirtualDesc)
		Add(md, osProcCount, processCount, tsName, metadata.Gauge, metadata.Process, osProcCountDesc)
	}
	if w.IncludeCount {
		Add(md, "linux.proc.count", processCount, tsName, metadata.Gauge, metadata.Process, descLinuxProcCount)
	}
	return err
}
Example No. 9
func c_varnish_unix() (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	const metric = "varnish."

	r, err := util.Command(5*time.Second, nil, "varnishstat", "-j")
	if err != nil {
		return nil, err
	}

	var stats varnishStats
	if err := json.NewDecoder(r).Decode(&stats); err != nil {
		return nil, err
	}

	for name, raw := range stats {
		if name == "timestamp" {
			continue
		}

		var v varnishStat
		if err := json.Unmarshal(raw, &v); err != nil {
			slog.Errorln("varnish parser error:", name, err)
			continue
		}

		ts := opentsdb.TagSet{}

		// special case for backend stats: extract the backend name, host and port, put
		// them in tags, and remove them from the metric name.
		// the "ident" field for the "VBE" type looks like "name(host,,port)"
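		// e.g. an ident of "default(127.0.0.1,,80)" would yield backend=default and endpoint=127.0.0.1_80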
		if v.Type == "VBE" {
			subtype := v.SubType

			name = strings.Replace(name, "."+subtype, "", -1)

			idx := strings.Index(subtype, "(")
			if idx < 0 || len(subtype)-idx < 4 {
				// output format changed, ignore
				continue
			}

			ss := strings.Split(subtype[idx+1:len(subtype)-1], ",")
			if len(ss) != 3 {
				// output format changed, ignore
				continue
			}

			ts.Merge(opentsdb.TagSet{"backend": subtype[:idx]})
			ts.Merge(opentsdb.TagSet{"endpoint": ss[0] + "_" + ss[2]})
		}

		rate := metadata.RateType(metadata.Gauge)
		if flag := v.Flag; flag == "a" || flag == "c" {
			rate = metadata.Counter
		}

		unit := metadata.Unit(metadata.Count)
		if v.Format == "B" {
			unit = metadata.Bytes
		}

		Add(&md, metric+strings.ToLower(name), v.Value, ts, rate, unit, v.Desc)
	}
	return md, nil
}