Code example #1
// Stats returns stats on total connections, active connections, and total processing time
func (m *RequestCounter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	ret := []*datapoint.Datapoint{}
	stats := map[string]int64{
		"total_connections": atomic.LoadInt64(&m.TotalConnections),
		"total_time_ns":     atomic.LoadInt64(&m.TotalProcessingTimeNs),
	}
	for k, v := range stats {
		ret = append(
			ret,
			datapoint.New(
				k,
				dimensions,
				datapoint.NewIntValue(v),
				datapoint.Counter,
				time.Now()))
	}
	ret = append(
		ret,
		datapoint.New(
			"active_connections",
			dimensions,
			datapoint.NewIntValue(atomic.LoadInt64(&m.ActiveConnections)),
			datapoint.Gauge,
			time.Now()))
	return ret
}
Code example #2
func pointc(name string, v int64) *datapoint.Datapoint {
	return dplocal.NewOnHostDatapointDimensions(
		name,
		datapoint.NewIntValue(v),
		datapoint.Counter,
		map[string]string{"stattype": "golang_sys"})
}
Code example #3
func (c *CadvisorCollector) collectMachineInfo(ch chan<- datapoint.Datapoint) {
	machineInfo, err := c.infoProvider.GetMachineInfo()
	if err != nil {
		//c.errors.Set(1)
		glog.Warningf("Couldn't get machine info: %s", err)
		return
	}
	tt := time.Now()

	// CPU frequency.
	ch <- *datapoint.New("machine_cpu_frequency_khz", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.CpuFrequency)), datapoint.Gauge, tt)

	// Number of CPU cores on the machine.
	ch <- *datapoint.New("machine_cpu_cores", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.NumCores)), datapoint.Gauge, tt)

	// Amount of memory installed on the machine.
	ch <- *datapoint.New("machine_memory_bytes", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.MemoryCapacity)), datapoint.Gauge, tt)
}
Code example #4
File: signalfx.go Project: baris/metricproxy
// NewDatumValue creates new datapoint value referenced from a value of the datum protobuf
func NewDatumValue(val *com_signalfx_metrics_protobuf.Datum) datapoint.Value {
	if val.DoubleValue != nil {
		return datapoint.NewFloatValue(val.GetDoubleValue())
	}
	if val.IntValue != nil {
		return datapoint.NewIntValue(val.GetIntValue())
	}
	return datapoint.NewStringValue(val.GetStrValue())
}
Code example #5
// Stats returns the number of calls to AddDatapoint
func (e *ErrorTrackerHandler) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	return []*datapoint.Datapoint{
		dplocal.NewOnHostDatapointDimensions(
			"total_errors",
			datapoint.NewIntValue(e.TotalErrors),
			datapoint.Counter,
			dimensions),
	}
}
Code example #6
File: signalfx.go Project: baris/metricproxy
// ValueToValue converts the v2 JSON value to a core api Value
func ValueToValue(v ValueToSend) (datapoint.Value, error) {
	f, ok := v.(float64)
	if ok {
		return datapoint.NewFloatValue(f), nil
	}
	i, ok := v.(int64)
	if ok {
		return datapoint.NewIntValue(i), nil
	}
	i2, ok := v.(int)
	if ok {
		return datapoint.NewIntValue(int64(i2)), nil
	}
	s, ok := v.(string)
	if ok {
		return datapoint.NewStringValue(s), nil
	}
	return nil, fmt.Errorf("unable to convert value: %s", v)
}
Code example #7
// Stats gets the metrics for a ReqLatencyCounter.
func (a *ReqLatencyCounter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	now := a.timeKeeper.Now()
	getDp := func(key string, value int64) *datapoint.Datapoint {
		return datapoint.New(
			"ReqLatencyCounter.requestCounts", appendDimensions(dimensions, "requestClass", key),
			datapoint.NewIntValue(value), datapoint.Counter, now)
	}
	return []*datapoint.Datapoint{
		getDp("fast", a.fastRequests),
		getDp("slow", a.slowRequests),
	}
}
Code example #8
func TestDefaultSource(t *testing.T) {
	finalDatapointDestination, l, forwarder := setupServerForwarder(t)
	defer l.Close()

	timeToSend := time.Now().Round(time.Second)
	dpSent := datapoint.New("metric", map[string]string{}, datapoint.NewIntValue(2), datapoint.Gauge, timeToSend)
	go forwarder.AddDatapoints(context.Background(), []*datapoint.Datapoint{dpSent})
	dpReceived := finalDatapointDestination.Next()
	i := dpReceived.Value.(datapoint.IntValue).Int()
	assert.Equal(t, int64(2), i, "Expect 2 back")
	assert.Equal(t, "proxy-source", dpReceived.Dimensions["sf_source"], "Expect proxy-source back")
	assert.Equal(t, timeToSend, dpReceived.Timestamp)
}
Code example #9
// Stats related to this forwarder, including errors processing datapoints
func (forwarder *BufferedForwarder) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	ret := make([]*datapoint.Datapoint, 0, 2)
	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"datapoint_chan_backup_size",
		datapoint.NewIntValue(int64(len(forwarder.dpChan))),
		datapoint.Gauge,
		dimensions))
	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"event_chan_backup_size",
		datapoint.NewIntValue(int64(len(forwarder.eChan))),
		datapoint.Gauge,
		dimensions))
	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"datapoint_backup_size",
		datapoint.NewIntValue(atomic.LoadInt64(&forwarder.stats.totalDatapointsBuffered)),
		datapoint.Gauge,
		dimensions))
	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"event_backup_size",
		datapoint.NewIntValue(atomic.LoadInt64(&forwarder.stats.totalEventsBuffered)),
		datapoint.Gauge,
		dimensions))
	return ret
}
Code example #10
File: signalfx_test.go Project: tomzhang/metricproxy
func TestNewProtobufDataPoint(t *testing.T) {
	protoDatapoint := &com_signalfx_metrics_protobuf.DataPoint{
		Source: workarounds.GolangDoesnotAllowPointerToStringLiteral("asource"),
		Metric: workarounds.GolangDoesnotAllowPointerToStringLiteral("ametric"),
		Value:  &com_signalfx_metrics_protobuf.Datum{IntValue: workarounds.GolangDoesnotAllowPointerToIntLiteral(2)},
		Dimensions: []*com_signalfx_metrics_protobuf.Dimension{{
			Key:   workarounds.GolangDoesnotAllowPointerToStringLiteral("key"),
			Value: workarounds.GolangDoesnotAllowPointerToStringLiteral("value"),
		}},
	}
	dp, err := NewProtobufDataPointWithType(protoDatapoint, com_signalfx_metrics_protobuf.MetricType_COUNTER)
	assert.Equal(t, "asource", dp.Dimensions["sf_source"], "Line should be invalid")
	assert.NoError(t, err)
	assert.Equal(t, datapoint.Count, dp.MetricType, "Line should be invalid")

	v := com_signalfx_metrics_protobuf.MetricType_CUMULATIVE_COUNTER
	protoDatapoint.MetricType = &v
	dp, err = NewProtobufDataPointWithType(protoDatapoint, com_signalfx_metrics_protobuf.MetricType_COUNTER)
	assert.NoError(t, err)
	assert.Equal(t, datapoint.Counter, dp.MetricType, "Line should be invalid")

	item := &BodySendFormatV2{
		Metric: "ametric",
		Value:  3.0,
	}
	assert.Contains(t, item.String(), "ametric", "Should get metric name back")
	f, _ := ValueToValue(item.Value)
	assert.Equal(t, datapoint.NewFloatValue(3.0), f, "Should get value 3 back")

	item.Value = 3
	i, _ := ValueToValue(item.Value)
	assert.Equal(t, datapoint.NewIntValue(3), i, "Should get value 3 back")

	item.Value = int64(3)
	ValueToValue(item.Value)

	item.Value = "abc"
	s, _ := ValueToValue(item.Value)
	assert.Equal(t, datapoint.NewStringValue("abc"), s, "Should get value abc back")

	item.Value = struct{}{}
	_, err = ValueToValue(item.Value)
	assert.Error(t, err)
}
Code example #11
// Stats related to this c, including errors processing datapoints
func (c *Counter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	ret := make([]*datapoint.Datapoint, 0, 6)

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"total_process_errors",
		datapoint.NewIntValue(atomic.LoadInt64(&c.TotalProcessErrors)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"total_datapoints",
		datapoint.NewIntValue(atomic.LoadInt64(&c.TotalDatapoints)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"total_events",
		datapoint.NewIntValue(atomic.LoadInt64(&c.TotalEvents)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"total_process_calls",
		datapoint.NewIntValue(atomic.LoadInt64(&c.TotalProcessCalls)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"dropped_points",
		datapoint.NewIntValue(atomic.LoadInt64(&c.ProcessErrorPoints)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"process_time_ns",
		datapoint.NewIntValue(atomic.LoadInt64(&c.TotalProcessTimeNs)),
		datapoint.Counter,
		dimensions))

	ret = append(ret, dplocal.NewOnHostDatapointDimensions(
		"calls_in_flight",
		datapoint.NewIntValue(atomic.LoadInt64(&c.CallsInFlight)),
		datapoint.Gauge,
		dimensions))
	return ret
}
Code example #12
File: carbonlistener.go Project: baris/metricproxy
// Stats reports information about the total points seen by carbon
func (listener *Listener) Stats() []*datapoint.Datapoint {
	ret := []*datapoint.Datapoint{}
	stats := map[string]int64{
		"invalid_datapoints": atomic.LoadInt64(&listener.stats.invalidDatapoints),
		"total_connections":  atomic.LoadInt64(&listener.stats.totalConnections),
		"active_connections": atomic.LoadInt64(&listener.stats.activeConnections),
	}
	for k, v := range stats {
		var t datapoint.MetricType
		if k == "active_connections" {
			t = datapoint.Gauge
		} else {
			t = datapoint.Counter
		}
		ret = append(
			ret,
			dplocal.NewOnHostDatapointDimensions(
				k,
				datapoint.NewIntValue(v),
				t,
				map[string]string{"listener": listener.conf.name}))
	}
	return append(ret, listener.st.Stats()...)
}
Code example #13
func (c *CadvisorCollector) collectContainersInfo(ch chan<- datapoint.Datapoint) {
	containers, err := c.infoProvider.SubcontainersInfo("/")
	if err != nil {
		//c.errors.Set(1)
		glog.Warningf("Couldn't get containers: %s", err)
		return
	}
	for _, container := range containers {
		dims := make(map[string]string)
		id := container.Name
		dims["id"] = id

		name := id
		if len(container.Aliases) > 0 {
			name = container.Aliases[0]
			dims["name"] = name
		}

		image := container.Spec.Image
		if len(image) > 0 {
			dims["image"] = image
		}

		if c.containerNameToLabels != nil {
			newLabels := c.containerNameToLabels(name)
			for k, v := range newLabels {
				dims[k] = v
			}
		}

		tt := time.Now()
		// Container spec
		// Start time of the container since unix epoch in seconds.
		ch <- *datapoint.New("container_start_time_seconds", copyDims(dims), datapoint.NewIntValue(container.Spec.CreationTime.Unix()), datapoint.Gauge, tt)

		if container.Spec.HasCpu {
			// CPU share of the container.
			ch <- *datapoint.New("container_spec_cpu_shares", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Cpu.Limit)), datapoint.Gauge, tt)
		}

		if container.Spec.HasMemory {
			// Memory limit for the container.
			ch <- *datapoint.New("container_spec_memory_limit_bytes", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Memory.Limit)), datapoint.Gauge, tt)
			// Memory swap limit for the container.
			ch <- *datapoint.New("container_spec_memory_swap_limit_bytes", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Memory.SwapLimit)), datapoint.Gauge, tt)
		}

		// Now for the actual metrics
		if len(container.Stats) > 0 {
			// only get the latest stats from this container. note/warning: the stats array contains historical statistics in earliest-to-latest order
			lastStatIndex := len(container.Stats) - 1
			stat := container.Stats[lastStatIndex]

			for _, cm := range c.containerMetrics {
				for _, metricValue := range cm.getValues(stat) {
					newDims := copyDims(dims)

					// Add extra dimensions
					for i, label := range cm.extraLabels {
						newDims[label] = metricValue.labels[i]
					}

					ch <- *datapoint.New(cm.name, newDims, metricValue.value, cm.valueType, stat.Timestamp)
				}
			}
		}
	}
}
Code example #14
func TestConfigLoadDimensions(t *testing.T) {
	fileObj, _ := ioutil.TempFile("", "gotest")
	filename := fileObj.Name()
	defer os.Remove(filename)
	ctx := context.Background()

	psocket, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	defer psocket.Close()
	portParts := strings.Split(psocket.Addr().String(), ":")
	port := portParts[len(portParts)-1]
	conf := strings.Replace(config1, "<<PORT>>", port, 1)

	ioutil.WriteFile(filename, []byte(conf), os.FileMode(0666))
	myProxyCommandLineConfiguration := proxyCommandLineConfigurationT{
		configFileName:                filename,
		logDir:                        "-",
		logMaxSize:                    1,
		ctx:                           ctx,
		logMaxBackups:                 0,
		stopChannel:                   make(chan bool),
		closeWhenWaitingToStopChannel: make(chan struct{}),
	}

	go func() {
		myProxyCommandLineConfiguration.blockTillSetupReady()
		assert.Equal(t, 1, len(myProxyCommandLineConfiguration.allListeners))
		assert.Equal(t, 1, len(myProxyCommandLineConfiguration.allForwarders))
		dp := datapoint.New("metric", map[string]string{"source": "proxy", "forwarder": "testForwardTo"}, datapoint.NewIntValue(1), datapoint.Gauge, time.Now())
		myProxyCommandLineConfiguration.allForwarders[0].AddDatapoints(ctx, []*datapoint.Datapoint{dp})
		// Keep going, but skip empty line and EOF
		line := ""
		for {
			c, err := psocket.Accept()
			defer c.Close()
			assert.NoError(t, err)
			reader := bufio.NewReader(c)
			line, err = reader.ReadString((byte)('\n'))
			if line == "" && err == io.EOF {
				continue
			}
			break
		}
		assert.NoError(t, err)
		fmt.Printf("line is %s\n", line)
		log.Info(line)
		assert.Equal(t, "proxy.testForwardTo.", line[0:len("proxy.testForwardTo.")])
		myProxyCommandLineConfiguration.stopChannel <- true
	}()
	assert.NoError(t, myProxyCommandLineConfiguration.main())
}
Code example #15
// Stats about this decoder, including how many datapoints it decoded
func (decoder *JSONDecoder) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	return []*datapoint.Datapoint{
		datapoint.New("total_blank_dims", dimensions, datapoint.NewIntValue(decoder.TotalBlankDims), datapoint.Counter, time.Now()),
		datapoint.New("invalid_collectd_json", dimensions, datapoint.NewIntValue(decoder.TotalErrors), datapoint.Counter, time.Now()),
	}
}
Code example #16
// NewCadvisorCollector creates new CadvisorCollector
func NewCadvisorCollector(infoProvider infoProvider, f ContainerNameToLabelsFunc) *CadvisorCollector {
	return &CadvisorCollector{
		infoProvider:          infoProvider,
		containerNameToLabels: f,
		containerMetrics: []containerMetric{
			{
				name:      "container_last_seen",
				help:      "Last time a container was seen by the exporter",
				valueType: datapoint.Timestamp,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(time.Now().UnixNano())}}
				},
			},
			{
				name:      "container_cpu_user_seconds_total",
				help:      "Cumulative user cpu time consumed in nanoseconds.",
				valueType: datapoint.Counter,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Cpu.Usage.User))}}
				},
			},
			{
				name:      "container_cpu_system_seconds_total",
				help:      "Cumulative system cpu time consumed in nanoseconds.",
				valueType: datapoint.Counter,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Cpu.Usage.System))}}
				},
			},
			{
				name:      "container_cpu_usage_seconds_total",
				help:      "Cumulative cpu time consumed per cpu in nanoseconds.",
				valueType: datapoint.Counter,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Cpu.Usage.Total))}}
				},
			},
			{
				name:      "container_cpu_utilization",
				help:      "Cumulative cpu utilization in percentages.",
				valueType: datapoint.Counter,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Cpu.Usage.Total / 10000000))}}
				},
			},
			{
				name:        "container_cpu_utilization_per_core",
				help:        "Cumulative cpu utilization in percentages per core",
				valueType:   datapoint.Counter,
				extraLabels: []string{"cpu"},
				getValues: func(s *info.ContainerStats) metricValues {
					metricValues := make(metricValues, len(s.Cpu.Usage.PerCpu))
					for index, coreUsage := range s.Cpu.Usage.PerCpu {
						if coreUsage > 0 {
							metricValues[index] = metricValue{value: datapoint.NewIntValue(int64(coreUsage / 10000000)), labels: []string{"cpu" + strconv.Itoa(index)}}
						} else {
							metricValues[index] = metricValue{value: datapoint.NewIntValue(int64(0)), labels: []string{strconv.Itoa(index)}}
						}
					}
					return metricValues
				},
			},
			{
				name:      "container_memory_failcnt",
				help:      "Number of memory usage hits limits",
				valueType: datapoint.Counter,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Memory.Failcnt))}}
				},
			},
			{
				name:      "container_memory_usage_bytes",
				help:      "Current memory usage in bytes.",
				valueType: datapoint.Gauge,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Memory.Usage))}}
				},
			},
			{
				name:      "container_memory_working_set_bytes",
				help:      "Current working set in bytes.",
				valueType: datapoint.Gauge,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{{value: datapoint.NewIntValue(int64(s.Memory.WorkingSet))}}
				},
			},
			{
				name:        "container_memory_failures_total",
				help:        "Cumulative count of memory allocation failures.",
				valueType:   datapoint.Counter,
				extraLabels: []string{"type", "scope"},
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{
						{
							value:  datapoint.NewIntValue(int64(s.Memory.ContainerData.Pgfault)),
							labels: []string{"pgfault", "container"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.Memory.ContainerData.Pgmajfault)),
							labels: []string{"pgmajfault", "container"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.Memory.HierarchicalData.Pgfault)),
							labels: []string{"pgfault", "hierarchy"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.Memory.HierarchicalData.Pgmajfault)),
							labels: []string{"pgmajfault", "hierarchy"},
						},
					}
				},
			},
			{
				name:        "container_fs_limit_bytes",
				help:        "Number of bytes that can be consumed by the container on this filesystem.",
				valueType:   datapoint.Gauge,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.Limit))
					})
				},
			},
			{
				name:        "container_fs_usage_bytes",
				help:        "Number of bytes that are consumed by the container on this filesystem.",
				valueType:   datapoint.Gauge,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.Usage))
					})
				},
			},
			{
				name:        "container_fs_reads_total",
				help:        "Cumulative count of reads completed",
				valueType:   datapoint.Gauge,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.ReadsCompleted))
					})
				},
			},
			{
				name:        "container_fs_sector_reads_total",
				help:        "Cumulative count of sector reads completed",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.SectorsRead))
					})
				},
			},
			{
				name:        "container_fs_reads_merged_total",
				help:        "Cumulative count of reads merged",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.ReadsMerged))
					})
				},
			},
			{
				name:        "container_fs_read_seconds_total",
				help:        "Cumulative count of seconds spent reading",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.ReadTime / uint64(time.Second)))
					})
				},
			},
			{
				name:        "container_fs_writes_total",
				help:        "Cumulative count of writes completed",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.WritesCompleted))
					})
				},
			},
			{
				name:        "container_fs_sector_writes_total",
				help:        "Cumulative count of sector writes completed",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.SectorsWritten))
					})
				},
			},
			{
				name:        "container_fs_writes_merged_total",
				help:        "Cumulative count of writes merged",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.WritesMerged))
					})
				},
			},
			{
				name:        "container_fs_write_seconds_total",
				help:        "Cumulative count of seconds spent writing",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.WriteTime / uint64(time.Second)))
					})
				},
			},
			{
				name:        "container_fs_io_current",
				help:        "Number of I/Os currently in progress",
				valueType:   datapoint.Gauge,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.IoInProgress))
					})
				},
			},
			{
				name:        "container_fs_io_time_seconds_total",
				help:        "Cumulative count of seconds spent doing I/Os",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.IoTime / uint64(time.Second)))
					})
				},
			},
			{
				name:        "container_fs_io_time_weighted_seconds_total",
				help:        "Cumulative weighted I/O time in seconds",
				valueType:   datapoint.Counter,
				extraLabels: []string{"device"},
				getValues: func(s *info.ContainerStats) metricValues {
					return fsValues(s.Filesystem, func(fs *info.FsStats) datapoint.Value {
						return datapoint.NewIntValue(int64(fs.WeightedIoTime / uint64(time.Second)))
					})
				},
			},
			{
				name:        "pod_network_receive_bytes_total",
				help:        "Cumulative count of bytes received",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.RxBytes))
					})
				},
			},
			{
				name:        "pod_network_receive_packets_total",
				help:        "Cumulative count of packets received",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.RxPackets))
					})
				},
			},
			{
				name:        "pod_network_receive_packets_dropped_total",
				help:        "Cumulative count of packets dropped while receiving",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.RxDropped))
					})
				},
			},
			{
				name:        "pod_network_receive_errors_total",
				help:        "Cumulative count of errors encountered while receiving",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.RxErrors))
					})
				},
			},
			{
				name:        "pod_network_transmit_bytes_total",
				help:        "Cumulative count of bytes transmitted",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.TxBytes))
					})
				},
			},
			{
				name:        "pod_network_transmit_packets_total",
				help:        "Cumulative count of packets transmitted",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.TxPackets))
					})
				},
			},
			{
				name:        "pod_network_transmit_packets_dropped_total",
				help:        "Cumulative count of packets dropped while transmitting",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.TxDropped))
					})
				},
			},
			{
				name:        "pod_network_transmit_errors_total",
				help:        "Cumulative count of errors encountered while transmitting",
				valueType:   datapoint.Counter,
				extraLabels: []string{"interface"},
				getValues: func(s *info.ContainerStats) metricValues {
					return networkValues(s.Network.Interfaces, func(is *info.InterfaceStats) datapoint.Value {
						return datapoint.NewIntValue(int64(is.TxErrors))
					})
				},
			},
			{
				name:        "container_tasks_state",
				help:        "Number of tasks in given state",
				extraLabels: []string{"state"},
				valueType:   datapoint.Gauge,
				getValues: func(s *info.ContainerStats) metricValues {
					return metricValues{
						{
							value:  datapoint.NewIntValue(int64(s.TaskStats.NrSleeping)),
							labels: []string{"sleeping"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.TaskStats.NrRunning)),
							labels: []string{"running"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.TaskStats.NrStopped)),
							labels: []string{"stopped"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.TaskStats.NrUninterruptible)),
							labels: []string{"uninterruptible"},
						},
						{
							value:  datapoint.NewIntValue(int64(s.TaskStats.NrIoWait)),
							labels: []string{"iowaiting"},
						},
					}
				},
			},
		},
	}
}
Code example #17
func TestDatumForPoint(t *testing.T) {
	assert.Equal(t, int64(3), datumForPoint(datapoint.NewIntValue(3)).GetIntValue())
	assert.Equal(t, 0.0, datumForPoint(datapoint.NewIntValue(3)).GetDoubleValue())
	assert.Equal(t, .1, datumForPoint(datapoint.NewFloatValue(.1)).GetDoubleValue())
	assert.Equal(t, "hi", datumForPoint(datapoint.NewStringValue("hi")).GetStrValue())
}
Code example #18
File: generator.go Project: baris/metricproxy
// Next returns a unique datapoint
func (d *DatapointSource) Next() *datapoint.Datapoint {
	d.mu.Lock()
	defer d.mu.Unlock()
	return datapoint.New(d.Metric+":"+strconv.FormatInt(atomic.AddInt64(&d.CurrentIndex, 1), 10), d.Dims, datapoint.NewIntValue(0), d.Dptype, d.TimeSource())
}