// Stats returns stats on total connections, active connections, and total processing time
func (m *RequestCounter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	ret := []*datapoint.Datapoint{}
	stats := map[string]int64{
		"total_connections": atomic.LoadInt64(&m.TotalConnections),
		"total_time_ns":     atomic.LoadInt64(&m.TotalProcessingTimeNs),
	}
	for k, v := range stats {
		ret = append(
			ret,
			datapoint.New(
				k,
				dimensions,
				datapoint.NewIntValue(v),
				datapoint.Counter,
				time.Now()))
	}
	ret = append(
		ret,
		datapoint.New(
			"active_connections",
			dimensions,
			datapoint.NewIntValue(atomic.LoadInt64(&m.ActiveConnections)),
			datapoint.Gauge,
			time.Now()))
	return ret
}
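// Hypothetical usage sketch, not from the original source: flush the counter's
// stats to a datapoint sink on a fixed interval. The dpSink interface below is
// assumed from the AddDatapoints(ctx, dps) signature used elsewhere in this
// code; the dimension values are placeholders.
type dpSink interface {
	AddDatapoints(ctx context.Context, dps []*datapoint.Datapoint) error
}

func pollRequestCounterStats(ctx context.Context, m *RequestCounter, sink dpSink) {
	dims := map[string]string{"component": "listener"} // placeholder dimensions
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Stats is safe to call concurrently: it reads the counters atomically.
			if err := sink.AddDatapoints(ctx, m.Stats(dims)); err != nil {
				return
			}
		}
	}
}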
func TestNewOnHostDatapoint(t *testing.T) {
	hostname, _ := os.Hostname()
	dp1 := NewOnHostDatapoint("metrica", nil, datapoint.Counter)
	assert.Equal(t, "metrica", dp1.Metric)
	dp := Wrap(datapoint.New("metric", map[string]string{}, datapoint.NewFloatValue(3.0), datapoint.Counter, time.Now()))
	assert.Equal(t, hostname, dp.Dimensions["host"], "Should get the hostname back")
	osXXXHostname = func() (string, error) { return "", errors.New("unable to get hostname") }
	dp = Wrap(datapoint.New("metric", map[string]string{}, datapoint.NewFloatValue(3.0), datapoint.Counter, time.Now()))
	assert.Equal(t, "unknown", dp.Dimensions["host"], "Should fall back to unknown")
	osXXXHostname = os.Hostname
}
// NewDatapoint creates a new datapoint from collectd's write_http endpoint JSON format.
// defaultDimensions are added to the created datapoint, but are overridden by any dimension
// values in the JSON.
// Dimensions are pulled out of type_instance, plugin_instance, and host, in that order of precedence.
func NewDatapoint(point *JSONWriteFormat, index uint, defaultDimensions map[string]string) *datapoint.Datapoint {
	dstype, val, dsname := point.Dstypes[index], point.Values[index], point.Dsnames[index]
	// If you add another dimension that we read from the JSON, update this number.
	const MaxCollectDDims = 6
	dimensions := make(map[string]string, len(defaultDimensions)+MaxCollectDDims)
	for k, v := range defaultDimensions {
		dimensions[k] = v
	}
	metricType := metricTypeFromDsType(dstype)
	metricName, usedParts := getReasonableMetricName(point, index)
	addIfNotNullOrEmpty(dimensions, "plugin", true, point.Plugin)
	_, usedInMetricName := usedParts["type_instance"]
	parseDimensionsOut(dimensions, usedInMetricName, point.TypeInstance, point.PluginInstance, point.Host)
	_, usedInMetricName = usedParts["type"]
	addIfNotNullOrEmpty(dimensions, "type", !usedInMetricName, point.TypeS)
	_, usedInMetricName = usedParts["dsname"]
	addIfNotNullOrEmpty(dimensions, "dsname", !usedInMetricName, dsname)
	timestamp := time.Unix(0, int64(float64(time.Second)**point.Time))
	return datapoint.New(metricName, dimensions, datapoint.NewFloatValue(*val), metricType, timestamp)
}
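// Hypothetical caller, not part of the original source: decode a collectd
// write_http payload and convert every value of every entry. Assumes the
// request body is a JSON array of JSONWriteFormat objects, matching how
// NewDatapoint indexes Dstypes/Values/Dsnames in parallel.
func collectdBodyToDatapoints(body io.Reader, defaultDims map[string]string) ([]*datapoint.Datapoint, error) {
	var points []*JSONWriteFormat
	if err := json.NewDecoder(body).Decode(&points); err != nil {
		return nil, err
	}
	var dps []*datapoint.Datapoint
	for _, p := range points {
		// Each entry carries parallel arrays; one datapoint per value.
		for i := range p.Values {
			dps = append(dps, NewDatapoint(p, uint(i), defaultDims))
		}
	}
	return dps, nil
}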
func (decoder *JSONDecoderV2) Read(ctx context.Context, req *http.Request) error {
	dec := json.NewDecoder(req.Body)
	var d JSONDatapointV2
	if err := dec.Decode(&d); err != nil {
		return err
	}
	dps := make([]*datapoint.Datapoint, 0, len(d))
	for metricType, datapoints := range d {
		mt, ok := com_signalfx_metrics_protobuf.MetricType_value[strings.ToUpper(metricType)]
		if !ok {
			log.WithField("metricType", metricType).Warn("Unknown metric type")
			continue
		}
		for _, jsonDatapoint := range datapoints {
			v, err := ValueToValue(jsonDatapoint.Value)
			if err != nil {
				log.WithField("err", err).Warn("Unable to get value for datapoint")
			} else {
				dp := datapoint.New(jsonDatapoint.Metric, jsonDatapoint.Dimensions, v, fromMT(com_signalfx_metrics_protobuf.MetricType(mt)), fromTs(jsonDatapoint.Timestamp))
				dps = append(dps, dp)
			}
		}
	}
	return decoder.Sink.AddDatapoints(ctx, dps)
}
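// Minimal illustration, not from the source: feed a hand-built v2 JSON body
// through the decoder. The URL path and the body's field casing are
// assumptions; the top-level keys must match the protobuf MetricType names
// case-insensitively ("gauge", "counter", ...), per the lookup above.
func decodeV2Example(ctx context.Context, decoder *JSONDecoderV2) error {
	body := `{"gauge": [{"metric": "cpu.load", "value": 0.25, "dimensions": {"host": "a"}}]}`
	req, err := http.NewRequest("POST", "/v2/datapoint", strings.NewReader(body))
	if err != nil {
		return err
	}
	return decoder.Read(ctx, req)
}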
func (c *CadvisorCollector) collectMachineInfo(ch chan<- datapoint.Datapoint) {
	machineInfo, err := c.infoProvider.GetMachineInfo()
	if err != nil {
		//c.errors.Set(1)
		glog.Warningf("Couldn't get machine info: %s", err)
		return
	}
	tt := time.Now()

	// CPU frequency.
	ch <- *datapoint.New("machine_cpu_frequency_khz", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.CpuFrequency)), datapoint.Gauge, tt)
	// Number of CPU cores on the machine.
	ch <- *datapoint.New("machine_cpu_cores", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.NumCores)), datapoint.Gauge, tt)
	// Amount of memory installed on the machine.
	ch <- *datapoint.New("machine_memory_bytes", make(map[string]string), datapoint.NewIntValue(int64(machineInfo.MemoryCapacity)), datapoint.Gauge, tt)
}
// Stats gets the metrics for a ReqLatencyCounter.
func (a *ReqLatencyCounter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	now := a.timeKeeper.Now()
	getDp := func(key string, value int64) *datapoint.Datapoint {
		return datapoint.New(
			"ReqLatencyCounter.requestCounts",
			appendDimensions(dimensions, "requestClass", key),
			datapoint.NewIntValue(value),
			datapoint.Counter,
			now)
	}
	return []*datapoint.Datapoint{
		getDp("fast", a.fastRequests),
		getDp("slow", a.slowRequests),
	}
}
func TestSetSource(t *testing.T) {
	finalDatapointDestination, l, forwarder := setupServerForwarder(t)
	defer l.Close()

	timeToSend := time.Now().Round(time.Second)
	dpSent := datapoint.New("metric", map[string]string{"cpusize": "big", "hostname": "ahost"}, datapoint.NewIntValue(2), datapoint.Gauge, timeToSend)
	go forwarder.AddDatapoints(context.Background(), []*datapoint.Datapoint{dpSent})
	dpReceived := finalDatapointDestination.Next()
	i := dpReceived.Value.(datapoint.IntValue).Int()
	assert.Equal(t, int64(2), i, "Expect 2 back")
	assert.Equal(t, "ahost", dpReceived.Dimensions["sf_source"], "Expect ahost back")
	assert.Equal(t, timeToSend, dpReceived.Timestamp)
}
func TestConfigLoadDimensions(t *testing.T) {
	fileObj, _ := ioutil.TempFile("", "gotest")
	filename := fileObj.Name()
	defer os.Remove(filename)
	ctx := context.Background()
	psocket, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	defer psocket.Close()
	portParts := strings.Split(psocket.Addr().String(), ":")
	port := portParts[len(portParts)-1]
	conf := strings.Replace(config1, "<<PORT>>", port, 1)
	ioutil.WriteFile(filename, []byte(conf), os.FileMode(0666))

	myProxyCommandLineConfiguration := proxyCommandLineConfigurationT{
		configFileName:                filename,
		logDir:                        "-",
		logMaxSize:                    1,
		ctx:                           ctx,
		logMaxBackups:                 0,
		stopChannel:                   make(chan bool),
		closeWhenWaitingToStopChannel: make(chan struct{}),
	}
	go func() {
		myProxyCommandLineConfiguration.blockTillSetupReady()
		assert.Equal(t, 1, len(myProxyCommandLineConfiguration.allListeners))
		assert.Equal(t, 1, len(myProxyCommandLineConfiguration.allForwarders))
		dp := datapoint.New("metric", map[string]string{"source": "proxy", "forwarder": "testForwardTo"}, datapoint.NewIntValue(1), datapoint.Gauge, time.Now())
		myProxyCommandLineConfiguration.allForwarders[0].AddDatapoints(ctx, []*datapoint.Datapoint{dp})
		// Keep going, but skip empty lines and EOF
		line := ""
		for {
			c, err := psocket.Accept()
			defer c.Close()
			assert.NoError(t, err)
			reader := bufio.NewReader(c)
			line, err = reader.ReadString(byte('\n'))
			if line == "" && err == io.EOF {
				continue
			}
			break
		}
		assert.NoError(t, err)
		fmt.Printf("line is %s\n", line)
		log.Info(line)
		assert.Equal(t, "proxy.testForwardTo.", line[0:len("proxy.testForwardTo.")])
		myProxyCommandLineConfiguration.stopChannel <- true
	}()
	assert.NoError(t, myProxyCommandLineConfiguration.main())
}
func TestNoSource(t *testing.T) {
	finalDatapointDestination, l, forwarder := setupServerForwarder(t)
	defer l.Close()
	forwarder.defaultSource = ""

	timeToSend := time.Now().Round(time.Second)
	dpSent := datapoint.New("metric", map[string]string{}, datapoint.NewIntValue(2), datapoint.Gauge, timeToSend)
	go forwarder.AddDatapoints(context.Background(), []*datapoint.Datapoint{dpSent})
	dpReceived := finalDatapointDestination.Next()
	i := dpReceived.Value.(datapoint.IntValue).Int()
	assert.Equal(t, int64(2), i, "Expect 2 back")
	val, exists := dpReceived.Dimensions["sf_source"]
	assert.False(t, exists, val)
}
func (decoder *JSONDecoderV1) Read(ctx context.Context, req *http.Request) error {
	dec := json.NewDecoder(req.Body)
	for {
		var d JSONDatapointV1
		if err := dec.Decode(&d); err == io.EOF {
			break
		} else if err != nil {
			return err
		} else {
			log.WithField("dp", d).Debug("Got a new point")
			if d.Metric == "" {
				log.WithField("dp", d).Debug("Invalid Datapoint")
				continue
			}
			mt := fromMT(decoder.TypeGetter.GetMetricTypeFromMap(d.Metric))
			dp := datapoint.New(d.Metric, map[string]string{"sf_source": d.Source}, datapoint.NewFloatValue(d.Value), mt, time.Now())
			decoder.Sink.AddDatapoints(ctx, []*datapoint.Datapoint{dp})
		}
	}
	return nil
}
// NewProtobufDataPointWithType creates a new datapoint from SignalFx's protobuf definition (backwards compatible with the old API)
func NewProtobufDataPointWithType(dp *com_signalfx_metrics_protobuf.DataPoint, mType com_signalfx_metrics_protobuf.MetricType) *datapoint.Datapoint {
	var mt com_signalfx_metrics_protobuf.MetricType
	if dp.MetricType != nil {
		mt = dp.GetMetricType()
	} else {
		mt = mType
	}
	dims := make(map[string]string, len(dp.GetDimensions())+1)
	if dp.GetSource() != "" {
		dims["sf_source"] = dp.GetSource()
	}
	dpdims := dp.GetDimensions()
	for _, dpdim := range dpdims {
		dims[dpdim.GetKey()] = dpdim.GetValue()
	}
	return datapoint.New(dp.GetMetric(), dims, NewDatumValue(dp.GetValue()), fromMT(mt), fromTs(dp.GetTimestamp()))
}
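// Illustration only, not from the source: a point sent without an explicit
// MetricType falls back to the type the caller passes in. The pointer-valued
// fields follow the proto2-style struct these getters imply; the Datum/Value
// field names are assumptions beyond the Metric/Source/MetricType reads above.
func exampleProtobufFallback() *datapoint.Datapoint {
	metric, source := "cpu.load", "host1"
	iv := int64(42)
	wire := &com_signalfx_metrics_protobuf.DataPoint{
		Metric: &metric,
		Source: &source, // becomes the sf_source dimension
		Value:  &com_signalfx_metrics_protobuf.Datum{IntValue: &iv},
		// MetricType left nil, so the GAUGE argument below wins.
	}
	return NewProtobufDataPointWithType(wire, com_signalfx_metrics_protobuf.MetricType_GAUGE)
}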
// NewOnHostDatapointDimensions is like NewOnHostDatapoint but also with optional dimensions
func NewOnHostDatapointDimensions(metric string, value datapoint.Value, metricType datapoint.MetricType, dimensions map[string]string) *datapoint.Datapoint {
	return Wrap(datapoint.New(metric, dimensions, value, metricType, time.Now()))
}
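// Hypothetical usage, not in the source: the metric name and dimensions here
// are placeholders. Wrap (exercised in the test above) adds the "host" dimension.
func exampleOnHostGauge() *datapoint.Datapoint {
	return NewOnHostDatapointDimensions(
		"proxy.queue_depth",
		datapoint.NewIntValue(12),
		datapoint.Gauge,
		map[string]string{"component": "listener"})
}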
// Stats about this decoder, including how many datapoints it decoded
func (decoder *JSONDecoder) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	return []*datapoint.Datapoint{
		datapoint.New("total_blank_dims", dimensions, datapoint.NewIntValue(decoder.TotalBlankDims), datapoint.Counter, time.Now()),
		datapoint.New("invalid_collectd_json", dimensions, datapoint.NewIntValue(decoder.TotalErrors), datapoint.Counter, time.Now()),
	}
}
func (c *CadvisorCollector) collectContainersInfo(ch chan<- datapoint.Datapoint) {
	containers, err := c.infoProvider.SubcontainersInfo("/")
	if err != nil {
		//c.errors.Set(1)
		glog.Warningf("Couldn't get containers: %s", err)
		return
	}
	for _, container := range containers {
		dims := make(map[string]string)
		id := container.Name
		dims["id"] = id
		name := id
		if len(container.Aliases) > 0 {
			name = container.Aliases[0]
			dims["name"] = name
		}
		image := container.Spec.Image
		if len(image) > 0 {
			dims["image"] = image
		}
		if c.containerNameToLabels != nil {
			newLabels := c.containerNameToLabels(name)
			for k, v := range newLabels {
				dims[k] = v
			}
		}
		tt := time.Now()

		// Container spec
		// Start time of the container since unix epoch in seconds.
		ch <- *datapoint.New("container_start_time_seconds", copyDims(dims), datapoint.NewIntValue(container.Spec.CreationTime.Unix()), datapoint.Gauge, tt)
		if container.Spec.HasCpu {
			// CPU share of the container.
			ch <- *datapoint.New("container_spec_cpu_shares", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Cpu.Limit)), datapoint.Gauge, tt)
		}
		if container.Spec.HasMemory {
			// Memory limit for the container.
			ch <- *datapoint.New("container_spec_memory_limit_bytes", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Memory.Limit)), datapoint.Gauge, tt)
			// Memory swap limit for the container.
			ch <- *datapoint.New("container_spec_memory_swap_limit_bytes", copyDims(dims), datapoint.NewIntValue(int64(container.Spec.Memory.SwapLimit)), datapoint.Gauge, tt)
		}

		// Now for the actual metrics
		if len(container.Stats) > 0 {
			// Only use the latest stats from this container. Note/warning: the stats
			// array contains historical statistics in earliest-to-latest order.
			lastStatIndex := len(container.Stats) - 1
			stat := container.Stats[lastStatIndex]
			for _, cm := range c.containerMetrics {
				for _, metricValue := range cm.getValues(stat) {
					newDims := copyDims(dims)
					// Add extra dimensions
					for i, label := range cm.extraLabels {
						newDims[label] = metricValue.labels[i]
					}
					ch <- *datapoint.New(cm.name, newDims, metricValue.value, cm.valueType, stat.Timestamp)
				}
			}
		}
	}
}
// Next returns a unique datapoint
func (d *DatapointSource) Next() *datapoint.Datapoint {
	d.mu.Lock()
	defer d.mu.Unlock()
	return datapoint.New(
		d.Metric+":"+strconv.FormatInt(atomic.AddInt64(&d.CurrentIndex, 1), 10),
		d.Dims,
		datapoint.NewIntValue(0),
		d.Dptype,
		d.TimeSource())
}
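// Sketch of test usage, not from the source: successive calls append an
// incrementing index to d.Metric, so each call yields a distinct metric name
// and tests can tell the resulting datapoints apart.
func exampleUniquePoints(d *DatapointSource) {
	a, b := d.Next(), d.Next()
	if a.Metric == b.Metric {
		panic("Next should return unique metric names")
	}
}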