func fastlyReflectAdd(md *opentsdb.MultiDataPoint, prefix, suffix string, st interface{}, timeStamp int64, ts opentsdb.TagSet) { t := reflect.TypeOf(st) valueOf := reflect.ValueOf(st) for i := 0; i < t.NumField(); i++ { field := t.Field(i) value := valueOf.Field(i).Interface() var ( jsonTag = field.Tag.Get("json") metricTag = field.Tag.Get("metric") rateTag = field.Tag.Get("rate") unitTag = field.Tag.Get("unit") divTag = field.Tag.Get("div") descTag = field.Tag.Get("desc") exclude = field.Tag.Get("exclude") != "" ) if exclude || descTag == "" { continue } metricName := jsonTag if metricTag != "" { metricName = metricTag } if metricName == "" { slog.Errorf("Unable to determine metric name for field %s. Skipping.", field.Name) continue } shouldDiv := divTag != "" if shouldDiv { descTag = fmt.Sprintf("%v %v", descTag, fastlyDivDesc) } fullMetric := fmt.Sprintf("%v.%v%v", prefix, metricName, suffix) switch value := value.(type) { case int64, float64: var v float64 if f, found := value.(float64); found { v = f } else { v = float64(value.(int64)) } if shouldDiv { v /= 60.0 } AddTS(md, fullMetric, timeStamp, v, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), descTag) case string: // Floats in strings, I know not why, precision perhaps? // err ignored since we expect non number strings in the struct if f, err := strconv.ParseFloat(value, 64); err != nil { if shouldDiv { f /= 60.0 } AddTS(md, fullMetric, timeStamp, f, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), descTag) } default: // Pass since there is no need to recurse } } }
// structProcessor.add() takes in a metric name prefix, an arbitrary struct, and a tagset.
// The processor recurses through the struct and builds metrics. The field tags direct how
// the field should be processed, as well as the metadata for the resulting metric.
//
// The field tags used are described as follows:
//
// version: typically set to '1' or '2'.
//     This is compared against the elastic cluster version. If the version from the tag
//     does not match the version in production, the metric will not be sent for this field.
//
// exclude:
//     If this tag is set to 'true', a metric will not be sent for this field.
//
// rate: one of 'gauge', 'counter', 'rate'
//     This tag dictates the metadata.RateType we send.
//
// unit: 'bytes', 'pages', etc
//     This tag dictates the metadata.Unit we send.
//
// metric:
//     This is the metric name which will be sent. If not present, the 'json'
//     tag is sent as the metric name.
//
// Special handling:
//
// Metrics having the json tag suffix of 'in_milliseconds' are automagically
// divided by 1000 and sent as seconds. The suffix is stripped from the name.
//
// Metrics having the json tag suffix of 'in_bytes' are automatically sent as
// gauge bytes. The suffix is stripped from the metric name.
func (s *structProcessor) add(prefix string, st interface{}, ts opentsdb.TagSet) {
	t := reflect.TypeOf(st)
	valueOf := reflect.ValueOf(st)
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := valueOf.Field(i).Interface()
		// Honor the exclude tag before doing any other work on the field.
		if field.Tag.Get("exclude") == "true" {
			continue
		}
		var (
			jsonTag    = field.Tag.Get("json")
			metricTag  = field.Tag.Get("metric")
			versionTag = field.Tag.Get("version")
			rateTag    = field.Tag.Get("rate")
			unitTag    = field.Tag.Get("unit")
		)
		// The explicit 'metric' tag wins over the 'json' tag for the name.
		metricName := jsonTag
		if metricTag != "" {
			metricName = metricTag
		}
		if metricName == "" {
			slog.Errorf("Unable to determine metric name for field %s. Skipping.", field.Name)
			continue
		}
		// Version gating: an empty version tag means "all versions"; otherwise the
		// production elastic version must start with the tag value (prefix match,
		// so a tag of "2" matches "2.x.y").
		if versionTag == "" || strings.HasPrefix(s.elasticVersion, versionTag) {
			switch value := value.(type) {
			case int, float64: // Number types in our structs are only ints and float64s.
				// Turn all millisecond metrics into seconds
				if strings.HasSuffix(metricName, "_in_millis") {
					switch value.(type) {
					case int:
						value = float64(value.(int)) / 1000
					case float64:
						value = value.(float64) / 1000
					}
					unitTag = "seconds"
					metricName = strings.TrimSuffix(metricName, "_in_millis")
				}
				// Set rate and unit for all "_in_bytes" metrics, and strip the "_in_bytes".
				// An explicit rate tag is preserved; only the default becomes gauge.
				if strings.HasSuffix(metricName, "_in_bytes") {
					if rateTag == "" {
						rateTag = "gauge"
					}
					unitTag = "bytes"
					metricName = strings.TrimSuffix(metricName, "_in_bytes")
				}
				Add(s.md, prefix+"."+metricName, value, ts, metadata.RateType(rateTag), metadata.Unit(unitTag), field.Tag.Get("desc"))
			case string: // The json data has a lot of strings, and we don't care about em.
			default: // If we hit another struct, recurse
				if reflect.ValueOf(value).Kind() == reflect.Struct {
					// Nested struct: descend with the metric name appended to the prefix.
					s.add(prefix+"."+metricName, value, ts)
				} else {
					slog.Errorf("Field %s for metric %s is non-numeric type. Cannot record as a metric.\n", field.Name, prefix+"."+metricName)
				}
			}
		}
	}
}
// GenericSnmp polls the SNMP host described by cfg for every scalar metric and
// every tree (table) metric declared in mib, returning the collected
// datapoints. Metric OIDs are resolved relative to mib.BaseOid via
// combineOids, and each metric may declare a FallbackOid that is tried when
// the primary OID fails. Any unrecoverable SNMP or parse error aborts the
// collection and is returned along with the datapoints gathered so far.
func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error) {
	md := opentsdb.MultiDataPoint{}
	baseOid := mib.BaseOid
	// rateUnitTags derives the rate type, unit, and tagset for one configured
	// metric, defaulting to gauge/none, and always forces the host tag to the
	// polled host.
	rateUnitTags := func(m conf.MIBMetric) (r metadata.RateType, u metadata.Unit, t opentsdb.TagSet, err error) {
		if r = metadata.RateType(m.RateType); r == "" {
			r = metadata.Gauge
		}
		if u = metadata.Unit(m.Unit); u == "" {
			u = metadata.None
		}
		if m.Tags == "" {
			t = make(opentsdb.TagSet)
		} else {
			t, err = opentsdb.ParseTags(m.Tags)
			if err != nil {
				return "", "", nil, err
			}
		}
		t["host"] = cfg.Host
		return
	}
	// Scalar metrics: one GET per metric, with optional fallback OID.
	for _, metric := range mib.Metrics {
		rate, unit, tagset, err := rateUnitTags(metric)
		if err != nil {
			return md, err
		}
		v, err := snmp_oid(cfg.Host, cfg.Community, combineOids(metric.Oid, baseOid))
		if err != nil && metric.FallbackOid != "" {
			v, err = snmp_oid(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, baseOid))
		}
		if err != nil {
			return md, err
		}
		Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
	}
	// Tree metrics: walk each subtree, pairing metric rows with tag rows by
	// their shared subtree index.
	for _, tree := range mib.Trees {
		treeOid := combineOids(tree.BaseOid, baseOid)
		tagCache := make(map[string]map[string]interface{}) // tag key to map of values
		for _, tag := range tree.Tags {
			// "idx" is a pseudo-OID meaning "use the row index itself"; it has
			// no subtree to walk.
			if tag.Oid == "idx" {
				continue
			}
			vals, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(tag.Oid, treeOid))
			if err != nil {
				return md, err
			}
			tagCache[tag.Key] = vals
		}
		for _, metric := range tree.Metrics {
			rate, unit, tagset, err := rateUnitTags(metric)
			if err != nil {
				return md, err
			}
			nodes, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.Oid, treeOid))
			if err != nil && metric.FallbackOid != "" {
				nodes, err = snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, treeOid))
			}
			if err != nil {
				return md, err
			}
			// check all lengths: every tag subtree must have exactly one entry
			// per metric row, otherwise rows and tags cannot be paired.
			for k, list := range tagCache {
				if len(list) != len(nodes) {
					return md, fmt.Errorf("snmp tree for tag key %s, and metric %s do not have same length", k, metric.Metric)
				}
			}
			for i, v := range nodes {
				for _, tag := range tree.Tags {
					var tagVal interface{}
					if tag.Oid == "idx" {
						// The row's subtree index doubles as the tag value.
						tagVal = i
					} else {
						var ok bool
						tagVal, ok = tagCache[tag.Key][i]
						if !ok {
							return md, fmt.Errorf("tree for tag %s has no entry for metric %s index %s", tag.Key, metric.Metric, i)
						}
					}
					// SNMP octet strings arrive as []byte; render them as text.
					if byteSlice, ok := tagVal.([]byte); ok {
						tagVal = string(byteSlice)
					}
					tagset[tag.Key] = fmt.Sprint(tagVal)
				}
				Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
			}
		}
	}
	return md, nil
}
"select NAME || ',' || VALUE from v$sysstat where NAME not like '%this session%';\n", func(row string, md *opentsdb.MultiDataPoint, prefix string, common opentsdb.TagSet) error { fields := strings.Split(row, ",") if len(fields) != 2 { return sqlplusParserFieldCountErr } v, err := sqlplusValueConv(fields[1]) if err != nil { return err } f0 := fields[0] name := sqlplusMetricNameConv(f0) rate := metadata.RateType(metadata.Counter) if f0 == "logons current" || strings.HasSuffix(f0, "cursors current") || strings.HasPrefix(f0, "gc current") { rate = metadata.Gauge } Add(md, prefix+name, v, common, rate, metadata.None, f0) return nil }, }, { "select TABLESPACE_NAME || ',' || USED_PERCENT from dba_tablespace_usage_metrics;\n", func(row string, md *opentsdb.MultiDataPoint, prefix string, common opentsdb.TagSet) error { fields := strings.Split(row, ",") if len(fields) != 2 { return sqlplusParserFieldCountErr
func c_varnish_unix() (opentsdb.MultiDataPoint, error) { var md opentsdb.MultiDataPoint const metric = "varnish." r, err := util.Command(5*time.Second, nil, "varnishstat", "-j") if err != nil { return nil, err } var stats varnishStats if err := json.NewDecoder(r).Decode(&stats); err != nil { return nil, err } for name, raw := range stats { if name == "timestamp" { continue } var v varnishStat if err := json.Unmarshal(raw, &v); err != nil { slog.Errorln("varnish parser error:", name, err) continue } ts := opentsdb.TagSet{} // special case for backend stats. extract backend name, host and port, put // them in tags and remove them in name. // the format is like "name(host,,port)" for the "ident" field of "VBE" type if v.Type == "VBE" { subtype := v.SubType name = strings.Replace(name, "."+subtype, "", -1) idx := strings.Index(subtype, "(") if idx < 0 || len(subtype)-idx < 4 { // output format changed, ignore continue } ss := strings.Split(subtype[idx+1:len(subtype)-1], ",") if len(ss) != 3 { // output format changed, ignore continue } ts.Merge(opentsdb.TagSet{"backend": subtype[:idx]}) ts.Merge(opentsdb.TagSet{"endpoint": ss[0] + "_" + ss[2]}) } rate := metadata.RateType(metadata.Gauge) if flag := v.Flag; flag == "a" || flag == "c" { rate = metadata.Counter } unit := metadata.Unit(metadata.Count) if v.Format == "B" { unit = metadata.Bytes } Add(&md, metric+strings.ToLower(name), v.Value, ts, rate, unit, v.Desc) } return md, nil }