// parseTcollectorValue parses a tcollector-style line into a data point. func parseTcollectorValue(line string) (*opentsdb.DataPoint, error) { sp := strings.Fields(line) if len(sp) < 3 { return nil, fmt.Errorf("bad line: %s", line) } ts, err := strconv.ParseInt(sp[1], 10, 64) if err != nil { return nil, fmt.Errorf("bad timestamp: %s", sp[1]) } val, err := strconv.ParseFloat(sp[2], 64) if err != nil { return nil, fmt.Errorf("bad value: %s", sp[2]) } if !opentsdb.ValidTag(sp[0]) { return nil, fmt.Errorf("bad metric: %s", sp[0]) } dp := opentsdb.DataPoint{ Metric: sp[0], Timestamp: ts, Value: val, } tags := opentsdb.TagSet{} for _, tag := range sp[3:] { ts, err := opentsdb.ParseTags(tag) if err != nil { return nil, fmt.Errorf("bad tag, metric %s: %v: %v", sp[0], tag, err) } tags.Merge(ts) } setExternalTags(tags) dp.Tags = tags return &dp, nil }
func (c *Context) LookupAll(table, key string, group interface{}) string { var t opentsdb.TagSet switch v := group.(type) { case string: var err error t, err = opentsdb.ParseTags(v) if err != nil { c.addError(err) return err.Error() } case opentsdb.TagSet: t = v } l := c.schedule.RuleConf.GetLookup(table) if l == nil { err := fmt.Errorf("unknown lookup table %v", table) c.addError(err) return err.Error() } if v, ok := l.ToExpr().Get(key, t); ok { return v } err := fmt.Errorf("no entry for key %v in table %v for tagset %v", key, table, c.AlertKey.Group()) c.addError(err) return err.Error() }
func (c *Context) GetMeta(metric, name string, v interface{}) interface{} { var t opentsdb.TagSet switch v := v.(type) { case string: var err error t, err = opentsdb.ParseTags(v) if err != nil { c.addError(err) return nil } case opentsdb.TagSet: t = v } meta, err := c.schedule.GetMetadata(metric, t) if err != nil { c.addError(err) return nil } if name == "" { return meta } for _, m := range meta { if m.Name == name { return m.Value } } return "metadta not found" }
func (c *Context) GetMeta(metric, name string, v interface{}) (interface{}, error) { var t opentsdb.TagSet switch v := v.(type) { case string: var err error t, err = opentsdb.ParseTags(v) if err != nil { return t, err } case opentsdb.TagSet: t = v } meta, err := c.schedule.GetMetadata(metric, t) if err != nil { return nil, err } if name == "" { return meta, nil } for _, m := range meta { if m.Name == name { return m.Value, nil } } return nil, nil }
func SeriesFunc(e *State, T miniprofiler.Timer, tags string, pairs ...float64) (*Results, error) { if len(pairs)%2 != 0 { return nil, fmt.Errorf("uneven number of time stamps and values") } group := opentsdb.TagSet{} if tags != "" { var err error group, err = opentsdb.ParseTags(tags) if err != nil { return nil, fmt.Errorf("unable to parse tags: %v", err) } } series := make(Series) for i := 0; i < len(pairs); i += 2 { series[time.Unix(int64(pairs[i]), 0)] = pairs[i+1] } return &Results{ Results: []*Result{ { Value: series, Group: group, }, }, }, nil }
// TagSet returns m's tags. func (m Metakey) TagSet() opentsdb.TagSet { tags, err := opentsdb.ParseTags(m.Tags) if err != nil { return nil } return tags }
// AddSilence creates or previews a silence covering [start, end] for the
// given alert name and/or tag list. With confirm=false it returns the set
// of currently-open incidents the silence would match (a preview); with
// confirm=true it persists the silence (replacing the silence named by
// edit, if any) and returns nil.
func (s *Schedule) AddSilence(start, end time.Time, alert, tagList string, forget, confirm bool, edit, user, message string) (map[models.AlertKey]bool, error) {
	if start.IsZero() || end.IsZero() {
		return nil, fmt.Errorf("both start and end must be specified")
	}
	if start.After(end) {
		return nil, fmt.Errorf("start time must be before end time")
	}
	if time.Since(end) > 0 {
		return nil, fmt.Errorf("end time must be in the future")
	}
	if alert == "" && tagList == "" {
		return nil, fmt.Errorf("must specify either alert or tags")
	}
	si := &models.Silence{
		Start:   start,
		End:     end,
		Alert:   alert,
		Tags:    make(opentsdb.TagSet),
		Forget:  forget,
		User:    user,
		Message: message,
	}
	if tagList != "" {
		tags, err := opentsdb.ParseTags(tagList)
		// Only reject when parsing produced nothing at all; a partial
		// tagset alongside a validation error is deliberately accepted.
		if err != nil && tags == nil {
			return nil, err
		}
		si.Tags = tags
		si.TagString = tags.Tags()
	}
	if confirm {
		if edit != "" {
			// Remove the silence being edited before saving its replacement.
			if err := s.DataAccess.Silence().DeleteSilence(edit); err != nil {
				return nil, err
			}
		}
		// Delete any existing silence with the same ID, then re-add.
		if err := s.DataAccess.Silence().DeleteSilence(si.ID()); err != nil {
			return nil, err
		}
		if err := s.DataAccess.Silence().AddSilence(si); err != nil {
			return nil, err
		}
		return nil, nil
	}
	// Preview mode: report which open incidents this silence would match.
	aks := make(map[models.AlertKey]bool)
	open, err := s.DataAccess.State().GetAllOpenIncidents()
	if err != nil {
		return nil, err
	}
	for _, inc := range open {
		if si.Matches(inc.Alert, inc.AlertKey.Group()) {
			aks[inc.AlertKey] = true
		}
	}
	return aks, nil
}
// Last returns the most recent datapoint for a metric+tagset. The metric+tagset
// string should be formatted like os.cpu{host=foo}. The tag portion expects
// that the keys will be in alphabetical order.
func Last(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	// The "counter" form value, when present at all, requests counter semantics.
	var counter bool
	if r.FormValue("counter") != "" {
		counter = true
	}
	tags, err := opentsdb.ParseTags(r.FormValue("tagset"))
	if err != nil {
		return nil, err
	}
	return schedule.Search.GetLast(r.FormValue("metric"), tags, counter)
}
// c_redis_counters reads scollector counter values stored in a redis hash
// (collect.RedisCountersKey) on the given server/db and converts each
// "metric:tags" field into an OpenTSDB counter data point. Malformed
// fields are logged and skipped rather than failing the whole collection.
func c_redis_counters(server string, db int) (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint
	conn, err := redis.Dial("tcp", server, redis.DialDatabase(db))
	if err != nil {
		return md, err
	}
	defer conn.Close()
	// Name the connection so it is identifiable in CLIENT LIST output.
	if _, err := conn.Do("CLIENT", "SETNAME", "scollector"); err != nil {
		return md, err
	}
	cursor := 0
	for {
		// HSCAN returns [next-cursor, field/value pairs].
		vals, err := redis.Values(conn.Do("HSCAN", collect.RedisCountersKey, cursor))
		if err != nil {
			return md, err
		}
		if len(vals) != 2 {
			return md, fmt.Errorf("Unexpected number of values")
		}
		cursor, err = redis.Int(vals[0], nil)
		if err != nil {
			return md, err
		}
		pairs, err := redis.StringMap(vals[1], nil)
		if err != nil {
			return md, err
		}
		for mts, val := range pairs {
			// Hash fields are encoded as "metric:tagset".
			parts := strings.Split(mts, ":")
			if len(parts) != 2 {
				slog.Errorf("Invalid metric tag set counter: %s", mts)
				continue
			}
			metric := parts[0]
			tags, err := opentsdb.ParseTags(parts[1])
			if err != nil {
				slog.Errorf("Invalid tags: %s", parts[1])
				continue
			}
			v, err := strconv.Atoi(val)
			if err != nil {
				slog.Errorf("Invalid counter value: %s", val)
				continue
			}
			Add(&md, metric, v, tags, metadata.Counter, metadata.Count, "")
		}
		// A zero cursor means the scan is complete.
		if cursor == 0 {
			break
		}
	}
	return md, nil
}
func FilteredTagsetsByMetric(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) { vars := mux.Vars(r) metric := vars["metric"] tagset := opentsdb.TagSet{} var err error ts := r.FormValue("tags") if ts != "" { if tagset, err = opentsdb.ParseTags(ts); err != nil { return nil, err } } return schedule.Search.FilteredTagSets(metric, tagset) }
func (d *dataAccess) GetTagMetadata(tags opentsdb.TagSet, name string) ([]*TagMetadata, error) { defer collect.StartTimer("redis", opentsdb.TagSet{"op": "GetTagMeta"})() conn := d.GetConnection() defer conn.Close() args := []interface{}{} for tagK, tagV := range tags { args = append(args, tagMetaIdxKey(tagK, tagV)) } keys, err := redis.Strings(conn.Do("SINTER", args...)) if err != nil { return nil, err } args = []interface{}{} for _, key := range keys { if name == "" || strings.HasSuffix(key, ":"+name) { args = append(args, key) } } results, err := redis.Strings(conn.Do("MGET", args...)) data := []*TagMetadata{} for i := range args { // break up key to get tags and name key := args[i].(string)[len("tmeta:"):] sepIdx := strings.LastIndex(key, ":") tags := key[:sepIdx] name := key[sepIdx+1:] tagSet, err := opentsdb.ParseTags(tags) if err != nil { return nil, err } // break up response to get time and value parts := strings.SplitN(results[i], ":", 2) if len(parts) != 2 { return nil, fmt.Errorf("Expect metadata value to be `time:value`") } val := parts[1] time, err := strconv.ParseInt(parts[0], 10, 64) if err != nil { return nil, err } obj := &TagMetadata{ Tags: tagSet, Name: name, Value: val, LastTouched: time, } data = append(data, obj) } return data, nil }
func (s *Search) FilteredTagSets(metric string, tags opentsdb.TagSet) ([]opentsdb.TagSet, error) { sets, err := s.DataAccess.Search().GetMetricTagSets(metric, tags) if err != nil { return nil, err } r := []opentsdb.TagSet{} for k := range sets { ts, err := opentsdb.ParseTags(k) if err != nil { return nil, err } r = append(r, ts) } return r, nil }
// Group returns the tagset of this alert key. Will panic if a is not a valid // AlertKey. OpenTSDB tag validation errors are ignored. func (a AlertKey) Group() opentsdb.TagSet { sp := strings.SplitN(string(a), "{", 2) if len(sp) < 2 { panic(fmt.Errorf("invalid alert key %s", a)) } s := sp[1] s = s[:len(s)-1] if s == "" { return nil } g, err := opentsdb.ParseTags(s) if g == nil && err != nil { panic(err) } return g }
func (s *Squelches) Add(v string) error { tags, err := opentsdb.ParseTags(v) if tags == nil && err != nil { return err } sq := make(Squelch) for k, v := range tags { re, err := regexp.Compile(v) if err != nil { return err } sq[k] = re } s.s = append(s.s, sq) return nil }
// AddSilence creates or previews a silence covering [start, end] for the
// given alert name and/or tag list. With confirm=false it returns the
// known alert keys the silence would match along with whether each is
// active (a preview); with confirm=true it saves the silence (replacing
// the one named by edit) and returns nil.
func (s *Schedule) AddSilence(start, end time.Time, alert, tagList string, forget, confirm bool, edit, user, message string) (map[expr.AlertKey]bool, error) {
	if start.IsZero() || end.IsZero() {
		return nil, fmt.Errorf("both start and end must be specified")
	}
	if start.After(end) {
		return nil, fmt.Errorf("start time must be before end time")
	}
	if time.Since(end) > 0 {
		return nil, fmt.Errorf("end time must be in the future")
	}
	if alert == "" && tagList == "" {
		return nil, fmt.Errorf("must specify either alert or tags")
	}
	si := &Silence{
		Start:   start,
		End:     end,
		Alert:   alert,
		Tags:    make(opentsdb.TagSet),
		Forget:  forget,
		User:    user,
		Message: message,
	}
	if tagList != "" {
		tags, err := opentsdb.ParseTags(tagList)
		// Only reject when parsing produced nothing at all; a partial
		// tagset alongside a validation error is deliberately accepted.
		if err != nil && tags == nil {
			return nil, err
		}
		si.Tags = tags
	}
	s.Lock("AddSilence")
	defer s.Unlock()
	if confirm {
		// Replace the silence being edited (delete is a no-op for "").
		delete(s.Silence, edit)
		s.Silence[si.ID()] = si
		s.Save()
		return nil, nil
	}
	// Preview mode: report which known alert keys this silence would match.
	aks := make(map[expr.AlertKey]bool)
	for ak := range s.status {
		if si.Matches(ak.Name(), ak.Group()) {
			aks[ak] = s.status[ak].IsActive()
		}
	}
	return aks, nil
}
func AddTags(e *State, T miniprofiler.Timer, series *Results, s string) (*Results, error) { if s == "" { return series, nil } tagSetToAdd, err := opentsdb.ParseTags(s) if err != nil { return nil, err } for tagKey, tagValue := range tagSetToAdd { for _, res := range series.Results { if _, ok := res.Group[tagKey]; ok { return nil, fmt.Errorf("%s key already in group", tagKey) } res.Group[tagKey] = tagValue } } return series, nil }
func (d *dataAccess) Search_GetMetricTagSets(metric string, tags opentsdb.TagSet) (map[string]int64, error) { defer collect.StartTimer("redis", opentsdb.TagSet{"op": "GetMetricTagSets"})() conn := d.GetConnection() defer conn.Close() mtss, err := stringInt64Map(conn.Do("HGETALL", searchMetricTagSetKey(metric))) if err != nil { return nil, err } for mts := range mtss { ts, err := opentsdb.ParseTags(mts) if err != nil { return nil, err } if !ts.Subset(tags) { delete(mtss, mts) } } return mtss, nil }
func (c *Context) LookupAll(table, key string, group interface{}) (string, error) { var t opentsdb.TagSet switch v := group.(type) { case string: var err error t, err = opentsdb.ParseTags(v) if err != nil { return "", err } case opentsdb.TagSet: t = v } l, ok := c.schedule.Conf.Lookups[table] if !ok { return "", fmt.Errorf("unknown lookup table %v", table) } if v, ok := l.ToExpr().Get(key, t); ok { return v, nil } return "", fmt.Errorf("no entry for key %v in table %v for tagset %v", key, table, c.Group) }
func FilteredTagsetsByMetric(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) { vars := mux.Vars(r) metric := vars["metric"] tagset := opentsdb.TagSet{} var err error ts := r.FormValue("tags") if ts != "" { if tagset, err = opentsdb.ParseTags(ts); err != nil { return nil, err } } since := int64(0) sinceStr := r.FormValue("since") if sinceStr != "" { since, err = strconv.ParseInt(sinceStr, 10, 64) //since will be set to 0 again in case of errors if err != nil { return nil, err } } return schedule.Search.FilteredTagSets(metric, tagset, since) }
// GetMetricTagSets scans the per-metric tagset hash and returns the
// tagset-to-value map for entries matching the given tag filter.
func (d *dataAccess) GetMetricTagSets(metric string, tags opentsdb.TagSet) (map[string]int64, error) {
	defer collect.StartTimer("redis", opentsdb.TagSet{"op": "GetMetricTagSets"})()
	conn := d.GetConnection()
	defer conn.Close()
	var cursor = "0"
	result := map[string]int64{}
	for {
		// HSCAN returns [next-cursor, field/value pairs].
		vals, err := redis.Values(conn.Do(d.HSCAN(), searchMetricTagSetKey(metric), cursor))
		if err != nil {
			return nil, slog.Wrap(err)
		}
		cursor, err = redis.String(vals[0], nil)
		if err != nil {
			return nil, slog.Wrap(err)
		}
		mtss, err := stringInt64Map(vals[1], nil)
		if err != nil {
			return nil, slog.Wrap(err)
		}
		for mts, t := range mtss {
			ts, err := opentsdb.ParseTags(mts)
			if err != nil {
				return nil, slog.Wrap(err)
			}
			// Keep only tagsets that match the requested filter.
			if ts.Subset(tags) {
				result[mts] = t
			}
		}
		// A cursor of "0" (or empty) means the scan is complete.
		if cursor == "" || cursor == "0" {
			break
		}
	}
	return result, nil
}
// toToml converts a legacy key=value scollector configuration file
// (read from *flagConf) into TOML format and writes it to fname.
// Any parse problem is fatal via slog.Fatal/Fatalf.
func toToml(fname string) {
	var c conf.Conf
	b, err := ioutil.ReadFile(*flagConf)
	if err != nil {
		slog.Fatal(err)
	}
	// Sections that cannot be expressed directly on conf.Conf are
	// accumulated here and appended verbatim after the encoded TOML.
	extra := new(bytes.Buffer)
	var hap conf.HAProxy
	for i, line := range strings.Split(string(b), "\n") {
		if strings.TrimSpace(line) == "" {
			continue
		}
		sp := strings.SplitN(line, "=", 2)
		if len(sp) != 2 {
			slog.Fatalf("expected = in %v:%v", *flagConf, i+1)
		}
		k := strings.TrimSpace(sp[0])
		v := strings.TrimSpace(sp[1])
		switch k {
		case "host":
			c.Host = v
		case "hostname":
			c.Hostname = v
		case "filter":
			c.Filter = strings.Split(v, ",")
		case "coldir":
			c.ColDir = v
		case "snmp":
			// Each comma-separated entry is community@host.
			for _, s := range strings.Split(v, ",") {
				sp := strings.Split(s, "@")
				if len(sp) != 2 {
					slog.Fatal("invalid snmp string:", v)
				}
				c.SNMP = append(c.SNMP, conf.SNMP{
					Community: sp[0],
					Host:      sp[1],
				})
			}
		case "icmp":
			for _, i := range strings.Split(v, ",") {
				c.ICMP = append(c.ICMP, conf.ICMP{Host: i})
			}
		case "haproxy":
			// Shared user:password credentials; only one line allowed.
			if v != "" {
				for _, s := range strings.Split(v, ",") {
					sp := strings.SplitN(s, ":", 2)
					if len(sp) != 2 {
						slog.Fatal("invalid haproxy string:", v)
					}
					if hap.User != "" || hap.Password != "" {
						slog.Fatal("only one haproxy line allowed")
					}
					hap.User = sp[0]
					hap.Password = sp[1]
				}
			}
		case "haproxy_instance":
			// tier:url pairs collected under the single haproxy entry.
			sp := strings.SplitN(v, ":", 2)
			if len(sp) != 2 {
				slog.Fatal("invalid haproxy_instance string:", v)
			}
			hap.Instances = append(hap.Instances, conf.HAProxyInstance{
				Tier: sp[0],
				URL:  sp[1],
			})
		case "tags":
			tags, err := opentsdb.ParseTags(v)
			if err != nil {
				slog.Fatal(err)
			}
			c.Tags = tags
		case "aws":
			// Each comma-separated entry is accessKey:secretKey@region.
			for _, s := range strings.Split(v, ",") {
				sp := strings.SplitN(s, ":", 2)
				if len(sp) != 2 {
					slog.Fatal("invalid AWS string:", v)
				}
				accessKey := sp[0]
				idx := strings.LastIndex(sp[1], "@")
				if idx == -1 {
					slog.Fatal("invalid AWS string:", v)
				}
				secretKey := sp[1][:idx]
				region := sp[1][idx+1:]
				if len(accessKey) == 0 || len(secretKey) == 0 || len(region) == 0 {
					slog.Fatal("invalid AWS string:", v)
				}
				c.AWS = append(c.AWS, conf.AWS{
					AccessKey: accessKey,
					SecretKey: secretKey,
					Region:    region,
				})
			}
		case "vsphere":
			// Each comma-separated entry is user:password@host.
			for _, s := range strings.Split(v, ",") {
				sp := strings.SplitN(s, ":", 2)
				if len(sp) != 2 {
					slog.Fatal("invalid vsphere string:", v)
				}
				user := sp[0]
				idx := strings.LastIndex(sp[1], "@")
				if idx == -1 {
					slog.Fatal("invalid vsphere string:", v)
				}
				pwd := sp[1][:idx]
				host := sp[1][idx+1:]
				if len(user) == 0 || len(pwd) == 0 || len(host) == 0 {
					slog.Fatal("invalid vsphere string:", v)
				}
				c.Vsphere = append(c.Vsphere, conf.Vsphere{
					User:     user,
					Password: pwd,
					Host:     host,
				})
			}
		case "freq":
			freq, err := strconv.Atoi(v)
			if err != nil {
				slog.Fatal(err)
			}
			c.Freq = freq
		case "process":
			// Process sections are emitted as raw TOML in extra since
			// their shape differs per OS.
			if runtime.GOOS == "linux" {
				var p struct {
					Command string
					Name    string
					Args    string
				}
				sp := strings.Split(v, ",")
				if len(sp) > 1 {
					p.Name = sp[1]
				}
				if len(sp) > 2 {
					p.Args = sp[2]
				}
				p.Command = sp[0]
				extra.WriteString(fmt.Sprintf(`
[[Process]]
Command = %q
Name = %q
Args = %q
`, p.Command, p.Name, p.Args))
			} else if runtime.GOOS == "windows" {
				extra.WriteString(fmt.Sprintf(`
[[Process]]
Name = %q
`, v))
			}
		case "process_dotnet":
			c.ProcessDotNet = append(c.ProcessDotNet, conf.ProcessDotNet{Name: v})
		case "keepalived_community":
			c.KeepalivedCommunity = v
		default:
			slog.Fatalf("unknown key in %v:%v", *flagConf, i+1)
		}
	}
	if len(hap.Instances) > 0 {
		c.HAProxy = append(c.HAProxy, hap)
	}
	f, err := os.Create(fname)
	if err != nil {
		slog.Fatal(err)
	}
	if err := toml.NewEncoder(f).Encode(&c); err != nil {
		slog.Fatal(err)
	}
	// Append the raw sections after the encoded configuration.
	if _, err := extra.WriteTo(f); err != nil {
		slog.Fatal(err)
	}
	f.Close()
}
// procRule evaluates alert a's warn and crit expressions at time now and
// renders its templates, returning the triggered alert keys grouped by
// severity together with the rendered subject/body (or template errors).
// If email is set and rendering succeeds, a test notification is sent.
func procRule(t miniprofiler.Timer, c *conf.Conf, a *conf.Alert, now time.Time, summary bool, email string, template_group string) (*ruleResult, error) {
	s := &sched.Schedule{}
	s.DataAccess = schedule.DataAccess
	s.Search = schedule.Search
	if err := s.Init(c); err != nil {
		return nil, err
	}
	rh := s.NewRunHistory(now, cacheObj)
	// Evaluate both severities; results accumulate in rh.Events.
	if _, err := s.CheckExpr(t, rh, a, a.Warn, models.StWarning, nil); err != nil {
		return nil, err
	}
	if _, err := s.CheckExpr(t, rh, a, a.Crit, models.StCritical, nil); err != nil {
		return nil, err
	}
	keys := make(models.AlertKeys, len(rh.Events))
	criticals, warnings, normals := make([]models.AlertKey, 0), make([]models.AlertKey, 0), make([]models.AlertKey, 0)
	i := 0
	// Bucket every event's alert key by its resulting status.
	for k, v := range rh.Events {
		v.Time = now
		keys[i] = k
		i++
		switch v.Status {
		case models.StNormal:
			normals = append(normals, k)
		case models.StWarning:
			warnings = append(warnings, k)
		case models.StCritical:
			criticals = append(criticals, k)
		default:
			return nil, fmt.Errorf("unknown state type %v", v.Status)
		}
	}
	sort.Sort(keys)
	var subject, body []byte
	var data interface{}
	warning := make([]string, 0)
	if !summary && len(keys) > 0 {
		// Pick the incident used for template rendering: the first key
		// whose group matches template_group, else the first key overall.
		var primaryIncident *models.IncidentState
		if template_group != "" {
			ts, err := opentsdb.ParseTags(template_group)
			if err != nil {
				return nil, err
			}
			for _, ak := range keys {
				if ak.Group().Subset(ts) {
					primaryIncident = sched.NewIncident(ak)
					primaryIncident.Events = []models.Event{*rh.Events[ak]}
					break
				}
			}
		}
		if primaryIncident == nil {
			primaryIncident = sched.NewIncident(keys[0])
			primaryIncident.Events = []models.Event{*rh.Events[keys[0]]}
			if template_group != "" {
				warning = append(warning, fmt.Sprintf("template group %s was not a subset of any result", template_group))
			}
		}
		// Prefer the crit result over the warn result for display.
		if e := primaryIncident.Events[0]; e.Crit != nil {
			primaryIncident.Result = e.Crit
		} else if e.Warn != nil {
			primaryIncident.Result = e.Warn
		}
		// Render body and subject, converting template panics into errors
		// via the deferred recovers.
		var b_err, s_err error
		func() {
			defer func() {
				if err := recover(); err != nil {
					s := fmt.Sprint(err)
					warning = append(warning, s)
					b_err = fmt.Errorf(s)
				}
			}()
			if body, _, b_err = s.ExecuteBody(rh, a, primaryIncident, false); b_err != nil {
				warning = append(warning, b_err.Error())
			}
		}()
		func() {
			defer func() {
				if err := recover(); err != nil {
					s := fmt.Sprint(err)
					warning = append(warning, s)
					s_err = fmt.Errorf(s)
				}
			}()
			subject, s_err = s.ExecuteSubject(rh, a, primaryIncident, false)
			if s_err != nil {
				warning = append(warning, s_err.Error())
			}
		}()
		if s_err != nil || b_err != nil {
			// Fall back to the "bad template" rendering on any error.
			var err error
			subject, body, err = s.ExecuteBadTemplate([]error{s_err, b_err}, rh, a, primaryIncident)
			if err != nil {
				// NOTE(review): "tempalate" is a typo in this message.
				subject = []byte(fmt.Sprintf("unable to create tempalate error notification: %v", err))
			}
		} else if email != "" {
			// Send a test email using templates rendered in email mode.
			m, err := mail.ParseAddress(email)
			if err != nil {
				return nil, err
			}
			n := conf.Notification{
				Email: []*mail.Address{m},
			}
			email, attachments, b_err := s.ExecuteBody(rh, a, primaryIncident, true)
			email_subject, s_err := s.ExecuteSubject(rh, a, primaryIncident, true)
			if b_err != nil {
				warning = append(warning, b_err.Error())
			} else if s_err != nil {
				warning = append(warning, s_err.Error())
			} else {
				n.DoEmail(email_subject, email, schedule.Conf, string(primaryIncident.AlertKey), attachments...)
			}
		}
		data = s.Data(rh, primaryIncident, a, false)
	}
	return &ruleResult{
		criticals,
		warnings,
		normals,
		now,
		string(body),
		string(subject),
		data,
		rh.Events,
		warning,
	}, nil
}
// loadLookup parses a lookup section: a named table of "entry"
// subsections, each keyed by a tagset and holding key/value pairs.
// All entries in one table must use the same tag keys. Problems are
// reported through c.errorf/c.error.
func (c *Conf) loadLookup(s *parse.SectionNode) {
	name := s.Name.Text
	if _, ok := c.Lookups[name]; ok {
		c.errorf("duplicate lookup name: %s", name)
	}
	l := Lookup{
		Name: name,
	}
	l.Text = s.RawText
	// Tag keys (with blanked values) shared by every entry in this table.
	var lookupTags opentsdb.TagSet
	saw := make(map[string]bool)
	for _, n := range s.Nodes.Nodes {
		c.at(n)
		switch n := n.(type) {
		case *parse.SectionNode:
			if n.SectionType.Text != "entry" {
				c.errorf("unexpected subsection type")
			}
			tags, err := opentsdb.ParseTags(n.Name.Text)
			// Tolerate validation errors as long as some tags parsed.
			if tags == nil && err != nil {
				c.error(err)
			}
			if _, ok := saw[tags.String()]; ok {
				c.errorf("duplicate entry")
			}
			saw[tags.String()] = true
			if len(tags) == 0 {
				c.errorf("lookup entries require tags")
			}
			// Compare tag keys only: blank out the values.
			empty := make(opentsdb.TagSet)
			for k := range tags {
				empty[k] = ""
			}
			if len(lookupTags) == 0 {
				// The first entry fixes the table's tag keys.
				lookupTags = empty
				for k := range empty {
					l.Tags = append(l.Tags, k)
				}
			} else if !lookupTags.Equal(empty) {
				c.errorf("lookup tags mismatch, expected %v", lookupTags)
			}
			e := Entry{
				Def:  n.RawText,
				Name: n.Name.Text,
				ExprEntry: &ExprEntry{
					AlertKey: models.NewAlertKey("", tags),
					Values:   make(map[string]string),
				},
			}
			for _, en := range n.Nodes.Nodes {
				c.at(en)
				switch en := en.(type) {
				case *parse.PairNode:
					e.Values[en.Key.Text] = en.Val.Text
				default:
					c.errorf("unexpected node")
				}
			}
			l.Entries = append(l.Entries, &e)
		default:
			c.errorf("unexpected node")
		}
	}
	c.at(s)
	c.Lookups[name] = &l
}
// GenericSnmp polls the configured host for every metric and tree defined
// in mib, converting OID values (with optional fallback OIDs) into data
// points tagged per the MIB configuration plus a host tag.
func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error) {
	md := opentsdb.MultiDataPoint{}
	baseOid := mib.BaseOid
	// rateUnitTags resolves a metric's rate type, unit, and parsed tags,
	// applying defaults and always adding the host tag.
	rateUnitTags := func(m conf.MIBMetric) (r metadata.RateType, u metadata.Unit, t opentsdb.TagSet, err error) {
		if r = metadata.RateType(m.RateType); r == "" {
			r = metadata.Gauge
		}
		if u = metadata.Unit(m.Unit); u == "" {
			u = metadata.None
		}
		if m.Tags == "" {
			t = make(opentsdb.TagSet)
		} else {
			t, err = opentsdb.ParseTags(m.Tags)
			if err != nil {
				return "", "", nil, err
			}
		}
		t["host"] = cfg.Host
		return
	}
	// Scalar metrics: one OID (with optional fallback) per data point.
	for _, metric := range mib.Metrics {
		rate, unit, tagset, err := rateUnitTags(metric)
		if err != nil {
			return md, err
		}
		v, err := snmp_oid(cfg.Host, cfg.Community, combineOids(metric.Oid, baseOid))
		if err != nil && metric.FallbackOid != "" {
			v, err = snmp_oid(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, baseOid))
		}
		if err != nil {
			return md, err
		}
		Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
	}
	// Tree (table) metrics: walk each tag subtree once, then join tag
	// values to metric values by subtree index.
	for _, tree := range mib.Trees {
		treeOid := combineOids(tree.BaseOid, baseOid)
		tagCache := make(map[string]map[string]interface{}) // tag key to map of values
		for _, tag := range tree.Tags {
			if tag.Oid == "idx" {
				// "idx" means use the subtree index itself; nothing to fetch.
				continue
			}
			vals, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(tag.Oid, treeOid))
			if err != nil {
				return md, err
			}
			tagCache[tag.Key] = vals
		}
		for _, metric := range tree.Metrics {
			rate, unit, tagset, err := rateUnitTags(metric)
			if err != nil {
				return md, err
			}
			nodes, err := snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.Oid, treeOid))
			if err != nil && metric.FallbackOid != "" {
				nodes, err = snmp_subtree(cfg.Host, cfg.Community, combineOids(metric.FallbackOid, treeOid))
			}
			if err != nil {
				return md, err
			}
			// check all lengths: every tag subtree must align with the
			// metric subtree or the join below would be incomplete.
			for k, list := range tagCache {
				if len(list) != len(nodes) {
					return md, fmt.Errorf("snmp tree for tag key %s, and metric %s do not have same length", k, metric.Metric)
				}
			}
			for i, v := range nodes {
				for _, tag := range tree.Tags {
					var tagVal interface{}
					if tag.Oid == "idx" {
						// Use the subtree index (a string key) as the tag value.
						tagVal = i
					} else {
						var ok bool
						tagVal, ok = tagCache[tag.Key][i]
						if !ok {
							return md, fmt.Errorf("tree for tag %s has no entry for metric %s index %s", tag.Key, metric.Metric, i)
						}
					}
					// SNMP octet strings arrive as []byte; render as text.
					if byteSlice, ok := tagVal.([]byte); ok {
						tagVal = string(byteSlice)
					}
					tagset[tag.Key] = fmt.Sprint(tagVal)
				}
				Add(&md, metric.Metric, v, tagset, rate, unit, metric.Description)
			}
		}
	}
	return md, nil
}