// AddTags merges t into the override's tag set. On first use the set is
// initialized from a copy of t so the caller's TagSet is never aliased.
func (to *TagOverride) AddTags(t opentsdb.TagSet) {
	if to.tags != nil {
		to.tags = to.tags.Merge(t)
		return
	}
	to.tags = t.Copy()
}
// AddTS is the same as Add but lets you specify the timestamp func AddTS(md *opentsdb.MultiDataPoint, name string, ts int64, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string) { if b, ok := value.(bool); ok { if b { value = 1 } else { value = 0 } } tags := t.Copy() if host, present := tags["host"]; !present { tags["host"] = util.Hostname } else if host == "" { delete(tags, "host") } if rate != metadata.Unknown { metadata.AddMeta(name, nil, "rate", rate, false) } if unit != metadata.None { metadata.AddMeta(name, nil, "unit", unit, false) } if desc != "" { metadata.AddMeta(name, tags, "desc", desc, false) } tags = AddTags.Copy().Merge(tags) d := opentsdb.DataPoint{ Metric: name, Timestamp: ts, Value: value, Tags: tags, } *md = append(*md, &d) }
func BenchmarkSimpleRewrite(b *testing.B) { rule := &DenormalizationRule{ Metric: "a.b.c", TagNames: []string{"host"}, } tags := opentsdb.TagSet{"host": "foo-bar", "baz": "qwerty"} dp := &opentsdb.DataPoint{ Metric: "a.b.c", Timestamp: 42, Value: 3, Tags: tags.Copy(), } for i := 0; i < b.N; i++ { err := rule.Translate(dp) if err != nil { b.Fatal(err) } //expectedName := "__foo-bar.a.b.c" /* if dp.Metric != expectedName { b.Errorf("metric name %s is not `%s`", dp.Metric, expectedName) } if dp.Timestamp != 42 { b.Errorf("new metric timestamp does not match. %d != 42", dp.Timestamp) } if dp.Value != 3 { b.Errorf("new metric value does not match. %d != 3", dp.Value) } if !dp.Tags.Equal(tags) { b.Errorf("new metric tags do not match. %v != %v", dp.Tags, tags) } */ } }
// Set registers a callback for the given metric and tags, calling f immediately // before queueing data for send. func Set(metric string, ts opentsdb.TagSet, f func() interface{}) error { if err := check(metric, &ts); err != nil { return err } tss := metric + ts.String() mlock.Lock() sets[tss] = &setMetric{metric, ts.Copy(), f} mlock.Unlock() return nil }
// Put is useful for capturing "events" that have a gauge value. Subsequent // calls between the sending interval will overwrite previous calls. func Put(metric string, ts opentsdb.TagSet, v interface{}) error { if err := check(metric, &ts); err != nil { return err } tss := metric + ts.String() mlock.Lock() puts[tss] = &putMetric{metric, ts.Copy(), v} mlock.Unlock() return nil }
// AddTS is the same as Add but lets you specify the timestamp
func AddTS(md *opentsdb.MultiDataPoint, name string, ts int64, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string) {
	// Check if we really want that metric
	if skipMetric(name) {
		return
	}
	// Work on a copy so the caller's TagSet is never mutated.
	tags := t.Copy()
	// Default the host tag to this machine; an explicitly empty host tag
	// means "no host" and is removed entirely.
	if host, present := tags["host"]; !present {
		tags["host"] = util.Hostname
	} else if host == "" {
		delete(tags, "host")
	}
	// if tags are not cleanable, log a message and skip it
	if err := tags.Clean(); err != nil {
		line := ""
		//attempt to log where Add was called from
		if _, filename, l, ok := runtime.Caller(1); ok {
			// If the immediate caller is the collectors.go dispatch layer,
			// step one more frame up to find the real collector.
			if filepath.Base(filename) == "collectors.go" {
				_, filename, l, ok = runtime.Caller(2)
			}
			if ok {
				line = fmt.Sprintf("%s:%d", filepath.Base(filename), l)
			}
		}
		slog.Errorf("Invalid tagset discovered: %s. Skipping datapoint. Added from: %s", tags.String(), line)
		return
	}
	// Register metadata (rate type, unit, description) for this metric.
	if rate != metadata.Unknown {
		metadata.AddMeta(name, nil, "rate", rate, false)
	}
	if unit != metadata.None {
		metadata.AddMeta(name, nil, "unit", unit, false)
	}
	if desc != "" {
		metadata.AddMeta(name, tags, "desc", desc, false)
	}
	// Global AddTags are merged in last; explicit tags win on conflict.
	tags = AddTags.Copy().Merge(tags)
	// OpenTSDB has no boolean type; normalize bools to 1/0.
	if b, ok := value.(bool); ok {
		if b {
			value = 1
		} else {
			value = 0
		}
	}
	d := opentsdb.DataPoint{
		Metric:    name,
		Timestamp: ts,
		Value:     value,
		Tags:      tags,
	}
	*md = append(*md, &d)
}
func Sample(metric string, ts opentsdb.TagSet, v float64) error { if err := check(metric, &ts); err != nil { return err } tss := metric + ts.String() mlock.Lock() if aggs[tss] == nil { aggs[tss] = &agMetric{ metric: metric, ts: ts.Copy(), } } aggs[tss].values = append(aggs[tss].values, v) mlock.Unlock() return nil }
// Add takes a metric and increments a counter for that metric. The metric name // is appended to the basename specified in the Init function. func Add(metric string, ts opentsdb.TagSet, inc int64) error { if err := check(metric, &ts); err != nil { return err } tss := metric + ts.String() mlock.Lock() if counters[tss] == nil { counters[tss] = &addMetric{ metric: metric, ts: ts.Copy(), } } counters[tss].value += inc mlock.Unlock() return nil }
func c_fortinet_os(host, community string, cpuIntegrators map[string]tsIntegrator) (opentsdb.MultiDataPoint, error) { var md opentsdb.MultiDataPoint ts := opentsdb.TagSet{"host": host} // CPU cpuRaw, err := snmp_subtree(host, community, fortinetBaseOID+fortinetCPU) if err != nil { return md, err } coreCount := len(cpuRaw) var totalPercent int for id, v := range cpuRaw { cpuVal, err := strconv.Atoi(fmt.Sprintf("%v", v)) if err != nil { return md, fmt.Errorf("couldn't convert cpu value to int for fortinet cpu utilization on host %v: %v", host, err) } ts := ts.Copy().Merge(opentsdb.TagSet{"processor": id}) Add(&md, "fortinet.cpu.percent_used", cpuVal, ts, metadata.Gauge, metadata.Pct, "") totalPercent += cpuVal } if _, ok := cpuIntegrators[host]; !ok { cpuIntegrators[host] = getTsIntegrator() } Add(&md, osCPU, cpuIntegrators[host](time.Now().Unix(), float64(totalPercent)/float64(coreCount)), opentsdb.TagSet{"host": host}, metadata.Counter, metadata.Pct, "") // Memory memTotal, err := snmp_oid(host, community, fortinetBaseOID+fortinetMemTotal) if err != nil { return md, fmt.Errorf("failed to get total memory for fortinet host %v: %v", host, err) } memTotalBytes := memTotal.Int64() * 2 << 9 // KiB to Bytes Add(&md, "fortinet.mem.total", memTotal, ts, metadata.Gauge, metadata.KBytes, "The total memory in kilobytes.") Add(&md, osMemTotal, memTotalBytes, ts, metadata.Gauge, metadata.Bytes, osMemTotalDesc) memPctUsed, err := snmp_oid(host, community, fortinetBaseOID+fortinetMemPercentUsed) if err != nil { return md, fmt.Errorf("failed to get percent of memory used for fortinet host %v: %v", host, err) } Add(&md, "fortinet.mem.percent_used", memPctUsed, ts, metadata.Gauge, metadata.Pct, "The percent of memory used.") memPctUsedFloat := float64(memPctUsed.Int64()) / 100 memPctFree := 100 - memPctUsed.Int64() Add(&md, osMemPctFree, memPctFree, ts, metadata.Gauge, metadata.Pct, osMemPctFreeDesc) memFree := float64(memTotalBytes) * (float64(1) - memPctUsedFloat) Add(&md, 
osMemFree, int64(memFree), ts, metadata.Gauge, metadata.Bytes, osMemFreeDesc) Add(&md, osMemUsed, int64(float64(memTotalBytes)-memFree), ts, metadata.Gauge, metadata.Bytes, osMemUsedDesc) return md, nil }
func (to *TagOverride) AddTagOverrides(sources map[string]string, t opentsdb.TagSet) error { if to.matchedTags == nil { to.matchedTags = make(map[string]*regexp.Regexp) } var err error for tag, re := range sources { to.matchedTags[tag], err = regexp.Compile(re) if err != nil { return fmt.Errorf("invalid regexp: %s error: %s", re, err) } } if to.tags == nil { to.tags = t.Copy() } else { to.tags = to.tags.Merge(t) } return nil }
func rabbitmqBackingQueueStatus(p string, ts opentsdb.TagSet, bqs rmqBackingQueueStatus) opentsdb.MultiDataPoint { var md opentsdb.MultiDataPoint Add(&md, p+"avg_rate", bqs.AvgAckEgressRate, ts.Copy().Merge(opentsdb.TagSet{"method": "ack", "direction": "out"}), metadata.Rate, metadata.Message, DescRmqBackingQueueStatusAvgAckEgressRate) Add(&md, p+"avg_rate", bqs.AvgAckIngressRate, ts.Copy().Merge(opentsdb.TagSet{"method": "ack", "direction": "in"}), metadata.Rate, metadata.Message, DescRmqBackingQueueStatusAvgAckIngressRate) Add(&md, p+"avg_rate", bqs.AvgEgressRate, ts.Copy().Merge(opentsdb.TagSet{"method": "noack", "direction": "out"}), metadata.Rate, metadata.Message, DescRmqBackingQueueStatusAvgEgressRate) Add(&md, p+"avg_rate", bqs.AvgIngressRate, ts.Copy().Merge(opentsdb.TagSet{"method": "noack", "direction": "in"}), metadata.Rate, metadata.Message, DescRmqBackingQueueStatusAvgIngressRate) Add(&md, p+"len", bqs.Len, ts, metadata.Gauge, metadata.Message, DescRmqBackingQueueStatusLen) return md }
func c_cisco_ios(host, community string, cpuIntegrator tsIntegrator) (opentsdb.MultiDataPoint, error) { var md opentsdb.MultiDataPoint ts := opentsdb.TagSet{"host": host} // CPU if err := ciscoCPU(host, community, ts, cpuIntegrator, &md); err != nil { return md, err } // ÎMemory memRaw, err := snmp_subtree(host, community, ciscoBaseOID+ciscoMemoryPoolTable) if err != nil { return md, fmt.Errorf("failed to get ciscoMemoryPoolTable for host %v: %v", host, err) } idToPoolEntry := make(map[string]*ciscoMemoryPoolEntry) for id, value := range memRaw { sp := strings.SplitN(id, ".", 2) if len(sp) != 2 { slog.Errorln("unexpected length of snmp sub OID (%v) for ciscoMemoryPoolTable for host %v: %v", id, host) } columnID := sp[0] entryID := sp[1] if _, ok := idToPoolEntry[entryID]; !ok { idToPoolEntry[entryID] = &ciscoMemoryPoolEntry{} } switch columnID { case "2": if v, ok := value.([]byte); ok { if m, ok := idToPoolEntry[entryID]; ok { m.PoolType = string(v) } else { slog.Errorf("failed to find cisco memory pool entry for entry id %v on host %v for memory pool type", entryID, host) } } else { slog.Errorf("failed to convert memory pool label %v to []byte for host %v", value, host) } case "5": if v, ok := value.(int64); ok { if m, ok := idToPoolEntry[entryID]; ok { m.Used = v } else { slog.Errorf("failed to find cisco memory pool entry for entry id %v on host %v for used memory", entryID, host) } } else { slog.Errorf("failed to convert used memory value %v to int64 for host %v", value, host) } case "6": if v, ok := value.(int64); ok { if m, ok := idToPoolEntry[entryID]; ok { m.Free = v } else { slog.Errorf("failed to find cisco memory pool entry for entry id %v on host %v for free memory", entryID, host) } } else { slog.Errorf("failed to convert used memory value %v to int64 for host %v", value, host) } } } var totalFreeMem int64 var totalUsedMem int64 for _, entry := range idToPoolEntry { ts := ts.Copy().Merge(opentsdb.TagSet{"name": entry.PoolType}) Add(&md, 
"cisco.mem.used", entry.Used, ts, metadata.Gauge, metadata.Bytes, ciscoMemoryPoolUsedDesc) Add(&md, "cisco.mem.free", entry.Free, ts, metadata.Gauge, metadata.Bytes, ciscoMemoryPoolFreeDesc) totalFreeMem += entry.Free totalUsedMem += entry.Used } Add(&md, osMemFree, totalFreeMem, ts, metadata.Gauge, metadata.Bytes, osMemFreeDesc) Add(&md, osMemUsed, totalUsedMem, ts, metadata.Gauge, metadata.Bytes, osMemUsedDesc) totalMem := totalFreeMem + totalUsedMem Add(&md, osMemTotal, totalMem, ts, metadata.Gauge, metadata.Bytes, osMemTotalDesc) Add(&md, osMemPctFree, int64(float64(totalFreeMem)/float64(totalMem)*100), ts, metadata.Gauge, metadata.Pct, osMemPctFreeDesc) return md, nil }
func rabbitmqMessageStats(p string, ts opentsdb.TagSet, ms rmqMessageStats) opentsdb.MultiDataPoint { var md opentsdb.MultiDataPoint Add(&md, p+"message_stats", ms.Ack, ts.Copy().Merge(opentsdb.TagSet{"method": "ack"}), metadata.Counter, metadata.Message, DescRmqMessageStatsAck) Add(&md, p+"message_stats", ms.Confirm, ts.Copy().Merge(opentsdb.TagSet{"method": "confirm"}), metadata.Counter, metadata.Message, DescRmqMessageStatsConfirm) Add(&md, p+"message_stats", ms.Deliver, ts.Copy().Merge(opentsdb.TagSet{"method": "deliver"}), metadata.Counter, metadata.Message, DescRmqMessageStatsDeliver) Add(&md, p+"message_stats", ms.DeliverGet, ts.Copy().Merge(opentsdb.TagSet{"method": "deliver_get"}), metadata.Counter, metadata.Message, DescRmqMessageStatsDeliverGet) Add(&md, p+"message_stats", ms.DeliverNoAck, ts.Copy().Merge(opentsdb.TagSet{"method": "deliver_noack"}), metadata.Counter, metadata.Message, DescRmqMessageStatsDeliverNoAck) Add(&md, p+"message_stats", ms.Get, ts.Copy().Merge(opentsdb.TagSet{"method": "get"}), metadata.Counter, metadata.Message, DescRmqMessageStatsGet) Add(&md, p+"message_stats", ms.GetNoAck, ts.Copy().Merge(opentsdb.TagSet{"method": "get_noack"}), metadata.Counter, metadata.Message, DescRmqMessageStatsGetNoack) Add(&md, p+"message_stats", ms.Publish, ts.Copy().Merge(opentsdb.TagSet{"method": "publish"}), metadata.Counter, metadata.Message, DescRmqMessageStatsPublish) Add(&md, p+"message_stats", ms.PublishIn, ts.Copy().Merge(opentsdb.TagSet{"method": "publish_in"}), metadata.Counter, metadata.Message, DescRmqMessageStatsPublishIn) Add(&md, p+"message_stats", ms.PublishOut, ts.Copy().Merge(opentsdb.TagSet{"method": "publish_out"}), metadata.Counter, metadata.Message, DescRmqMessageStatsPublishOut) Add(&md, p+"message_stats", ms.Redeliver, ts.Copy().Merge(opentsdb.TagSet{"method": "redeliver"}), metadata.Counter, metadata.Message, DescRmqMessageStatsRedeliver) Add(&md, p+"message_stats", ms.Return, ts.Copy().Merge(opentsdb.TagSet{"method": 
"return"}), metadata.Counter, metadata.Message, DescRmqMessageStatsReturn) return md }
// CollectStates sends various state information to bosun with collect. func (s *Schedule) CollectStates() { // [AlertName][Severity]Count severityCounts := make(map[string]map[string]int64) abnormalCounts := make(map[string]map[string]int64) ackStatusCounts := make(map[string]map[bool]int64) ackByNotificationCounts := make(map[string]map[bool]int64) unAckOldestByNotification := make(map[string]time.Time) activeStatusCounts := make(map[string]map[bool]int64) // Initalize the Counts for _, alert := range s.Conf.Alerts { severityCounts[alert.Name] = make(map[string]int64) abnormalCounts[alert.Name] = make(map[string]int64) var i models.Status for i = 1; i.String() != "none"; i++ { severityCounts[alert.Name][i.String()] = 0 abnormalCounts[alert.Name][i.String()] = 0 } ackStatusCounts[alert.Name] = make(map[bool]int64) activeStatusCounts[alert.Name] = make(map[bool]int64) ackStatusCounts[alert.Name][false] = 0 activeStatusCounts[alert.Name][false] = 0 ackStatusCounts[alert.Name][true] = 0 activeStatusCounts[alert.Name][true] = 0 } for notificationName := range s.Conf.Notifications { unAckOldestByNotification[notificationName] = time.Unix(1<<63-62135596801, 999999999) ackByNotificationCounts[notificationName] = make(map[bool]int64) ackByNotificationCounts[notificationName][false] = 0 ackByNotificationCounts[notificationName][true] = 0 } //TODO: // for _, state := range s.status { // if !state.Open { // continue // } // name := state.AlertKey.Name() // alertDef := s.Conf.Alerts[name] // nots := make(map[string]bool) // for name := range alertDef.WarnNotification.Get(s.Conf, state.Group) { // nots[name] = true // } // for name := range alertDef.CritNotification.Get(s.Conf, state.Group) { // nots[name] = true // } // incident, err := s.GetIncident(state.Last().IncidentId) // if err != nil { // slog.Errorln(err) // } // for notificationName := range nots { // ackByNotificationCounts[notificationName][state.NeedAck]++ // if incident != nil && 
incident.Start.Before(unAckOldestByNotification[notificationName]) && state.NeedAck { // unAckOldestByNotification[notificationName] = incident.Start // } // } // severity := state.CurrentStatus.String() // lastAbnormal := state.LastAbnormalStatus.String() // severityCounts[state.Alert][severity]++ // abnormalCounts[state.Alert][lastAbnormal]++ // ackStatusCounts[state.Alert][state.NeedAck]++ // activeStatusCounts[state.Alert][state.IsActive()]++ // } for notification := range ackByNotificationCounts { ts := opentsdb.TagSet{"notification": notification} err := collect.Put("alerts.acknowledgement_status_by_notification", ts.Copy().Merge(opentsdb.TagSet{"status": "unacknowledged"}), ackByNotificationCounts[notification][true]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.acknowledgement_status_by_notification", ts.Copy().Merge(opentsdb.TagSet{"status": "acknowledged"}), ackByNotificationCounts[notification][false]) if err != nil { slog.Errorln(err) } } for notification, timeStamp := range unAckOldestByNotification { ts := opentsdb.TagSet{"notification": notification} var ago time.Duration if !timeStamp.Equal(time.Unix(1<<63-62135596801, 999999999)) { ago = utcNow().Sub(timeStamp) } err := collect.Put("alerts.oldest_unacked_by_notification", ts, ago.Seconds()) if err != nil { slog.Errorln(err) } } for alertName := range severityCounts { ts := opentsdb.TagSet{"alert": alertName} // The tagset of the alert is not included because there is no way to // store the string of a group in OpenTSBD in a parsable way. This is // because any delimiter we chose could also be part of a tag key or tag // value. 
for severity := range severityCounts[alertName] { err := collect.Put("alerts.current_severity", ts.Copy().Merge(opentsdb.TagSet{"severity": severity}), severityCounts[alertName][severity]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.last_abnormal_severity", ts.Copy().Merge(opentsdb.TagSet{"severity": severity}), abnormalCounts[alertName][severity]) if err != nil { slog.Errorln(err) } } err := collect.Put("alerts.acknowledgement_status", ts.Copy().Merge(opentsdb.TagSet{"status": "unacknowledged"}), ackStatusCounts[alertName][true]) err = collect.Put("alerts.acknowledgement_status", ts.Copy().Merge(opentsdb.TagSet{"status": "acknowledged"}), ackStatusCounts[alertName][false]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.active_status", ts.Copy().Merge(opentsdb.TagSet{"status": "active"}), activeStatusCounts[alertName][true]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.active_status", ts.Copy().Merge(opentsdb.TagSet{"status": "inactive"}), activeStatusCounts[alertName][false]) if err != nil { slog.Errorln(err) } } }
// DatabaseParseResults converts raw query result rows into data points.
// Expected row layout: column 0 is the metric value, column 1 is a Unix
// timestamp when query.hasTime is set, and the remaining columns are
// alternating tag-name/tag-value pairs.
func DatabaseParseResults(c conf.Database, query *DatabaseQuery, results *[][]string) (opentsdb.MultiDataPoint, error) {
	if len(*results) < 1 {
		return nil, nil
	}
	// Columns left over after the value (and optional timestamp) hold tag
	// name/value pairs; numTagLoops is the number of complete pairs.
	numTagLoops := len((*results)[0]) - 1
	if query.hasTime {
		numTagLoops--
	}
	if numTagLoops < 0 {
		return nil, fmt.Errorf("invalid number of columns")
	}
	// Drop a trailing unpaired column, then count pairs.
	numTagLoops = (numTagLoops - (numTagLoops % 2)) / 2
	var md opentsdb.MultiDataPoint
	var tagName string
	var tagValue string
	var tagsBase opentsdb.TagSet
	baseName := c.Type + "."
	if c.DBName == "" {
		tagsBase = opentsdb.TagSet{"inst_id": strconv.Itoa(c.InstId)}
	} else {
		tagsBase = opentsdb.TagSet{"db_name": c.DBName, "inst_id": strconv.Itoa(c.InstId)}
	}
	for _, result := range *results {
		// Rows with an empty value column are skipped.
		if result[0] == "" {
			continue
		}
		// Check result[0] is a valid metric value?
		tags := tagsBase.Copy()
		for i := 0; i < numTagLoops; i++ {
			// Tag pairs start at column 2 when a timestamp column is
			// present, otherwise at column 1.
			if query.hasTime {
				tagName = strings.Replace(result[(i*2)+2], " ", "_", -1)
				tagValue = strings.Replace(result[(i*2)+3], " ", "_", -1)
			} else {
				tagName = strings.Replace(result[(i*2)+1], " ", "_", -1)
				tagValue = strings.Replace(result[(i*2)+2], " ", "_", -1)
			}
			// Best-effort sanitization: Clean errors are ignored and the
			// empty-string check below drops anything unusable.
			tagName, _ = opentsdb.Clean(tagName)
			tagValue, _ = opentsdb.Clean(tagValue)
			tagName = ContinuousCollectorVars.reTwoOrMoreUnderscore.ReplaceAllString(tagName, "_")
			tagValue = ContinuousCollectorVars.reTwoOrMoreUnderscore.ReplaceAllString(tagValue, "_")
			if tagName == "" || tagValue == "" {
				continue
			}
			tags = tags.Merge(opentsdb.TagSet{tagName: tagValue})
		}
		// Add metadata tag set type to configuration file?
		if query.hasTime {
			// ParseInt errors yield 0, which the < 1 check rejects.
			timestamp, _ := strconv.ParseInt(result[1], 10, 64)
			if timestamp < 1 {
				return nil, fmt.Errorf("invalid timestamp")
			}
			AddTS(&md, baseName+query.name, timestamp, result[0], tags, metadata.Unknown, metadata.None, query.description)
		} else {
			Add(&md, baseName+query.name, result[0], tags, metadata.Unknown, metadata.None, query.description)
		}
	}
	return md, nil
}
// CollectStates sends various state information to bosun with collect. func (s *Schedule) CollectStates() { // [AlertName][Severity]Count severityCounts := make(map[string]map[string]int64) abnormalCounts := make(map[string]map[string]int64) ackStatusCounts := make(map[string]map[bool]int64) activeStatusCounts := make(map[string]map[bool]int64) // Initalize the Counts for _, alert := range s.Conf.Alerts { severityCounts[alert.Name] = make(map[string]int64) abnormalCounts[alert.Name] = make(map[string]int64) var i Status for i = 1; i.String() != "none"; i++ { severityCounts[alert.Name][i.String()] = 0 abnormalCounts[alert.Name][i.String()] = 0 } ackStatusCounts[alert.Name] = make(map[bool]int64) activeStatusCounts[alert.Name] = make(map[bool]int64) ackStatusCounts[alert.Name][false] = 0 activeStatusCounts[alert.Name][false] = 0 ackStatusCounts[alert.Name][true] = 0 activeStatusCounts[alert.Name][true] = 0 } for _, state := range s.status { if !state.Open { continue } severity := state.Status().String() lastAbnormal := state.AbnormalStatus().String() severityCounts[state.Alert][severity]++ abnormalCounts[state.Alert][lastAbnormal]++ ackStatusCounts[state.Alert][state.NeedAck]++ activeStatusCounts[state.Alert][state.IsActive()]++ } for alertName := range severityCounts { ts := opentsdb.TagSet{"alert": alertName} // The tagset of the alert is not included because there is no way to // store the string of a group in OpenTSBD in a parsable way. This is // because any delimiter we chose could also be part of a tag key or tag // value. 
for severity := range severityCounts[alertName] { err := collect.Put("alerts.current_severity", ts.Copy().Merge(opentsdb.TagSet{"severity": severity}), severityCounts[alertName][severity]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.last_abnormal_severity", ts.Copy().Merge(opentsdb.TagSet{"severity": severity}), abnormalCounts[alertName][severity]) if err != nil { slog.Errorln(err) } } err := collect.Put("alerts.acknowledgement_status", ts.Copy().Merge(opentsdb.TagSet{"status": "unacknowledged"}), ackStatusCounts[alertName][true]) err = collect.Put("alerts.acknowledgement_status", ts.Copy().Merge(opentsdb.TagSet{"status": "acknowledged"}), ackStatusCounts[alertName][false]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.active_status", ts.Copy().Merge(opentsdb.TagSet{"status": "active"}), activeStatusCounts[alertName][true]) if err != nil { slog.Errorln(err) } err = collect.Put("alerts.active_status", ts.Copy().Merge(opentsdb.TagSet{"status": "inactive"}), activeStatusCounts[alertName][false]) if err != nil { slog.Errorln(err) } } }
func haproxyFetch(user, pwd, tier, url string) (opentsdb.MultiDataPoint, error) { var md opentsdb.MultiDataPoint var err error const metric = "haproxy" parse := func(v string) (int64, error) { var i int64 if v != "" { i, err = strconv.ParseInt(v, 10, 64) if err != nil { return 0, err } return i, nil } return i, nil } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Close connection after request. Default cached connections will get // failures in the event of server closing idle connections. // See https://github.com/golang/go/issues/8946 req.Close = true req.SetBasicAuth(user, pwd) resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } defer resp.Body.Close() reader := csv.NewReader(resp.Body) records, err := reader.ReadAll() if err != nil { return nil, err } if len(records) < 2 { return nil, nil } for _, rec := range records[1:] { // There is a trailing comma in haproxy's csv if len(rec) != len(haproxyCSVMeta)+1 { return nil, fmt.Errorf("expected %v lines. got: %v", len(haproxyCSVMeta)+1, len(rec)) } hType := haproxyType[rec[32]] pxname := rec[0] svname := rec[1] ts := opentsdb.TagSet{"pxname": pxname, "svname": svname, "tier": tier} for i, field := range haproxyCSVMeta { m := strings.Join([]string{metric, hType, field.Name}, ".") value := rec[i] if field.Ignore == true { continue } else if strings.HasPrefix(field.Name, "hrsp") { sp := strings.Split(field.Name, "_") if len(sp) != 2 { return nil, fmt.Errorf("unexpected field name %v in hrsp", field.Name) } ts := ts.Copy().Merge(opentsdb.TagSet{"status_code": sp[1]}) m = strings.Join([]string{metric, hType, sp[0]}, ".") v, err := parse(value) if err != nil { return nil, err } Add(&md, m, v, ts, metadata.Counter, metadata.Response, fmt.Sprintf("The number of http responses with a %v status code.", sp[1])) } else if field.Name == "status" { v, ok := haproxyStatus[value] // Not distinging between MAINT and MAINT via... 
if !ok { v = 3 } Add(&md, m, v, ts, field.RateType, field.Unit, field.Desc) } else if field.Name == "check_status" { if value == "" { continue } v, ok := haproxyCheckStatus[value] if !ok { return nil, fmt.Errorf("unknown check status %v", value) } Add(&md, m, v, ts, field.RateType, field.Unit, field.Desc) } else { v, err := parse(value) if err != nil { return nil, err } Add(&md, m, v, ts, field.RateType, field.Unit, field.Desc) } } } return md, nil }