// LSBaseQuery builds the base query that both LSCount and LSStat share
func LSBaseQuery(now time.Time, indexRoot string, l LogstashElasticHosts, keystring string, filter, sduration, eduration string, size int) (*LogstashRequest, error) {
	start, err := opentsdb.ParseDuration(sduration)
	if err != nil {
		return nil, err
	}
	// An empty eduration leaves end at its zero value, i.e. the window ends "now".
	var end opentsdb.Duration
	if eduration != "" {
		end, err = opentsdb.ParseDuration(eduration)
		if err != nil {
			return nil, err
		}
	}
	// Durations are "ago" offsets, so subtract them from now.
	st := now.Add(time.Duration(-start))
	en := now.Add(time.Duration(-end))
	r := LogstashRequest{
		IndexRoot: indexRoot,
		Start:     &st,
		End:       &en,
		Source:    elastic.NewSearchSource().Size(size),
	}
	// Constrain hits to the [st, en] window on @timestamp.
	tf := elastic.NewRangeFilter("@timestamp").Gte(st).Lte(en)
	filtered := elastic.NewFilteredQuery(tf)
	// ProcessLSKeys adds key/filter clauses to filtered and returns the
	// parsed key matchers used later for grouping.
	r.KeyMatches, err = ProcessLSKeys(keystring, filter, &filtered)
	if err != nil {
		return nil, err
	}
	r.Source = r.Source.Query(filtered)
	return &r, nil
}
// GraphiteQuery runs a Graphite render request for query over the relative
// window [now-sduration, now-eduration] and parses the response into Results,
// deriving group tags from the dot-separated format string.
func GraphiteQuery(e *State, T miniprofiler.Timer, query string, sduration, eduration, format string) (r *Results, err error) {
	sd, err := opentsdb.ParseDuration(sduration)
	if err != nil {
		return
	}
	// Empty eduration means the window ends "now" (zero offset).
	ed := opentsdb.Duration(0)
	if eduration != "" {
		ed, err = opentsdb.ParseDuration(eduration)
		if err != nil {
			return
		}
	}
	st := e.now.Add(-time.Duration(sd))
	et := e.now.Add(-time.Duration(ed))
	req := &graphite.Request{
		Targets: []string{query},
		Start:   &st,
		End:     &et,
	}
	s, err := timeGraphiteRequest(e, T, req)
	if err != nil {
		return nil, err
	}
	// format names the tag keys for each dot-separated segment of a target.
	formatTags := strings.Split(format, ".")
	r = new(Results)
	results, err := parseGraphiteResponse(req, &s, formatTags)
	if err != nil {
		return nil, err
	}
	r.Results = results
	return
}
// ESBaseQuery builds the base query that both ESCount and ESStat share
func ESBaseQuery(now time.Time, indexer ESIndexer, l ElasticHosts, filter elastic.Query, sduration, eduration string, size int) (*ElasticRequest, error) {
	start, err := opentsdb.ParseDuration(sduration)
	if err != nil {
		return nil, err
	}
	// An empty eduration leaves end at its zero value, i.e. the window ends "now".
	var end opentsdb.Duration
	if eduration != "" {
		end, err = opentsdb.ParseDuration(eduration)
		if err != nil {
			return nil, err
		}
	}
	// Durations are "ago" offsets, so subtract them from now.
	st := now.Add(time.Duration(-start))
	en := now.Add(time.Duration(-end))
	r := ElasticRequest{
		Indexer: indexer,
		Start:   &st,
		End:     &en,
		Source:  elastic.NewSearchSource().Size(size),
	}
	// Time-range restriction on the indexer's configured time field, then the
	// caller-supplied filter on top.
	var q elastic.Query
	q = elastic.NewRangeQuery(indexer.TimeField).Gte(st).Lte(en)
	r.Source = r.Source.Query(q)
	r.Source = r.Source.Query(filter)
	return &r, nil
}
func bandTSDB(e *State, T miniprofiler.Timer, query, duration, period string, num float64, rfunc func(*Results, *opentsdb.Response, time.Duration) error) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("band", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if !e.tsdbContext.Version().FilterSupport() { if err = e.Search.Expand(q); err != nil { return } } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() if err = req.SetTime(e.now); err != nil { return } for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } for _, res := range s { if e.squelched(res.Tags) { continue } //offset := e.now.Sub(now.Add(time.Duration(p-d))) offset := e.now.Sub(now) if err = rfunc(r, res, offset); err != nil { return } } } }) return }
// Query runs a single OpenTSDB query over the relative window given by
// sduration (start, required) and eduration (end, optional — empty means
// "now") and converts the response into one Result per returned tagset.
func Query(e *State, T miniprofiler.Timer, query, sduration, eduration string) (r *Results, err error) {
	r = new(Results)
	q, err := opentsdb.ParseQuery(query, e.tsdbContext.Version())
	if q == nil && err != nil {
		return
	}
	// Versions without filter support need tag expansion via the search index.
	if !e.tsdbContext.Version().FilterSupport() {
		if err = e.Search.Expand(q); err != nil {
			return
		}
	}
	sd, err := opentsdb.ParseDuration(sduration)
	if err != nil {
		return
	}
	req := opentsdb.Request{
		Queries: []*opentsdb.Query{q},
		Start:   fmt.Sprintf("%s-ago", sd),
	}
	if eduration != "" {
		var ed opentsdb.Duration
		ed, err = opentsdb.ParseDuration(eduration)
		if err != nil {
			return
		}
		req.End = fmt.Sprintf("%s-ago", ed)
	}
	var s opentsdb.ResponseSet
	// Pin the relative "-ago" times against e.now before sending.
	if err = req.SetTime(e.now); err != nil {
		return
	}
	s, err = timeTSDBRequest(e, T, &req)
	if err != nil {
		return
	}
	for _, res := range s {
		// Drop series that match a squelch rule.
		if e.squelched(res.Tags) {
			continue
		}
		values := make(Series)
		// DPS keys are Unix-timestamp strings.
		for k, v := range res.DPS {
			i, err := strconv.ParseInt(k, 10, 64)
			if err != nil {
				return nil, err
			}
			values[time.Unix(i, 0).UTC()] = float64(v)
		}
		r.Results = append(r.Results, &Result{
			Value: values,
			Group: res.Tags,
		})
	}
	return
}
func procDuration(e *State, startDuration, endDuration string) (time.Time, time.Time, error) { start, err := opentsdb.ParseDuration(startDuration) if err != nil { return time.Time{}, time.Time{}, err } var end opentsdb.Duration if endDuration != "" { end, err = opentsdb.ParseDuration(endDuration) if err != nil { return time.Time{}, time.Time{}, err } } st := e.now.Add(time.Duration(-start)) en := e.now.Add(time.Duration(-end)) return st, en, nil }
func c_exim_mailq() (opentsdb.MultiDataPoint, error) { var md opentsdb.MultiDataPoint mailq, err := util.Command(time.Minute, nil, "/usr/bin/mailq") if err != nil { return nil, err } util.ReadCommandTimeout(time.Minute, func(line string) error { f := strings.Fields(line) if len(f) == 5 && f[4] == "TOTAL" { Add(&md, "exim.mailq_count", f[0], nil, metadata.Gauge, metadata.EMail, "The number of emails in exim's mail queue.") var multi int64 = 1 size, err := strconv.ParseInt(f[1], 10, 64) if err != nil && len(f[1]) > 3 { unit := f[1][len(f[1])-2:] switch unit { case "KB": multi = 1024 case "MB": multi = 1024 * 1024 default: return fmt.Errorf("error parsing size unit of exim's mail queue") } size, err = strconv.ParseInt(f[1][:len(f[1])-2], 10, 64) if err != nil { return fmt.Errorf("error parsing exim size field") } } Add(&md, "exim.mailq_size", size*multi, nil, metadata.Gauge, metadata.Bytes, descEximMailQSize) oldest, err := opentsdb.ParseDuration(f[2]) if err != nil { return err } Add(&md, "exim.mailq_oldest", oldest.Seconds(), nil, metadata.Gauge, metadata.Second, descEximMailQOldest) newest, err := opentsdb.ParseDuration(f[3]) if err != nil { return err } Add(&md, "exim.mailq_newest", newest.Seconds(), nil, metadata.Gauge, metadata.Second, descEximMailQNewest) } return nil }, mailq, eximExiqsumm) return md, nil }
func Duration(e *State, T miniprofiler.Timer, d string) (*Results, error) { duration, err := opentsdb.ParseDuration(d) if err != nil { return nil, err } return &Results{ Results: []*Result{ {Value: Scalar(duration.Seconds())}, }, }, nil }
func Change(e *State, T miniprofiler.Timer, query, sduration, eduration string) (r *Results, err error) { r = new(Results) sd, err := opentsdb.ParseDuration(sduration) if err != nil { return } var ed opentsdb.Duration if eduration != "" { ed, err = opentsdb.ParseDuration(eduration) if err != nil { return } } r, err = Query(e, T, query, sduration, eduration) if err != nil { return } r, err = reduce(e, T, r, change, fromScalar((sd - ed).Seconds())) return }
func Line_lr(e *State, T miniprofiler.Timer, series *Results, d string) (*Results, error) { dur, err := opentsdb.ParseDuration(d) if err != nil { return series, err } for _, res := range series.Results { res.Value = line_lr(res.Value.(Series), time.Duration(dur)) res.Group.Merge(opentsdb.TagSet{"regression": "line"}) } return series, nil }
func getSince(r *http.Request) (time.Duration, error) { s := r.FormValue("since") since := schedule.SystemConf.GetSearchSince() if s != "" && s != "default" { td, err := opentsdb.ParseDuration(s) if err != nil { return 0, err } since = time.Duration(td) } return since, nil }
func Shift(e *State, T miniprofiler.Timer, series *Results, d string) (*Results, error) { dur, err := opentsdb.ParseDuration(d) if err != nil { return series, err } for _, result := range series.Results { newSeries := make(Series) for t, v := range result.Value.Value().(Series) { newSeries[t.Add(time.Duration(dur))] = v } result.Group["shift"] = d result.Value = newSeries } return series, nil }
func TagValuesByTagKey(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) { vars := mux.Vars(r) tagk := vars["tagk"] s := r.FormValue("since") var since opentsdb.Duration if s == "default" { since = schedule.Conf.SearchSince } else if s != "" { var err error since, err = opentsdb.ParseDuration(s) if err != nil { return nil, err } } return schedule.Search.TagValuesByTagKey(tagk, time.Duration(since)) }
// SilenceSet creates (or confirms) a silence from a JSON body of string
// fields. "start"/"end" are tried against each accepted layout; a missing
// start defaults to now (UTC) and a missing end is derived from "duration".
func SilenceSet(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	var start, end time.Time
	var err error
	var data map[string]string
	j := json.NewDecoder(r.Body)
	if err := j.Decode(&data); err != nil {
		return nil, err
	}
	if s := data["start"]; s != "" {
		// Try each accepted layout; the first successful parse wins.
		for _, layout := range silenceLayouts {
			start, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		// Still zero means no layout matched.
		if start.IsZero() {
			return nil, fmt.Errorf("unrecognized start time format: %s", s)
		}
	}
	if s := data["end"]; s != "" {
		for _, layout := range silenceLayouts {
			end, err = time.Parse(layout, s)
			if err == nil {
				break
			}
		}
		if end.IsZero() {
			return nil, fmt.Errorf("unrecognized end time format: %s", s)
		}
	}
	if start.IsZero() {
		start = time.Now().UTC()
	}
	// No explicit end: compute it from the "duration" field.
	if end.IsZero() {
		d, err := opentsdb.ParseDuration(data["duration"])
		if err != nil {
			return nil, err
		}
		end = start.Add(time.Duration(d))
	}
	return schedule.AddSilence(start, end, data["alert"], data["tags"], data["forget"] == "true", len(data["confirm"]) > 0, data["edit"], data["user"], data["message"])
}
// LSDateHistorgram builds the aggregation query using subaggregations. The result is a grouped timer series
// that Bosun can understand
func LSDateHistogram(e *State, T miniprofiler.Timer, index_root, keystring, filter, interval, sduration, eduration, stat_field, rstat string, size int) (r *Results, err error) {
	r = new(Results)
	req, err := LSBaseQuery(e.now, index_root, e.logstashHosts, keystring, filter, sduration, eduration, size)
	if err != nil {
		return nil, err
	}
	// "M" (month) is translated to elastic's "n"; MinDocCount(0) keeps empty buckets.
	ts := elastic.NewDateHistogramAggregation().Field("@timestamp").Interval(strings.Replace(interval, "M", "n", -1)).MinDocCount(0)
	ds, err := opentsdb.ParseDuration(interval)
	if err != nil {
		return nil, err
	}
	if stat_field != "" {
		ts = ts.SubAggregation("stats", elastic.NewExtendedStatsAggregation().Field(stat_field))
		switch rstat {
		case "avg", "min", "max", "sum", "sum_of_squares", "variance", "std_deviation":
		default:
			return r, fmt.Errorf("stat function %v not a valid option", rstat)
		}
	}
	// No grouping keys: a single ungrouped time series.
	if keystring == "" {
		req.Source = req.Source.Aggregation("ts", ts)
		result, err := timeLSRequest(e, T, req)
		if err != nil {
			return nil, err
		}
		ts, found := result.Aggregations.DateHistogram("ts")
		if !found {
			return nil, fmt.Errorf("expected time series not found in elastic reply")
		}
		series := make(Series)
		for _, v := range ts.Buckets {
			val := processBucketItem(v, rstat, ds)
			if val != nil {
				// Bucket keys are epoch milliseconds.
				series[time.Unix(v.Key/1000, 0).UTC()] = *val
			}
		}
		if len(series) == 0 {
			return r, nil
		}
		r.Results = append(r.Results, &Result{
			Value: series,
			Group: make(opentsdb.TagSet),
		})
		return r, nil
	}
	// Grouped case: nest a terms aggregation per key, innermost holding the
	// date histogram, built from the last key outward.
	keys := req.KeyMatches
	aggregation := elastic.NewTermsAggregation().Field(keys[len(keys)-1].Key).Size(0)
	aggregation = aggregation.SubAggregation("ts", ts)
	for i := len(keys) - 2; i > -1; i-- {
		aggregation = elastic.NewTermsAggregation().Field(keys[i].Key).Size(0).SubAggregation("g_"+keys[i+1].Key, aggregation)
	}
	req.Source = req.Source.Aggregation("g_"+keys[0].Key, aggregation)
	result, err := timeLSRequest(e, T, req)
	if err != nil {
		return nil, err
	}
	top, ok := result.Aggregations.Terms("g_" + keys[0].Key)
	if !ok {
		return nil, fmt.Errorf("top key g_%v not found in result", keys[0].Key)
	}
	// desc recursively walks the nested term buckets, accumulating tags, and
	// emits one Result per leaf date histogram.
	var desc func(*elastic.AggregationBucketKeyItem, opentsdb.TagSet, []lsKeyMatch) error
	desc = func(b *elastic.AggregationBucketKeyItem, tags opentsdb.TagSet, keys []lsKeyMatch) error {
		if ts, found := b.DateHistogram("ts"); found {
			if e.squelched(tags) {
				return nil
			}
			series := make(Series)
			for _, v := range ts.Buckets {
				val := processBucketItem(v, rstat, ds)
				if val != nil {
					series[time.Unix(v.Key/1000, 0).UTC()] = *val
				}
			}
			if len(series) == 0 {
				return nil
			}
			r.Results = append(r.Results, &Result{
				Value: series,
				Group: tags.Copy(),
			})
			return nil
		}
		if len(keys) < 1 {
			return nil
		}
		n, _ := b.Aggregations.Terms("g_" + keys[0].Key)
		for _, item := range n.Buckets {
			key := fmt.Sprint(item.Key)
			// Skip buckets that fail the key's regex pattern, if any.
			if keys[0].Pattern != nil && !keys[0].Pattern.MatchString(key) {
				continue
			}
			tags[keys[0].Key] = key
			if err := desc(item, tags.Copy(), keys[1:]); err != nil {
				return err
			}
		}
		return nil
	}
	for _, b := range top.Buckets {
		tags := make(opentsdb.TagSet)
		key := fmt.Sprint(b.Key)
		if keys[0].Pattern != nil && !keys[0].Pattern.MatchString(key) {
			continue
		}
		tags[keys[0].Key] = key
		if err := desc(b, tags, keys[1:]); err != nil {
			return nil, err
		}
	}
	return r, nil
}
// Ask evaluates a single "key:value" filter expression against the incident
// summary and reports whether it matches. Unknown keys match nothing (false,
// nil); malformed filters and bad values return an error.
func (is IncidentSummaryView) Ask(filter string) (bool, error) {
	sp := strings.SplitN(filter, ":", 2)
	if len(sp) != 2 {
		return false, fmt.Errorf("bad filter, filter must be in k:v format, got %v", filter)
	}
	key := sp[0]
	value := sp[1]
	switch key {
	case "ack":
		// ack:true means "no ack needed" (already acknowledged).
		switch value {
		case "true":
			return is.NeedAck == false, nil
		case "false":
			return is.NeedAck == true, nil
		default:
			return false, fmt.Errorf("unknown %s value: %s", key, value)
		}
	case "hasTag":
		// Forms: "=v" (any key has value v), "k=" (key exists),
		// "k=v1|v2" (key has one of the glob values), bare "k" (key exists).
		if strings.Contains(value, "=") {
			if strings.HasPrefix(value, "=") {
				q := strings.TrimPrefix(value, "=")
				for _, v := range is.Tags {
					if glob.Glob(q, v) {
						return true, nil
					}
				}
				return false, nil
			}
			if strings.HasSuffix(value, "=") {
				q := strings.TrimSuffix(value, "=")
				_, ok := is.Tags[q]
				return ok, nil
			}
			sp := strings.Split(value, "=")
			if len(sp) != 2 {
				return false, fmt.Errorf("unexpected tag specification: %v", value)
			}
			tagValues := strings.Split(sp[1], "|")
			for k, v := range is.Tags {
				for _, tagValue := range tagValues {
					if k == sp[0] && glob.Glob(tagValue, v) {
						return true, nil
					}
				}
			}
			return false, nil
		}
		q := strings.TrimRight(value, "=")
		_, ok := is.Tags[q]
		return ok, nil
	case "hidden":
		hide := is.Silenced || is.Unevaluated
		switch value {
		case "true":
			return hide == true, nil
		case "false":
			return hide == false, nil
		default:
			return false, fmt.Errorf("unknown %s value: %s", key, value)
		}
	case "name":
		return glob.Glob(value, is.AlertName), nil
	case "user":
		for _, action := range is.Actions {
			if action.User == value {
				return true, nil
			}
		}
		return false, nil
	case "notify":
		// Match a glob against any warn or crit notification chain entry.
		for _, chain := range is.WarnNotificationChains {
			for _, wn := range chain {
				if glob.Glob(value, wn) {
					return true, nil
				}
			}
		}
		for _, chain := range is.CritNotificationChains {
			for _, cn := range chain {
				if glob.Glob(value, cn) {
					return true, nil
				}
			}
		}
		return false, nil
	case "silenced":
		switch value {
		case "true":
			return is.Silenced == true, nil
		case "false":
			return is.Silenced == false, nil
		default:
			return false, fmt.Errorf("unknown %s value: %s", key, value)
		}
	case "start":
		// Optional "<" or ">" prefix, then a duration relative to now.
		// Default (no op) behaves like ">": started after now-d.
		var op string
		val := value
		if strings.HasPrefix(value, "<") {
			op = "<"
			val = strings.TrimLeft(value, op)
		}
		if strings.HasPrefix(value, ">") {
			op = ">"
			val = strings.TrimLeft(value, op)
		}
		d, err := opentsdb.ParseDuration(val)
		if err != nil {
			return false, err
		}
		startTime := time.Unix(is.Start, 0)
		// might want to make Now a property of incident summary for viewing things in the past
		// but not going there at the moment. This is because right now I'm working with open
		// incidents. And "What did incidents look like at this time?" is a different question
		// since those incidents will no longer be open.
		relativeTime := time.Now().UTC().Add(time.Duration(-d))
		switch op {
		case ">", "":
			return startTime.After(relativeTime), nil
		case "<":
			return startTime.Before(relativeTime), nil
		default:
			return false, fmt.Errorf("unexpected op: %v", op)
		}
	case "unevaluated":
		switch value {
		case "true":
			return is.Unevaluated == true, nil
		case "false":
			return is.Unevaluated == false, nil
		default:
			return false, fmt.Errorf("unknown %s value: %s", key, value)
		}
	case "status": // CurrentStatus
		return is.CurrentStatus.String() == value, nil
	case "worstStatus":
		return is.WorstStatus.String() == value, nil
	case "lastAbnormalStatus":
		return is.LastAbnormalStatus.String() == value, nil
	case "subject":
		return glob.Glob(value, is.Subject), nil
	}
	return false, nil
}
// loadNotification parses one notification config section into a
// Notification and registers it in c.Notifications. Config errors are
// reported through c.errorf/c.error (which do not return control here
// normally — they abort the load).
func (c *Conf) loadNotification(s *parse.SectionNode) {
	name := s.Name.Text
	if _, ok := c.Notifications[name]; ok {
		c.errorf("duplicate notification name: %s", name)
	}
	n := Notification{
		Vars:         make(map[string]string),
		ContentType:  "application/x-www-form-urlencoded",
		Name:         name,
		RunOnActions: true,
	}
	n.Text = s.RawText
	// Template helpers available to the notification body:
	// V expands config variables; json marshals a value to JSON.
	funcs := ttemplate.FuncMap{
		"V": func(v string) string {
			return c.Expand(v, n.Vars, false)
		},
		"json": func(v interface{}) string {
			b, err := json.Marshal(v)
			if err != nil {
				slog.Errorln(err)
			}
			return string(b)
		},
	}
	// Register before parsing pairs so "next" can refer to this notification.
	c.Notifications[name] = &n
	pairs := c.getPairs(s, n.Vars, sNormal)
	for _, p := range pairs {
		c.at(p.node)
		v := p.val
		switch k := p.key; k {
		case "email":
			if c.SMTPHost == "" || c.EmailFrom == "" {
				c.errorf("email notifications require both smtpHost and emailFrom to be set")
			}
			n.email = v
			email, err := mail.ParseAddressList(n.email)
			if err != nil {
				c.error(err)
			}
			n.Email = email
		case "post":
			n.post = v
			post, err := url.Parse(n.post)
			if err != nil {
				c.error(err)
			}
			n.Post = post
		case "get":
			n.get = v
			get, err := url.Parse(n.get)
			if err != nil {
				c.error(err)
			}
			n.Get = get
		case "print":
			n.Print = true
		case "contentType":
			n.ContentType = v
		case "next":
			n.next = v
			next, ok := c.Notifications[n.next]
			if !ok {
				c.errorf("unknown notification %s", n.next)
			}
			n.Next = next
		case "timeout":
			d, err := opentsdb.ParseDuration(v)
			if err != nil {
				c.error(err)
			}
			n.Timeout = time.Duration(d)
		case "body":
			n.body = v
			tmpl := ttemplate.New(name).Funcs(funcs)
			_, err := tmpl.Parse(n.body)
			if err != nil {
				c.error(err)
			}
			n.Body = tmpl
		case "runOnActions":
			n.RunOnActions = v == "true"
		default:
			c.errorf("unknown key %s", k)
		}
	}
	c.at(s)
	// A timeout only makes sense when there is a next notification to escalate to.
	if n.Timeout > 0 && n.Next == nil {
		c.errorf("timeout specified without next")
	}
}
// loadAlert parses one alert config section into an Alert, validates the
// crit/warn/depends expressions and notification wiring, and registers it
// in c.Alerts. Config errors are reported through c.errorf/c.error.
func (c *Conf) loadAlert(s *parse.SectionNode) {
	name := s.Name.Text
	if _, ok := c.Alerts[name]; ok {
		c.errorf("duplicate alert name: %s", name)
	}
	a := Alert{
		Vars:             make(map[string]string),
		Name:             name,
		CritNotification: new(Notifications),
		WarnNotification: new(Notifications),
	}
	a.Text = s.RawText
	// procNotification resolves a notification value: either a
	// lookup(...) reference (validated against the lookup table) or a
	// plain comma-separated notification list.
	procNotification := func(v string, ns *Notifications) {
		if lookup := lookupNotificationRE.FindStringSubmatch(v); lookup != nil {
			if ns.Lookups == nil {
				ns.Lookups = make(map[string]*Lookup)
			}
			l := c.Lookups[lookup[1]]
			if l == nil {
				c.errorf("unknown lookup table %s", lookup[1])
			}
			// Validate every value the lookup could yield.
			for _, e := range l.Entries {
				for k, v := range e.Values {
					if k != lookup[2] {
						continue
					}
					if _, err := c.parseNotifications(v); err != nil {
						c.errorf("lookup %s: %v", v, err)
					}
				}
			}
			ns.Lookups[lookup[2]] = l
			return
		}
		n, err := c.parseNotifications(v)
		if err != nil {
			c.error(err)
		}
		if ns.Notifications == nil {
			ns.Notifications = make(map[string]*Notification)
		}
		for k, v := range n {
			ns.Notifications[k] = v
		}
	}
	pairs := c.getPairs(s, a.Vars, sNormal)
	for _, p := range pairs {
		c.at(p.node)
		v := p.val
		switch p.key {
		case "template":
			a.template = v
			t, ok := c.Templates[a.template]
			if !ok {
				c.errorf("template not found %s", a.template)
			}
			a.Template = t
		case "crit":
			a.Crit = c.NewExpr(v)
		case "warn":
			a.Warn = c.NewExpr(v)
		case "depends":
			a.Depends = c.NewExpr(v)
		case "squelch":
			a.squelch = append(a.squelch, v)
			if err := a.Squelch.Add(v); err != nil {
				c.error(err)
			}
		case "critNotification":
			procNotification(v, a.CritNotification)
		case "warnNotification":
			procNotification(v, a.WarnNotification)
		case "unknown":
			od, err := opentsdb.ParseDuration(v)
			if err != nil {
				c.error(err)
			}
			d := time.Duration(od)
			if d < time.Second {
				c.errorf("unknown duration must be at least 1s")
			}
			a.Unknown = d
		case "maxLogFrequency":
			od, err := opentsdb.ParseDuration(v)
			if err != nil {
				c.error(err)
			}
			d := time.Duration(od)
			if d < time.Second {
				c.errorf("max log frequency must be at least 1s")
			}
			a.MaxLogFrequency = d
		case "unjoinedOk":
			a.UnjoinedOK = true
		case "ignoreUnknown":
			a.IgnoreUnknown = true
		case "log":
			a.Log = true
		case "runEvery":
			var err error
			a.RunEvery, err = strconv.Atoi(v)
			if err != nil {
				c.error(err)
			}
		default:
			c.errorf("unknown key %s", p.key)
		}
	}
	if a.MaxLogFrequency != 0 && !a.Log {
		c.errorf("maxLogFrequency can only be used on alerts with `log = true`.")
	}
	c.at(s)
	if a.Crit == nil && a.Warn == nil {
		c.errorf("neither crit or warn specified")
	}
	// crit and warn must agree on return type and tag set so results join.
	var tags eparse.Tags
	var ret eparse.FuncType
	if a.Crit != nil {
		ctags, err := a.Crit.Root.Tags()
		if err != nil {
			c.error(err)
		}
		tags = ctags
		ret = a.Crit.Root.Return()
	}
	if a.Warn != nil {
		wtags, err := a.Warn.Root.Tags()
		if err != nil {
			c.error(err)
		}
		wret := a.Warn.Root.Return()
		if a.Crit == nil {
			tags = wtags
			ret = wret
		} else if ret != wret {
			c.errorf("crit and warn expressions must return same type (%v != %v)", ret, wret)
		} else if !tags.Equal(wtags) {
			c.errorf("crit tags (%v) and warn tags (%v) must be equal", tags, wtags)
		}
	}
	if a.Depends != nil {
		depTags, err := a.Depends.Root.Tags()
		if err != nil {
			c.error(err)
		}
		if len(depTags.Intersection(tags)) < 1 {
			c.errorf("Depends and crit/warn must share at least one tag.")
		}
	}
	// Log alerts never escalate, so chained notifications are disallowed and
	// each defined severity must have a notification.
	if a.Log {
		for _, n := range a.CritNotification.Notifications {
			if n.Next != nil {
				c.errorf("cannot use log with a chained notification")
			}
		}
		for _, n := range a.WarnNotification.Notifications {
			if n.Next != nil {
				c.errorf("cannot use log with a chained notification")
			}
		}
		if a.Crit != nil && len(a.CritNotification.Notifications) == 0 {
			c.errorf("log + crit specified, but no critNotification")
		}
		if a.Warn != nil && len(a.WarnNotification.Notifications) == 0 {
			c.errorf("log + warn specified, but no warnNotification")
		}
	}
	if len(a.WarnNotification.Notifications) != 0 {
		if a.Warn == nil {
			c.errorf("warnNotification specified, but no warn")
		}
		if a.Template == nil {
			c.errorf("warnNotification specified, but no template")
		}
	}
	if len(a.CritNotification.Notifications) != 0 {
		if a.Crit == nil {
			c.errorf("critNotification specified, but no crit")
		}
		if a.Template == nil {
			c.errorf("critNotification specified, but no template")
		}
	}
	if a.RunEvery == 0 {
		a.RunEvery = c.DefaultRunEvery
	}
	a.returnType = ret
	c.Alerts[name] = &a
}
// loadGlobal handles one top-level key = value pair of the config file,
// setting the matching Conf field. Keys starting with "$" declare variables
// (stored under both "$name" and "name"); anything else unknown is an error.
func (c *Conf) loadGlobal(p *parse.PairNode) {
	v := c.Expand(p.Val.Text, nil, false)
	switch k := p.Key.Text; k {
	case "checkFrequency":
		od, err := opentsdb.ParseDuration(v)
		if err != nil {
			c.error(err)
		}
		d := time.Duration(od)
		if d < time.Second {
			c.errorf("checkFrequency duration must be at least 1s")
		}
		c.CheckFrequency = d
	case "tsdbHost":
		// Default the OpenTSDB port when only a host is given.
		if !strings.Contains(v, ":") && v != "" {
			v += ":4242"
		}
		c.TSDBHost = v
	case "graphiteHost":
		c.GraphiteHost = v
	case "graphiteHeader":
		if !strings.Contains(v, ":") {
			c.errorf("graphiteHeader must be in key:value form")
		}
		c.GraphiteHeaders = append(c.GraphiteHeaders, v)
	case "logstashElasticHosts":
		c.LogstashElasticHosts = strings.Split(v, ",")
	case "influxHost":
		c.InfluxConfig.URL.Host = v
		c.InfluxConfig.UserAgent = "bosun"
		// Default scheme to non-TLS
		c.InfluxConfig.URL.Scheme = "http"
	case "influxUsername":
		c.InfluxConfig.Username = v
	case "influxPassword":
		c.InfluxConfig.Password = v
	case "influxTLS":
		b, err := strconv.ParseBool(v)
		if err != nil {
			c.error(err)
		}
		if b {
			c.InfluxConfig.URL.Scheme = "https"
		} else {
			c.InfluxConfig.URL.Scheme = "http"
		}
	case "influxTimeout":
		od, err := opentsdb.ParseDuration(v)
		if err != nil {
			c.error(err)
		}
		d := time.Duration(od)
		c.InfluxConfig.Timeout = d
	case "httpListen":
		c.HTTPListen = v
	case "hostname":
		c.Hostname = v
	case "relayListen":
		c.RelayListen = v
	case "smtpHost":
		c.SMTPHost = v
	case "smtpUsername":
		c.SMTPUsername = v
	case "smtpPassword":
		c.SMTPPassword = v
	case "emailFrom":
		c.EmailFrom = v
	case "stateFile":
		c.StateFile = v
	case "ping":
		c.Ping = true
	case "pingDuration":
		// Note: uses Go's time.ParseDuration here, not opentsdb durations.
		d, err := time.ParseDuration(v)
		if err != nil {
			c.errorf(err.Error())
		}
		c.PingDuration = d
	case "noSleep":
		c.NoSleep = true
	case "unknownThreshold":
		i, err := strconv.Atoi(v)
		if err != nil {
			c.error(err)
		}
		c.UnknownThreshold = i
	case "timeAndDate":
		// Comma-separated list of integers (timeanddate.com location ids).
		sp := strings.Split(v, ",")
		var t []int
		for _, s := range sp {
			i, err := strconv.Atoi(strings.TrimSpace(s))
			if err != nil {
				c.error(err)
			}
			t = append(t, i)
		}
		c.TimeAndDate = t
	case "responseLimit":
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			c.error(err)
		}
		if i <= 0 {
			c.errorf("responseLimit must be > 0")
		}
		c.ResponseLimit = i
	case "defaultRunEvery":
		var err error
		c.DefaultRunEvery, err = strconv.Atoi(v)
		if err != nil {
			c.error(err)
		}
		if c.DefaultRunEvery <= 0 {
			c.errorf("defaultRunEvery must be > 0")
		}
	case "searchSince":
		s, err := opentsdb.ParseDuration(v)
		if err != nil {
			c.error(err)
		}
		c.SearchSince = s
	case "unknownTemplate":
		c.unknownTemplate = v
		t, ok := c.Templates[c.unknownTemplate]
		if !ok {
			c.errorf("template not found: %s", c.unknownTemplate)
		}
		c.UnknownTemplate = t
	case "squelch":
		c.squelch = append(c.squelch, v)
		if err := c.Squelch.Add(v); err != nil {
			c.error(err)
		}
	case "shortURLKey":
		c.ShortURLKey = v
	case "ledisDir":
		c.LedisDir = v
	case "redisHost":
		c.RedisHost = v
	case "minGroupSize":
		i, err := strconv.Atoi(v)
		if err != nil {
			c.error(err)
		}
		c.MinGroupSize = i
	default:
		if !strings.HasPrefix(k, "$") {
			c.errorf("unknown key %s", k)
		}
		// Variables are reachable with or without the "$" prefix.
		c.Vars[k] = v
		c.Vars[k[1:]] = c.Vars[k]
	}
}
// influxQueryDuration adds time WHERE clauses to query for the given start and end durations.
// NOTE(review): an extended variant of this function (with a groupByInterval
// parameter) also appears in this source — presumably from a different
// revision; confirm only one copy is actually compiled.
func influxQueryDuration(now time.Time, query, start, end string) (string, error) {
	sd, err := opentsdb.ParseDuration(start)
	if err != nil {
		return "", err
	}
	// An empty end means "now" (zero offset); its parse error is ignored then.
	ed, err := opentsdb.ParseDuration(end)
	if end == "" {
		ed = 0
	} else if err != nil {
		return "", err
	}
	st, err := influxql.ParseStatement(query)
	if err != nil {
		return "", err
	}
	s, ok := st.(*influxql.SelectStatement)
	if !ok {
		return "", fmt.Errorf("influx: expected select statement")
	}
	// isTime reports whether a node is a reference to the "time" column.
	isTime := func(n influxql.Node) bool {
		v, ok := n.(*influxql.VarRef)
		if !ok {
			return false
		}
		s := strings.ToLower(v.Val)
		return s == "time"
	}
	// Reject queries that already constrain time themselves; this function
	// owns the time window.
	influxql.WalkFunc(s.Condition, func(n influxql.Node) {
		b, ok := n.(*influxql.BinaryExpr)
		if !ok {
			return
		}
		if isTime(b.LHS) || isTime(b.RHS) {
			err = fmt.Errorf("influx query must not contain time in WHERE")
		}
	})
	if err != nil {
		return "", err
	}
	//Add New BinaryExpr for time clause
	startExpr := &influxql.BinaryExpr{
		Op:  influxql.GTE,
		LHS: &influxql.VarRef{Val: "time"},
		RHS: &influxql.TimeLiteral{Val: now.Add(time.Duration(-sd))},
	}
	stopExpr := &influxql.BinaryExpr{
		Op:  influxql.LTE,
		LHS: &influxql.VarRef{Val: "time"},
		RHS: &influxql.TimeLiteral{Val: now.Add(time.Duration(-ed))},
	}
	// AND the window onto any existing condition.
	if s.Condition != nil {
		s.Condition = &influxql.BinaryExpr{
			Op:  influxql.AND,
			LHS: s.Condition,
			RHS: &influxql.BinaryExpr{
				Op:  influxql.AND,
				LHS: startExpr,
				RHS: stopExpr,
			},
		}
	} else {
		s.Condition = &influxql.BinaryExpr{
			Op:  influxql.AND,
			LHS: startExpr,
			RHS: stopExpr,
		}
	}
	return s.String(), nil
}
// influxQueryDuration adds time WHERE clauses to query for the given start and end durations.
// This variant also accepts a groupByInterval, appended as a time(...) GROUP BY
// dimension, and forces NoFill so empty windows are dropped.
// NOTE(review): shares its name with the shorter variant above — presumably
// from different revisions; confirm only one copy is actually compiled.
func influxQueryDuration(now time.Time, query, start, end, groupByInterval string) (string, error) {
	sd, err := opentsdb.ParseDuration(start)
	if err != nil {
		return "", err
	}
	// An empty end means "now" (zero offset); its parse error is ignored then.
	ed, err := opentsdb.ParseDuration(end)
	if end == "" {
		ed = 0
	} else if err != nil {
		return "", err
	}
	st, err := influxql.ParseStatement(query)
	if err != nil {
		return "", err
	}
	s, ok := st.(*influxql.SelectStatement)
	if !ok {
		return "", fmt.Errorf("influx: expected select statement")
	}
	// isTime reports whether a node is a reference to the "time" column.
	isTime := func(n influxql.Node) bool {
		v, ok := n.(*influxql.VarRef)
		if !ok {
			return false
		}
		s := strings.ToLower(v.Val)
		return s == "time"
	}
	// Reject queries that already constrain time themselves; this function
	// owns the time window.
	influxql.WalkFunc(s.Condition, func(n influxql.Node) {
		b, ok := n.(*influxql.BinaryExpr)
		if !ok {
			return
		}
		if isTime(b.LHS) || isTime(b.RHS) {
			err = fmt.Errorf("influx query must not contain time in WHERE")
		}
	})
	if err != nil {
		return "", err
	}
	//Add New BinaryExpr for time clause
	startExpr := &influxql.BinaryExpr{
		Op:  influxql.GTE,
		LHS: &influxql.VarRef{Val: "time"},
		RHS: &influxql.TimeLiteral{Val: now.Add(time.Duration(-sd))},
	}
	stopExpr := &influxql.BinaryExpr{
		Op:  influxql.LTE,
		LHS: &influxql.VarRef{Val: "time"},
		RHS: &influxql.TimeLiteral{Val: now.Add(time.Duration(-ed))},
	}
	// AND the window onto any existing condition.
	if s.Condition != nil {
		s.Condition = &influxql.BinaryExpr{
			Op:  influxql.AND,
			LHS: s.Condition,
			RHS: &influxql.BinaryExpr{
				Op:  influxql.AND,
				LHS: startExpr,
				RHS: stopExpr,
			},
		}
	} else {
		s.Condition = &influxql.BinaryExpr{
			Op:  influxql.AND,
			LHS: startExpr,
			RHS: stopExpr,
		}
	}
	// parse last argument
	if len(groupByInterval) > 0 {
		gbi, err := time.ParseDuration(groupByInterval)
		if err != nil {
			return "", err
		}
		s.Dimensions = append(s.Dimensions,
			&influxql.Dimension{Expr: &influxql.Call{
				Name: "time",
				Args: []influxql.Expr{&influxql.DurationLiteral{Val: gbi}},
			},
			})
	}
	// emtpy aggregate windows should be purged from the result
	// this default resembles the opentsdb results.
	if s.Fill == influxql.NullFill {
		s.Fill = influxql.NoFill
		s.FillValue = nil
	}
	return s.String(), nil
}
func GraphiteBand(e *State, T miniprofiler.Timer, query, duration, period, format string, num float64) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("graphiteBand", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("expr: Band: num out of bounds") } req := &graphite.Request{ Targets: []string{query}, } now := e.now req.End = &now st := e.now.Add(-time.Duration(d)) req.Start = &st for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) req.End = &now st := now.Add(time.Duration(-d)) req.Start = &st var s graphite.Response s, err = timeGraphiteRequest(e, T, req) if err != nil { return } formatTags := strings.Split(format, ".") var results []*Result results, err = parseGraphiteResponse(req, &s, formatTags) if err != nil { return } if i == 0 { r.Results = results } else { // different graphite requests might return series with different id's. // i.e. a different set of tagsets. merge the data of corresponding tagsets for _, result := range results { updateKey := -1 for j, existing := range r.Results { if result.Group.Equal(existing.Group) { updateKey = j break } } if updateKey == -1 { // result tagset is new r.Results = append(r.Results, result) updateKey = len(r.Results) - 1 } for k, v := range result.Value.(Series) { r.Results[updateKey].Value.(Series)[k] = v } } } } }) if err != nil { return nil, fmt.Errorf("graphiteBand: %v", err) } return }
func bandRangeTSDB(e *State, T miniprofiler.Timer, query, rangeStart, rangeEnd, period string, num float64, rfunc func(*Results, *opentsdb.Response) error) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("bandRange", func(T miniprofiler.Timer) { var from, to, p opentsdb.Duration from, err = opentsdb.ParseDuration(rangeStart) if err != nil { return } to, err = opentsdb.ParseDuration(rangeEnd) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if err = e.Search.Expand(q); err != nil { return } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() st := now.Add(time.Duration(from)).Unix() req.Start = st if err = req.SetTime(e.now); err != nil { return } for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) end := now.Add(time.Duration(-to)).Unix() req.End = &end st := now.Add(time.Duration(-from)).Unix() req.Start = &st var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } for _, res := range s { if e.squelched(res.Tags) { continue } if err = rfunc(r, res); err != nil { return } } } }) return }
func Over(e *State, T miniprofiler.Timer, query, duration, period string, num float64) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("band", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if !e.tsdbContext.Version().FilterSupport() { if err = e.Search.Expand(q); err != nil { return } } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() for i := 0; i < int(num); i++ { var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } offset := e.now.Sub(now) for _, res := range s { if e.squelched(res.Tags) { continue } values := make(Series) a := &Result{Group: res.Tags.Merge(opentsdb.TagSet{"shift": offset.String()})} for k, v := range res.DPS { i, err := strconv.ParseInt(k, 10, 64) if err != nil { return } values[time.Unix(i, 0).Add(offset).UTC()] = float64(v) } a.Value = values r.Results = append(r.Results, a) } now = now.Add(time.Duration(-p)) req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() } }) return }
func Band(e *State, T miniprofiler.Timer, query, duration, period string, num float64) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("band", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("expr: Band: num out of bounds") } q, err := opentsdb.ParseQuery(query) if q == nil && err != nil { return } if err = e.Search.Expand(q); err != nil { return } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() if err = req.SetTime(e.now); err != nil { return } for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } for _, res := range s { if e.squelched(res.Tags) { continue } newarr := true for _, a := range r.Results { if !a.Group.Equal(res.Tags) { continue } newarr = false values := a.Value.(Series) for k, v := range res.DPS { i, e := strconv.ParseInt(k, 10, 64) if e != nil { err = e return } values[time.Unix(i, 0).UTC()] = float64(v) } } if newarr { values := make(Series) a := &Result{Group: res.Tags} for k, v := range res.DPS { i, e := strconv.ParseInt(k, 10, 64) if e != nil { err = e return } values[time.Unix(i, 0).UTC()] = float64(v) } a.Value = values r.Results = append(r.Results, a) } } } }) return }