// timeTSDBRequest runs an OpenTSDB request under the profiler, serving the
// result from the expression cache when possible and retrying failed queries
// up to tsdbMaxTries times. The request is recorded on e.tsdbQueries so the
// UI can later display what was queried.
func timeTSDBRequest(e *State, T miniprofiler.Timer, req *opentsdb.Request) (s opentsdb.ResponseSet, err error) {
	e.tsdbQueries = append(e.tsdbQueries, *req)
	// Apply automatic downsampling to queries that did not specify their own.
	if e.autods > 0 {
		for _, q := range req.Queries {
			if q.Downsample == "" {
				if err := req.AutoDownsample(e.autods); err != nil {
					return nil, err
				}
			}
		}
	}
	// The marshaled request doubles as the profiler annotation and the cache
	// key. The marshal error is deliberately ignored (best-effort label/key).
	b, _ := json.MarshalIndent(req, "", " ")
	tries := 1
	for {
		T.StepCustomTiming("tsdb", "query", string(b), func() {
			getFn := func() (interface{}, error) {
				return e.tsdbContext.Query(req)
			}
			var val interface{}
			// err is the function's named return; the assignment inside this
			// closure is what the retry check below observes.
			val, err = e.cache.Get(string(b), getFn)
			// Copy so callers can never mutate the cached response set.
			s = val.(opentsdb.ResponseSet).Copy()
		})
		if err == nil || tries == tsdbMaxTries {
			break
		}
		slog.Errorf("Error on tsdb query %d: %s", tries, err.Error())
		tries++
	}
	return
}
// walkUnary evaluates a unary operator node by applying uoperate to every
// result produced by the operand expression, mutating the results in place.
func (e *State) walkUnary(node *parse.UnaryNode, T miniprofiler.Timer) *Results {
	a := e.walk(node.Arg, T)
	T.Step("walkUnary: "+node.OpStr, func(T miniprofiler.Timer) {
		for _, r := range a.Results {
			// NaN scalars propagate unchanged: no operator is applied to them.
			// NOTE(review): only Scalar NaNs get this short-circuit; Number
			// NaNs fall through to uoperate — presumably intentional, confirm.
			if an, aok := r.Value.(Scalar); aok && math.IsNaN(float64(an)) {
				r.Value = Scalar(math.NaN())
				continue
			}
			switch rt := r.Value.(type) {
			case Scalar:
				r.Value = Scalar(uoperate(node.OpStr, float64(rt)))
			case Number:
				r.Value = Number(uoperate(node.OpStr, float64(rt)))
			case Series:
				// Build a fresh series rather than mutating the operand's map.
				s := make(Series)
				for k, v := range rt {
					s[k] = uoperate(node.OpStr, float64(v))
				}
				r.Value = s
			default:
				panic(ErrUnknownOp)
			}
		}
	})
	return a
}
// timeESRequest executes the elasticsearch query under the profiler (which may
// set or hit the cache) and returns the search results.
func timeESRequest(e *State, T miniprofiler.Timer, req *ElasticRequest) (resp *elastic.SearchResult, err error) {
	e.elasticQueries = append(e.elasticQueries, *req.Source)
	var source interface{}
	source, err = req.Source.Source()
	if err != nil {
		return resp, fmt.Errorf("failed to get source of request while timing elastic request: %s", err)
	}
	// b is only used for the human-readable profiler annotation below; the
	// cache key comes from req.CacheKey instead.
	b, err := json.MarshalIndent(source, "", " ")
	if err != nil {
		return resp, err
	}
	key, err := req.CacheKey()
	if err != nil {
		return nil, err
	}
	T.StepCustomTiming("elastic", "query", fmt.Sprintf("%v\n%s", req.Indices, b), func() {
		getFn := func() (interface{}, error) {
			return e.ElasticHosts.Query(req)
		}
		var val interface{}
		// err is the named return; set inside the closure.
		val, err = e.Cache.Get(key, getFn)
		resp = val.(*elastic.SearchResult)
	})
	return
}
func bandTSDB(e *State, T miniprofiler.Timer, query, duration, period string, num float64, rfunc func(*Results, *opentsdb.Response, time.Duration) error) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("band", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if !e.tsdbContext.Version().FilterSupport() { if err = e.Search.Expand(q); err != nil { return } } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() if err = req.SetTime(e.now); err != nil { return } for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } for _, res := range s { if e.squelched(res.Tags) { continue } //offset := e.now.Sub(now.Add(time.Duration(p-d))) offset := e.now.Sub(now) if err = rfunc(r, res, offset); err != nil { return } } } }) return }
// walkFunc evaluates a function-call node: it walks each argument to a
// concrete value, coerces scalars to number sets where the signature demands,
// then invokes the registered implementation via reflection. Errors returned
// by the function are surfaced by panicking (recovered by the executor).
func (e *State) walkFunc(node *parse.FuncNode, T miniprofiler.Timer) *Results {
	var res *Results
	T.Step("func: "+node.Name, func(T miniprofiler.Timer) {
		var in []reflect.Value
		for i, a := range node.Args {
			var v interface{}
			// Literal nodes become raw values; nested expressions are walked
			// recursively and unwrapped with extract.
			switch t := a.(type) {
			case *parse.StringNode:
				v = t.Text
			case *parse.NumberNode:
				v = t.Float64
			case *parse.FuncNode:
				v = extract(e.walkFunc(t, T))
			case *parse.UnaryNode:
				v = extract(e.walkUnary(t, T))
			case *parse.BinaryNode:
				v = extract(e.walkBinary(t, T))
			case *parse.ExprNode:
				v = e.walkExpr(t, T)
			default:
				panic(fmt.Errorf("expr: unknown func arg type"))
			}
			// Determine the declared type of this argument; variadic
			// functions reuse the type at VArgsPos for the extra args.
			var argType models.FuncType
			if i >= len(node.F.Args) {
				if !node.F.VArgs {
					panic("expr: shouldn't be here, more args then expected and not variable argument type func")
				}
				argType = node.F.Args[node.F.VArgsPos]
			} else {
				argType = node.F.Args[i]
			}
			// Implicitly promote a bare float to a NumberSet when required.
			if f, ok := v.(float64); ok && argType == models.TypeNumberSet {
				v = fromScalar(f)
			}
			in = append(in, reflect.ValueOf(v))
		}
		// Every expr function takes (e, T, args...) and returns
		// (*Results[, error]).
		f := reflect.ValueOf(node.F.F)
		fr := f.Call(append([]reflect.Value{reflect.ValueOf(e), reflect.ValueOf(T)}, in...))
		res = fr[0].Interface().(*Results)
		if len(fr) > 1 && !fr[1].IsNil() {
			err := fr[1].Interface().(error)
			if err != nil {
				panic(err)
			}
		}
		// Record computations for number-set results so the UI can show how
		// each value was derived.
		if node.Return() == models.TypeNumberSet {
			for _, r := range res.Results {
				e.AddComputation(r, node.String(), r.Value.(Number))
			}
		}
	})
	return res
}
func (e *Expr) ExecuteState(s *State, T miniprofiler.Timer) (r *Results, queries []opentsdb.Request, err error) { defer errRecover(&err) if T == nil { T = new(miniprofiler.Profile) } else { s.enableComputations = true } T.Step("expr execute", func(T miniprofiler.Timer) { r = s.walk(e.Tree.Root, T) }) queries = s.tsdbQueries return }
// timeLSRequest executes the logstash elasticsearch query under the profiler
// (which may set or hit the cache) and returns the search results.
func timeLSRequest(e *State, T miniprofiler.Timer, req *LogstashRequest) (resp *elastic.SearchResult, err error) {
	e.logstashQueries = append(e.logstashQueries, *req.Source)
	// b doubles as the profiler annotation and the cache key; the marshal
	// error is deliberately ignored (best-effort label/key).
	b, _ := json.MarshalIndent(req.Source.Source(), "", " ")
	T.StepCustomTiming("logstash", "query", string(b), func() {
		getFn := func() (interface{}, error) {
			return e.LogstashHosts.Query(req)
		}
		var val interface{}
		// err is the named return; set inside the closure.
		val, err = e.Cache.Get(string(b), getFn)
		resp = val.(*elastic.SearchResult)
	})
	return
}
// Index is a demo handler showing miniprofiler instrumentation: it runs a few
// timed SQLite statements and Redis commands, then renders a page containing
// the profiler includes. Errors are deliberately ignored throughout — this is
// example code, not production error handling.
func Index(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	db, _ := sql.Open("sqlite3", ":memory:")
	db.ExecTimer(t, "create table x(a, b, c)")
	db.ExecTimer(t, "insert into x (1, 2, 4), (3, 5, 6)")
	db.QueryTimer(t, "select * from x")
	// Group the Redis calls under a single named profiler step.
	t.Step("redissss", func(t miniprofiler.Timer) {
		conn, _ := redis.Dial("tcp", ":6379")
		defer conn.Close()
		conn.DoTimer(t, "set", "tes t", "value")
		conn.SendTimer(t, "get", "test t")
	})
	fmt.Fprintf(w, `<html><body>%v</body></html>`, t.Includes())
}
func timeGraphiteRequest(e *State, T miniprofiler.Timer, req *graphite.Request) (resp graphite.Response, err error) { e.graphiteQueries = append(e.graphiteQueries, *req) b, _ := json.MarshalIndent(req, "", " ") T.StepCustomTiming("graphite", "query", string(b), func() { key := req.CacheKey() getFn := func() (interface{}, error) { return e.graphiteContext.Query(req) } var val interface{} val, err = e.cache.Get(key, getFn) resp = val.(graphite.Response) }) return }
func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, endDuration, groupByInterval string) (s []influxModels.Row, err error) { q, err := influxQueryDuration(e.now, query, startDuration, endDuration, groupByInterval) if err != nil { return nil, err } conn, err := client.NewHTTPClient(e.InfluxConfig) if err != nil { return nil, err } T.StepCustomTiming("influx", "query", q, func() { getFn := func() (interface{}, error) { res, err := conn.Query(client.Query{ Command: q, Database: db, }) if err != nil { return nil, err } if res.Error() != nil { return nil, res.Error() } if len(res.Results) != 1 { return nil, fmt.Errorf("influx: expected one result") } r := res.Results[0] if r.Err == "" { return r.Series, nil } err = fmt.Errorf(r.Err) return r.Series, err } var val interface{} var ok bool val, err = e.Cache.Get(q, getFn) if s, ok = val.([]influxModels.Row); !ok { err = fmt.Errorf("influx: did not get a valid result from InfluxDB") } }) return }
// Index serves the main UI page. Requests for /graph?png= are delegated to
// Graph (which writes the image itself); everything else gets the index
// template with profiler includes and serialized app settings.
func Index(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/graph" {
		r.ParseForm()
		if _, present := r.Form["png"]; present {
			if _, err := Graph(t, w, r); err != nil {
				serveError(w, err)
			}
			return
		}
	}
	r.Header.Set(miniprofilerHeader, "true")
	// Set some global settings for the UI to know about. This saves us from
	// having to make an HTTP call to see what features should be enabled
	// in the UI.
	openTSDBVersion := opentsdb.Version{0, 0}
	if schedule.SystemConf.GetTSDBContext() != nil {
		openTSDBVersion = schedule.SystemConf.GetTSDBContext().Version()
	}
	settings, err := json.Marshal(appSetings{
		schedule.SystemConf.SaveEnabled(),
		schedule.SystemConf.AnnotateEnabled(),
		schedule.GetQuiet(),
		openTSDBVersion,
	})
	if err != nil {
		serveError(w, err)
		return
	}
	err = indexTemplate().Execute(w, indexVariables{
		t.Includes(),
		string(settings),
	})
	if err != nil {
		serveError(w, err)
	}
}
// Graph takes an OpenTSDB request data structure and queries OpenTSDB. Use the
// json parameter to pass JSON. Use the b64 parameter to pass base64-encoded
// JSON. Depending on the png form value it either renders an SVG scatter
// chart directly to w or returns chart series data for the UI to plot.
func Graph(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	// Accept the request either as raw JSON or base64-encoded JSON.
	j := []byte(r.FormValue("json"))
	if bs := r.FormValue("b64"); bs != "" {
		b, err := base64.StdEncoding.DecodeString(bs)
		if err != nil {
			return nil, err
		}
		j = b
	}
	if len(j) == 0 {
		return nil, fmt.Errorf("either json or b64 required")
	}
	oreq, err := opentsdb.RequestFromJSON(j)
	if err != nil {
		return nil, err
	}
	if ads_v := r.FormValue("autods"); ads_v != "" {
		ads_i, err := strconv.Atoi(ads_v)
		if err != nil {
			return nil, err
		}
		if err := oreq.AutoDownsample(ads_i); err != nil {
			return nil, err
		}
	}
	// ar marks which query indices should have rate settings auto-derived
	// from metadata.
	ar := make(map[int]bool)
	for _, v := range r.Form["autorate"] {
		if i, err := strconv.Atoi(v); err == nil {
			ar[i] = true
		}
	}
	queries := make([]string, len(oreq.Queries))
	var start, end string
	var startT, endT time.Time
	// Derive human-friendly start/end strings and concrete times from either
	// relative ("-ago") or absolute (unix int64) request bounds.
	if s, ok := oreq.Start.(string); ok && strings.Contains(s, "-ago") {
		startT, err = opentsdb.ParseTime(s)
		if err != nil {
			return nil, err
		}
		start = strings.TrimSuffix(s, "-ago")
	}
	if s, ok := oreq.End.(string); ok && strings.Contains(s, "-ago") {
		endT, err = opentsdb.ParseTime(s)
		if err != nil {
			return nil, err
		}
		end = strings.TrimSuffix(s, "-ago")
	}
	if start == "" && end == "" {
		s, sok := oreq.Start.(int64)
		e, eok := oreq.End.(int64)
		if sok && eok {
			start = fmt.Sprintf("%vs", e-s)
			startT = time.Unix(s, 0)
			endT = time.Unix(e, 0)
			// NOTE(review): err cannot be non-nil here; this check looks like
			// a leftover from an earlier version.
			if err != nil {
				return nil, err
			}
		}
	}
	if endT.Equal(time.Time{}) {
		endT = time.Now().UTC()
	}
	m_units := make(map[string]string)
	for i, q := range oreq.Queries {
		if ar[i] {
			// Auto-rate: consult metric metadata to decide rate/counter
			// settings and remember the unit for chart labeling.
			meta, err := schedule.MetadataMetrics(q.Metric)
			if err != nil {
				return nil, err
			}
			if meta == nil {
				return nil, fmt.Errorf("no metadata for %s: cannot use auto rate", q)
			}
			if meta.Unit != "" {
				m_units[q.Metric] = meta.Unit
			}
			if meta.Rate != "" {
				switch meta.Rate {
				case metadata.Gauge:
					// ignore
				case metadata.Rate:
					q.Rate = true
				case metadata.Counter:
					q.Rate = true
					q.RateOptions = opentsdb.RateOptions{
						Counter:    true,
						ResetValue: 1,
					}
				default:
					return nil, fmt.Errorf("unknown metadata rate: %s", meta.Rate)
				}
			}
		}
		// Record the equivalent expression-language query for display.
		queries[i] = fmt.Sprintf(`q("%v", "%v", "%v")`, q, start, end)
		if !schedule.SystemConf.GetTSDBContext().Version().FilterSupport() {
			if err := schedule.Search.Expand(q); err != nil {
				return nil, err
			}
		}
	}
	var tr opentsdb.ResponseSet
	// Marshal error deliberately ignored: b is only the profiler annotation.
	b, _ := json.MarshalIndent(oreq, "", " ")
	t.StepCustomTiming("tsdb", "query", string(b), func() {
		h := schedule.SystemConf.GetTSDBHost()
		if h == "" {
			err = fmt.Errorf("tsdbHost not set")
			return
		}
		tr, err = oreq.Query(h)
	})
	if err != nil {
		return nil, err
	}
	cs, err := makeChart(tr, m_units)
	if err != nil {
		return nil, err
	}
	// PNG mode (actually SVG output): render the chart straight to the
	// response writer and return nothing for the JSON layer.
	if _, present := r.Form["png"]; present {
		c := chart.ScatterChart{
			Title: fmt.Sprintf("%v - %v", oreq.Start, queries),
		}
		c.XRange.Time = true
		if min, err := strconv.ParseFloat(r.FormValue("min"), 64); err == nil {
			c.YRange.MinMode.Fixed = true
			c.YRange.MinMode.Value = min
		}
		if max, err := strconv.ParseFloat(r.FormValue("max"), 64); err == nil {
			c.YRange.MaxMode.Fixed = true
			c.YRange.MaxMode.Value = max
		}
		for ri, r := range cs {
			pts := make([]chart.EPoint, len(r.Data))
			for idx, v := range r.Data {
				pts[idx].X = v[0]
				pts[idx].Y = v[1]
			}
			// Chart library expects points in ascending time order.
			slice.Sort(pts, func(i, j int) bool {
				return pts[i].X < pts[j].X
			})
			c.AddData(r.Name, pts, chart.PlotStyleLinesPoints, sched.Autostyle(ri))
		}
		w.Header().Set("Content-Type", "image/svg+xml")
		white := color.RGBA{0xff, 0xff, 0xff, 0xff}
		const width = 800
		const height = 600
		s := svg.New(w)
		s.Start(width, height)
		s.Rect(0, 0, width, height, "fill: #ffffff")
		sgr := svgg.AddTo(s, 0, 0, width, height, "", 12, white)
		c.Plot(sgr)
		s.End()
		return nil, nil
	}
	// JSON mode: attach annotations (best effort — failures become warnings,
	// not errors).
	var a []annotate.Annotation
	warnings := []string{}
	if schedule.SystemConf.AnnotateEnabled() {
		a, err = annotateBackend.GetAnnotations(&startT, &endT)
		if err != nil {
			warnings = append(warnings, fmt.Sprintf("unable to get annotations: %v", err))
		}
	}
	return struct {
		Queries     []string
		Series      []*chartSeries
		Annotations []annotate.Annotation
		Warnings    []string
	}{
		queries,
		cs,
		a,
		warnings,
	}, nil
}
// MarshalGroups builds the StateGroups structure consumed by the dashboard:
// it collects open incident states matching filter, groups them by status
// tuple, nests them into subject-labeled groups, and sorts the final lists.
// Each phase runs under its own profiler step.
func (s *Schedule) MarshalGroups(T miniprofiler.Timer, filter string) (*StateGroups, error) {
	var silenced SilenceTester
	T.Step("Silenced", func(miniprofiler.Timer) {
		silenced = s.Silenced()
	})
	var groups map[StateTuple]States
	var err error
	status := make(States)
	t := StateGroups{
		TimeAndDate: s.SystemConf.GetTimeAndDate(),
	}
	t.FailingAlerts, t.UnclosedErrors = s.getErrorCounts()
	// Setup: fetch open states, parse the boolean filter expression, and keep
	// only matching states. Errors inside the closure surface via err.
	T.Step("Setup", func(miniprofiler.Timer) {
		status2, err2 := s.GetOpenStates()
		if err2 != nil {
			err = err2
			return
		}
		var parsedExpr *boolq.Tree
		parsedExpr, err2 = boolq.Parse(filter)
		if err2 != nil {
			err = err2
			return
		}
		for k, v := range status2 {
			a := s.RuleConf.GetAlert(k.Name())
			if a == nil {
				// The alert no longer exists in config: force-close the
				// incident rather than render a broken entry.
				slog.Errorf("unknown alert %s. Force closing.", k.Name())
				if err2 = s.ActionByAlertKey("bosun", "closing because alert doesn't exist.", models.ActionForceClose, k); err2 != nil {
					slog.Error(err2)
				}
				continue
			}
			is, err2 := MakeIncidentSummary(s.RuleConf, silenced, v)
			if err2 != nil {
				err = err2
				return
			}
			match := false
			match, err2 = boolq.AskParsedExpr(parsedExpr, is)
			if err2 != nil {
				err = err2
				return
			}
			if match {
				status[k] = v
			}
		}
	})
	if err != nil {
		return nil, err
	}
	T.Step("GroupStates", func(T miniprofiler.Timer) {
		groups = status.GroupStates(silenced)
	})
	// Build nested display groups for warning/critical/unknown tuples; other
	// statuses are skipped entirely.
	T.Step("groups", func(T miniprofiler.Timer) {
		for tuple, states := range groups {
			var grouped []*StateGroup
			switch tuple.Status {
			case models.StWarning, models.StCritical, models.StUnknown:
				var sets map[string]models.AlertKeys
				T.Step(fmt.Sprintf("GroupSets (%d): %v", len(states), tuple), func(T miniprofiler.Timer) {
					sets = states.GroupSets(s.SystemConf.GetMinGroupSize())
				})
				for name, group := range sets {
					g := StateGroup{
						Active:        tuple.Active,
						Status:        tuple.Status,
						CurrentStatus: tuple.CurrentStatus,
						Silenced:      tuple.Silenced,
						Subject:       fmt.Sprintf("%s - %s", tuple.Status, name),
					}
					for _, ak := range group {
						st := status[ak]
						// Strip heavy fields before marshaling to the UI.
						st.Body = ""
						st.EmailBody = nil
						st.Attachments = nil
						g.Children = append(g.Children, &StateGroup{
							Active:   tuple.Active,
							Status:   tuple.Status,
							Silenced: tuple.Silenced,
							AlertKey: ak,
							Alert:    ak.Name(),
							Subject:  string(st.Subject),
							Ago:      marshalTime(st.Last().Time),
							State:    st,
							IsError:  !s.AlertSuccessful(ak.Name()),
						})
					}
					// A singleton group inherits its child's subject line.
					if len(g.Children) == 1 && g.Children[0].Subject != "" {
						g.Subject = g.Children[0].Subject
					}
					grouped = append(grouped, &g)
				}
			default:
				continue
			}
			if tuple.NeedAck {
				t.Groups.NeedAck = append(t.Groups.NeedAck, grouped...)
			} else {
				t.Groups.Acknowledged = append(t.Groups.Acknowledged, grouped...)
			}
		}
	})
	// Sort both lists: active first, then by status severity (descending),
	// alert key, and subject.
	T.Step("sort", func(T miniprofiler.Timer) {
		gsort := func(grp []*StateGroup) func(i, j int) bool {
			return func(i, j int) bool {
				a := grp[i]
				b := grp[j]
				if a.Active && !b.Active {
					return true
				} else if !a.Active && b.Active {
					return false
				}
				if a.Status != b.Status {
					return a.Status > b.Status
				}
				if a.AlertKey != b.AlertKey {
					return a.AlertKey < b.AlertKey
				}
				return a.Subject < b.Subject
			}
		}
		slice.Sort(t.Groups.NeedAck, gsort(t.Groups.NeedAck))
		slice.Sort(t.Groups.Acknowledged, gsort(t.Groups.Acknowledged))
	})
	return &t, nil
}
func (d DB) QueryRowTimer(t miniprofiler.Timer, query string, args ...interface{}) (row *sql.Row) { t.StepCustomTiming("sql", "query", query, func() { row = d.DB.QueryRow(query, args...) }) return }
func (d DB) ExecTimer(t miniprofiler.Timer, query string, args ...interface{}) (result sql.Result, err error) { t.StepCustomTiming("sql", "exec", query, func() { result, err = d.DB.Exec(query, args...) }) return }
func Over(e *State, T miniprofiler.Timer, query, duration, period string, num float64) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("band", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if !e.tsdbContext.Version().FilterSupport() { if err = e.Search.Expand(q); err != nil { return } } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() for i := 0; i < int(num); i++ { var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } offset := e.now.Sub(now) for _, res := range s { if e.squelched(res.Tags) { continue } values := make(Series) a := &Result{Group: res.Tags.Merge(opentsdb.TagSet{"shift": offset.String()})} for k, v := range res.DPS { i, err := strconv.ParseInt(k, 10, 64) if err != nil { return } values[time.Unix(i, 0).Add(offset).UTC()] = float64(v) } a.Value = values r.Results = append(r.Results, a) } now = now.Add(time.Duration(-p)) req.End = now.Unix() req.Start = now.Add(time.Duration(-d)).Unix() } }) return }
func bandRangeTSDB(e *State, T miniprofiler.Timer, query, rangeStart, rangeEnd, period string, num float64, rfunc func(*Results, *opentsdb.Response) error) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("bandRange", func(T miniprofiler.Timer) { var from, to, p opentsdb.Duration from, err = opentsdb.ParseDuration(rangeStart) if err != nil { return } to, err = opentsdb.ParseDuration(rangeEnd) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("num out of bounds") } var q *opentsdb.Query q, err = opentsdb.ParseQuery(query, e.tsdbContext.Version()) if err != nil { return } if err = e.Search.Expand(q); err != nil { return } req := opentsdb.Request{ Queries: []*opentsdb.Query{q}, } now := e.now req.End = now.Unix() st := now.Add(time.Duration(from)).Unix() req.Start = st if err = req.SetTime(e.now); err != nil { return } for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) end := now.Add(time.Duration(-to)).Unix() req.End = &end st := now.Add(time.Duration(-from)).Unix() req.Start = &st var s opentsdb.ResponseSet s, err = timeTSDBRequest(e, T, &req) if err != nil { return } for _, res := range s { if e.squelched(res.Tags) { continue } if err = rfunc(r, res); err != nil { return } } } }) return }
func GraphiteBand(e *State, T miniprofiler.Timer, query, duration, period, format string, num float64) (r *Results, err error) { r = new(Results) r.IgnoreOtherUnjoined = true r.IgnoreUnjoined = true T.Step("graphiteBand", func(T miniprofiler.Timer) { var d, p opentsdb.Duration d, err = opentsdb.ParseDuration(duration) if err != nil { return } p, err = opentsdb.ParseDuration(period) if err != nil { return } if num < 1 || num > 100 { err = fmt.Errorf("expr: Band: num out of bounds") } req := &graphite.Request{ Targets: []string{query}, } now := e.now req.End = &now st := e.now.Add(-time.Duration(d)) req.Start = &st for i := 0; i < int(num); i++ { now = now.Add(time.Duration(-p)) req.End = &now st := now.Add(time.Duration(-d)) req.Start = &st var s graphite.Response s, err = timeGraphiteRequest(e, T, req) if err != nil { return } formatTags := strings.Split(format, ".") var results []*Result results, err = parseGraphiteResponse(req, &s, formatTags) if err != nil { return } if i == 0 { r.Results = results } else { // different graphite requests might return series with different id's. // i.e. a different set of tagsets. merge the data of corresponding tagsets for _, result := range results { updateKey := -1 for j, existing := range r.Results { if result.Group.Equal(existing.Group) { updateKey = j break } } if updateKey == -1 { // result tagset is new r.Results = append(r.Results, result) updateKey = len(r.Results) - 1 } for k, v := range result.Value.(Series) { r.Results[updateKey].Value.(Series)[k] = v } } } } }) if err != nil { return nil, fmt.Errorf("graphiteBand: %v", err) } return }
// walkBinary evaluates a binary operator node: both operands are walked, their
// result sets are joined on matching groups via union, and operate is applied
// pairwise for every Scalar/Number/Series type combination. Scalar and Number
// results are also recorded as computations for the UI.
func (e *State) walkBinary(node *parse.BinaryNode, T miniprofiler.Timer) *Results {
	ar := e.walk(node.Args[0], T)
	br := e.walk(node.Args[1], T)
	res := Results{
		IgnoreUnjoined:      ar.IgnoreUnjoined || br.IgnoreUnjoined,
		IgnoreOtherUnjoined: ar.IgnoreOtherUnjoined || br.IgnoreOtherUnjoined,
	}
	T.Step("walkBinary: "+node.OpStr, func(T miniprofiler.Timer) {
		u := e.union(ar, br, node.String())
		for _, v := range u {
			var value Value
			r := &Result{
				Group:        v.Group,
				Computations: v.Computations,
			}
			// Exhaustive double type-switch over operand kinds; any
			// unsupported combination panics with ErrUnknownOp.
			switch at := v.A.(type) {
			case Scalar:
				switch bt := v.B.(type) {
				case Scalar:
					n := Scalar(operate(node.OpStr, float64(at), float64(bt)))
					e.AddComputation(r, node.String(), Number(n))
					value = n
				case Number:
					n := Number(operate(node.OpStr, float64(at), float64(bt)))
					e.AddComputation(r, node.String(), n)
					value = n
				case Series:
					// Scalar op Series: apply element-wise over the series.
					s := make(Series)
					for k, v := range bt {
						s[k] = operate(node.OpStr, float64(at), float64(v))
					}
					value = s
				default:
					panic(ErrUnknownOp)
				}
			case Number:
				switch bt := v.B.(type) {
				case Scalar:
					n := Number(operate(node.OpStr, float64(at), float64(bt)))
					e.AddComputation(r, node.String(), Number(n))
					value = n
				case Number:
					n := Number(operate(node.OpStr, float64(at), float64(bt)))
					e.AddComputation(r, node.String(), n)
					value = n
				case Series:
					s := make(Series)
					for k, v := range bt {
						s[k] = operate(node.OpStr, float64(at), float64(v))
					}
					value = s
				default:
					panic(ErrUnknownOp)
				}
			case Series:
				switch bt := v.B.(type) {
				case Number, Scalar:
					// Both wrap float64; reflect extracts it uniformly.
					bv := reflect.ValueOf(bt).Float()
					s := make(Series)
					for k, v := range at {
						s[k] = operate(node.OpStr, float64(v), bv)
					}
					value = s
				case Series:
					// Series op Series: only timestamps present in both
					// series appear in the output.
					s := make(Series)
					for k, av := range at {
						if bv, ok := bt[k]; ok {
							s[k] = operate(node.OpStr, av, bv)
						}
					}
					value = s
				default:
					panic(ErrUnknownOp)
				}
			default:
				panic(ErrUnknownOp)
			}
			r.Value = value
			res.Results = append(res.Results, r)
		}
	})
	return &res
}
func Rule(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) { var from, to time.Time var err error if f := r.FormValue("from"); len(f) > 0 { from, err = time.Parse(tsdbFormatSecs, f) if err != nil { return nil, err } } if f := r.FormValue("to"); len(f) > 0 { to, err = time.Parse(tsdbFormatSecs, f) if err != nil { return nil, err } } intervals := 1 if i := r.FormValue("intervals"); len(i) > 0 { intervals, err = strconv.Atoi(r.FormValue("intervals")) if err != nil { return nil, err } if intervals < 1 { return nil, fmt.Errorf("must be > 0 intervals") } } if fz, tz := from.IsZero(), to.IsZero(); fz && tz { from = time.Now() } else if fz && !tz { return nil, fmt.Errorf("cannot specify to without from") } else if !fz && tz && intervals > 1 { return nil, fmt.Errorf("cannot specify intervals without from and to") } c, a, hash, err := buildConfig(r) if err != nil { return nil, err } ch := make(chan int) errch := make(chan error, intervals) resch := make(chan *ruleResult, intervals) var wg sync.WaitGroup diff := -from.Sub(to) if intervals > 1 { diff /= time.Duration(intervals - 1) } worker := func() { wg.Add(1) for interval := range ch { t.Step(fmt.Sprintf("interval %v", interval), func(t miniprofiler.Timer) { now := from.Add(diff * time.Duration(interval)) res, err := procRule(t, c, a, now, interval != 0, r.FormValue("email"), r.FormValue("template_group")) resch <- res errch <- err }) } defer wg.Done() } for i := 0; i < 20; i++ { go worker() } for i := 0; i < intervals; i++ { ch <- i } close(ch) wg.Wait() close(errch) close(resch) type Result struct { Group models.AlertKey Result *models.Event } type Set struct { Critical, Warning, Normal int Time string Results []*Result `json:",omitempty"` } type History struct { Time, EndTime time.Time Status string } type Histories struct { History []*History } ret := struct { Errors []string `json:",omitempty"` Warnings []string `json:",omitempty"` Sets []*Set AlertHistory 
map[models.AlertKey]*Histories Body string `json:",omitempty"` Subject string `json:",omitempty"` Data interface{} `json:",omitempty"` Hash string }{ AlertHistory: make(map[models.AlertKey]*Histories), Hash: hash, } for err := range errch { if err == nil { continue } ret.Errors = append(ret.Errors, err.Error()) } for res := range resch { if res == nil { continue } set := Set{ Critical: len(res.Criticals), Warning: len(res.Warnings), Normal: len(res.Normals), Time: res.Time.Format(tsdbFormatSecs), } if res.Data != nil { ret.Body = res.Body ret.Subject = res.Subject ret.Data = res.Data for k, v := range res.Result { set.Results = append(set.Results, &Result{ Group: k, Result: v, }) } slice.Sort(set.Results, func(i, j int) bool { a := set.Results[i] b := set.Results[j] if a.Result.Status != b.Result.Status { return a.Result.Status > b.Result.Status } return a.Group < b.Group }) } for k, v := range res.Result { if ret.AlertHistory[k] == nil { ret.AlertHistory[k] = new(Histories) } h := ret.AlertHistory[k] h.History = append(h.History, &History{ Time: v.Time, Status: v.Status.String(), }) } ret.Sets = append(ret.Sets, &set) ret.Warnings = append(ret.Warnings, res.Warning...) } slice.Sort(ret.Sets, func(i, j int) bool { return ret.Sets[i].Time < ret.Sets[j].Time }) for _, histories := range ret.AlertHistory { hist := histories.History slice.Sort(hist, func(i, j int) bool { return hist[i].Time.Before(hist[j].Time) }) for i := 1; i < len(hist); i++ { if i < len(hist)-1 && hist[i].Status == hist[i-1].Status { hist = append(hist[:i], hist[i+1:]...) i-- } } for i, h := range hist[:len(hist)-1] { h.EndTime = hist[i+1].Time } histories.History = hist[:len(hist)-1] } return &ret, nil }