// timeTSDBRequest executes the OpenTSDB request (which may set or hit cache),
// retrying failed queries up to tsdbMaxTries attempts, and returns the response set.
func timeTSDBRequest(e *State, T miniprofiler.Timer, req *opentsdb.Request) (s opentsdb.ResponseSet, err error) {
	e.tsdbQueries = append(e.tsdbQueries, *req)
	if e.autods > 0 {
		for _, q := range req.Queries {
			if q.Downsample == "" {
				if err := req.AutoDownsample(e.autods); err != nil {
					return nil, err
				}
			}
		}
	}
	b, _ := json.MarshalIndent(req, "", " ")
	tries := 1
	for {
		T.StepCustomTiming("tsdb", "query", string(b), func() {
			getFn := func() (interface{}, error) {
				return e.tsdbContext.Query(req)
			}
			var val interface{}
			val, err = e.cache.Get(string(b), getFn)
			s = val.(opentsdb.ResponseSet).Copy()
		})
		if err == nil || tries == tsdbMaxTries {
			break
		}
		slog.Errorf("Error on tsdb query %d: %s", tries, err.Error())
		tries++
	}
	return
}
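An illustration only, not part of the code above: the retry loop in timeTSDBRequest relies on the timing closure writing to the function's named return value err, which the surrounding loop then inspects to decide whether to retry. A self-contained sketch of that control flow, with a made-up maxTries constant and a flaky function standing in for the TSDB query:

package main

import (
	"errors"
	"fmt"
)

const maxTries = 3

// runWithRetry keeps invoking step (which sets the named return err, as the
// timing closure does above) until it succeeds or maxTries attempts are used.
func runWithRetry(step func() error) (err error) {
	tries := 1
	for {
		err = step()
		if err == nil || tries == maxTries {
			break
		}
		fmt.Printf("error on try %d: %s\n", tries, err)
		tries++
	}
	return
}

func main() {
	attempts := 0
	flaky := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // fails twice, then succeeds
		}
		return nil
	}
	fmt.Println("final err:", runWithRetry(flaky))
}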
// timeLSRequest executes the Logstash Elasticsearch query (which may set or hit
// cache) and returns the search results.
func timeLSRequest(e *State, T miniprofiler.Timer, req *LogstashRequest) (resp *elastic.SearchResult, err error) {
	e.logstashQueries = append(e.logstashQueries, *req.Source)
	b, _ := json.MarshalIndent(req.Source.Source(), "", " ")
	T.StepCustomTiming("logstash", "query", string(b), func() {
		getFn := func() (interface{}, error) {
			return e.logstashHosts.Query(req)
		}
		var val interface{}
		val, err = e.cache.Get(string(b), getFn)
		resp = val.(*elastic.SearchResult)
	})
	return
}
// timeGraphiteRequest executes the Graphite query (which may set or hit cache)
// and returns the response.
func timeGraphiteRequest(e *State, T miniprofiler.Timer, req *graphite.Request) (resp graphite.Response, err error) {
	e.graphiteQueries = append(e.graphiteQueries, *req)
	b, _ := json.MarshalIndent(req, "", " ")
	T.StepCustomTiming("graphite", "query", string(b), func() {
		key := req.CacheKey()
		getFn := func() (interface{}, error) {
			return e.graphiteContext.Query(req)
		}
		var val interface{}
		val, err = e.cache.Get(key, getFn)
		resp = val.(graphite.Response)
	})
	return
}
func timeTSDBRequest(e *State, T miniprofiler.Timer, req *opentsdb.Request) (s opentsdb.ResponseSet, err error) {
	e.tsdbQueries = append(e.tsdbQueries, *req)
	if e.autods > 0 {
		if err := req.AutoDownsample(e.autods); err != nil {
			return nil, err
		}
	}
	b, _ := json.MarshalIndent(req, "", " ")
	T.StepCustomTiming("tsdb", "query", string(b), func() {
		getFn := func() (interface{}, error) {
			return e.tsdbContext.Query(req)
		}
		var val interface{}
		val, err = e.cache.Get(string(b), getFn)
		s = val.(opentsdb.ResponseSet).Copy()
	})
	return
}
// timeInfluxRequest executes the InfluxDB query over the given time range
// (which may set or hit cache) and returns the resulting rows.
func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, endDuration string) (s []influxql.Row, err error) {
	q, err := influxQueryDuration(e.now, query, startDuration, endDuration)
	if err != nil {
		return nil, err
	}
	conf := client.Config{
		URL: url.URL{
			Scheme: "http",
			Host:   e.InfluxHost,
		},
		Timeout: time.Minute,
	}
	conn, err := client.NewClient(conf)
	if err != nil {
		return nil, err
	}
	T.StepCustomTiming("influx", "query", q, func() {
		getFn := func() (interface{}, error) {
			res, err := conn.Query(client.Query{
				Command:  q,
				Database: db,
			})
			if err != nil {
				return nil, err
			}
			if res.Err != nil {
				return nil, res.Err
			}
			if len(res.Results) != 1 {
				return nil, fmt.Errorf("influx: expected one result")
			}
			r := res.Results[0]
			return r.Series, r.Err
		}
		var val interface{}
		var ok bool
		val, err = e.cache.Get(q, getFn)
		if s, ok = val.([]influxql.Row); !ok {
			err = fmt.Errorf("influx: did not get a valid result from InfluxDB")
		}
	})
	return
}
// timeESRequest executes the Elasticsearch query (which may set or hit cache)
// and returns the search results.
func timeESRequest(e *State, T miniprofiler.Timer, req *ElasticRequest) (resp *elastic.SearchResult, err error) {
	e.elasticQueries = append(e.elasticQueries, *req.Source)
	var source interface{}
	source, err = req.Source.Source()
	if err != nil {
		return resp, fmt.Errorf("failed to get source of request while timing elastic request: %s", err)
	}
	b, err := json.MarshalIndent(source, "", " ")
	if err != nil {
		return resp, err
	}
	T.StepCustomTiming("elastic", "query", string(b), func() {
		getFn := func() (interface{}, error) {
			return e.elasticHosts.Query(req)
		}
		var val interface{}
		val, err = e.cache.Get(string(b), getFn)
		resp = val.(*elastic.SearchResult)
	})
	return
}
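Every time*Request helper above follows the same shape: serialize the request (or use a backend-specific key) as the cache key, wrap the backend call in a getFn closure, hand both to a cache, and type-assert the cached value back out. The following is a minimal, self-contained sketch of that pattern only; the memoCache type here is a hypothetical stand-in, not Bosun's cache implementation:

package main

import (
	"fmt"
	"sync"
)

// memoCache memoizes the result of the first successful call per key.
type memoCache struct {
	mu   sync.Mutex
	vals map[string]interface{}
}

func newMemoCache() *memoCache {
	return &memoCache{vals: make(map[string]interface{})}
}

// Get returns the cached value for key, or calls getFn, stores the result,
// and returns it. Errors are not cached.
func (c *memoCache) Get(key string, getFn func() (interface{}, error)) (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.vals[key]; ok {
		return v, nil
	}
	v, err := getFn()
	if err != nil {
		return nil, err
	}
	c.vals[key] = v
	return v, nil
}

func main() {
	cache := newMemoCache()
	calls := 0
	query := func() (interface{}, error) {
		calls++
		return []float64{1, 2, 3}, nil // stand-in for an expensive backend query
	}
	for i := 0; i < 3; i++ {
		val, err := cache.Get("same-key", query)
		if err != nil {
			panic(err)
		}
		rows := val.([]float64) // type-assert the cached value, as the helpers above do
		fmt.Println(rows, "backend calls:", calls)
	}
}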
// Graph takes an OpenTSDB request data structure and queries OpenTSDB. Use the
// json parameter to pass JSON. Use the b64 parameter to pass base64-encoded
// JSON.
func Graph(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	// Accept the request either as raw JSON or as base64-encoded JSON.
	j := []byte(r.FormValue("json"))
	if bs := r.FormValue("b64"); bs != "" {
		b, err := base64.StdEncoding.DecodeString(bs)
		if err != nil {
			return nil, err
		}
		j = b
	}
	if len(j) == 0 {
		return nil, fmt.Errorf("either json or b64 required")
	}
	oreq, err := opentsdb.RequestFromJSON(j)
	if err != nil {
		return nil, err
	}
	if ads_v := r.FormValue("autods"); ads_v != "" {
		ads_i, err := strconv.Atoi(ads_v)
		if err != nil {
			return nil, err
		}
		if err := oreq.AutoDownsample(ads_i); err != nil {
			return nil, err
		}
	}
	// Queries whose index appears in the autorate parameter get their rate
	// settings derived from metric metadata below.
	ar := make(map[int]bool)
	for _, v := range r.Form["autorate"] {
		if i, err := strconv.Atoi(v); err == nil {
			ar[i] = true
		}
	}
	queries := make([]string, len(oreq.Queries))
	var start, end string
	if s, ok := oreq.Start.(string); ok && strings.Contains(s, "-ago") {
		start = strings.TrimSuffix(s, "-ago")
	}
	if s, ok := oreq.End.(string); ok && strings.Contains(s, "-ago") {
		end = strings.TrimSuffix(s, "-ago")
	}
	if start == "" && end == "" {
		s, sok := oreq.Start.(int64)
		e, eok := oreq.End.(int64)
		if sok && eok {
			start = fmt.Sprintf("%vs", e-s)
		}
	}
	m_units := make(map[string]string)
	for i, q := range oreq.Queries {
		if ar[i] {
			meta, err := schedule.MetadataMetrics(q.Metric)
			if err != nil {
				return nil, err
			}
			if meta == nil {
				return nil, fmt.Errorf("no metadata for %s: cannot use auto rate", q)
			}
			if meta.Unit != "" {
				m_units[q.Metric] = meta.Unit
			}
			if meta.Rate != "" {
				switch meta.Rate {
				case metadata.Gauge:
					// ignore
				case metadata.Rate:
					q.Rate = true
				case metadata.Counter:
					q.Rate = true
					q.RateOptions = opentsdb.RateOptions{
						Counter:    true,
						ResetValue: 1,
					}
				default:
					return nil, fmt.Errorf("unknown metadata rate: %s", meta.Rate)
				}
			}
		}
		queries[i] = fmt.Sprintf(`q("%v", "%v", "%v")`, q, start, end)
		if !schedule.Conf.TSDBContext().Version().FilterSupport() {
			if err := schedule.Search.Expand(q); err != nil {
				return nil, err
			}
		}
	}
	var tr opentsdb.ResponseSet
	b, _ := json.MarshalIndent(oreq, "", " ")
	t.StepCustomTiming("tsdb", "query", string(b), func() {
		h := schedule.Conf.TSDBHost
		if h == "" {
			err = fmt.Errorf("tsdbHost not set")
			return
		}
		tr, err = oreq.Query(h)
	})
	if err != nil {
		return nil, err
	}
	cs, err := makeChart(tr, m_units)
	if err != nil {
		return nil, err
	}
	// With the png parameter present, render an SVG scatter chart directly;
	// otherwise return the series data for the client to chart.
	if _, present := r.Form["png"]; present {
		c := chart.ScatterChart{
			Title: fmt.Sprintf("%v - %v", oreq.Start, queries),
		}
		c.XRange.Time = true
		if min, err := strconv.ParseFloat(r.FormValue("min"), 64); err == nil {
			c.YRange.MinMode.Fixed = true
			c.YRange.MinMode.Value = min
		}
		if max, err := strconv.ParseFloat(r.FormValue("max"), 64); err == nil {
			c.YRange.MaxMode.Fixed = true
			c.YRange.MaxMode.Value = max
		}
		for ri, r := range cs {
			pts := make([]chart.EPoint, len(r.Data))
			for idx, v := range r.Data {
				pts[idx].X = v[0]
				pts[idx].Y = v[1]
			}
			slice.Sort(pts, func(i, j int) bool {
				return pts[i].X < pts[j].X
			})
			c.AddData(r.Name, pts, chart.PlotStyleLinesPoints, sched.Autostyle(ri))
		}
		w.Header().Set("Content-Type", "image/svg+xml")
		white := color.RGBA{0xff, 0xff, 0xff, 0xff}
		const width = 800
		const height = 600
		s := svg.New(w)
		s.Start(width, height)
		s.Rect(0, 0, width, height, "fill: #ffffff")
		sgr := svgg.AddTo(s, 0, 0, width, height, "", 12, white)
		c.Plot(sgr)
		s.End()
		return nil, nil
	}
	return struct {
		Queries []string
		Series  []*chartSeries
	}{
		queries,
		cs,
	}, nil
}
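A client-side sketch, for illustration only: building the b64 form parameter that Graph accepts. Graph decodes that parameter with base64.StdEncoding before parsing it as an OpenTSDB request, so the client encodes its request JSON the same way. The request JSON fields and the "/api/graph" route below are illustrative assumptions, not definitions taken from the handler above:

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

func main() {
	// An OpenTSDB request as JSON; the exact fields here are illustrative only.
	reqJSON := `{"start":"1h-ago","queries":[{"metric":"os.cpu","aggregator":"avg"}]}`

	// Encode the JSON with StdEncoding, matching the handler's decoder, and
	// pass it as the b64 form value.
	v := url.Values{}
	v.Set("b64", base64.StdEncoding.EncodeToString([]byte(reqJSON)))
	v.Set("autods", "500") // optional: ask the server to auto-downsample

	fmt.Println("/api/graph?" + v.Encode()) // hypothetical route; adjust to the real endpoint
}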