func timeTSDBRequest(e *State, T miniprofiler.Timer, req *opentsdb.Request) (s opentsdb.ResponseSet, err error) { e.tsdbQueries = append(e.tsdbQueries, *req) if e.autods > 0 { for _, q := range req.Queries { if q.Downsample == "" { if err := req.AutoDownsample(e.autods); err != nil { return nil, err } } } } b, _ := json.MarshalIndent(req, "", " ") tries := 1 for { T.StepCustomTiming("tsdb", "query", string(b), func() { getFn := func() (interface{}, error) { return e.tsdbContext.Query(req) } var val interface{} val, err = e.cache.Get(string(b), getFn) s = val.(opentsdb.ResponseSet).Copy() }) if err == nil || tries == tsdbMaxTries { break } slog.Errorf("Error on tsdb query %d: %s", tries, err.Error()) tries++ } return }
// timeESRequest execute the elasticsearch query (which may set or hit cache) and returns // the search results. func timeESRequest(e *State, T miniprofiler.Timer, req *ElasticRequest) (resp *elastic.SearchResult, err error) { e.elasticQueries = append(e.elasticQueries, *req.Source) var source interface{} source, err = req.Source.Source() if err != nil { return resp, fmt.Errorf("failed to get source of request while timing elastic request: %s", err) } b, err := json.MarshalIndent(source, "", " ") if err != nil { return resp, err } key, err := req.CacheKey() if err != nil { return nil, err } T.StepCustomTiming("elastic", "query", fmt.Sprintf("%v\n%s", req.Indices, b), func() { getFn := func() (interface{}, error) { return e.ElasticHosts.Query(req) } var val interface{} val, err = e.Cache.Get(key, getFn) resp = val.(*elastic.SearchResult) }) return }
// timeLSRequest execute the elasticsearch query (which may set or hit cache) and returns // the search results. func timeLSRequest(e *State, T miniprofiler.Timer, req *LogstashRequest) (resp *elastic.SearchResult, err error) { e.logstashQueries = append(e.logstashQueries, *req.Source) b, _ := json.MarshalIndent(req.Source.Source(), "", " ") T.StepCustomTiming("logstash", "query", string(b), func() { getFn := func() (interface{}, error) { return e.LogstashHosts.Query(req) } var val interface{} val, err = e.Cache.Get(string(b), getFn) resp = val.(*elastic.SearchResult) }) return }
func timeGraphiteRequest(e *State, T miniprofiler.Timer, req *graphite.Request) (resp graphite.Response, err error) { e.graphiteQueries = append(e.graphiteQueries, *req) b, _ := json.MarshalIndent(req, "", " ") T.StepCustomTiming("graphite", "query", string(b), func() { key := req.CacheKey() getFn := func() (interface{}, error) { return e.graphiteContext.Query(req) } var val interface{} val, err = e.cache.Get(key, getFn) resp = val.(graphite.Response) }) return }
func timeInfluxRequest(e *State, T miniprofiler.Timer, db, query, startDuration, endDuration, groupByInterval string) (s []influxModels.Row, err error) { q, err := influxQueryDuration(e.now, query, startDuration, endDuration, groupByInterval) if err != nil { return nil, err } conn, err := client.NewHTTPClient(e.InfluxConfig) if err != nil { return nil, err } T.StepCustomTiming("influx", "query", q, func() { getFn := func() (interface{}, error) { res, err := conn.Query(client.Query{ Command: q, Database: db, }) if err != nil { return nil, err } if res.Error() != nil { return nil, res.Error() } if len(res.Results) != 1 { return nil, fmt.Errorf("influx: expected one result") } r := res.Results[0] if r.Err == "" { return r.Series, nil } err = fmt.Errorf(r.Err) return r.Series, err } var val interface{} var ok bool val, err = e.Cache.Get(q, getFn) if s, ok = val.([]influxModels.Row); !ok { err = fmt.Errorf("influx: did not get a valid result from InfluxDB") } }) return }
// Graph takes an OpenTSDB request data structure and queries OpenTSDB. Use the
// json parameter to pass JSON. Use the b64 parameter to pass base64-encoded
// JSON.
func Graph(t miniprofiler.Timer, w http.ResponseWriter, r *http.Request) (interface{}, error) {
	// Read the request JSON either directly from the "json" form value or
	// base64-decoded from "b64"; at least one must be supplied.
	j := []byte(r.FormValue("json"))
	if bs := r.FormValue("b64"); bs != "" {
		b, err := base64.StdEncoding.DecodeString(bs)
		if err != nil {
			return nil, err
		}
		j = b
	}
	if len(j) == 0 {
		return nil, fmt.Errorf("either json or b64 required")
	}
	oreq, err := opentsdb.RequestFromJSON(j)
	if err != nil {
		return nil, err
	}
	// Optional auto-downsampling: "autods" is a point count to downsample to.
	// NOTE(review): ads_v/ads_i use underscores, against Go naming convention.
	if ads_v := r.FormValue("autods"); ads_v != "" {
		ads_i, err := strconv.Atoi(ads_v)
		if err != nil {
			return nil, err
		}
		if err := oreq.AutoDownsample(ads_i); err != nil {
			return nil, err
		}
	}
	// ar marks (by query index) which sub-queries should use auto rate,
	// driven by repeated "autorate" form values; bad values are ignored.
	ar := make(map[int]bool)
	for _, v := range r.Form["autorate"] {
		if i, err := strconv.Atoi(v); err == nil {
			ar[i] = true
		}
	}
	queries := make([]string, len(oreq.Queries))
	var start, end string
	var startT, endT time.Time
	// Derive textual start/end (expression syntax, "-ago" suffix stripped)
	// and concrete times for the annotation window below.
	if s, ok := oreq.Start.(string); ok && strings.Contains(s, "-ago") {
		startT, err = opentsdb.ParseTime(s)
		if err != nil {
			return nil, err
		}
		start = strings.TrimSuffix(s, "-ago")
	}
	if s, ok := oreq.End.(string); ok && strings.Contains(s, "-ago") {
		endT, err = opentsdb.ParseTime(s)
		if err != nil {
			return nil, err
		}
		end = strings.TrimSuffix(s, "-ago")
	}
	// Fall back to absolute epoch-second start/end: express start as a
	// duration ("<e-s>s") relative to end.
	if start == "" && end == "" {
		s, sok := oreq.Start.(int64)
		e, eok := oreq.End.(int64)
		if sok && eok {
			start = fmt.Sprintf("%vs", e-s)
			startT = time.Unix(s, 0)
			endT = time.Unix(e, 0)
			// NOTE(review): err cannot have been set on this path; this
			// check looks like dead code left from a refactor.
			if err != nil {
				return nil, err
			}
		}
	}
	// NOTE(review): endT.IsZero() would be the idiomatic form of this check.
	if endT.Equal(time.Time{}) {
		endT = time.Now().UTC()
	}
	// m_units maps metric name -> unit string for chart labeling.
	m_units := make(map[string]string)
	for i, q := range oreq.Queries {
		if ar[i] {
			// Auto rate requires metadata to decide gauge/rate/counter.
			meta, err := schedule.MetadataMetrics(q.Metric)
			if err != nil {
				return nil, err
			}
			if meta == nil {
				return nil, fmt.Errorf("no metadata for %s: cannot use auto rate", q)
			}
			if meta.Unit != "" {
				m_units[q.Metric] = meta.Unit
			}
			if meta.Rate != "" {
				switch meta.Rate {
				case metadata.Gauge:
					// ignore
				case metadata.Rate:
					q.Rate = true
				case metadata.Counter:
					// Counters get rate conversion with counter-reset handling.
					q.Rate = true
					q.RateOptions = opentsdb.RateOptions{
						Counter:    true,
						ResetValue: 1,
					}
				default:
					return nil, fmt.Errorf("unknown metadata rate: %s", meta.Rate)
				}
			}
		}
		// Record an equivalent expression-language q() call for the response.
		queries[i] = fmt.Sprintf(`q("%v", "%v", "%v")`, q, start, end)
		// Pre-2.2 TSDBs lack filter support, so expand wildcard tags via the
		// search index instead.
		if !schedule.SystemConf.GetTSDBContext().Version().FilterSupport() {
			if err := schedule.Search.Expand(q); err != nil {
				return nil, err
			}
		}
	}
	var tr opentsdb.ResponseSet
	// Pretty-printed request is only the profiling label; marshal errors are
	// deliberately ignored.
	b, _ := json.MarshalIndent(oreq, "", " ")
	t.StepCustomTiming("tsdb", "query", string(b), func() {
		h := schedule.SystemConf.GetTSDBHost()
		if h == "" {
			err = fmt.Errorf("tsdbHost not set")
			return
		}
		tr, err = oreq.Query(h)
	})
	if err != nil {
		return nil, err
	}
	cs, err := makeChart(tr, m_units)
	if err != nil {
		return nil, err
	}
	// "png" form value requests a rendered chart instead of JSON data.
	// (Despite the name, the output written is SVG.)
	if _, present := r.Form["png"]; present {
		c := chart.ScatterChart{
			Title: fmt.Sprintf("%v - %v", oreq.Start, queries),
		}
		c.XRange.Time = true
		// Optional fixed Y-axis bounds from "min"/"max" form values.
		if min, err := strconv.ParseFloat(r.FormValue("min"), 64); err == nil {
			c.YRange.MinMode.Fixed = true
			c.YRange.MinMode.Value = min
		}
		if max, err := strconv.ParseFloat(r.FormValue("max"), 64); err == nil {
			c.YRange.MaxMode.Fixed = true
			c.YRange.MaxMode.Value = max
		}
		// NOTE(review): r here shadows the *http.Request parameter.
		for ri, r := range cs {
			pts := make([]chart.EPoint, len(r.Data))
			for idx, v := range r.Data {
				pts[idx].X = v[0]
				pts[idx].Y = v[1]
			}
			// Points must be sorted by time for line plotting.
			slice.Sort(pts, func(i, j int) bool {
				return pts[i].X < pts[j].X
			})
			c.AddData(r.Name, pts, chart.PlotStyleLinesPoints, sched.Autostyle(ri))
		}
		w.Header().Set("Content-Type", "image/svg+xml")
		white := color.RGBA{0xff, 0xff, 0xff, 0xff}
		const width = 800
		const height = 600
		s := svg.New(w)
		s.Start(width, height)
		s.Rect(0, 0, width, height, "fill: #ffffff")
		sgr := svgg.AddTo(s, 0, 0, width, height, "", 12, white)
		c.Plot(sgr)
		s.End()
		// Chart was written directly to w; no JSON body follows.
		return nil, nil
	}
	// JSON response path: optionally attach annotations over [startT, endT].
	// Annotation failures are reported as warnings, not errors.
	var a []annotate.Annotation
	warnings := []string{}
	if schedule.SystemConf.AnnotateEnabled() {
		a, err = annotateBackend.GetAnnotations(&startT, &endT)
		if err != nil {
			warnings = append(warnings, fmt.Sprintf("unable to get annotations: %v", err))
		}
	}
	return struct {
		Queries     []string
		Series      []*chartSeries
		Annotations []annotate.Annotation
		Warnings    []string
	}{
		queries,
		cs,
		a,
		warnings,
	}, nil
}
func (d DB) QueryRowTimer(t miniprofiler.Timer, query string, args ...interface{}) (row *sql.Row) { t.StepCustomTiming("sql", "query", query, func() { row = d.DB.QueryRow(query, args...) }) return }
func (d DB) ExecTimer(t miniprofiler.Timer, query string, args ...interface{}) (result sql.Result, err error) { t.StepCustomTiming("sql", "exec", query, func() { result, err = d.DB.Exec(query, args...) }) return }