// findHandler services graphite "find" requests: it rewrites the query to ask
// the backends for protobuf, fans the request out, merges the responses, and
// replies in protobuf, JSON, or pickle depending on the requested format.
func findHandler(w http.ResponseWriter, req *http.Request) {

	logger.Debugln("request: ", req.URL.RequestURI())

	Metrics.FindRequests.Add(1)

	// rewrite the incoming query so the backends always answer in protobuf
	rewrite, _ := url.ParseRequestURI(req.URL.RequestURI())
	v := rewrite.Query()
	format := req.FormValue("format")
	v.Set("format", "protobuf")
	rewrite.RawQuery = v.Encode()

	query := req.FormValue("query")
	var tld string
	if i := strings.IndexByte(query, '.'); i > 0 {
		tld = query[:i]
	}

	// lookup tld in our map of where they live to reduce the set of
	// servers we bug with our find
	var backends []string
	var ok bool
	if backends, ok = Config.pathCache.get(tld); !ok || backends == nil || len(backends) == 0 {
		backends = Config.Backends
	}

	responses := multiGet(backends, rewrite.RequestURI())

	if responses == nil || len(responses) == 0 {
		logger.Logln("find: error querying backends for: ", rewrite.RequestURI())
		http.Error(w, "find: error querying backends", http.StatusInternalServerError)
		return
	}

	metrics, paths := findHandlerPB(w, req, responses)

	// update our cache of which servers have which metrics
	for k, v := range paths {
		Config.pathCache.set(k, v)
	}

	switch format {
	case "protobuf":
		w.Header().Set("Content-Type", "application/protobuf")
		var result pb.GlobResponse
		query := req.FormValue("query")
		result.Name = &query
		result.Matches = metrics
		b, _ := result.Marshal()
		w.Write(b)
	case "json":
		w.Header().Set("Content-Type", "application/json")
		jEnc := json.NewEncoder(w)
		jEnc.Encode(metrics)
	case "", "pickle":
		w.Header().Set("Content-Type", "application/pickle")

		var result []map[string]interface{}

		for _, metric := range metrics {
			mm := map[string]interface{}{
				"metric_path": *metric.Path,
				"isLeaf":      *metric.IsLeaf,
			}
			result = append(result, mm)
		}

		pEnc := pickle.NewEncoder(w)
		pEnc.Encode(result)
	}
}
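// The handler above narrows its fan-out by keying the backend list on the
// query's top-level component (the "tld"). The pathCache it consults is not
// defined in this section; judging only from the call sites above (get
// returns ([]string, bool), set takes a key and a backend list), a compatible
// structure might look roughly like the sketch below. This is an illustrative
// assumption, not the project's actual cache type, which may also carry
// expiry or size limits:
//
//	type backendCache struct {
//		sync.RWMutex
//		m map[string][]string
//	}
//
//	func (c *backendCache) get(k string) ([]string, bool) {
//		c.RLock()
//		defer c.RUnlock()
//		v, ok := c.m[k]
//		return v, ok
//	}
//
//	func (c *backendCache) set(k string, v []string) {
//		c.Lock()
//		defer c.Unlock()
//		c.m[k] = v
//	}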
// renderHandler services graphite "render" requests: it parses the targets,
// resolves and fetches the underlying metrics through the zipper (with find-
// and query-level caching), evaluates each target expression, and marshals
// the results in the requested format.
func renderHandler(w http.ResponseWriter, r *http.Request, stats *renderStats) {

	Metrics.Requests.Add(1)

	err := r.ParseForm()
	if err != nil {
		http.Error(w, http.StatusText(http.StatusBadRequest)+": "+err.Error(), http.StatusBadRequest)
		return
	}

	targets := r.Form["target"]
	from := r.FormValue("from")
	until := r.FormValue("until")
	format := r.FormValue("format")
	useCache := !expr.TruthyBool(r.FormValue("noCache"))

	var jsonp string

	if format == "json" {
		// TODO(dgryski): check jsonp only has valid characters
		jsonp = r.FormValue("jsonp")
	}

	if format == "" && (expr.TruthyBool(r.FormValue("rawData")) || expr.TruthyBool(r.FormValue("rawdata"))) {
		format = "raw"
	}

	if format == "" {
		format = "png"
	}

	cacheTimeout := int32(60)

	if tstr := r.FormValue("cacheTimeout"); tstr != "" {
		t, err := strconv.Atoi(tstr)
		if err != nil {
			logger.Logf("failed to parse cacheTimeout: %v: %v", tstr, err)
		} else {
			cacheTimeout = int32(t)
		}
	}

	// make sure the cache key doesn't say noCache, because it will never hit
	r.Form.Del("noCache")

	// jsonp callback names are frequently autogenerated and hurt our cache
	r.Form.Del("jsonp")

	// Strip some cache-busters.  If you don't want to cache, use noCache=1
	r.Form.Del("_salt")
	r.Form.Del("_ts")
	r.Form.Del("_t") // Used by jquery.graphite.js

	cacheKey := r.Form.Encode()

	if response, ok := queryCache.get(cacheKey); useCache && ok {
		Metrics.RequestCacheHits.Add(1)
		writeResponse(w, response, format, jsonp)
		return
	}

	// normalize from and until values
	// BUG(dgryski): doesn't handle timezones the same as graphite-web
	from32 := dateParamToEpoch(from, timeNow().Add(-24*time.Hour).Unix())
	until32 := dateParamToEpoch(until, timeNow().Unix())
	if from32 == until32 {
		http.Error(w, "Invalid empty time range", http.StatusBadRequest)
		return
	}

	var results []*expr.MetricData
	var errors []string
	metricMap := make(map[expr.MetricRequest][]*expr.MetricData)

	for _, target := range targets {

		exp, e, err := expr.ParseExpr(target)
		if err != nil || e != "" {
			msg := buildParseErrorString(target, e, err)
			http.Error(w, msg, http.StatusBadRequest)
			return
		}

		for _, m := range exp.Metrics() {

			mfetch := m
			mfetch.From += from32
			mfetch.Until += until32

			if _, ok := metricMap[mfetch]; ok {
				// already fetched this metric for this request
				continue
			}

			var glob pb.GlobResponse
			var haveCacheData bool

			if response, ok := findCache.get(m.Metric); useCache && ok {
				Metrics.FindCacheHits.Add(1)
				err := glob.Unmarshal(response)
				haveCacheData = err == nil
			}

			if !haveCacheData {
				var err error
				Metrics.FindRequests.Add(1)
				stats.zipperRequests++
				glob, err = Zipper.Find(m.Metric)
				if err != nil {
					logger.Logf("Find: %v: %v", m.Metric, err)
					continue
				}
				b, err := glob.Marshal()
				if err == nil {
					findCache.set(m.Metric, b, 5*60)
				}
			}

			// For each metric returned in the Find response, query Render
			// This is a conscious decision to *not* cache render data
			rch := make(chan *expr.MetricData, len(glob.GetMatches()))
			leaves := 0
			for _, m := range glob.GetMatches() {
				if !m.GetIsLeaf() {
					continue
				}
				Metrics.RenderRequests.Add(1)
				leaves++
				Limiter.enter()
				stats.zipperRequests++
				go func(m *pb.GlobMatch, from, until int32) {
					var rptr *expr.MetricData
					r, err := Zipper.Render(m.GetPath(), from, until)
					if err == nil {
						rptr = &r
					} else {
						logger.Logf("Render: %v: %v", m.GetPath(), err)
					}
					rch <- rptr
					Limiter.leave()
				}(m, mfetch.From, mfetch.Until)
			}

			// collect one response per leaf we asked for
			for i := 0; i < leaves; i++ {
				r := <-rch
				if r != nil {
					metricMap[mfetch] = append(metricMap[mfetch], r)
				}
			}

			expr.SortMetrics(metricMap[mfetch], mfetch)
		}

		// evaluate the expression, recovering from any panic in the evaluator
		func() {
			defer func() {
				if r := recover(); r != nil {
					var buf [1024]byte
					runtime.Stack(buf[:], false)
					logger.Logf("panic during eval: %s: %s\n%s\n", cacheKey, r, string(buf[:]))
				}
			}()
			exprs, err := expr.EvalExpr(exp, from32, until32, metricMap)
			if err != nil && err != expr.ErrSeriesDoesNotExist {
				errors = append(errors, target+": "+err.Error())
				return
			}
			results = append(results, exprs...)
		}()
	}

	if len(errors) > 0 {
		errors = append([]string{"Encountered the following errors:"}, errors...)
		http.Error(w, strings.Join(errors, "\n"), http.StatusBadRequest)
		return
	}

	var body []byte

	switch format {
	case "json":
		if maxDataPoints, _ := strconv.Atoi(r.FormValue("maxDataPoints")); maxDataPoints != 0 {
			expr.ConsolidateJSON(maxDataPoints, results)
		}
		body = expr.MarshalJSON(results)
	case "protobuf":
		body, err = expr.MarshalProtobuf(results)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	case "raw":
		body = expr.MarshalRaw(results)
	case "csv":
		body = expr.MarshalCSV(results)
	case "pickle":
		body = expr.MarshalPickle(results)
	case "png":
		body = expr.MarshalPNG(r, results)
	case "svg":
		body = expr.MarshalSVG(r, results)
	}

	writeResponse(w, body, format, jsonp)

	if len(results) != 0 {
		queryCache.set(cacheKey, body, cacheTimeout)
	}
}
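// Both handlers are ordinary net/http handlers (renderHandler just takes an
// extra *renderStats argument), so wiring them into a server is a small
// amount of glue. A minimal sketch, assuming the package-level logger,
// Config, Zipper, Limiter, and caches have already been initialized
// elsewhere; the route paths and listen address here are illustrative
// assumptions, not taken from this section:
//
//	http.HandleFunc("/metrics/find/", findHandler)
//	http.HandleFunc("/render/", func(w http.ResponseWriter, r *http.Request) {
//		var stats renderStats
//		renderHandler(w, r, &stats)
//	})
//	log.Fatal(http.ListenAndServe(":8080", nil))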