Example #1
func findHandlerPB(w http.ResponseWriter, req *http.Request, responses []serverResponse) ([]*pb.GlobMatch, map[string][]string) {

	// metric -> [server1, ... ]
	paths := make(map[string][]string)
	seen := make(map[nameleaf]bool)

	var metrics []*pb.GlobMatch
	for _, r := range responses {
		var metric pb.GlobResponse
		err := metric.Unmarshal(r.response)
		if err != nil {
			// skip this backend's response entirely if it fails to decode
			if req != nil {
				logger.Logf("error decoding protobuf response from server:%s: req:%s: err=%s", r.server, req.URL.RequestURI(), err)
				logger.Traceln("\n" + hex.Dump(r.response))
			}
			Metrics.FindErrors.Add(1)
			continue
		}

		for _, match := range metric.Matches {
			n := nameleaf{*match.Path, *match.IsLeaf}
			_, ok := seen[n]
			if !ok {
				// we haven't seen this name yet
				// add the metric to the list of metrics to return
				metrics = append(metrics, match)
				seen[n] = true
			}
			// add the server to the list of servers that know about this metric
			p := paths[*match.Path]
			p = append(p, r.server)
			paths[*match.Path] = p
		}
	}

	return metrics, paths
}
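findHandlerPB relies on two helper types that the listing does not include. A minimal sketch of what they could look like, inferred from how r.server, r.response, and nameleaf are used above; the actual definitions may differ:

// Hypothetical definitions inferred from usage in findHandlerPB; field names
// and layout are assumptions, not the original source.
type serverResponse struct {
	server   string // backend that produced this reply
	response []byte // raw protobuf body returned by that backend
}

type nameleaf struct {
	name string // metric path
	leaf bool   // whether the path is a leaf node
}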
Example #2
func findCompleter(globs pb.GlobResponse) ([]byte, error) {
	var b bytes.Buffer

	var complete = make([]completer, 0)

	for _, g := range globs.GetMatches() {
		c := completer{
			Path: g.GetPath(),
		}

		if g.GetIsLeaf() {
			c.IsLeaf = "1"
		} else {
			c.IsLeaf = "0"
		}

		i := strings.LastIndex(c.Path, ".")

		if i != -1 {
			c.Name = c.Path[i+1:]
		}

		complete = append(complete, c)
	}

	err := json.NewEncoder(&b).Encode(struct {
		Metrics []completer `json:"metrics"`
	}{
		Metrics: complete},
	)
	return b.Bytes(), err
}
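findCompleter fills a completer struct that is not part of the listing. A plausible sketch, assuming JSON field tags along the lines of graphite-web's completer output; the real tags and field order may differ:

// Assumed shape of the completer struct used above; the JSON tags are guesses.
type completer struct {
	Path   string `json:"path"`
	Name   string `json:"name"`
	IsLeaf string `json:"is_leaf"`
}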
Example #3
func findList(globs pb.GlobResponse) ([]byte, error) {
	var b bytes.Buffer

	for _, g := range globs.GetMatches() {

		var dot string
		// make sure non-leaves end in one dot
		if !g.GetIsLeaf() && !strings.HasSuffix(g.GetPath(), ".") {
			dot = "."
		}

		fmt.Fprintln(&b, g.GetPath()+dot)
	}

	return b.Bytes(), nil
}
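As a usage sketch (assuming the same pb and proto packages the other listings use, plus fmt), the "list" format prints one path per line and appends a dot to non-leaf entries:

// Hypothetical call site; proto.String and proto.Bool are the same helpers
// the listings above already use, and the metric names are made up.
func exampleFindList() {
	resp := pb.GlobResponse{
		Matches: []*pb.GlobMatch{
			{Path: proto.String("servers.web01"), IsLeaf: proto.Bool(false)},
			{Path: proto.String("servers.web01.cpu"), IsLeaf: proto.Bool(true)},
		},
	}
	out, _ := findList(resp)
	fmt.Print(string(out))
	// prints, under these assumptions:
	// servers.web01.
	// servers.web01.cpu
}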
Example #4
func findTreejson(globs pb.GlobResponse) ([]byte, error) {
	var b bytes.Buffer

	var tree = make([]treejson, 0)

	seen := make(map[string]struct{})

	basepath := globs.GetName()

	if i := strings.LastIndex(basepath, "."); i != -1 {
		basepath = basepath[:i+1]
	}

	for _, g := range globs.GetMatches() {

		name := g.GetPath()

		if i := strings.LastIndex(name, "."); i != -1 {
			name = name[i+1:]
		}

		if _, ok := seen[name]; ok {
			continue
		}
		seen[name] = struct{}{}

		t := treejson{
			ID:      basepath + name,
			Context: treejsonContext,
			Text:    name,
		}

		if g.GetIsLeaf() {
			t.Leaf = 1
		} else {
			t.AllowChildren = 1
			t.Expandable = 1
		}

		tree = append(tree, t)
	}

	err := json.NewEncoder(&b).Encode(tree)
	return b.Bytes(), err
}
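The treejson struct and treejsonContext value are assumed by the listing above. A sketch of plausible definitions based on the fields it sets; the JSON tags and the context type are assumptions:

// Assumed definitions; the original source may name or tag these differently.
type treejson struct {
	AllowChildren int            `json:"allowChildren"`
	Expandable    int            `json:"expandable"`
	Leaf          int            `json:"leaf"`
	ID            string         `json:"id"`
	Text          string         `json:"text"`
	Context       map[string]int `json:"context"`
}

var treejsonContext = make(map[string]int)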
Example #5
func findHandler(w http.ResponseWriter, req *http.Request) {

	logger.Debugln("request: ", req.URL.RequestURI())

	Metrics.FindRequests.Add(1)

	rewrite, _ := url.ParseRequestURI(req.URL.RequestURI())
	v := rewrite.Query()
	format := req.FormValue("format")
	v.Set("format", "protobuf")
	rewrite.RawQuery = v.Encode()

	query := req.FormValue("query")
	var tld string
	if i := strings.IndexByte(query, '.'); i > 0 {
		tld = query[:i]
	}
	// lookup tld in our map of where they live to reduce the set of
	// servers we bug with our find
	Config.mu.RLock()
	var backends []string
	var ok bool
	if backends, ok = Config.metricPaths[tld]; !ok || len(backends) == 0 {
		backends = Config.Backends
	}
	Config.mu.RUnlock()

	responses := multiGet(backends, rewrite.RequestURI())

	if len(responses) == 0 {
		logger.Logln("find: error querying backends for: ", rewrite.RequestURI())
		http.Error(w, "find: error querying backends", http.StatusInternalServerError)
		return
	}

	metrics, paths := findHandlerPB(w, req, responses)

	// update our cache of which servers have which metrics
	Config.mu.Lock()
	for k, v := range paths {
		Config.metricPaths[k] = v
	}
	Config.mu.Unlock()

	switch format {
	case "protobuf":
		w.Header().Set("Content-Type", "application/protobuf")
		var result pb.GlobResponse
		query := req.FormValue("query")
		result.Name = &query
		result.Matches = metrics
		b, _ := proto.Marshal(&result)
		w.Write(b)
	case "json":
		w.Header().Set("Content-Type", "application/json")
		jEnc := json.NewEncoder(w)
		jEnc.Encode(metrics)
	case "", "pickle":
		w.Header().Set("Content-Type", "application/pickle")

		var result []map[string]interface{}

		for _, metric := range metrics {
			mm := map[string]interface{}{
				"metric_path": *metric.Path,
				"isLeaf":      *metric.IsLeaf,
			}
			result = append(result, mm)
		}

		pEnc := pickle.NewEncoder(w)
		pEnc.Encode(result)
	}
}
Example #6
func findHandler(wr http.ResponseWriter, req *http.Request) {
	// URL: /metrics/find/?local=1&format=pickle&query=the.metric.path.with.glob

	t0 := time.Now()

	Metrics.FindRequests.Add(1)

	req.ParseForm()
	format := req.FormValue("format")
	query := req.FormValue("query")

	if format != "json" && format != "pickle" && format != "protobuf" {
		Metrics.FindErrors.Add(1)
		logger.Logf("dropping invalid uri (format=%s): %s",
			format, req.URL.RequestURI())
		http.Error(wr, "Bad request (unsupported format)",
			http.StatusBadRequest)
		return
	}

	if query == "" {
		Metrics.FindErrors.Add(1)
		logger.Logf("dropping invalid request (query=): %s", req.URL.RequestURI())
		http.Error(wr, "Bad request (no query)", http.StatusBadRequest)
		return
	}

	files, leafs := expandGlobs(query)

	if format == "json" || format == "protobuf" {
		name := req.FormValue("query")
		response := pb.GlobResponse{
			Name:    &name,
			Matches: make([]*pb.GlobMatch, 0),
		}

		for i, p := range files {
			response.Matches = append(response.Matches, &pb.GlobMatch{Path: proto.String(p), IsLeaf: proto.Bool(leafs[i])})
		}

		var b []byte
		var err error
		switch format {
		case "json":
			b, err = json.Marshal(response)
		case "protobuf":
			b, err = proto.Marshal(&response)
		}
		if err != nil {
			Metrics.FindErrors.Add(1)
			logger.Logf("failed to create %s data for glob %s: %s",
				format, *response.Name, err)
			return
		}
		wr.Write(b)
	} else if format == "pickle" {
		// [{'metric_path': 'metric', 'intervals': [(x,y)], 'isLeaf': True},]
		var metrics []map[string]interface{}
		var m map[string]interface{}

		for i, p := range files {
			m = make(map[string]interface{})
			m["metric_path"] = p
			// m["intervals"] = dunno how to do a tuple here
			m["isLeaf"] = leafs[i]
			metrics = append(metrics, m)
		}

		wr.Header().Set("Content-Type", "application/pickle")
		pEnc := pickle.NewEncoder(wr)
		pEnc.Encode(metrics)
	}

	if len(files) == 0 {
		// to get an idea how often we search for nothing
		Metrics.FindZero.Add(1)
	}

	logger.Debugf("find: %d hits for %s in %v", len(files), req.FormValue("query"), time.Since(t0))
}
Example #7
func renderHandler(w http.ResponseWriter, r *http.Request, stats *renderStats) {

	Metrics.Requests.Add(1)

	err := r.ParseForm()
	if err != nil {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	targets := r.Form["target"]
	from := r.FormValue("from")
	until := r.FormValue("until")
	format := r.FormValue("format")
	useCache := !truthyBool(r.FormValue("noCache"))

	var jsonp string

	if format == "json" {
		// TODO(dgryski): check jsonp only has valid characters
		jsonp = r.FormValue("jsonp")
	}

	if format == "" && (truthyBool(r.FormValue("rawData")) || truthyBool(r.FormValue("rawdata"))) {
		format = "raw"
	}

	if format == "" {
		format = "png"
	}

	cacheTimeout := int32(60)

	if tstr := r.FormValue("cacheTimeout"); tstr != "" {
		t, err := strconv.Atoi(tstr)
		if err != nil {
			log.Printf("failed to parse cacheTimeout: %v: %v", tstr, err)
		} else {
			cacheTimeout = int32(t)
		}
	}

	// make sure the cache key doesn't say noCache, because it will never hit
	r.Form.Del("noCache")

	// jsonp callback names are frequently autogenerated and hurt our cache
	r.Form.Del("jsonp")

	// Strip some cache-busters.  If you don't want to cache, use noCache=1
	r.Form.Del("_salt")
	r.Form.Del("_ts")
	r.Form.Del("_t") // Used by jquery.graphite.js

	cacheKey := r.Form.Encode()

	if response, ok := queryCache.get(cacheKey); useCache && ok {
		Metrics.RequestCacheHits.Add(1)
		writeResponse(w, response, format, jsonp)
		return
	}

	// normalize from and until values
	// BUG(dgryski): doesn't handle timezones the same as graphite-web
	from32 := dateParamToEpoch(from, timeNow().Add(-24*time.Hour).Unix())
	until32 := dateParamToEpoch(until, timeNow().Unix())

	var results []*metricData
	metricMap := make(map[metricRequest][]*metricData)

	for _, target := range targets {

		exp, e, err := parseExpr(target)
		if err != nil || e != "" {
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}

		for _, m := range exp.metrics() {

			mfetch := m
			mfetch.from += from32
			mfetch.until += until32

			if _, ok := metricMap[mfetch]; ok {
				// already fetched this metric for this request
				continue
			}

			var glob pb.GlobResponse
			var haveCacheData bool

			if response, ok := findCache.get(m.metric); useCache && ok {
				Metrics.FindCacheHits.Add(1)
				err := proto.Unmarshal(response, &glob)
				haveCacheData = err == nil
			}

			if !haveCacheData {
				var err error
				Metrics.FindRequests.Add(1)
				stats.zipperRequests++
				glob, err = Zipper.Find(m.metric)
				if err != nil {
					log.Printf("Find: %v: %v", m.metric, err)
					continue
				}
				b, err := proto.Marshal(&glob)
				if err == nil {
					findCache.set(m.metric, b, 5*60)
				}
			}

			// For each metric returned in the Find response, query Render
			// This is a conscious decision to *not* cache render data
			rch := make(chan *metricData, len(glob.GetMatches()))
			leaves := 0
			for _, m := range glob.GetMatches() {
				if !m.GetIsLeaf() {
					continue
				}
				Metrics.RenderRequests.Add(1)
				leaves++
				Limiter.enter()
				stats.zipperRequests++
				go func(m *pb.GlobMatch, from, until int32) {
					var rptr *metricData
					r, err := Zipper.Render(m.GetPath(), from, until)
					if err == nil {
						rptr = &r
					} else {
						log.Printf("Render: %v: %v", m.GetPath(), err)
					}
					rch <- rptr
					Limiter.leave()
				}(m, mfetch.from, mfetch.until)
			}

			for i := 0; i < leaves; i++ {
				r := <-rch
				if r != nil {
					metricMap[mfetch] = append(metricMap[mfetch], r)
				}
			}
		}

		func() {
			defer func() {
				if r := recover(); r != nil {
					var buf [1024]byte
					runtime.Stack(buf[:], false)
					log.Printf("panic during eval: %s: %s\n%s\n", cacheKey, r, string(buf[:]))
				}
			}()
			exprs := evalExpr(exp, from32, until32, metricMap)
			results = append(results, exprs...)
		}()
	}

	var body []byte

	switch format {
	case "json":
		body = marshalJSON(results)
	case "protobuf":
		body = marshalProtobuf(results)
	case "raw":
		body = marshalRaw(results)
	case "pickle":
		body = marshalPickle(results)
	case "png":
		body = marshalPNG(r, results)
	}

	writeResponse(w, body, format, jsonp)

	if len(results) != 0 {
		queryCache.set(cacheKey, body, cacheTimeout)
	}
}
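renderHandler depends on a small truthyBool helper that is not shown. A minimal sketch, assuming the common falsy form-value spellings; the real implementation may accept a different set:

// Hypothetical helper; the exact accepted spellings are an assumption.
func truthyBool(s string) bool {
	switch s {
	case "", "0", "false", "False", "no", "No":
		return false
	}
	return true
}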