func (id *Id) Delay(t time.Time) int64 {
	t0 := utils.RoundTime(id.Time, id.Resolution).Unix()
	t1 := utils.RoundTime(t, id.Resolution).Unix()
	base := id.Resolution / time.Second
	if base != 0 {
		return (t1 - t0) / int64(base)
	}
	return 0
}
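// A minimal, self-contained sketch of the arithmetic above, assuming
// utils.RoundTime truncates like time.Time.Truncate. The delay helper and
// sample times below are hypothetical stand-ins, not part of the source.
package main

import (
	"fmt"
	"time"
)

func delay(from, to time.Time, res time.Duration) int64 {
	t0 := from.Truncate(res).Unix()
	t1 := to.Truncate(res).Unix()
	if base := res / time.Second; base != 0 {
		return (t1 - t0) / int64(base)
	}
	return 0
}

func main() {
	t := time.Now()
	// Three minutes at 1-minute resolution is a delay of 3 steps.
	fmt.Println(delay(t, t.Add(3*time.Minute), time.Minute)) // 3
}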
func getBuckets(w http.ResponseWriter, r *http.Request) {
	defer utils.MeasureT(time.Now(), "get-buckets")
	if r.Method != "GET" {
		http.Error(w, "Invalid Request", 400)
		return
	}
	token, err := utils.ParseToken(r)
	if err != nil {
		errmsg := map[string]string{"error": "Missing authorization."}
		utils.WriteJson(w, 401, errmsg)
		return
	}
	q := r.URL.Query()
	limit, err := strconv.ParseInt(q.Get("limit"), 10, 32)
	if err != nil {
		errmsg := map[string]string{"error": "Missing limit parameter."}
		utils.WriteJson(w, 400, errmsg)
		return
	}
	max := utils.RoundTime(time.Now(), time.Minute)
	min := max.Add(-1 * time.Minute * time.Duration(limit))
	buckets, err := store.GetBuckets(token, min, max)
	if err != nil {
		errmsg := map[string]string{"error": "Unable to find buckets"}
		utils.WriteJson(w, 500, errmsg)
		return
	}
	utils.WriteJson(w, 200, buckets)
}
func NewBucket(token string, rdr *bufio.Reader) <-chan *Bucket {
	buckets := make(chan *Bucket, 1000)
	go func(c chan<- *Bucket) {
		defer close(c)
		defer utils.MeasureT("new-bucket", time.Now())
		lp := logplex.NewReader(rdr)
		for {
			packet, err := lp.ReadMsg()
			if err != nil {
				if err == io.EOF {
					break
				}
				fmt.Printf("at=logplex-error err=%s\n", err)
				return
			}
			d, err := encoding.ParseMsgData(packet.Msg)
			if err != nil {
				continue
			}
			name, ok := d["measure"]
			if !ok {
				continue
			}
			source, ok := d["source"]
			if !ok {
				source = ""
			}
			var val float64
			tmpVal, ok := d["val"]
			if ok {
				val, err = strconv.ParseFloat(tmpVal, 64)
				if err != nil {
					fmt.Printf("at=error error=\"unable to parse val.\"\n")
					continue
				}
			} else {
				val = float64(1)
			}
			t, err := packet.Time()
			if err != nil {
				fmt.Printf("at=time-error error=%s\n", err)
				continue
			}
			t = utils.RoundTime(t, time.Minute)
			k := BKey{Token: token, Name: name, Source: source, Time: t}
			b := &Bucket{Key: k}
			b.Vals = append(b.Vals, val)
			c <- b
		}
	}(buckets)
	return buckets
}
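// A sketch of how a caller might drain the channel returned above. Because
// the goroutine closes the channel on EOF, ranging over it terminates
// cleanly. The consume name is illustrative, not part of the source.
func consume(token string, rdr *bufio.Reader) {
	for b := range NewBucket(token, rdr) {
		// Each log line yields a single-value bucket; downstream code is
		// expected to merge buckets that share the same key.
		fmt.Printf("at=bucket name=%s vals=%d\n", b.Key.Name, len(b.Vals))
	}
}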
func NewBucket(token string, rdr *bufio.Reader) ([]*Bucket, error) {
	var buckets []*Bucket
	lp := logplex.NewReader(rdr)
	for {
		packet, err := lp.ReadMsg()
		if err != nil {
			if err == io.EOF {
				break
			}
			fmt.Printf("at=logplex-error err=%s\n", err)
			return nil, err
		}
		d, err := encoding.ParseMsgData(packet.Msg)
		if err != nil {
			continue
		}
		name, ok := d["measure"]
		if !ok {
			continue
		}
		source, ok := d["source"]
		if !ok {
			source = ""
		}
		var val float64
		tmpVal, ok := d["val"]
		if ok {
			val, err = strconv.ParseFloat(tmpVal, 64)
			if err != nil {
				fmt.Printf("at=error error=\"unable to parse val.\"\n")
				continue
			}
		} else {
			val = float64(1)
		}
		t, err := packet.Time()
		if err != nil {
			fmt.Printf("at=time-error error=%s\n", err)
			continue
		}
		m := &Bucket{}
		m.Token = token
		m.Time = utils.RoundTime(t, time.Minute)
		m.Name = name
		m.Source = source
		m.Vals = append(m.Vals, val)
		buckets = append(buckets, m)
	}
	utils.MeasureI("received-measurements", int64(len(buckets)))
	return buckets, nil
}
func getMetrics(w http.ResponseWriter, r *http.Request) {
	defer utils.MeasureT("http-metrics", time.Now())
	// Support CORS.
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "Authorization")
	w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
	if r.Method == "OPTIONS" {
		return
	}
	names := metricsPat.FindStringSubmatch(r.URL.Path)
	if len(names) < 2 {
		fmt.Printf("at=error error=%q\n", "Name parameter not provided.")
		errmsg := map[string]string{"error": "Name parameter not provided."}
		utils.WriteJson(w, 400, errmsg)
		return
	}
	name := names[1]
	token, err := utils.ParseToken(r)
	if err != nil {
		fmt.Printf("at=error error=%q\n", err)
		errmsg := map[string]string{"error": "Missing authorization."}
		utils.WriteJson(w, 401, errmsg)
		return
	}
	q := r.URL.Query()
	limit, err := strconv.ParseInt(q.Get("limit"), 10, 32)
	if err != nil {
		errmsg := map[string]string{"error": "Missing limit parameter."}
		utils.WriteJson(w, 400, errmsg)
		return
	}
	resolution, err := strconv.ParseInt(q.Get("resolution"), 10, 32)
	if err != nil {
		errmsg := map[string]string{"error": "Missing resolution parameter."}
		utils.WriteJson(w, 400, errmsg)
		return
	}
	max := utils.RoundTime(time.Now(), time.Minute*time.Duration(resolution))
	min := max.Add(-1 * time.Minute * time.Duration(limit*resolution))
	metrics, err := store.GetMetrics(token, name, resolution, min, max)
	if err != nil {
		errmsg := map[string]string{"error": "Unable to find metrics."}
		utils.WriteJson(w, 500, errmsg)
		return
	}
	utils.WriteJson(w, 200, metrics)
}
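// A worked example of the query window computed above, assuming
// utils.RoundTime truncates like time.Time.Truncate. With limit=10 and
// resolution=5, the window covers the trailing 50 minutes, aligned to
// 5-minute boundaries.
package main

import (
	"fmt"
	"time"
)

func main() {
	limit, resolution := int64(10), int64(5)
	max := time.Now().Truncate(time.Minute * time.Duration(resolution))
	min := max.Add(-1 * time.Minute * time.Duration(limit*resolution))
	fmt.Println(min, max) // 50 minutes apart
}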
func NewBucket(token string, rdr *bufio.Reader, bSize time.Duration) <-chan *Bucket {
	//TODO(ryandotsmith): Can we eliminate the magical number?
	buckets := make(chan *Bucket, 10000)
	go func(c chan<- *Bucket) {
		defer close(c)
		lp := logplex.NewReader(rdr)
		for {
			packet, err := lp.ReadMsg()
			if err != nil {
				if err == io.EOF {
					break
				}
				fmt.Printf("at=logplex-error err=%s\n", err)
				return
			}
			d, err := encoding.ParseMsgData(packet.Msg)
			if err != nil {
				continue
			}
			measure, ok := d["measure"]
			if !ok {
				continue
			}
			source, ok := d["source"]
			if !ok {
				source = ""
			}
			t, err := packet.Time()
			if err != nil {
				fmt.Printf("at=time-error error=%s\n", err)
				continue
			}
			t = utils.RoundTime(t, bSize)
			val := float64(1)
			tmpVal, present := d["val"]
			if present {
				v, err := strconv.ParseFloat(tmpVal, 64)
				if err == nil {
					val = v
				}
			}
			k := &Id{Token: token, Name: measure, Source: source, Time: t}
			b := &Bucket{Id: k}
			b.Vals = append(b.Vals, val)
			c <- b
		}
	}(buckets)
	return buckets
}
// Fetch should kick off the librato outlet process.
// Its responsibility is to get the ids of buckets for the current time,
// make empty Buckets, then place the buckets in an inbox to be filled
// (load the vals into the bucket) and processed.
func fetch(out chan<- *store.Bucket) {
	for _ = range time.Tick(time.Duration(*processInterval) * time.Second) {
		go func(out chan<- *store.Bucket) {
			startPoll := time.Now()
			max := utils.RoundTime(time.Now(), time.Minute)
			min := max.Add(-time.Minute)
			ids, err := allBucketIds(min, max)
			if err != nil {
				utils.MeasureE("find-failed", err)
				return
			}
			for i := range ids {
				b := store.Bucket{Id: ids[i]}
				out <- &b
			}
			utils.MeasureT(startPoll, "librato.fetch")
		}(out)
	}
}
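// A sketch of wiring fetch to a downstream worker. The buffer size and the
// startOutlet name are illustrative; fetch itself only fills the inbox.
func startOutlet() {
	inbox := make(chan *store.Bucket, 1000)
	go fetch(inbox)
	for b := range inbox {
		// Here the librato outlet would load b's vals from the store and
		// convert the bucket into librato metrics.
		_ = b
	}
}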
//TODO(ryandotsmith): NewBucket should be broken up. This func is too big.
func NewBucket(user, pass string, rdr *bufio.Reader, opts map[string][]string) <-chan *Bucket {
	//TODO(ryandotsmith): Can we eliminate the magical number?
	buckets := make(chan *Bucket, 10000)
	go func(c chan<- *Bucket) {
		defer close(c)
		lp := logplex.NewReader(rdr)
		for {
			logLine, err := lp.ReadMsg()
			if err != nil {
				if err == io.EOF {
					break
				}
				fmt.Printf("at=logplex-error err=%s\n", err)
				return
			}
			logData, err := encoding.ParseMsgData(logLine.Msg)
			if err != nil {
				continue
			}
			ts, err := logLine.Time()
			if err != nil {
				fmt.Printf("at=time-error error=%s\n", err)
				continue
			}
			//The resolution determines how long a bucket is
			//left to linger. E.g. a bucket with 1 second resolution
			//will hang around for 1 second and provide metrics
			//with 1 second resolution.
			resQuery, ok := opts["resolution"]
			if !ok {
				resQuery = []string{"60"}
			}
			resTmp, err := strconv.Atoi(resQuery[0])
			if err != nil {
				continue
			}
			res := time.Duration(time.Second * time.Duration(resTmp))
			ts = utils.RoundTime(ts, res)
			//Src can be overridden by the heroku router messages.
			src := logData["source"]
			//You can prefix all measurements by adding the
			//prefix option on your drain url.
			var prefix string
			if prefixQuery, ok := opts["prefix"]; ok {
				if len(prefixQuery[0]) > 0 {
					prefix = prefixQuery[0] + "."
				}
			}
			//Special case the Heroku router.
			//In this case, we will massage logData
			//to include connect, service, and bytes.
			if string(logLine.Pid) == "router" {
				p := "measure.router."
				if len(logData["host"]) > 0 {
					src = logData["host"]
				}
				if len(logData["connect"]) > 0 {
					logData[p+"connect"] = logData["connect"]
				}
				if len(logData["service"]) > 0 {
					logData[p+"service"] = logData["service"]
				}
				if len(logData["bytes"]) > 0 {
					logData[p+"bytes"] = logData["bytes"] + "bytes"
				}
			}
			for k, v := range logData {
				switch k {
				//TODO(ryandotsmith): this case is measure=something val=x
				//It is deprecated and not mentioned in the docs.
				//We should remove this sometime in the near future.
				case "measure":
					units, val := parseVal(logData["val"])
					name := prefix + v
					id := &Id{ts, res, user, pass, name, units, src}
					bucket := &Bucket{Id: id}
					bucket.Vals = []float64{val}
					c <- bucket
				default:
					if !strings.HasPrefix(k, "measure.") {
						break
					}
					name := prefix + k[8:] // len("measure.") == 8
					units, val := parseVal(v)
					id := &Id{ts, res, user, pass, name, units, src}
					bucket := &Bucket{Id: id}
					bucket.Vals = []float64{val}
					c <- bucket
				}
			}
		}
	}(buckets)
	return buckets
}
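// A sketch of how the opts map above can be derived from a drain URL's
// query string. url.Values is already a map[string][]string, so it can be
// passed straight through; the URL here is purely illustrative.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:pass@example.com/logs?resolution=10&prefix=myapp")
	if err != nil {
		panic(err)
	}
	opts := map[string][]string(u.Query())
	fmt.Println(opts["resolution"]) // [10]    -> 10 second buckets
	fmt.Println(opts["prefix"])     // [myapp] -> names become "myapp.<name>"
}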
func (h *HttpOutlet) ServeReadBucket(w http.ResponseWriter, r *http.Request) {
	// need to extract: token, source, name, time
	// https://l2met:token@<host>/buckets/:name
	user, pass, err := auth.Parse(r)
	if err != nil {
		http.Error(w, "Invalid Authentication", 401)
		return
	}

	// Shortcut so we can quickly access query params.
	h.Query = r.URL.Query()

	//We need to build the identity of a bucket before we can fetch
	//it from the store. Thus, the following attrs are parsed and held
	//for the bucket.Id.
	src := h.Query.Get("source")
	name := h.Query.Get("name")
	if len(name) == 0 {
		http.Error(w, "Invalid Request. Name is required.", 400)
		return
	}
	res, err := h.parseAssertion("resolution", 60)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}
	resolution := time.Second * time.Duration(res)
	units := h.Query.Get("units")
	if len(units) == 0 {
		units = bucket.DefaultUnit
	}

	//The limit and offset are shortcuts to work with the time
	//field on the bucket. This makes it easy for the client to not have
	//to worry about keeping correct time.
	limit, err := h.parseAssertion("limit", 1)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}
	//The offset is handy because you may not want to take the most recent
	//bucket. For instance, the current minute will not have a complete view
	//of the data; however, the last minute should.
	offset, err := h.parseAssertion("offset", 1)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}

	//The API supports the ability to assert what metrics should be.
	//If the value of the assertion is negative, the assertion can
	//be skipped. By default, the value is negative.
	countAssertion, err := h.parseAssertion("count", -1)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}
	meanAssertion, err := h.parseAssertion("mean", -1)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}
	sumAssertion, err := h.parseAssertion("sum", -1)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}
	//The tolerance is a way to work with assertions that would like to use
	//less than or greater than operators.
	tol, err := h.parseAssertion("tol", 0)
	if err != nil {
		http.Error(w, "Invalid Request.", 400)
		return
	}

	//Build one bucket.Id to share across all the buckets that we fetch
	//with respect to the limit.
	//We will set the time in the following for loop.
	id := &bucket.Id{
		User:       user,
		Pass:       pass,
		Name:       name,
		Source:     src,
		Resolution: resolution,
		Units:      units,
	}
	resBucket := &bucket.Bucket{Id: id}
	anchorTime := time.Now()
	for i := 0; i < limit; i++ {
		x := time.Duration((i+offset)*-1) * resolution
		id.Time = utils.RoundTime(anchorTime.Add(x), resolution)
		b := &bucket.Bucket{Id: id}
		//Fetch the bucket from our store.
		//This will fill in the vals.
		h.Store.Get(b)
		//We are only returning 1 bucket from the API. The
		//bucket will contain an aggregate view of the data based on limit.
		resBucket.Add(b)
	}

	//If any of the assertion values are -1 then they were not
	//defined in the request query params. Thus, we only do our assertions
	//if the assertion parameter is > 0.
	if countAssertion > 0 {
		if math.Abs(float64(resBucket.Count()-countAssertion)) > float64(tol) {
			http.Error(w, "Count assertion failed.", 404)
			return
		}
	}
	if meanAssertion > 0 {
		if math.Abs(resBucket.Mean()-float64(meanAssertion)) > float64(tol) {
			http.Error(w, "Mean assertion failed.", 404)
			return
		}
	}
	if sumAssertion > 0 {
		if math.Abs(resBucket.Sum()-float64(sumAssertion)) > float64(tol) {
			http.Error(w, "Sum assertion failed.", 404)
			return
		}
	}
	//Assuming there was not a failed assertion, we can return the result
	//bucket, which may contain an aggregate of other buckets via bucket.Add().
	utils.WriteJson(w, 200, resBucket)
}
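// A sketch of exercising the assertion API above as a monitoring probe.
// Host, credentials, and metric name are hypothetical. A 200 means the
// assertion held; a 404 means it failed (an empty bucket also fails).
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Skip the current, incomplete minute (offset=1) and assert that the
	// 60 second bucket named "requests" has a count of 100, +/- 10.
	url := "https://example.com/buckets?name=requests&resolution=60&offset=1&count=100&tol=10"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("l2met", "my-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode)
}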