func NewBucket(token string, rdr *bufio.Reader) ([]*Bucket, error) {
    var buckets []*Bucket
    lp := logplex.NewReader(rdr)
    for {
        packet, err := lp.ReadMsg()
        if err != nil {
            if err == io.EOF {
                break
            }
            fmt.Printf("at=logplex-error err=%s\n", err)
            return nil, err
        }
        d, err := encoding.ParseMsgData(packet.Msg)
        if err != nil {
            continue
        }
        name, ok := d["measure"]
        if !ok {
            continue
        }
        source, ok := d["source"]
        if !ok {
            source = ""
        }
        var val float64
        tmpVal, ok := d["val"]
        if ok {
            val, err = strconv.ParseFloat(tmpVal, 64)
            if err != nil {
                fmt.Printf("at=error error=\"unable to parse val.\"\n")
                continue
            }
        } else {
            val = float64(1)
        }
        t, err := packet.Time()
        if err != nil {
            fmt.Printf("at=time-error error=%s\n", err)
            continue
        }
        m := &Bucket{}
        m.Token = token
        m.Time = utils.RoundTime(t, time.Minute)
        m.Name = name
        m.Source = source
        m.Vals = append(m.Vals, val)
        buckets = append(buckets, m)
    }
    utils.MeasureI("received-measurements", int64(len(buckets)))
    return buckets, nil
}
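For comparison with the later, channel-based versions, here is a minimal sketch of how a caller might use this slice-returning variant. The handler shape, the basic-auth token extraction, and the store helper are assumptions made for illustration, not part of the original code; it also assumes the "bufio" and "net/http" imports alongside this package.

// Hypothetical handler sketch for the slice-returning version.
// store(...) is an illustrative persistence helper, not a real function here.
func receiveLogsBatch(w http.ResponseWriter, r *http.Request) {
    token, _, _ := r.BasicAuth() // assumed: drain token carried as the basic-auth user
    defer r.Body.Close()
    buckets, err := NewBucket(token, bufio.NewReader(r.Body))
    if err != nil {
        http.Error(w, "unable to parse logs", http.StatusBadRequest)
        return
    }
    // Nothing can be stored until the entire request body has been read.
    for _, b := range buckets {
        store(b)
    }
    w.WriteHeader(http.StatusOK)
}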
func NewBucket(token string, rdr *bufio.Reader) <-chan *Bucket {
    buckets := make(chan *Bucket, 1000)
    go func(c chan<- *Bucket) {
        defer close(c)
        defer utils.MeasureT("new-bucket", time.Now())
        lp := logplex.NewReader(rdr)
        for {
            packet, err := lp.ReadMsg()
            if err != nil {
                if err == io.EOF {
                    break
                }
                fmt.Printf("at=logplex-error err=%s\n", err)
                return
            }
            d, err := encoding.ParseMsgData(packet.Msg)
            if err != nil {
                continue
            }
            name, ok := d["measure"]
            if !ok {
                continue
            }
            source, ok := d["source"]
            if !ok {
                source = ""
            }
            var val float64
            tmpVal, ok := d["val"]
            if ok {
                val, err = strconv.ParseFloat(tmpVal, 64)
                if err != nil {
                    fmt.Printf("at=error error=\"unable to parse val.\"\n")
                    continue
                }
            } else {
                val = float64(1)
            }
            t, err := packet.Time()
            if err != nil {
                fmt.Printf("at=time-error error=%s\n", err)
                continue
            }
            t = utils.RoundTime(t, time.Minute)
            k := BKey{Token: token, Name: name, Source: source, Time: t}
            b := &Bucket{Key: k}
            b.Vals = append(b.Vals, val)
            c <- b
        }
    }(buckets)
    return buckets
}
func NewBucket(token string, rdr *bufio.Reader, bSize time.Duration) <-chan *Bucket {
    //TODO(ryandotsmith): Can we eliminate the magical number?
    buckets := make(chan *Bucket, 10000)
    go func(c chan<- *Bucket) {
        defer close(c)
        lp := logplex.NewReader(rdr)
        for {
            packet, err := lp.ReadMsg()
            if err != nil {
                if err == io.EOF {
                    break
                }
                fmt.Printf("at=logplex-error err=%s\n", err)
                return
            }
            d, err := encoding.ParseMsgData(packet.Msg)
            if err != nil {
                continue
            }
            measure, ok := d["measure"]
            if !ok {
                continue
            }
            source, ok := d["source"]
            if !ok {
                source = ""
            }
            t, err := packet.Time()
            if err != nil {
                fmt.Printf("at=time-error error=%s\n", err)
                continue
            }
            t = utils.RoundTime(t, bSize)
            val := float64(1)
            tmpVal, present := d["val"]
            if present {
                v, err := strconv.ParseFloat(tmpVal, 64)
                if err == nil {
                    val = v
                }
            }
            k := &Id{Token: token, Name: measure, Source: source, Time: t}
            b := &Bucket{Id: k}
            b.Vals = append(b.Vals, val)
            c <- b
        }
    }(buckets)
    return buckets
}
//TODO(ryandotsmith): NewBucket should be broken up. This func is too big.
func NewBucket(user, pass string, rdr *bufio.Reader, opts map[string][]string) <-chan *Bucket {
    //TODO(ryandotsmith): Can we eliminate the magical number?
    buckets := make(chan *Bucket, 10000)
    go func(c chan<- *Bucket) {
        defer close(c)
        lp := logplex.NewReader(rdr)
        for {
            logLine, err := lp.ReadMsg()
            if err != nil {
                if err == io.EOF {
                    break
                }
                fmt.Printf("at=logplex-error err=%s\n", err)
                return
            }
            logData, err := encoding.ParseMsgData(logLine.Msg)
            if err != nil {
                continue
            }
            ts, err := logLine.Time()
            if err != nil {
                fmt.Printf("at=time-error error=%s\n", err)
                continue
            }
            //The resolution determines how long a bucket is
            //left to linger. E.g. a bucket with 1 second resolution
            //will hang around for 1 second and provide metrics
            //with 1 second resolution.
            resQuery, ok := opts["resolution"]
            if !ok {
                resQuery = []string{"60"}
            }
            resTmp, err := strconv.Atoi(resQuery[0])
            if err != nil {
                continue
            }
            res := time.Duration(time.Second * time.Duration(resTmp))
            ts = utils.RoundTime(ts, res)
            //Src can be overridden by the heroku router messages.
            src := logData["source"]
            //You can prefix all measurements by adding the
            //prefix option on your drain url.
            var prefix string
            if prefixQuery, ok := opts["prefix"]; ok {
                if len(prefixQuery[0]) > 0 {
                    prefix = prefixQuery[0] + "."
                }
            }
            //Special case the Heroku router.
            //In this case, we will massage logData
            //to include connect, service, and bytes.
            if string(logLine.Pid) == "router" {
                p := "measure.router."
                if len(logData["host"]) > 0 {
                    src = logData["host"]
                }
                if len(logData["connect"]) > 0 {
                    logData[p+"connect"] = logData["connect"]
                }
                if len(logData["service"]) > 0 {
                    logData[p+"service"] = logData["service"]
                }
                if len(logData["bytes"]) > 0 {
                    logData[p+"bytes"] = logData["bytes"] + "bytes"
                }
            }
            for k, v := range logData {
                switch k {
                //TODO(ryandotsmith): this case is measure=something val=x
                //It is deprecated and not mentioned in the docs.
                //We should remove this sometime in the near future.
                case "measure":
                    units, val := parseVal(logData["val"])
                    name := prefix + v
                    id := &Id{ts, res, user, pass, name, units, src}
                    bucket := &Bucket{Id: id}
                    bucket.Vals = []float64{val}
                    c <- bucket
                default:
                    if !strings.HasPrefix(k, "measure.") {
                        break
                    }
                    name := prefix + k[8:] // len("measure.") == 8
                    units, val := parseVal(v)
                    id := &Id{ts, res, user, pass, name, units, src}
                    bucket := &Bucket{Id: id}
                    bucket.Vals = []float64{val}
                    c <- bucket
                }
            }
        }
    }(buckets)
    return buckets
}
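And here is a minimal sketch of how a caller might drain the channel returned by this final version. The handler shape, the basic-auth credential extraction, and the outbox channel are assumptions made for illustration; only the NewBucket signature and the map[string][]string opts (here fed from the drain URL's query string, which is where the resolution and prefix options live) come from the code above. It assumes the "bufio" and "net/http" imports alongside this package.

// Hypothetical handler sketch for the channel-returning version.
// outbox is an illustrative downstream channel, not part of the original code.
func receiveLogsStream(w http.ResponseWriter, r *http.Request, outbox chan<- *Bucket) {
    user, pass, _ := r.BasicAuth() // assumed: drain credentials via basic auth
    defer r.Body.Close()
    // Each bucket can be forwarded as soon as it is parsed; the loop ends
    // when NewBucket closes its channel after hitting io.EOF on the body.
    for b := range NewBucket(user, pass, bufio.NewReader(r.Body), r.URL.Query()) {
        outbox <- b
    }
    w.WriteHeader(http.StatusOK)
}

The contrast with the batch sketch after the first version is the point of the rewrite: the caller no longer waits for the whole request body before it can start handing buckets downstream.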