func BenchmarkKRDecode(b *testing.B) {
	const rows = 10000
	data := []byte{}
	for i := 0; i < rows; i++ {
		data = append(data, "a=1 b=\"bar\" ƒ=2h3s r=\"esc\\tmore stuff\" d x=sf \n"...)
	}
	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var (
			s   = bufio.NewScanner(bytes.NewReader(data))
			err error
			j   = 0
			dh  discardHandler
		)
		for err == nil && s.Scan() {
			err = kr.Unmarshal(s.Bytes(), &dh)
			j++
		}
		if err == nil {
			err = s.Err()
		}
		if err != nil {
			b.Errorf("got %v, want %v", err, nil)
		}
		if j != rows {
			b.Errorf("got %v, want %v", j, rows)
		}
	}
}
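// A minimal sketch, assuming kr/logfmt's Handler interface
// (HandleLogfmt(key, val []byte) error), of the discardHandler the benchmark
// above hands to kr.Unmarshal: it accepts every key/value pair and drops it,
// so only the decoder's own cost is measured. The definition is illustrative,
// not the original test helper.
type discardHandler struct{}

func (discardHandler) HandleLogfmt(key, val []byte) error { return nil }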
func parseLogData(msg []byte) (tuples, error) {
	tups := make(tuples, 0)
	if err := logfmt.Unmarshal(msg, &tups); err != nil {
		return nil, err
	}
	return tups, nil
}
// This is called every time we receive log lines from an app
func processLogs(w http.ResponseWriter, r *http.Request) {
	c := redisPool.Get()
	defer c.Close()

	lp := lpx.NewReader(bufio.NewReader(r.Body))
	// a single request may contain multiple log lines. Loop over each of them
	for lp.Next() {
		// we only care about logs from the heroku router
		if string(lp.Header().Procid) == "router" {
			rl := new(routerLog)
			if err := logfmt.Unmarshal(lp.Bytes(), rl); err != nil {
				fmt.Printf("Error parsing log line: %v\n", err)
			} else {
				timeBucket, err := timestamp2Bucket(lp.Header().Time)
				if err != nil {
					fmt.Printf("Error parsing time: %v", err)
					continue
				}
				_, err = c.Do("INCR", fmt.Sprintf("%v:%v", rl.host, timeBucket))
				if err != nil {
					fmt.Printf("Error running INCR on Redis: %v\n", err)
				}
				_, err = c.Do("INCR", "host:"+rl.host)
				if err != nil {
					fmt.Printf("Error running INCR on Redis: %v\n", err)
				}
				fmt.Printf("%v @ %v: +1\n", rl.host, timeBucket)
			}
		}
	}
}
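// A guess at the routerLog type used by processLogs above. Because the host
// field is unexported, the type presumably implements kr/logfmt's Handler
// itself and captures only the key it needs; this sketch is an assumption,
// not the original definition.
type routerLog struct {
	host string
}

func (rl *routerLog) HandleLogfmt(key, val []byte) error {
	if string(key) == "host" {
		rl.host = string(val)
	}
	return nil
}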
func parseMetrics(typ int, ld *logData, data *string, out chan *logMetrics) {
	lm := logMetrics{typ, ld.app, ld.tags, ld.prefix, make(map[string]logValue, 5)}
	if err := logfmt.Unmarshal([]byte(*data), &lm); err != nil {
		log.Fatalf("err=%q", err)
	}
	out <- &lm
}
func forwardRuncLogsToLager(log lager.Logger, buff []byte) {
	parsedLogLine := struct{ Msg string }{}
	for _, logLine := range strings.Split(string(buff), "\n") {
		if err := logfmt.Unmarshal([]byte(logLine), &parsedLogLine); err == nil {
			log.Debug("runc", lager.Data{
				"message": parsedLogLine.Msg,
			})
		}
	}
}
func Example_customHandler() {
	var data = []byte("measure.a=1ms measure.b=10 measure.c=100MB measure.d=1s garbage")

	mm := make(Measurements, 0)
	if err := logfmt.Unmarshal(data, &mm); err != nil {
		log.Fatalf("err=%q", err)
	}
	for _, m := range mm {
		fmt.Printf("%v\n", *m)
	}

	// Output:
	// {a 1 ms}
	// {b 10 }
	// {c 100 MB}
	// {d 1 s}
}
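// One way the Measurements handler behind Example_customHandler could be
// written, assuming kr/logfmt's Handler interface. It keeps only keys with a
// "measure." prefix and splits each value into a number and a trailing unit,
// which matches the example's output. This is a sketch, not the package's
// actual example code.
type Measurement struct {
	Key  string
	Val  float64
	Unit string
}

type Measurements []*Measurement

func (mm *Measurements) HandleLogfmt(key, val []byte) error {
	if !bytes.HasPrefix(key, []byte("measure.")) {
		return nil // e.g. the trailing "garbage" token is ignored
	}
	// Split the numeric part from the unit suffix ("100MB" -> 100, "MB").
	i := len(val)
	for i > 0 && (val[i-1] < '0' || val[i-1] > '9') {
		i--
	}
	v, err := strconv.ParseFloat(string(val[:i]), 64)
	if err != nil {
		return err
	}
	*mm = append(*mm, &Measurement{
		Key:  string(key[len("measure."):]),
		Val:  v,
		Unit: string(val[i:]),
	})
	return nil
}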
func (p *Parser) Parse(d []byte) (Message, error) {
	bits := bytes.SplitN(d, []byte(" "), 2)
	name, rest := bits[0], []byte("")
	if len(bits) > 1 {
		rest = bits[1]
	}

	fn, ok := p.types[string(name)]
	if !ok {
		return nil, ErrUnknownType(fmt.Errorf("unknown type %q", name))
	}

	m := fn()
	return m, logfmt.Unmarshal(rest, m)
}
func parseKR(data []byte) ([][]kv, error) {
	var (
		s   = bufio.NewScanner(bytes.NewReader(data))
		err error
		h   saveHandler
		got [][]kv
	)
	for err == nil && s.Scan() {
		h.kvs = nil
		err = kr.Unmarshal(s.Bytes(), &h)
		got = append(got, h.kvs)
	}
	if err == nil {
		err = s.Err()
	}
	return got, err
}
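// Hypothetical definitions for the kv pair and saveHandler used by parseKR,
// again assuming kr/logfmt's Handler interface; the real helpers may differ,
// but something along these lines records every key/value pair per line.
type kv struct {
	k, v []byte
}

type saveHandler struct {
	kvs []kv
}

func (h *saveHandler) HandleLogfmt(key, val []byte) error {
	h.kvs = append(h.kvs, kv{key, val})
	return nil
}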
func parseMetrics(typ int, ld *logData, data *string, out chan *logMetrics) {
	var myslice []string
	lm := logMetrics{typ, ld.app, ld.tags, ld.prefix, make(map[string]logValue, 5), myslice}

	if typ == scalingMsg {
		events := append(lm.events, *data)
		lm.events = events
	}

	if err := logfmt.Unmarshal([]byte(*data), &lm); err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Warn()
		return
	}

	if source, ok := lm.metrics["source"]; ok {
		tags := append(*lm.tags, "type:"+dynoNumber.ReplaceAllString(source.Val, ""))
		lm.tags = &tags
	}

	out <- &lm
}
func ParseLogfmt(in <-chan map[string]interface{}, key string) <-chan map[string]interface{} {
	return Map(in, func(m map[string]interface{}) map[string]interface{} {
		f, ok := m[key]
		if !ok {
			return m
		}
		s, ok := f.(string)
		if !ok {
			return m
		}
		e := make(logfmtHandler)
		if err := logfmt.Unmarshal([]byte(s), &e); err != nil {
			return m
		}
		for k, v := range e {
			m[k] = v
		}
		return m
	})
}
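// A possible definition of the logfmtHandler map used by ParseLogfmt above:
// each key/value pair from the logfmt payload is stored as a string entry and
// later merged into the event map. Illustrative only.
type logfmtHandler map[string]interface{}

func (h logfmtHandler) HandleLogfmt(key, val []byte) error {
	h[string(key)] = string(val)
	return nil
}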
func wrapWithErrorFromRuncLog(log lager.Logger, originalError error, buff []byte) error {
	parsedLogLine := struct{ Msg string }{}
	logfmt.Unmarshal(buff, &parsedLogLine)
	return fmt.Errorf("runc exec: %s: %s", originalError, parsedLogLine.Msg)
}
// "Parse tree" from hell func serveDrain(w http.ResponseWriter, r *http.Request) { ctx := slog.Context{} defer func() { LogWithContext(ctx) }() w.Header().Set("Content-Length", "0") if r.Method != "POST" { w.WriteHeader(http.StatusMethodNotAllowed) ctx.Count("errors.drain.wrong.method", 1) return } id := r.Header.Get("Logplex-Drain-Token") if id == "" { if err := checkAuth(r); err != nil { w.WriteHeader(http.StatusForbidden) ctx.Count("errors.auth.failure", 1) return } } ctx.Count("batch", 1) parseStart := time.Now() lp := lpx.NewReader(bufio.NewReader(r.Body)) for lp.Next() { ctx.Count("lines.total", 1) header := lp.Header() // If the syslog App Name Header field containts what looks like a log token, // let's assume it's an override of the id and we're getting the data from the magic // channel if bytes.HasPrefix(header.Name, TokenPrefix) { id = string(header.Name) } // If we still don't have an id, throw an error and try the next line if id == "" { ctx.Count("errors.token.missing", 1) continue } chanGroup := hashRing.Get(id) msg := lp.Bytes() switch { case bytes.Equal(header.Name, Heroku), bytes.HasPrefix(header.Name, TokenPrefix): t, e := time.Parse("2006-01-02T15:04:05.000000+00:00", string(lp.Header().Time)) if e != nil { log.Printf("Error Parsing Time(%s): %q\n", string(lp.Header().Time), e) continue } timestamp := t.UnixNano() / int64(time.Microsecond) pid := string(header.Procid) switch pid { case "router": switch { // router logs with a H error code in them case bytes.Contains(msg, keyCodeH): ctx.Count("lines.router.error", 1) re := routerError{} err := logfmt.Unmarshal(msg, &re) if err != nil { log.Printf("logfmt unmarshal error: %s\n", err) continue } chanGroup.points[EventsRouter] <- []interface{}{timestamp, id, re.Code} // likely a standard router log default: ctx.Count("lines.router", 1) rm := routerMsg{} err := logfmt.Unmarshal(msg, &rm) if err != nil { log.Printf("logfmt unmarshal error: %s\n", err) continue } chanGroup.points[Router] <- []interface{}{timestamp, id, rm.Status, rm.Service} } // Non router logs, so either dynos, runtime, etc default: switch { // Dyno error messages case bytes.HasPrefix(msg, dynoErrorSentinel): ctx.Count("lines.dyno.error", 1) de, err := parseBytesToDynoError(msg) if err != nil { log.Printf("Unable to parse dyno error message: %q\n", err) } what := string(lp.Header().Procid) chanGroup.points[EventsDyno] <- []interface{}{ timestamp, id, what, "R", de.Code, string(msg), dynoType(what), } // Dyno log-runtime-metrics memory messages case bytes.Contains(msg, dynoMemMsgSentinel): ctx.Count("lines.dyno.mem", 1) dm := dynoMemMsg{} err := logfmt.Unmarshal(msg, &dm) if err != nil { log.Printf("logfmt unmarshal error: %s\n", err) continue } if dm.Source != "" { chanGroup.points[DynoMem] <- []interface{}{ timestamp, id, dm.Source, dm.MemoryCache, dm.MemoryPgpgin, dm.MemoryPgpgout, dm.MemoryRSS, dm.MemorySwap, dm.MemoryTotal, dynoType(dm.Source), } } // Dyno log-runtime-metrics load messages case bytes.Contains(msg, dynoLoadMsgSentinel): ctx.Count("lines.dyno.load", 1) dm := dynoLoadMsg{} err := logfmt.Unmarshal(msg, &dm) if err != nil { log.Printf("logfmt unmarshal error: %s\n", err) continue } if dm.Source != "" { chanGroup.points[DynoLoad] <- []interface{}{ timestamp, id, dm.Source, dm.LoadAvg1Min, dm.LoadAvg5Min, dm.LoadAvg15Min, dynoType(dm.Source), } } // unknown default: ctx.Count("lines.unknown.heroku", 1) if Debug { log.Printf("Unknown Heroku Line - Header: PRI: %s, Time: %s, Hostname: %s, Name: %s, ProcId: %s, MsgId: %s - Body: %s", 
header.PrivalVersion, header.Time, header.Hostname, header.Name, header.Procid, header.Msgid, string(msg), ) } } } // non heroku lines default: ctx.Count("lines.unknown.user", 1) if Debug { log.Printf("Unknown User Line - Header: PRI: %s, Time: %s, Hostname: %s, Name: %s, ProcId: %s, MsgId: %s - Body: %s", header.PrivalVersion, header.Time, header.Hostname, header.Name, header.Procid, header.Msgid, string(msg), ) } } } ctx.MeasureSince("lines.parse.time", parseStart) // If we are told to close the connection after the reply, do so. select { case <-connectionCloser: w.Header().Set("Connection", "close") default: //Nothing } w.WriteHeader(http.StatusNoContent) }
func (ld *logData) Read(d []byte) error {
	if err := logfmt.Unmarshal(d, &ld.Tuples); err != nil {
		return err
	}
	return nil
}