// getField returns the value of a named message header field, or looks the
// name up in the message's dynamic fields when it is not a header field.
func getField(msg *message.Message, name string) interface{} {
	switch name {
	case "Uuid":
		if msg.Uuid == nil {
			return nil
		}
		return msg.GetUuidString()
	case "Timestamp":
		return msg.Timestamp
	case "Type":
		return msg.Type
	case "Logger":
		return msg.Logger
	case "Severity":
		return msg.Severity
	case "Payload":
		return msg.Payload
	case "EnvVersion":
		return msg.EnvVersion
	case "Pid":
		return msg.Pid
	case "Hostname":
		return msg.Hostname
	case "_hekaTimestampMicro":
		if msg.Timestamp != nil {
			return *msg.Timestamp / 1000 // nano -> micro
		}
		return nil
	default:
		val, _ := msg.GetFieldValue(name)
		return val
	}
}
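// collectFields is a hypothetical helper (not part of the original source)
// included to illustrate how getField resolves both well-known header fields
// and dynamic message fields by name; absent fields come back as nil.
func collectFields(msg *message.Message, names []string) []interface{} {
	values := make([]interface{}, len(names))
	for i, n := range names {
		// e.g. "Type", "Hostname", "_hekaTimestampMicro", or any dynamic field name
		values[i] = getField(msg, n)
	}
	return values
}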
// Run prints each message received on the output's input channel, either as
// the bare payload or as a full human-readable dump of the message.
func (self *LogOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack *PipelinePack
		msg  *message.Message
	)
	for plc := range inChan {
		pack = plc.Pack
		msg = pack.Message
		if self.payloadOnly {
			// Use log.Print so a payload containing '%' is not treated as a
			// format string.
			log.Print(msg.GetPayload())
		} else {
			log.Printf("<\n\tTimestamp: %s\n"+
				"\tType: %s\n"+
				"\tHostname: %s\n"+
				"\tPid: %d\n"+
				"\tUUID: %s\n"+
				"\tLogger: %s\n"+
				"\tPayload: %s\n"+
				"\tEnvVersion: %s\n"+
				"\tSeverity: %d\n"+
				"\tFields: %+v\n"+
				"\tCaptures: %v\n>\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(),
				msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
				msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
				msg.GetSeverity(), msg.Fields, plc.Captures)
		}
		pack.Recycle()
	}
	return
}
// Format serializes the configured subset of message fields as a JSON document.
func (c *CleanMessageFormatter) Format(m *message.Message) (doc []byte, err error) {
	buf := bytes.Buffer{}
	buf.WriteString(`{`)
	// Iterate over the fields configured for clean formatting.
	for _, f := range c.fields {
		switch strings.ToLower(f) {
		case "uuid":
			writeField(&buf, f, strconv.Quote(m.GetUuidString()))
		case "timestamp":
			t := time.Unix(0, m.GetTimestamp()).UTC()
			writeField(&buf, f, strconv.Quote(t.Format(c.timestampFormat)))
		case "type":
			writeField(&buf, f, strconv.Quote(m.GetType()))
		case "logger":
			writeField(&buf, f, strconv.Quote(m.GetLogger()))
		case "severity":
			writeField(&buf, f, strconv.Itoa(int(m.GetSeverity())))
		case "payload":
			if utf8.ValidString(m.GetPayload()) {
				writeField(&buf, f, strconv.Quote(m.GetPayload()))
			}
		case "envversion":
			writeField(&buf, f, strconv.Quote(m.GetEnvVersion()))
		case "pid":
			writeField(&buf, f, strconv.Itoa(int(m.GetPid())))
		case "hostname":
			writeField(&buf, f, strconv.Quote(m.GetHostname()))
		case "fields":
			for _, field := range m.Fields {
				switch field.GetValueType() {
				case message.Field_STRING:
					writeField(&buf, *field.Name, strconv.Quote(field.GetValue().(string)))
				case message.Field_BYTES:
					data := field.GetValue().([]byte)[:]
					writeField(&buf, *field.Name,
						strconv.Quote(base64.StdEncoding.EncodeToString(data)))
				case message.Field_INTEGER:
					writeField(&buf, *field.Name,
						strconv.FormatInt(field.GetValue().(int64), 10))
				case message.Field_DOUBLE:
					writeField(&buf, *field.Name,
						strconv.FormatFloat(field.GetValue().(float64), 'g', -1, 64))
				case message.Field_BOOL:
					writeField(&buf, *field.Name,
						strconv.FormatBool(field.GetValue().(bool)))
				}
			}
		default:
			// The configured name does not match any known message field.
			err = fmt.Errorf("Unable to find field: %s", f)
			return
		}
	}
	buf.WriteString(`}`)
	doc = buf.Bytes()
	return
}
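// The formatter above relies on a writeField helper that is not shown here.
// This is only a minimal sketch of what it is assumed to do (the actual
// implementation may differ): append `"name":value` to the buffer, inserting
// a comma separator once the opening `{` is followed by at least one field.
func writeField(b *bytes.Buffer, name string, value string) {
	if b.Len() > 1 { // buffer holds more than just the opening `{`
		b.WriteString(`,`)
	}
	b.WriteString(`"`)
	b.WriteString(name)
	b.WriteString(`":`)
	b.WriteString(value)
}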
// interpolateFlag replaces %{...} tokens in an index name with message header
// fields or dynamic field values, and falls back to formatting a date pattern
// (e.g. %{2012.09.19}).
func interpolateFlag(e *ElasticSearchCoordinates, m *message.Message,
	name string) (interpolatedValue string, err error) {

	iSlice := strings.Split(name, "%{")
	for i, element := range iSlice {
		elEnd := strings.Index(element, "}")
		if elEnd > -1 {
			elVal := element[:elEnd]
			switch elVal {
			case "Type":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetType(), -1)
			case "Hostname":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetHostname(), -1)
			case "Pid":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
					strconv.Itoa(int(m.GetPid())), -1)
			case "UUID":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetUuidString(), -1)
			case "Logger":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetLogger(), -1)
			case "EnvVersion":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetEnvVersion(), -1)
			case "Severity":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
					strconv.Itoa(int(m.GetSeverity())), -1)
			default:
				if fname, ok := m.GetFieldValue(elVal); ok {
					iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], fname.(string), -1)
				} else {
					// Not a known field: treat the token as a time layout.
					var t time.Time
					if e.ESIndexFromTimestamp && m.Timestamp != nil {
						t = time.Unix(0, *m.Timestamp).UTC()
					} else {
						t = time.Now().UTC()
					}
					iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
						t.Format(elVal), -1)
				}
			}
			if iSlice[i] == elVal {
				err = fmt.Errorf("Could not interpolate field from config: %s", name)
			}
		}
	}
	interpolatedValue = strings.Join(iSlice, "")
	return
}
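// Illustrative use of interpolateFlag (the pattern and values are examples,
// not taken from the original source): an unknown token falls through to the
// message's dynamic fields and finally to time.Format, so a reference-time
// layout such as "heka-%{2006.01.02}" yields "heka-2012.09.19" for a message
// stamped on that date.
//
//	index, err := interpolateFlag(e, m, "heka-%{2006.01.02}")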
func (c *KibanaFormatter) Format(m *message.Message) (doc []byte, err error) {
	buf := bytes.Buffer{}
	buf.WriteString(`{`)
	writeStringField(true, &buf, `@uuid`, m.GetUuidString())
	t := time.Unix(0, m.GetTimestamp()) // time.Unix gives local time back
	writeStringField(false, &buf, `@timestamp`, t.UTC().Format("2006-01-02T15:04:05.000Z"))
	writeStringField(false, &buf, `@type`, m.GetType())
	writeStringField(false, &buf, `@logger`, m.GetLogger())
	writeRawField(false, &buf, `@severity`, strconv.Itoa(int(m.GetSeverity())))
	writeStringField(false, &buf, `@message`, m.GetPayload())
	writeRawField(false, &buf, `@envversion`, strconv.Quote(m.GetEnvVersion()))
	writeRawField(false, &buf, `@pid`, strconv.Itoa(int(m.GetPid())))
	writeStringField(false, &buf, `@source_host`, m.GetHostname())

	buf.WriteString(`,"@fields":{`)
	first := true
	for _, field := range m.Fields {
		switch field.GetValueType() {
		case message.Field_STRING:
			writeStringField(first, &buf, *field.Name, field.GetValue().(string))
			first = false
		case message.Field_BYTES:
			data := field.GetValue().([]byte)[:]
			writeStringField(first, &buf, *field.Name, base64.StdEncoding.EncodeToString(data))
			first = false
		case message.Field_INTEGER:
			writeRawField(first, &buf, *field.Name, strconv.FormatInt(field.GetValue().(int64), 10))
			first = false
		case message.Field_DOUBLE:
			writeRawField(first, &buf, *field.Name,
				strconv.FormatFloat(field.GetValue().(float64), 'g', -1, 64))
			first = false
		case message.Field_BOOL:
			writeRawField(first, &buf, *field.Name, strconv.FormatBool(field.GetValue().(bool)))
			first = false
		}
	}
	buf.WriteString(`}`) // end of fields
	buf.WriteString(`}`)
	doc = buf.Bytes()
	return
}
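// The Kibana formatter relies on writeStringField/writeRawField helpers that
// are not shown here. These are minimal sketches of their assumed behavior
// (the actual implementations may differ): both emit `"name":value`, with
// `first` suppressing the leading comma; the string variant quotes the value.
func writeStringField(first bool, b *bytes.Buffer, name string, value string) {
	if !first {
		b.WriteString(`,`)
	}
	b.WriteString(strconv.Quote(name))
	b.WriteString(`:`)
	b.WriteString(strconv.Quote(value))
}

func writeRawField(first bool, b *bytes.Buffer, name string, value string) {
	if !first {
		b.WriteString(`,`)
	}
	b.WriteString(strconv.Quote(name))
	b.WriteString(`:`)
	b.WriteString(value)
}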
// main reads a stream of framed Heka protobuf messages from the input file,
// filters them with a message matcher, and writes matches in the requested format.
func main() {
	flagMatch := flag.String("match", "TRUE", "message_matcher filter expression")
	flagFormat := flag.String("format", "txt", "output format [txt|json|heka|count]")
	flagOutput := flag.String("output", "", "output filename, defaults to stdout")
	flagTail := flag.Bool("tail", false, "don't exit on EOF")
	flagOffset := flag.Int64("offset", 0, "starting offset for the input file in bytes")
	flagMaxMessageSize := flag.Uint64("max-message-size", 4*1024*1024, "maximum message size in bytes")
	flag.Parse()

	if flag.NArg() != 1 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	if *flagMaxMessageSize < math.MaxUint32 {
		maxSize := uint32(*flagMaxMessageSize)
		message.SetMaxMessageSize(maxSize)
	} else {
		fmt.Printf("Message size is too large: %d\n", *flagMaxMessageSize)
		os.Exit(8)
	}

	var err error
	var match *message.MatcherSpecification
	if match, err = message.CreateMatcherSpecification(*flagMatch); err != nil {
		fmt.Printf("Match specification - %s\n", err)
		os.Exit(2)
	}

	var file *os.File
	if file, err = os.Open(flag.Arg(0)); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(3)
	}
	defer file.Close()

	var out *os.File
	if "" == *flagOutput {
		out = os.Stdout
	} else {
		if out, err = os.OpenFile(*flagOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
			fmt.Printf("%s\n", err)
			os.Exit(4)
		}
		defer out.Close()
	}

	var offset int64
	if offset, err = file.Seek(*flagOffset, 0); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(5)
	}

	sRunner, err := makeSplitterRunner()
	if err != nil {
		fmt.Println(err)
		os.Exit(7)
	}
	msg := new(message.Message)
	var processed, matched int64
	fmt.Printf("Input:%s Offset:%d Match:%s Format:%s Tail:%t Output:%s\n",
		flag.Arg(0), *flagOffset, *flagMatch, *flagFormat, *flagTail, *flagOutput)

	for {
		n, record, err := sRunner.GetRecordFromStream(file)
		if n > 0 && n != len(record) {
			fmt.Printf("Corruption detected at offset: %d bytes: %d\n", offset, n-len(record))
		}
		if err != nil {
			if err == io.EOF {
				if !*flagTail || "count" == *flagFormat {
					break
				}
				time.Sleep(time.Duration(500) * time.Millisecond)
			} else {
				break
			}
		} else {
			if len(record) > 0 {
				processed += 1
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				if err = proto.Unmarshal(record[headerLen:], msg); err != nil {
					fmt.Printf("Error unmarshalling message at offset: %d error: %s\n", offset, err)
					continue
				}

				if !match.Match(msg) {
					continue
				}
				matched += 1

				switch *flagFormat {
				case "count": // no op
				case "json":
					contents, _ := json.Marshal(msg)
					fmt.Fprintf(out, "%s\n", contents)
				case "heka":
					fmt.Fprintf(out, "%s", record)
				default:
					fmt.Fprintf(out, "Timestamp: %s\n"+
						"Type: %s\n"+
						"Hostname: %s\n"+
						"Pid: %d\n"+
						"UUID: %s\n"+
						"Logger: %s\n"+
						"Payload: %s\n"+
						"EnvVersion: %s\n"+
						"Severity: %d\n"+
						"Fields: %+v\n\n",
						time.Unix(0, msg.GetTimestamp()), msg.GetType(), msg.GetHostname(),
						msg.GetPid(), msg.GetUuidString(), msg.GetLogger(), msg.GetPayload(),
						msg.GetEnvVersion(), msg.GetSeverity(), msg.Fields)
				}
			}
		}
		offset += int64(n)
	}
	fmt.Printf("Processed: %d, matched: %d messages\n", processed, matched)
	if err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(6)
	}
}
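// Example invocations (illustrative only; the binary name `heka-cat` is an
// assumption, the flags and matcher syntax are the ones defined above):
//
//	heka-cat -match 'Type == "nginx.access"' -format json -output access.json input.log
//	heka-cat -format count input.log
//	heka-cat -tail -offset 1048576 input.log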
// Save matching client records locally to the given output file in the given
// format.
func save(recordChannel <-chan s3splitfile.S3Record,
	match *message.MatcherSpecification, format string,
	out *os.File, done chan<- int) {
	processed := 0
	matched := 0
	bytes := 0
	msg := new(message.Message)
	for {
		r, ok := <-recordChannel
		if !ok {
			// Channel is closed; report the total byte count and stop.
			done <- bytes
			break
		}

		bytes += len(r.Record)

		processed += 1
		headerLen := int(r.Record[1]) + message.HEADER_FRAMING_SIZE
		messageBytes := r.Record[headerLen:]
		unsnappy, decodeErr := snappy.Decode(nil, messageBytes)
		if decodeErr == nil {
			messageBytes = unsnappy
		}
		if err := proto.Unmarshal(messageBytes, msg); err != nil {
			fmt.Fprintf(os.Stderr, "Error unmarshalling message %d in %s, error: %s\n",
				processed, r.Key, err)
			continue
		}

		if !match.Match(msg) {
			continue
		}
		matched += 1

		switch format {
		case "count": // no op
		case "json":
			contents, _ := json.Marshal(msg)
			fmt.Fprintf(out, "%s\n", contents)
		case "heka":
			fmt.Fprintf(out, "%s", r.Record)
		case "offsets":
			// Use offsets mode for indexing the S3 files by clientId.
			clientId, ok := msg.GetFieldValue("clientId")
			recordLength := len(r.Record) - headerLen
			if ok {
				fmt.Fprintf(out, "%s\t%s\t%d\t%d\n", r.Key, clientId,
					(r.Offset + uint64(headerLen)), recordLength)
			} else {
				fmt.Fprintf(os.Stderr, "Missing client id in %s @ %d+%d\n",
					r.Key, r.Offset, recordLength)
			}
		default:
			fmt.Fprintf(out, "Timestamp: %s\n"+
				"Type: %s\n"+
				"Hostname: %s\n"+
				"Pid: %d\n"+
				"UUID: %s\n"+
				"Logger: %s\n"+
				"Payload: %s\n"+
				"EnvVersion: %s\n"+
				"Severity: %d\n"+
				"Fields: %+v\n\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(), msg.GetHostname(),
				msg.GetPid(), msg.GetUuidString(), msg.GetLogger(), msg.GetPayload(),
				msg.GetEnvVersion(), msg.GetSeverity(), msg.Fields)
		}
	}
	fmt.Fprintf(os.Stderr, "Processed: %d, matched: %d messages (%.2f MB)\n",
		processed, matched, (float64(bytes) / 1024.0 / 1024.0))
}
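// Hedged sketch of how save might be wired up by a caller (the variable names
// and buffer size are illustrative, not part of the original source): records
// are fed on recordChannel, the channel is closed when the input is exhausted,
// and save reports the total number of bytes it consumed on done.
//
//	recordChannel := make(chan s3splitfile.S3Record, 1000)
//	done := make(chan int)
//	go save(recordChannel, match, format, out, done)
//	// ... send each s3splitfile.S3Record on recordChannel ...
//	close(recordChannel)
//	totalBytes := <-done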