// Starts a goroutine that computes rotation boundaries from the configured
// RotationInterval (in hours) and sends the boundary time on rotateChan each
// time one passes. The goroutine exits when the closing channel is closed.
func (self *FileOutput) startRotateNotifier() {
	now := time.Now()
	interval := time.Duration(self.config.RotationInterval) * time.Hour
	last := now.Truncate(interval)
	next := last.Add(interval)
	until := next.Sub(now)
	after := time.After(until)

	self.path = gostrftime.Strftime(self.config.Path, now)

	go func() {
		ok := true
		for ok {
			select {
			case _, ok = <-self.closing:
				break
			case <-after:
				last = next
				next = next.Add(interval)
				until = next.Sub(time.Now())
				after = time.After(until)
				self.rotateChan <- last
			}
		}
	}()
}
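// Illustrative sketch only (not part of the plugin): shows how the
// Truncate/Add/Sub arithmetic above picks the next rotation boundary. With a
// 4 hour interval and a current time of 09:30 UTC, the previous boundary is
// 08:00 and the first rotation fires at 12:00. The interval value and the
// reference time are made up for the example; only the standard time package
// is used.
func exampleNextRotation() {
	interval := 4 * time.Hour
	now := time.Date(2014, 9, 19, 9, 30, 0, 0, time.UTC)

	last := now.Truncate(interval) // 2014-09-19 08:00:00 UTC
	next := last.Add(interval)     // 2014-09-19 12:00:00 UTC
	until := next.Sub(now)         // 2h30m until the first rotation

	fmt.Println(last, next, until)
}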
// Runs in a separate goroutine, waits for buffered data on the committer
// channel, writes it out to the filesystem, and puts the now empty buffer on
// the return channel for reuse.
func (o *FileOutput) committer(or OutputRunner, errChan chan error) {
	initBatch := newOutBatch()
	o.backChan <- initBatch
	var out *outBatch
	var err error

	ok := true
	hupChan := make(chan interface{})
	notify.Start(RELOAD, hupChan)

	for ok {
		select {
		case out, ok = <-o.batchChan:
			if !ok {
				// Channel is closed => we're shutting down, exit cleanly.
				o.file.Close()
				close(o.closing)
				break
			}
			n, err := o.file.Write(out.data)
			if err != nil {
				or.LogError(fmt.Errorf("Can't write to %s: %s", o.path, err))
			} else if n != len(out.data) {
				or.LogError(fmt.Errorf("data loss - truncated output for %s", o.path))
				or.UpdateCursor(out.cursor)
			} else {
				o.file.Sync()
				or.UpdateCursor(out.cursor)
			}
			out.data = out.data[:0]
			o.backChan <- out
		case <-hupChan:
			o.file.Close()
			if err = o.openFile(); err != nil {
				close(o.closing)
				err = fmt.Errorf("unable to reopen file '%s': %s", o.path, err)
				errChan <- err
				ok = false
				break
			}
		case rotateTime := <-o.rotateChan:
			o.file.Close()
			o.path = gostrftime.Strftime(o.FileOutputConfig.Path, rotateTime)
			if err = o.openFile(); err != nil {
				close(o.closing)
				err = fmt.Errorf("unable to open rotated file '%s': %s", o.path, err)
				errChan <- err
				ok = false
				break
			}
		}
	}
}
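// Illustrative sketch only: outBatch and newOutBatch are not shown in this
// excerpt. Based on how committer uses them (out.data, out.cursor, and the
// out.data = out.data[:0] reset before returning the buffer), they presumably
// look roughly like this. The field layout and the initial capacity are
// assumptions.
type outBatch struct {
	data   []byte // serialized output waiting to be written to disk
	cursor string // cursor of the last message included in the batch
}

func newOutBatch() *outBatch {
	return &outBatch{
		data: make([]byte, 0, 10000),
	}
}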
// Replaces a date pattern (e.g. %{2012.09.19}) in the index name.
func interpolateFlag(e *ElasticSearchCoordinates, m *message.Message, name string) (
	interpolatedValue string, err error) {

	iSlice := strings.Split(name, "%{")
	for i, element := range iSlice {
		elEnd := strings.Index(element, "}")
		if elEnd > -1 {
			elVal := element[:elEnd]
			switch elVal {
			case "Type":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetType(), -1)
			case "Hostname":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetHostname(), -1)
			case "Pid":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
					strconv.Itoa(int(m.GetPid())), -1)
			case "UUID":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetUuidString(), -1)
			case "Logger":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetLogger(), -1)
			case "EnvVersion":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], m.GetEnvVersion(), -1)
			case "Severity":
				iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
					strconv.Itoa(int(m.GetSeverity())), -1)
			default:
				if fname, ok := m.GetFieldValue(elVal); ok {
					iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1], fname.(string), -1)
				} else {
					var t time.Time
					if e.ESIndexFromTimestamp && m.Timestamp != nil {
						t = time.Unix(0, *m.Timestamp).UTC()
					} else {
						t = time.Now().UTC()
					}
					iSlice[i] = strings.Replace(iSlice[i], element[:elEnd+1],
						gostrftime.Strftime(elVal, t), -1)
				}
			}
			if iSlice[i] == elVal {
				err = fmt.Errorf("Could not interpolate field from config: %s", name)
			}
		}
	}
	interpolatedValue = strings.Join(iSlice, "")
	return
}
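// Illustrative sketch only: shows how an index name containing %{} markers
// might be interpolated. The concrete pattern, the hostname value, and the
// use of a strftime-style specifier (%Y.%m.%d) in the default branch are
// assumptions for the example; only well-known message fields, dynamic field
// names, and specifiers understood by gostrftime will resolve.
func exampleInterpolate(coord *ElasticSearchCoordinates, m *message.Message) {
	// "Hostname" hits its dedicated case; "%Y.%m.%d" falls through to the
	// default branch and is formatted by gostrftime.Strftime.
	index, err := interpolateFlag(coord, m, "logstash-%{Hostname}-%{%Y.%m.%d}")
	if err != nil {
		fmt.Println("interpolation failed:", err)
		return
	}
	fmt.Println(index) // e.g. "logstash-web1-2012.09.19"
}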
// Runs in a separate goroutine, waits for buffered data on the committer
// channel, writes it out to the filesystem, and puts the now empty buffer on
// the return channel for reuse.
func (self *FileOutput) committer(or plugins.OutputRunner, errChan chan error) {
	initBatch := newOutBatch()
	self.backChan <- initBatch
	var out *outBatch
	var err error

	ok := true
	for ok {
		select {
		case out, ok = <-self.batchChan:
			if !ok {
				// Channel is closed => we're shutting down, exit cleanly.
				self.file.Close()
				close(self.closing)
				break
			}
			n, err := self.file.Write(out.data)
			if err != nil {
				log.Println(fmt.Errorf("Can't write to %s: %s", self.path, err))
			} else if n != len(out.data) {
				log.Println(fmt.Errorf("data loss - truncated output for %s", self.path))
			} else {
				self.file.Sync()
			}
			out.data = out.data[:0]
			self.backChan <- out
		case rotateTime := <-self.rotateChan:
			self.file.Close()
			self.path = gostrftime.Strftime(self.config.Path, rotateTime)
			if err = self.openFile(); err != nil {
				close(self.closing)
				err = fmt.Errorf("unable to open rotated file '%s': %s", self.path, err)
				errChan <- err
				ok = false
				break
			}
		}
	}
}
func (pe *PayloadEncoder) Encode(pack *pipeline.PipelinePack) (output []byte, err error) {
	payload := pack.Message.GetPayload()

	if !pe.config.AppendNewlines && !pe.config.PrefixTs {
		// Just the payload, ma'am.
		output = []byte(payload)
		return
	}

	if !pe.config.PrefixTs {
		// Payload + newline.
		output = make([]byte, 0, len(payload)+1)
		output = append(output, []byte(payload)...)
		output = append(output, '\n')
		return
	}

	// We're using a timestamp.
	var tm time.Time
	if pe.config.TsFromMessage {
		tm = time.Unix(0, pack.Message.GetTimestamp())
	} else {
		tm = time.Now()
	}
	ts := gostrftime.Strftime(pe.config.TsFormat, tm)

	// Timestamp + payload [+ optional newline].
	l := len(ts) + len(payload)
	output = make([]byte, 0, l+1)
	output = append(output, []byte(ts)...)
	output = append(output, []byte(payload)...)
	if pe.config.AppendNewlines {
		output = append(output, '\n')
	}
	return
}
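// Illustrative sketch only: the PayloadEncoder config struct is not shown in
// this excerpt. From the pe.config accesses in Encode it presumably carries at
// least these four settings; the field names match the accesses above, but the
// struct name and the TOML tags are assumptions.
type PayloadEncoderConfig struct {
	AppendNewlines bool   `toml:"append_newlines"`
	PrefixTs       bool   `toml:"prefix_ts"`
	TsFromMessage  bool   `toml:"ts_from_message"`
	TsFormat       string `toml:"ts_format"`
}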
func (e *ESLogstashV0Encoder) Encode(pack *PipelinePack) (output []byte, err error) {
	m := pack.Message
	buf := bytes.Buffer{}
	e.coord.PopulateBuffer(pack.Message, &buf)
	buf.WriteByte(NEWLINE)
	buf.WriteString(`{`)

	first := true
	for _, f := range e.fields {
		switch strings.ToLower(f) {
		case "uuid":
			writeStringField(first, &buf, `@uuid`, m.GetUuidString())
		case "timestamp":
			t := time.Unix(0, m.GetTimestamp()).UTC()
			writeStringField(first, &buf, `@timestamp`,
				gostrftime.Strftime(e.timestampFormat, t))
		case "type":
			if e.useMessageType || len(e.coord.Type) < 1 {
				writeStringField(first, &buf, `@type`, m.GetType())
			} else {
				var interpType string
				interpType, err = interpolateFlag(e.coord, m, e.coord.Type)
				if len(interpType) > 0 && err == nil {
					writeStringField(first, &buf, `@type`, interpType)
				} else {
					// fall back on writing the uninterpolated string
					writeStringField(first, &buf, `@type`, e.coord.Type)
				}
			}
		case "logger":
			writeStringField(first, &buf, `@logger`, m.GetLogger())
		case "severity":
			writeIntField(first, &buf, `@severity`, m.GetSeverity())
		case "payload":
			writeStringField(first, &buf, `@message`, m.GetPayload())
		case "envversion":
			writeStringField(first, &buf, `@envversion`, m.GetEnvVersion())
		case "pid":
			writeIntField(first, &buf, `@pid`, m.GetPid())
		case "hostname":
			writeStringField(first, &buf, `@source_host`, m.GetHostname())
		case "dynamicfields":
			if !first {
				buf.WriteString(`,`)
			}
			buf.WriteString(`"@fields":{`)
			firstfield := true
			listsDynamicFields := len(e.dynamicFields) > 0
			for _, field := range m.Fields {
				dynamicFieldMatch := false
				if listsDynamicFields {
					for _, fieldName := range e.dynamicFields {
						if *field.Name == fieldName {
							dynamicFieldMatch = true
						}
					}
				} else {
					dynamicFieldMatch = true
				}
				if dynamicFieldMatch {
					raw := false
					if len(e.rawBytesFields) > 0 {
						for _, raw_field_name := range e.rawBytesFields {
							if *field.Name == raw_field_name {
								raw = true
							}
						}
					}
					writeField(firstfield, &buf, field, raw)
					firstfield = false
				}
			}
			buf.WriteString(`}`) // end of fields
		default:
			err = fmt.Errorf("Unable to find field: %s", f)
			return
		}
		first = false
	}

	buf.WriteString(`}`)
	buf.WriteByte(NEWLINE)
	return buf.Bytes(), err
}
func (e *ESJsonEncoder) Encode(pack *PipelinePack) (output []byte, err error) {
	m := pack.Message
	buf := bytes.Buffer{}
	e.coord.PopulateBuffer(pack.Message, &buf)
	buf.WriteByte(NEWLINE)
	buf.WriteString(`{`)

	first := true
	for _, f := range e.fields {
		switch strings.ToLower(f) {
		case "uuid":
			writeStringField(first, &buf, e.fieldMappings.Uuid, m.GetUuidString())
		case "timestamp":
			t := time.Unix(0, m.GetTimestamp()).UTC()
			writeStringField(first, &buf, e.fieldMappings.Timestamp,
				gostrftime.Strftime(e.timestampFormat, t))
		case "type":
			writeStringField(first, &buf, e.fieldMappings.Type, m.GetType())
		case "logger":
			writeStringField(first, &buf, e.fieldMappings.Logger, m.GetLogger())
		case "severity":
			writeIntField(first, &buf, e.fieldMappings.Severity, m.GetSeverity())
		case "payload":
			writeStringField(first, &buf, e.fieldMappings.Payload, m.GetPayload())
		case "envversion":
			writeStringField(first, &buf, e.fieldMappings.EnvVersion, m.GetEnvVersion())
		case "pid":
			writeIntField(first, &buf, e.fieldMappings.Pid, m.GetPid())
		case "hostname":
			writeStringField(first, &buf, e.fieldMappings.Hostname, m.GetHostname())
		case "dynamicfields":
			listsDynamicFields := len(e.dynamicFields) > 0
			for _, field := range m.Fields {
				dynamicFieldMatch := false
				if listsDynamicFields {
					for _, fieldName := range e.dynamicFields {
						if *field.Name == fieldName {
							dynamicFieldMatch = true
						}
					}
				} else {
					dynamicFieldMatch = true
				}
				if dynamicFieldMatch {
					raw := false
					if len(e.rawBytesFields) > 0 {
						for _, raw_field_name := range e.rawBytesFields {
							if *field.Name == raw_field_name {
								raw = true
							}
						}
					}
					writeField(first, &buf, field, raw)
					first = false
				}
			}
		default:
			err = fmt.Errorf("Unable to find field: %s", f)
			return
		}
		first = false
	}

	buf.WriteString(`}`)
	buf.WriteByte(NEWLINE)
	return buf.Bytes(), err
}
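// Illustrative sketch only: writeStringField and writeIntField are referenced
// by both encoders but not shown in this excerpt. Judging by the call sites,
// they presumably emit a single "name":value JSON pair, prefixing a comma for
// every pair after the first. The exact escaping rules and the int32 value
// type are assumptions here.
func writeStringField(first bool, b *bytes.Buffer, name string, value string) {
	if !first {
		b.WriteString(`,`)
	}
	// strconv.Quote both quotes and escapes the value.
	fmt.Fprintf(b, `"%s":%s`, name, strconv.Quote(value))
}

func writeIntField(first bool, b *bytes.Buffer, name string, value int32) {
	if !first {
		b.WriteString(`,`)
	}
	fmt.Fprintf(b, `"%s":%d`, name, value)
}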