func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder must be specified.")
    }
    var (
        e        error
        outBytes []byte
    )
    inChan := or.InChan()
    for pack := range inChan {
        outBytes, e = or.Encode(pack)
        pack.Recycle()
        if e != nil {
            or.LogError(e)
            continue
        }
        if outBytes == nil {
            continue
        }
        if e = o.request(or, outBytes); e != nil {
            or.LogError(e)
        }
    }
    return
}
func (k *KafkaOutput) processKafkaErrors(or pipeline.OutputRunner, errChan <-chan *sarama.ProducerError,
    shutdownChan chan struct{}, wg *sync.WaitGroup) {

    var (
        ok   = true
        pErr *sarama.ProducerError
    )
    for ok {
        select {
        case pErr, ok = <-errChan:
            if !ok {
                break
            }
            err := pErr.Err
            switch err.(type) {
            case sarama.PacketEncodingError:
                atomic.AddInt64(&k.kafkaEncodingErrors, 1)
                or.LogError(fmt.Errorf("kafka encoding error: %s", err.Error()))
            default:
                atomic.AddInt64(&k.kafkaDroppedMessages, 1)
                if err != nil {
                    msgValue, _ := pErr.Msg.Value.Encode()
                    or.LogError(fmt.Errorf("kafka error '%s' for message '%s'",
                        err.Error(), string(msgValue)))
                }
            }
        case <-shutdownChan:
            ok = false
        }
    }
    wg.Done()
}
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }
    var (
        outBytes []byte
        e        error
    )
    for pack := range or.InChan() {
        if outBytes, e = or.Encode(pack); e != nil {
            or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
        } else if outBytes != nil {
            msgSize := len(outBytes)
            if msgSize > o.UdpOutputConfig.MaxMessageSize {
                or.LogError(fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d",
                    msgSize, o.UdpOutputConfig.MaxMessageSize))
            } else {
                o.conn.Write(outBytes)
            }
        }
        pack.Recycle()
    }
    return
}
func (clo *CloudLoggingOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack       *pipeline.PipelinePack
        e          error
        k          string
        m          *logging.LogEntry
        exist      bool
        ok         = true
        inChan     = or.InChan()
        groupBatch = make(map[string]*LogBatch)
        outBatch   *LogBatch
        ticker     = time.Tick(time.Duration(clo.conf.FlushInterval) * time.Millisecond)
    )
    clo.or = or
    go clo.committer()

    for ok {
        select {
        case pack, ok = <-inChan:
            // Closed inChan => we're shutting down, flush data.
            if !ok {
                clo.sendGroupBatch(groupBatch)
                close(clo.batchChan)
                <-clo.outputExit
                break
            }
            k, m, e = clo.Encode(pack)
            pack.Recycle()
            if e != nil {
                or.LogError(e)
                continue
            }
            if k != "" && m != nil {
                outBatch, exist = groupBatch[k]
                if !exist {
                    outBatch = &LogBatch{count: 0, batch: make([]*logging.LogEntry, 0, 100), name: k}
                    groupBatch[k] = outBatch
                }
                outBatch.batch = append(outBatch.batch, m)
                if outBatch.count++; clo.CheckFlush(int(outBatch.count), len(outBatch.batch)) {
                    if len(outBatch.batch) > 0 {
                        outBatch.batch = clo.sendBatch(k, outBatch.batch, outBatch.count)
                        outBatch.count = 0
                    }
                }
            }
        case <-ticker:
            clo.sendGroupBatch(groupBatch)
        case err = <-clo.outputExit:
            ok = false
        }
    }
    return
}
func (cef *CefOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        facility, priority syslog.Priority
        ident              string
        ok                 bool
        p                  syslog.Priority
        e                  error
        pack               *pipeline.PipelinePack
    )
    syslogMsg := new(SyslogMsg)
    for pack = range or.InChan() {
        // Default values.
        facility, priority = syslog.LOG_LOCAL4, syslog.LOG_INFO
        ident = "heka_no_ident"

        priField := pack.Message.FindFirstField("cef_meta.syslog_priority")
        if priField != nil {
            priStr := priField.ValueString[0]
            if p, ok = SYSLOG_PRIORITY[priStr]; ok {
                priority = p
            }
        }
        facField := pack.Message.FindFirstField("cef_meta.syslog_facility")
        if facField != nil {
            facStr := facField.ValueString[0]
            if p, ok = SYSLOG_FACILITY[facStr]; ok {
                facility = p
            }
        }
        idField := pack.Message.FindFirstField("cef_meta.syslog_ident")
        if idField != nil {
            ident = idField.ValueString[0]
        }
        syslogMsg.priority = priority | facility
        syslogMsg.prefix = ident
        syslogMsg.payload = pack.Message.GetPayload()
        pack.Recycle()

        _, e = cef.syslogWriter.WriteString(syslogMsg.priority, syslogMsg.prefix, syslogMsg.payload)
        if e != nil {
            or.LogError(e)
        }
    }
    cef.syslogWriter.Close()
    return
}
func (cmo *CloudMonitoringOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack     *pipeline.PipelinePack
        e        error
        m        *cloudmonitoring.TimeseriesPoint
        ok       = true
        count    int64
        inChan   = or.InChan()
        outBatch = make([]*cloudmonitoring.TimeseriesPoint, 0, 200)
        ticker   = time.Tick(time.Duration(cmo.conf.FlushInterval) * time.Millisecond)
    )
    cmo.or = or
    go cmo.committer()

    for ok {
        select {
        case pack, ok = <-inChan:
            // Closed inChan => we're shutting down, flush data.
            if !ok {
                if len(outBatch) > 0 {
                    cmo.sendBatch(outBatch, count)
                }
                close(cmo.batchChan)
                <-cmo.outputExit
                break
            }
            m, e = cmo.Encode(pack)
            pack.Recycle()
            if e != nil {
                or.LogError(e)
                continue
            }
            if m != nil {
                outBatch = append(outBatch, m)
                if count++; cmo.CheckFlush(int(count), len(outBatch)) {
                    if len(outBatch) > 0 {
                        outBatch = cmo.sendBatch(outBatch, count)
                        count = 0
                    }
                }
            }
        case <-ticker:
            if len(outBatch) > 0 {
                outBatch = cmo.sendBatch(outBatch, count)
            }
            count = 0
        case err = <-cmo.outputExit:
            ok = false
        }
    }
    return
}
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    defer func() {
        k.producer.Close()
        k.client.Close()
    }()

    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }

    inChan := or.InChan()
    errChan := k.producer.Errors()
    var wg sync.WaitGroup
    wg.Add(1)
    go k.processKafkaErrors(or, errChan, &wg)

    var (
        pack  *pipeline.PipelinePack
        topic = k.config.Topic
        key   sarama.Encoder
    )
    for pack = range inChan {
        atomic.AddInt64(&k.processMessageCount, 1)
        if k.topicVariable != nil {
            topic = getMessageVariable(pack.Message, k.topicVariable)
        }
        if k.hashVariable != nil {
            key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
        }
        if msgBytes, err := or.Encode(pack); err == nil {
            if msgBytes != nil {
                err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
                if err != nil {
                    atomic.AddInt64(&k.processMessageFailures, 1)
                    or.LogError(err)
                }
            } else {
                atomic.AddInt64(&k.processMessageDiscards, 1)
            }
        } else {
            atomic.AddInt64(&k.processMessageFailures, 1)
            or.LogError(err)
        }
        pack.Recycle()
    }
    errChan <- Shutdown
    wg.Wait()
    return
}
func (rlo *RedisListOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    inChan := or.InChan()
    for pack := range inChan {
        payload := pack.Message.GetPayload()
        _, err := rlo.conn.Do("LPUSH", rlo.conf.ListName, payload)
        // Recycle the pack whether or not the push succeeded so it isn't leaked.
        pack.Recycle(nil)
        if err != nil {
            or.LogError(fmt.Errorf("Redis LPUSH error: %s", err))
        }
    }
    return nil
}
func (rop *RedisOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    inChan := or.InChan()
    for pack := range inChan {
        payload := pack.Message.GetPayload()
        _, err := rop.conn.Do("LPUSH", rop.conf.Key, payload)
        // Recycle the pack whether or not the push succeeded so it isn't leaked.
        pack.Recycle()
        if err != nil {
            or.LogError(err)
        }
    }
    return nil
}
func (k *KinesisOutput) HandlePackage(or pipeline.OutputRunner, pack *pipeline.PipelinePack) error {
    // If we are flushing, wait until we have finished.
    k.flushLock.Lock()
    defer k.flushLock.Unlock()

    // Encode the pack.
    msg, err := or.Encode(pack)
    if err != nil {
        errOut := fmt.Errorf("Error encoding message: %s", err)
        or.LogError(errOut)
        pack.Recycle(nil)
        return errOut
    }

    // If we only care about the payload...
    if k.config.PayloadOnly {
        msg = []byte(pack.Message.GetPayload())
    }

    var tmp []byte
    // If we already have data then we should append.
    if len(k.batchedData) > 0 {
        tmp = append(append(k.batchedData, []byte(",")...), msg...)
    } else {
        tmp = msg
    }

    // If we can't fit the data in this record...
    if len(tmp) > k.KINESIS_RECORD_SIZE {
        // Add the existing data to the output batch.
        array := append(append([]byte("["), k.batchedData...), []byte("]")...)
        k.AddToRecordBatch(or, array)

        // Update the batched data to only contain the current message.
        k.batchedData = msg
    } else {
        // Otherwise we add the current message to the batch.
        k.batchedData = tmp
    }

    // Do reporting and tidy up.
    atomic.AddInt64(&k.processMessageCount, 1)
    pack.Recycle(nil)
    return nil
}
func (k *KinesisOutput) SendEntries(or pipeline.OutputRunner, entries []*kin.PutRecordsRequestEntry,
    backoff time.Duration, retries int) error {

    k.hasTriedToSend = true

    multParams := &kin.PutRecordsInput{
        Records:    entries,
        StreamName: aws.String(k.config.Stream),
    }

    data, err := k.Client.PutRecords(multParams)

    // Update statistics & handle errors.
    if err != nil {
        if or != nil {
            or.LogError(fmt.Errorf("Batch: Error pushing message to Kinesis: %s", err))
        }
        atomic.AddInt64(&k.batchesFailed, 1)

        if retries <= k.config.MaxRetries || k.config.MaxRetries == -1 {
            atomic.AddInt64(&k.retryCount, 1)
            time.Sleep(backoff + k.backoffIncrement)

            // Filter down to only the failed records. When the request failed outright
            // there are no per-record responses, so retry everything.
            retryEntries := []*kin.PutRecordsRequestEntry{}
            if data != nil && len(data.Records) == len(entries) {
                for i, entry := range entries {
                    response := data.Records[i]
                    if response.ErrorCode != nil {
                        // In case we are rate limited, push the entry to a new shard.
                        entry.PartitionKey = aws.String(fmt.Sprintf("%d", rand.Int63()))
                        retryEntries = append(retryEntries, entry)
                    }
                }
            } else {
                retryEntries = entries
            }
            k.SendEntries(or, retryEntries, backoff+k.backoffIncrement, retries+1)
        } else {
            atomic.AddInt64(&k.dropMessageCount, int64(len(entries)))
            if or != nil {
                or.LogError(fmt.Errorf("Batch: Hit max retries when attempting to send data"))
            }
        }
    }

    atomic.AddInt64(&k.batchesSent, 1)
    return nil
}
func (k *KafkaOutput) processKafkaErrors(or pipeline.OutputRunner, errChan chan error, wg *sync.WaitGroup) {
shutdown:
    for err := range errChan {
        switch err {
        case nil:
        case Shutdown:
            break shutdown
        case sarama.EncodingError:
            atomic.AddInt64(&k.kafkaEncodingErrors, 1)
        default:
            if e, ok := err.(sarama.DroppedMessagesError); ok {
                atomic.AddInt64(&k.kafkaDroppedMessages, int64(e.DroppedMessages))
            }
            or.LogError(err)
        }
    }
    wg.Done()
}
func (o *UnixOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }
    var (
        outBytes []byte
        e        error
    )
    for pack := range or.InChan() {
        if outBytes, e = or.Encode(pack); e != nil {
            or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
        } else if outBytes != nil {
            o.conn.Write(outBytes)
        }
        pack.Recycle()
    }
    return
}
func (ko *KeenOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    for pack := range or.InChan() {
        payload := pack.Message.GetPayload()
        pack.Recycle()
        event := make(map[string]interface{})
        err := json.Unmarshal([]byte(payload), &event)
        if err != nil {
            or.LogError(err)
            continue
        }
        err = ko.client.AddEvent(ko.collection, event)
        if err != nil {
            or.LogError(err)
        }
    }
    return nil
}
func (f *FirehoseOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    for pack := range or.InChan() {
        payload := pack.Message.GetPayload()
        timestamp := time.Unix(0, pack.Message.GetTimestamp()).Format("2006-01-02 15:04:05.000")
        pack.Recycle(nil)

        // Verify input is valid JSON.
        object := make(map[string]interface{})
        err := json.Unmarshal([]byte(payload), &object)
        if err != nil {
            or.LogError(err)
            continue
        }

        if f.timestampColumn != "" {
            // Add the Heka message's timestamp to the column named in timestampColumn.
            object[f.timestampColumn] = timestamp
        }

        record, err := json.Marshal(object)
        if err != nil {
            or.LogError(err)
            continue
        }

        // Send data to the firehose.
        err = f.client.PutRecord(record)
        if err != nil {
            or.LogError(err)
            continue
        }
    }
    return nil
}
func (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {
    var (
        pack   *pipeline.PipelinePack
        msg    []byte
        pk     string
        err    error
        params *kin.PutRecordInput
    )

    if or.Encoder() == nil {
        return fmt.Errorf("Encoder required.")
    }

    for pack = range or.InChan() {
        msg, err = or.Encode(pack)
        if err != nil {
            or.LogError(fmt.Errorf("Error encoding message: %s", err))
            pack.Recycle(nil)
            continue
        }

        // Use the getters so the pointer-typed message fields are dereferenced.
        pk = fmt.Sprintf("%d-%s", pack.Message.GetTimestamp(), pack.Message.GetHostname())

        if k.config.PayloadOnly {
            msg = []byte(pack.Message.GetPayload())
        }

        params = &kin.PutRecordInput{
            Data:         msg,
            PartitionKey: aws.String(pk),
            StreamName:   aws.String(k.config.Stream),
        }
        _, err = k.Client.PutRecord(params)
        if err != nil {
            or.LogError(fmt.Errorf("Error pushing message to Kinesis: %s", err))
            pack.Recycle(nil)
            continue
        }
        pack.Recycle(nil)
    }
    return nil
}
func (o *OpenTsdbOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder must be specified.")
    }
    var (
        e        error
        outBytes []byte
    )
    inChan := or.InChan()

    // Start the background writers that drain logMsgChan into OpenTSDB.
    for i := 0; i < o.TsdbWriterCount; i++ {
        go WriteDataToOpenTSDB(o)
    }

    for pack := range inChan {
        outBytes, e = or.Encode(pack)
        pack.Recycle(e)
        if e != nil {
            or.LogError(e)
            continue
        }
        if outBytes == nil {
            continue
        }
        o.logMsgChan <- outBytes
    }
    return
}
func (cwo *CloudwatchOutput) Submitter(payloads chan CloudwatchDatapoints, or pipeline.OutputRunner) {
    var (
        payload  CloudwatchDatapoints
        curTry   int
        backOff  time.Duration = time.Duration(10) * time.Millisecond
        err      error
        stopping bool
    )
    curDuration := backOff

    for !stopping {
        select {
        case stopping = <-cwo.stopChan:
            continue
        case payload = <-payloads:
            // Retry with exponential backoff until the put succeeds or retries run out.
            for curTry < cwo.retries {
                _, err = cwo.cw.PutMetricData(payload.Datapoints)
                if err != nil {
                    curTry += 1
                    time.Sleep(curDuration)
                    curDuration *= 2
                } else {
                    break
                }
            }
            curDuration = backOff
            curTry = 0
            if err != nil {
                or.LogError(err)
                err = nil
            }
        }
    }
    close(cwo.stopChan)
}
func (so *SentryOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        udpAddrStr string
        udpAddr    *net.UDPAddr
        socket     net.Conn
        e          error
        ok         bool
        pack       *pipeline.PipelinePack
    )
    sentryMsg := &SentryMsg{
        dataPacket: make([]byte, 0, so.config.MaxSentryBytes),
    }

    for pack = range or.InChan() {
        e = so.prepSentryMsg(pack, sentryMsg)
        pack.Recycle()
        if e != nil {
            or.LogError(e)
            continue
        }

        udpAddrStr = sentryMsg.parsedDsn.Host
        if socket, ok = so.udpMap[udpAddrStr]; !ok {
            if len(so.udpMap) > so.config.MaxUdpSockets {
                or.LogError(fmt.Errorf("Max # of UDP sockets [%d] reached.", so.config.MaxUdpSockets))
                continue
            }
            if udpAddr, e = net.ResolveUDPAddr("udp", udpAddrStr); e != nil {
                or.LogError(fmt.Errorf("can't resolve UDP address %s: %s", udpAddrStr, e))
                continue
            }
            if socket, e = net.DialUDP("udp", nil, udpAddr); e != nil {
                or.LogError(fmt.Errorf("can't dial UDP socket: %s", e))
                continue
            }
            so.udpMap[sentryMsg.parsedDsn.Host] = socket
        }
        socket.Write(sentryMsg.dataPacket)
    }
    return
}
func (no *NsqOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        encoder client.Encoder
        msg     *message.Message
        msgBody []byte = make([]byte, 0, 1024)
        pack    *pipeline.PipelinePack
    )
    conf := no.conf
    encoder = client.NewProtobufEncoder(nil)

    for pack = range or.InChan() {
        if conf.Serialize {
            msg = pack.Message
            if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
                or.LogError(err)
                err = nil
                pack.Recycle()
                continue
            }
            _, _, err = no.nsqwriter.Publish(conf.Topic, msgBody)
            if err != nil {
                or.LogError(fmt.Errorf("error in writer.Publish"))
            }
            msgBody = msgBody[:0]
        } else {
            err = no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
            if err != nil {
                or.LogError(fmt.Errorf("error in writer.PublishAsync"))
            }
        }
        pack.Recycle()
    }
    return
}
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack      *pipeline.PipelinePack
        retval    int
        inChan    = or.InChan()
        duration  int64
        startTime time.Time
        ok        = true
        ticker    = or.Ticker()
    )

    for ok {
        select {
        case pack, ok = <-inChan:
            if !ok {
                break
            }
            if s.sample {
                startTime = time.Now()
            }
            retval = s.sb.ProcessMessage(pack)
            if s.sample {
                duration = time.Since(startTime).Nanoseconds()
                s.reportLock.Lock()
                s.processMessageDuration += duration
                s.processMessageSamples++
                s.reportLock.Unlock()
            }
            s.sample = 0 == rand.Intn(s.sampleDenominator)
            or.UpdateCursor(pack.QueueCursor)

            // TODO: support retries?
            if retval == 0 {
                atomic.AddInt64(&s.processMessageCount, 1)
                pack.Recycle(nil)
            } else if retval < 0 {
                atomic.AddInt64(&s.processMessageFailures, 1)
                var e error
                em := s.sb.LastError()
                if len(em) > 0 {
                    e = errors.New(em)
                }
                pack.Recycle(e)
            } else {
                err = fmt.Errorf("FATAL: %s", s.sb.LastError())
                pack.Recycle(err)
                ok = false
            }
        case t := <-ticker:
            startTime = time.Now()
            if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
                err = fmt.Errorf("FATAL: %s", s.sb.LastError())
                ok = false
            }
            duration = time.Since(startTime).Nanoseconds()
            s.reportLock.Lock()
            s.timerEventDuration += duration
            s.timerEventSamples++
            s.reportLock.Unlock()
        }
    }

    if err == nil && s.sbc.TimerEventOnShutdown {
        if retval = s.sb.TimerEvent(time.Now().UnixNano()); retval != 0 {
            err = fmt.Errorf("FATAL: %s", s.sb.LastError())
        }
    }

    destroyErr := s.destroy()
    if destroyErr != nil {
        if err != nil {
            or.LogError(err)
        }
        err = destroyErr
    }
    return err
}
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack      *pipeline.PipelinePack
        retval    int
        inChan    = or.InChan()
        duration  int64
        startTime time.Time
        ok        = true
        ticker    = or.Ticker()
    )

    for ok {
        select {
        case pack, ok = <-inChan:
            if !ok {
                break
            }
            if s.sample {
                startTime = time.Now()
            }
            retval = s.sb.ProcessMessage(pack)
            if s.sample {
                duration = time.Since(startTime).Nanoseconds()
                s.reportLock.Lock()
                s.processMessageDuration += duration
                s.processMessageSamples++
                s.reportLock.Unlock()
            }
            s.sample = 0 == rand.Intn(s.sampleDenominator)
            pack.Recycle()

            if retval == 0 {
                atomic.AddInt64(&s.processMessageCount, 1)
            } else if retval < 0 {
                atomic.AddInt64(&s.processMessageFailures, 1)
                em := s.sb.LastError()
                if len(em) > 0 {
                    or.LogError(errors.New(em))
                }
            } else {
                err = fmt.Errorf("FATAL: %s", s.sb.LastError())
                ok = false
            }
        case t := <-ticker:
            startTime = time.Now()
            if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
                err = fmt.Errorf("FATAL: %s", s.sb.LastError())
                ok = false
            }
            duration = time.Since(startTime).Nanoseconds()
            s.reportLock.Lock()
            s.timerEventDuration += duration
            s.timerEventSamples++
            s.reportLock.Unlock()
        }
    }

    s.reportLock.Lock()
    var destroyErr error
    if s.sbc.PreserveData {
        destroyErr = s.sb.Destroy(s.preservationFile)
    } else {
        destroyErr = s.sb.Destroy("")
    }
    if destroyErr != nil {
        err = destroyErr
    }
    s.sb = nil
    s.reportLock.Unlock()
    return
}
func (output *NsqOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (err error) {
    if runner.Encoder() == nil {
        return errors.New("Encoder required.")
    }

    var (
        pack     *pipeline.PipelinePack
        outgoing []byte
        msg      RetryMsg
    )
    output.runner = runner
    inChan := runner.InChan()
    ok := true

    defer output.cleanup()

    for ok {
        select {
        case pack, ok = <-inChan:
            if !ok {
                return nil
            }
            outgoing, err = output.runner.Encode(pack)
            if err != nil {
                runner.LogError(err)
            } else {
                err = output.sendMessage(outgoing)
                if err != nil {
                    output.runner.LogError(err)
                    err = output.retryHelper.Wait()
                    if err != nil {
                        return
                    }
                    // Create a retry msg and requeue it.
                    msg := RetryMsg{Body: outgoing, retryChan: output.retryChan, maxCount: output.MaxMsgRetries}
                    err = msg.Retry()
                    if err != nil {
                        output.runner.LogError(err)
                    }
                } else {
                    output.retryHelper.Reset()
                }
            }
            pack.Recycle()
        case msg, ok = <-output.retryChan:
            if !ok {
                return nil
            }
            err = output.sendMessage(msg.Body)
            if err != nil {
                output.runner.LogError(err)
                err = output.retryHelper.Wait()
                if err != nil {
                    return
                }
                // Requeue the message.
                err = msg.Retry()
                if err != nil {
                    output.runner.LogError(err)
                }
            } else {
                output.retryHelper.Reset()
            }
        }
    }
    return nil
}
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    defer func() {
        k.producer.Close()
        k.client.Close()
    }()

    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }

    inChan := or.InChan()
    errChan := k.producer.Errors()
    pInChan := k.producer.Input()
    shutdownChan := make(chan struct{})
    var wg sync.WaitGroup
    wg.Add(1)
    go k.processKafkaErrors(or, errChan, shutdownChan, &wg)

    var (
        pack  *pipeline.PipelinePack
        topic = k.config.Topic
        key   sarama.Encoder
    )
    for pack = range inChan {
        atomic.AddInt64(&k.processMessageCount, 1)
        if k.topicVariable != nil {
            topic = getMessageVariable(pack.Message, k.topicVariable)
        }
        if k.hashVariable != nil {
            key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
        }

        msgBytes, err := or.Encode(pack)
        if err != nil {
            atomic.AddInt64(&k.processMessageFailures, 1)
            or.LogError(err)
            // Don't retry encoding errors.
            or.UpdateCursor(pack.QueueCursor)
            pack.Recycle(nil)
            continue
        }
        if msgBytes == nil {
            atomic.AddInt64(&k.processMessageDiscards, 1)
            or.UpdateCursor(pack.QueueCursor)
            pack.Recycle(nil)
            continue
        }
        pMessage := &sarama.ProducerMessage{
            Topic: topic,
            Key:   key,
            Value: sarama.ByteEncoder(msgBytes),
        }
        pInChan <- pMessage
        pack.Recycle(nil)
    }

    close(shutdownChan)
    wg.Wait()
    return
}
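// All of the Run methods above follow the same plugin contract: Heka builds the
// output through a factory registered in an init function, calls Init with the
// decoded TOML config, and then calls Run with an OutputRunner. The following is
// a minimal sketch of that wiring, assuming a hypothetical ExampleOutput plugin:
// RegisterPlugin, ConfigStruct, Init, Run, Encoder, InChan, Encode, LogError and
// Recycle are the Heka calls already used above, while ExampleOutput,
// ExampleOutputConfig, the "address" setting, and the send helper are made-up
// names for illustration only. It also assumes the older no-argument
// pack.Recycle() seen in several of the plugins above.
package example

import (
    "errors"
    "fmt"

    "github.com/mozilla-services/heka/pipeline"
)

// ExampleOutputConfig is a hypothetical TOML config section for the plugin.
type ExampleOutputConfig struct {
    Address string `toml:"address"`
}

type ExampleOutput struct {
    conf *ExampleOutputConfig
}

// ConfigStruct tells Heka which struct to decode the plugin's TOML section into.
func (o *ExampleOutput) ConfigStruct() interface{} {
    return &ExampleOutputConfig{}
}

// Init receives the decoded config before Run is started.
func (o *ExampleOutput) Init(config interface{}) error {
    o.conf = config.(*ExampleOutputConfig)
    if o.conf.Address == "" {
        return errors.New("address must be specified")
    }
    return nil
}

// Run drains the runner's input channel and hands each encoded message to a
// stand-in send helper; the error handling mirrors the simpler outputs above
// (log, recycle, continue).
func (o *ExampleOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }
    for pack := range or.InChan() {
        outBytes, e := or.Encode(pack)
        pack.Recycle()
        if e != nil {
            or.LogError(e)
            continue
        }
        if outBytes == nil {
            continue
        }
        if e = o.send(outBytes); e != nil {
            or.LogError(fmt.Errorf("delivery failed: %s", e))
        }
    }
    return
}

// send is a placeholder for the transport-specific write performed by the real
// plugins above (HTTP request, UDP write, Kafka produce, Kinesis put, ...).
func (o *ExampleOutput) send(b []byte) error {
    return nil
}

func init() {
    pipeline.RegisterPlugin("ExampleOutput", func() interface{} {
        return new(ExampleOutput)
    })
}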