// receiver reads packs from the input channel, runs them through the
// decoder and encoder, and accumulates the encoded bytes into a batch.
// On every flush-interval tick the batch is handed to the writer side
// via batchChan.
func (self *FileOutput) receiver(runner plugins.OutputRunner, errChan chan error) (err error) {
    var (
        pack          *plugins.PipelinePack
        timer         *time.Timer
        timerDuration time.Duration
        msgCounter    uint32
        outBytes      []byte
    )
    ok := true
    out := newOutBatch()
    inChan := runner.InChan()

    timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
    if self.config.FlushInterval > 0 {
        timer = time.NewTimer(timerDuration)
        if self.timerChan == nil { // Tests might have set this already.
            self.timerChan = timer.C
        }
    }

    for ok {
        select {
        case pack = <-inChan:
            pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
            if err != nil {
                log.Printf("PipeDecoder: %s", err)
                pack.Recycle()
                continue
            }
            pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
            if err != nil {
                log.Printf("PipeEncoder: %s", err)
                pack.Recycle()
                continue
            }
            outBytes = pack.Msg.MsgBytes
            if len(outBytes) > 0 {
                out.data = append(out.data, outBytes...)
                out.data = append(out.data, '\n')
                msgCounter++
            }
            // Recycle the pack even when the encoded payload is empty;
            // the original skipped Recycle on empty output and leaked a
            // pack from the pool for every empty message.
            pack.Recycle()

        case <-self.timerChan:
            // This will block until the other side is ready to accept
            // this batch, freeing us to start on the next one.
            // msgCounter is a uint32, so the original `>= 0` test was
            // always true and swapped empty batches on every tick; only
            // flush when the batch actually holds messages.
            if msgCounter > 0 {
                self.batchChan <- out
                out = <-self.backChan
                msgCounter = 0
            }
            timer.Reset(timerDuration)

        case err = <-errChan:
            ok = false
        }
    }
    return err
}
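receiver never touches the disk itself: it swaps batches with a writer goroutine over batchChan and backChan. Neither outBatch nor the writer side appears in this excerpt, so the following is only a minimal sketch of what they are assumed to look like; the file field and the error handling are illustrative, not taken from the source.

// Assumed batch type: a plain byte accumulator. Note that the Kafka
// output below appends *proto.Message values to data instead, so each
// plugin package presumably defines its own batch type.
type outBatch struct {
    data []byte
}

func newOutBatch() *outBatch {
    return &outBatch{data: make([]byte, 0, 4096)}
}

// Sketch of the writer side of the exchange, assuming a hypothetical
// `file *os.File` field on FileOutput. It writes each full batch,
// reports failures on errChan (which makes receiver stop), and returns
// the emptied batch on backChan so its backing array is reused.
func (self *FileOutput) writer(errChan chan error) {
    for b := range self.batchChan {
        if _, err := self.file.Write(b.data); err != nil {
            errChan <- err
            return
        }
        b.data = b.data[:0] // reuse the underlying buffer
        self.backChan <- b
    }
}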
// Run drains the runner's input channel, batches encoded messages, and
// swaps batches with the committer goroutine. A ticker periodically
// re-dials the broker after a reported error.
func (self *KafkaOutput) Run(runner plugins.OutputRunner) (err error) {
    var (
        timer         *time.Timer
        timerDuration time.Duration
        pack          *plugins.PipelinePack
    )
    errChan := make(chan error, 1)
    go self.committer(runner, errChan)

    out := newOutBatch()
    message := &proto.Message{Value: nil}
    ok := true
    ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()
    timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
    timer = time.NewTimer(timerDuration)
    defer timer.Stop()

    if self.distributingProducer != nil {
        for ok {
            select {
            case pack = <-runner.InChan():
                pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
                if err != nil {
                    log.Printf("PipeDecoder: %s", err)
                    pack.Recycle()
                    continue
                }
                pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
                if err != nil {
                    log.Printf("PipeEncoder: %s", err)
                    pack.Recycle()
                    continue
                }
                message = &proto.Message{Value: pack.Msg.MsgBytes}
                out.data = append(out.data, message)
                pack.Recycle()

            case <-timer.C:
                // Swap the batch with the committer. This blocks until the
                // committer is ready, which applies back-pressure here.
                self.batchChan <- out
                out = <-self.backChan
                timer.Reset(timerDuration)

            case <-ticker.C:
                // Pick up any produce error the committer reported. The
                // original code only examined the stale decode/encode
                // error and never read from errChan, so committer
                // failures could go unnoticed.
                select {
                case err = <-errChan:
                default:
                }
                if err != nil {
                    bcf := kafka.NewBrokerConf(self.config.ClientId)
                    //bcf.AllowTopicCreation = true
                    // Reconnect to the kafka cluster; err is kept on a
                    // failed dial so the next tick retries.
                    self.broker, err = kafka.Dial(self.config.Addrs, bcf)
                    if err != nil {
                        log.Printf("cannot reconnect to kafka cluster: %s", err)
                    }
                }
            }
        }
    } else {
        // No distributing producer: produce each message synchronously
        // to the configured topic and partition.
        for {
            pack = <-runner.InChan()
            message = &proto.Message{Value: pack.Msg.MsgBytes}
            if _, err = self.producer.Produce(self.config.Topic, self.config.Partition, message); err != nil {
                log.Printf("cannot produce message to %s:%d: %s",
                    self.config.Topic, self.config.Partition, err)
            }
            pack.Recycle()
        }
    }
    return nil
}
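Run never produces to Kafka in the distributing case; that happens in committer, which is not part of this excerpt. Below is a minimal sketch of it, under the assumptions that distributingProducer is an optiopay/kafka DistributingProducer (whose Distribute method takes a topic and a variadic list of *proto.Message) and that out.data here is a []*proto.Message. The names mirror the fields Run already uses; everything else is illustrative.

// Sketch of the assumed committer goroutine: it receives full batches on
// batchChan, distributes them across partitions, reports failures on
// errChan without blocking, and returns the emptied batch on backChan
// for reuse. runner is accepted to match the call in Run but unused here.
func (self *KafkaOutput) committer(runner plugins.OutputRunner, errChan chan error) {
    for out := range self.batchChan {
        if len(out.data) > 0 {
            if _, err := self.distributingProducer.Distribute(self.config.Topic, out.data...); err != nil {
                log.Printf("cannot distribute messages to %s: %s", self.config.Topic, err)
                select {
                case errChan <- err: // Run drains this on its ticker
                default: // don't block if an earlier error is still pending
                }
            }
        }
        out.data = out.data[:0] // reuse the backing array
        self.backChan <- out
    }
}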