// Run pulls packs off the input channel and writes each payload to the
// UDP connection, dropping any message larger than the configured
// maximum UDP data size.
func (self *UdpOutput) Run(runner plugins.OutputRunner) error {
	var (
		outBytes []byte
		e        error
	)
	for pack := range runner.InChan() {
		outBytes = pack.MsgBytes
		msgSize := len(outBytes)
		if msgSize > self.config.MaxMessageSize {
			e = fmt.Errorf("message exceeds allowed UDP data size: %d > %d",
				msgSize, self.config.MaxMessageSize)
			log.Println(e)
		} else if _, err := self.conn.Write(outBytes); err != nil {
			e = err
			log.Println("UDP write failed:", err)
		}
		pack.Recycle() // always return the pack to the pool
	}
	return e // last error seen; nil if every write succeeded
}
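
// Sketch: where self.conn might come from. This helper and its name are
// assumptions for illustration (the excerpt above never shows the
// connection being opened); only net.Dial is standard library (needs a
// "net" import). Dial with "udp" returns a connected socket, so each
// conn.Write in Run sends exactly one datagram.
func (self *UdpOutput) initConnSketch(address string) (err error) {
	self.conn, err = net.Dial("udp", address) // e.g. "127.0.0.1:4880"
	return err
}
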
// Run connects to MongoDB, ensures the target collection exists
// (optionally capped), then inserts each incoming pack as a document.
func (self *MongodbOutput) Run(runner plugins.OutputRunner) error {
	// URL form: [mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options]
	url := "mongodb://"
	if len(self.config.User) != 0 && len(self.config.Password) != 0 {
		url += self.config.User + ":" + self.config.Password + "@"
	}
	url += self.config.Host + ":" + self.config.Port + "/" + self.config.Database

	session, err := mgo.Dial(url)
	if err != nil {
		log.Println("mgo.Dial failed, err:", err)
		return err
	}
	defer session.Close()

	info := &mgo.CollectionInfo{
		Capped:   self.config.Capped,
		MaxBytes: self.config.CappedSize * 1024 * 1024,
	}
	coll := session.DB(self.config.Database).C(self.config.Collection)
	err = coll.Create(info)
	// An already-existing collection is not a fatal error.
	if err != nil && err.Error() != "collection already exists" {
		return err
	}

	for {
		// Refresh drops any dead sockets so a lost connection can recover;
		// the collection handle must be re-acquired afterwards.
		session.Refresh()
		coll := session.DB(self.config.Database).C(self.config.Collection)

		pack := <-runner.InChan()
		if err = coll.Insert(pack.Msg.Data); err != nil {
			self.FailedCount++
			log.Println("insert failed, count=", self.FailedCount, "err:", err)
		}
		pack.Recycle()
	}
}
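
// Sketch of the MongodbOutput config implied by the fields Run reads.
// The struct name is hypothetical; the field set (Host, Port, User,
// Password, Database, Collection, Capped, CappedSize) is exactly what
// the code above references.
type mongodbOutputConfigSketch struct {
	Host       string // e.g. "127.0.0.1"
	Port       string // kept as a string because Run concatenates it into the URL
	User       string
	Password   string
	Database   string
	Collection string
	Capped     bool // create a capped (fixed-size, FIFO) collection
	CappedSize int  // capped size in MB; Run multiplies by 1024*1024 for MaxBytes
}
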
// Run pipes each pack through the configured decoder and encoder
// plugins, then prints the encoded bytes to stdout.
func (self *StdoutOutput) Run(runner plugins.OutputRunner) (err error) {
	for {
		pack := <-runner.InChan()

		pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
		if err != nil {
			log.Printf("PipeDecoder :%s", err)
			pack.Recycle()
			continue
		}
		pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
		if err != nil {
			log.Printf("PipeEncoder :%s", err)
			pack.Recycle()
			continue
		}

		fmt.Printf("%s\n", pack.Msg.MsgBytes)
		pack.Recycle()
	}
}
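
// Sketch: fmt.Printf above issues one unbuffered write to os.Stdout per
// message. If stdout throughput ever matters, a bufio.Writer is a
// drop-in alternative; standard library only (needs "bufio" and "os"
// imports), the helper name is illustrative.
func newBufferedStdoutSketch() (*bufio.Writer, func() error) {
	w := bufio.NewWriter(os.Stdout)
	// The returned flush func should run on shutdown (or on a timer)
	// so the tail of the stream is not left in the buffer.
	return w, w.Flush
}
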
// receiver accumulates encoded messages into a batch and hands the
// batch to the writer goroutine whenever the flush timer fires.
func (self *FileOutput) receiver(runner plugins.OutputRunner, errChan chan error) (err error) {
	var (
		pack          *plugins.PipelinePack
		timer         *time.Timer
		timerDuration time.Duration
		msgCounter    uint32
		outBytes      []byte
	)
	ok := true
	out := newOutBatch()
	inChan := runner.InChan()

	timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
	if self.config.FlushInterval > 0 {
		timer = time.NewTimer(timerDuration)
		if self.timerChan == nil { // Tests might have set this already.
			self.timerChan = timer.C
		}
	}

	for ok {
		select {
		case pack = <-inChan:
			pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
			if err != nil {
				log.Printf("PipeDecoder :%s", err)
				pack.Recycle()
				continue
			}
			pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
			if err != nil {
				log.Printf("PipeEncoder :%s", err)
				pack.Recycle()
				continue
			}
			outBytes = pack.Msg.MsgBytes
			if len(outBytes) > 0 {
				out.data = append(out.data, outBytes...)
				out.data = append(out.data, '\n')
				msgCounter++
			}
			pack.Recycle() // recycle even when the payload was empty
		case <-self.timerChan:
			// Only flush non-empty batches (the original >= 0 test on a
			// uint32 was always true). The send blocks until the writer
			// is ready to accept this batch, freeing us to start the next.
			if msgCounter > 0 {
				self.batchChan <- out
				out = <-self.backChan
				msgCounter = 0
			}
			timer.Reset(timerDuration)
		case err = <-errChan:
			ok = false
		}
	}
	return err
}
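
// The batchChan/backChan pair above is a two-goroutine double buffer:
// the receiver fills one batch while the writer drains the other, and
// the emptied batch travels back on backChan for reuse. A self-contained
// sketch of the same pattern (function and channel names are
// illustrative, standard library only):
func doubleBufferSketch(write func([]byte)) (batchChan, backChan chan []byte) {
	batchChan = make(chan []byte, 1)
	backChan = make(chan []byte, 1)
	backChan <- make([]byte, 0, 4096) // seed one spare buffer
	go func() {
		for b := range batchChan {
			write(b)          // flush the full batch
			backChan <- b[:0] // return the emptied buffer for reuse
		}
	}()
	return batchChan, backChan
}

// Usage sketch: batch, back := doubleBufferSketch(w); fill a buffer,
// send it on batch, then take the spare from back and keep filling.
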
// Run batches packs for the distributing producer, flushing on the
// timer, or produces synchronously when no distributing producer is
// configured. A side ticker retries the broker connection after errors.
func (self *KafkaOutput) Run(runner plugins.OutputRunner) (err error) {
	var (
		timer         *time.Timer
		timerDuration time.Duration
		pack          *plugins.PipelinePack
	)
	errChan := make(chan error, 1)
	go self.committer(runner, errChan)

	out := newOutBatch()
	ok := true
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
	timer = time.NewTimer(timerDuration)

	if self.distributingProducer != nil {
		for ok {
			select {
			case pack = <-runner.InChan():
				pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
				if err != nil {
					log.Printf("PipeDecoder :%s", err)
					pack.Recycle()
					continue
				}
				pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
				if err != nil {
					log.Printf("PipeEncoder :%s", err)
					pack.Recycle()
					continue
				}
				out.data = append(out.data, &proto.Message{Value: pack.Msg.MsgBytes})
				pack.Recycle()
			case <-timer.C:
				// Hand the batch to the committer and take back an empty one.
				self.batchChan <- out
				out = <-self.backChan
				timer.Reset(timerDuration)
			case <-ticker.C:
				// err is sticky from the cases above; after any recent
				// failure, try to re-establish the broker connection.
				if err != nil {
					bcf := kafka.NewBrokerConf(self.config.ClientId)
					// bcf.AllowTopicCreation = true
					self.broker, err = kafka.Dial(self.config.Addrs, bcf)
					if err != nil {
						log.Printf("cannot reconnect to kafka cluster: %s", err)
					}
				}
			}
		}
	} else {
		for {
			pack = <-runner.InChan()
			message := &proto.Message{Value: pack.Msg.MsgBytes}
			if _, err = self.producer.Produce(self.config.Topic, self.config.Partition, message); err != nil {
				log.Printf("cannot produce message to %s:%d: %s",
					self.config.Topic, self.config.Partition, err)
			}
			pack.Recycle()
		}
	}
	return nil
}
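
// The proto.Message / kafka.NewBrokerConf / kafka.Dial calls above match
// the github.com/optiopay/kafka client. A minimal sketch of how the
// broker and the plain producer used in the else branch might be built;
// the helper name and the addrs/clientID values are placeholders, and
// RequiredAcksLocal is just one reasonable choice.
func dialKafkaSketch(addrs []string, clientID string) (*kafka.Broker, kafka.Producer, error) {
	bcf := kafka.NewBrokerConf(clientID)
	broker, err := kafka.Dial(addrs, bcf) // e.g. addrs = []string{"localhost:9092"}
	if err != nil {
		return nil, nil, err
	}
	pcf := kafka.NewProducerConf()
	pcf.RequiredAcks = proto.RequiredAcksLocal // wait for the leader's ack only
	return broker, broker.Producer(pcf), nil
}
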