Example no. 1
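// receiver pulls packs off the input channel, appends each encoded payload
// to the current batch, and hands the batch to the committer side whenever
// the flush timer fires; a committer error ends the loop.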
func (self *FileOutput) receiver(runner plugins.OutputRunner, errChan chan error) (err error) {
	var (
		pack          *plugins.PipelinePack
		timer         *time.Timer
		timerDuration time.Duration
		msgCounter    uint32
		outBytes      []byte
	)
	ok := true
	out := newOutBatch()
	inChan := runner.InChan()

	timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
	if self.config.FlushInterval > 0 {
		timer = time.NewTimer(timerDuration)
		if self.timerChan == nil { // Tests might have set this already.
			self.timerChan = timer.C
		}
	}

	for ok {
		select {
		case pack = <-inChan:
			pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
			if err != nil {
				log.Printf("PipeDecoder :%s", err)
				pack.Recycle()
				continue
			}
			pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
			if err != nil {
				log.Printf("PipeEncoder :%s", err)
				pack.Recycle()
				continue
			}
			outBytes = pack.Msg.MsgBytes

			if len(outBytes) == 0 {
				// Nothing to write, but the pack must still be recycled.
				pack.Recycle()
				continue
			}
			out.data = append(out.data, outBytes...)
			out.data = append(out.data, '\n')
			msgCounter++
			pack.Recycle()

		case <-self.timerChan:
			// This will block until the other side is ready to accept
			// this batch, freeing us to start on the next one.
			if msgCounter > 0 { // msgCounter is unsigned, so ">= 0" was always true
				self.batchChan <- out
				out = <-self.backChan
				msgCounter = 0
			}
			timer.Reset(timerDuration)
		case err = <-errChan:
			ok = false // a committer error ends the loop
		}
	}
	return err
}
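For context, here is a minimal sketch of the double-buffered batch that receiver exchanges with its committer over batchChan and backChan. The outBatch layout, the initial capacity, and the committer below (including the file field it writes to) are assumptions inferred from the receiver above, not the project's actual code.

type outBatch struct {
	data []byte
}

func newOutBatch() *outBatch {
	return &outBatch{data: make([]byte, 0, 10000)}
}

// Hypothetical committer: it drains batchChan, writes each batch out, and
// returns the emptied batch on backChan so receiver can refill it without
// reallocating.
func (self *FileOutput) committer(errChan chan error) {
	for b := range self.batchChan {
		if _, err := self.file.Write(b.data); err != nil { // self.file is assumed
			errChan <- err
			return
		}
		b.data = b.data[:0] // keep the backing array for reuse
		self.backChan <- b
	}
}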
Example no. 2
// Listen on the provided TCP connection, extracting messages from the incoming
// data until the connection is closed or Stop is called on the input.
func (self *TcpInput) handleConnection(conn net.Conn) {
	var (
		frag []byte
		err  error
		pack *plugins.PipelinePack
	)
	counter := fmt.Sprintf("Tag:%s,Type:%s", self.common.Tag, self.common.Type)
	mc := metrics.NewCounter(counter)
	defer func() {
		conn.Close()
		self.wg.Done()
	}()

	count := 0
	idleChecksLeft := 60 // close the connection after at most ~60 ticker intervals
	stopped := false
	reader := bufio.NewReaderSize(conn, 8192)

	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for !stopped {
		conn.SetReadDeadline(time.Now().Add(5 * time.Second))
		select {
		case <-self.stopChan:
			stopped = true
		case <-ticker.C:
			if count == 0 || idleChecksLeft <= 0 {
				// No traffic in the last interval, or the lifetime cap is
				// exhausted: drop the connection.
				stopped = true
			}
			idleChecksLeft--
			count = 0
		default:
			frag, err = reader.ReadSlice('\n')
			if err != nil {
				// The 5s read deadline fires regularly on an idle connection;
				// treat a bare timeout as "no data yet" and let the ticker
				// above decide when to drop the connection.
				if ne, ok := err.(net.Error); ok && ne.Timeout() && len(frag) == 0 {
					continue
				}
				stopped = true
			}

			if len(frag) == 0 {
				continue
			}
			// ReadSlice returns a view into the reader's internal buffer, so
			// copy the bytes before handing them downstream.
			buf := append([]byte(nil), frag...)
			count++
			pack = <-self.runner.InChan()
			pack.MsgBytes = bytes.TrimSpace(buf)
			pack.Msg.Tag = self.common.Tag
			pack.Msg.Timestamp = time.Now().Unix()
			mc.Add(1)
			self.runner.RouterChan() <- pack
		}
	}
}
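The accept side is not shown in this example; a plausible sketch of how handleConnection is driven follows. The acceptLoop name and the listener plumbing are assumptions.

// Hypothetical accept loop: each connection gets its own goroutine running
// handleConnection; wg.Add here pairs with the wg.Done in its defer.
func (self *TcpInput) acceptLoop(listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			return // listener closed, e.g. when the input is stopped
		}
		self.wg.Add(1)
		go self.handleConnection(conn)
	}
}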
Example no. 3
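// Run batches incoming packs and flushes them to the committer on a flush
// timer; when no distributing producer is configured, it instead produces
// each message synchronously to a fixed topic and partition.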
func (self *KafkaOutput) Run(runner plugins.OutputRunner) (err error) {
	var (
		timer         *time.Timer
		timerDuration time.Duration
		pack          *plugins.PipelinePack
		message       *proto.Message
	)
	errChan := make(chan error, 1)

	go self.committer(runner, errChan)

	out := newOutBatch()
	ok := true
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	timerDuration = time.Duration(self.config.FlushInterval) * time.Millisecond
	timer = time.NewTimer(timerDuration)
	if self.distributingProducer != nil {
		for ok {
			select {
			case pack = <-runner.InChan():
				pack, err = plugins.PipeDecoder(self.common.Decoder, pack)
				if err != nil {
					log.Printf("PipeDecoder :%s", err)
					pack.Recycle()
					continue
				}
				pack, err = plugins.PipeEncoder(self.common.Encoder, pack)
				if err != nil {
					log.Printf("PipeEncoder :%s", err)
					pack.Recycle()
					continue
				}
				// Copy the payload before recycling: the recycled pack's
				// MsgBytes buffer may be reused while this batch is pending.
				value := append([]byte(nil), pack.Msg.MsgBytes...)
				message = &proto.Message{Value: value}
				out.data = append(out.data, message)
				pack.Recycle()
			case <-timer.C:
				self.batchChan <- out
				out = <-self.backChan
				timer.Reset(timerDuration)
			case <-ticker.C:
				// If an error has been seen since the last tick, try to
				// re-establish the connection to the kafka cluster.
				if err != nil {
					bcf := kafka.NewBrokerConf(self.config.ClientId)
					self.broker, err = kafka.Dial(self.config.Addrs, bcf)
					if err != nil {
						log.Printf("cannot reconnect to kafka cluster: %s", err)
					}
				}
			case err = <-errChan:
				// Stop on a committer error; without this case ok never
				// becomes false and Run cannot return.
				ok = false
			}
		}
	} else {
		for {
			pack = <-runner.InChan()
			message = &proto.Message{Value: pack.Msg.MsgBytes}
			if _, err = self.producer.Produce(self.config.Topic, self.config.Partition, message); err != nil {
				log.Printf("cannot produce message to %s:%d: %s", self.config.Topic, self.config.Partition, err)
			}
			pack.Recycle()
		}
	}
	return err
}
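The committer that Run starts is not shown; below is a sketch under the assumptions that out.data holds a []*proto.Message and that distributingProducer follows the optiopay/kafka DistributingProducer interface (Distribute(topic, msgs...)). Treat it as an illustration, not the project's actual committer.

// Hypothetical committer for KafkaOutput: flush each batch through the
// distributing producer, report failures on errChan, and return the
// emptied batch for reuse.
func (self *KafkaOutput) committer(runner plugins.OutputRunner, errChan chan error) {
	for b := range self.batchChan {
		if len(b.data) > 0 {
			if _, err := self.distributingProducer.Distribute(self.config.Topic, b.data...); err != nil {
				errChan <- err
			}
		}
		b.data = b.data[:0]
		self.backChan <- b
	}
}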