Code example #1
File: input.go Project: swan-go/heka-redis
func (rpsi *RedisPubSubInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		dRunner pipeline.DecoderRunner
		decoder pipeline.Decoder
		pack    *pipeline.PipelinePack
		e       error
		ok      bool
	)
	// Get the InputRunner's chan to receive empty PipelinePacks
	packSupply := ir.InChan()

	if rpsi.conf.DecoderName != "" {
		if dRunner, ok = h.DecoderRunner(rpsi.conf.DecoderName, fmt.Sprintf("%s-%s", ir.Name(), rpsi.conf.DecoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", rpsi.conf.DecoderName)
		}
		decoder = dRunner.Decoder()
	}

	//Connect to the channel
	psc := redis.PubSubConn{Conn: rpsi.conn}
	psc.PSubscribe(rpsi.conf.Channel)

	for {
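		// Each pattern-match message (PMessage) becomes a Heka message; it is
		// passed through the configured decoder, when one is set, before injection.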
		switch n := psc.Receive().(type) {
		case redis.PMessage:
			// Grab an empty PipelinePack from the InputRunner
			pack = <-packSupply
			pack.Message.SetType("redis_pub_sub")
			pack.Message.SetLogger(n.Channel)
			pack.Message.SetPayload(string(n.Data))
			pack.Message.SetTimestamp(time.Now().UnixNano())
			var packs []*pipeline.PipelinePack
			if decoder == nil {
				packs = []*pipeline.PipelinePack{pack}
			} else {
				packs, e = decoder.Decode(pack)
			}
			if packs != nil {
				for _, p := range packs {
					ir.Inject(p)
				}
			} else {
				if e != nil {
					ir.LogError(fmt.Errorf("Couldn't parse Redis message: %s", n.Data))
				}
				pack.Recycle(nil)
			}
		case redis.Subscription:
			ir.LogMessage(fmt.Sprintf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count))
			if n.Count == 0 {
				return errors.New("No channel to subscribe")
			}
		case error:
			fmt.Printf("error: %v\n", n)
			return n
		}
	}

	return nil
}
Code example #2
File: uptime.go Project: kamilsmuga/UptimeFilter
func (f *UptimeFilter) Run(runner pipeline.FilterRunner, helper pipeline.PluginHelper) (err error) {
	var (
		pack    *pipeline.PipelinePack
		payload string
	)

	inChan := runner.InChan()
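	// Consume packs until the input channel closes; the hours map is initialized lazily on the first message.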
	for pack = range inChan {
		payload = pack.Message.GetPayload()
		runner.LogMessage("Payload: " + payload)
		if f.hours == nil {
			f.hours = make(map[int64]bool)
		}
		var epoch int64 = f.GetEpoch(payload)
		f.startHour, f.endHour = f.FigureOutStartAndEndHour(epoch)
		if !f.hours[f.startHour] {
			f.InitFilterForStartHour(f.startHour, payload)
		} else {
			f.CalculateUptimeFor(f.startHour, f.endHour)
			// f.hours[&f.startHour] = false
			log.Printf("Length of map: &d", len(f.hours))
		}
		log.Printf("Start hour: %d", f.startHour)
		log.Printf("End hour: %d", f.endHour)
		log.Printf("EPOCH: %d", epoch)
		pack.Recycle()
	}
	return
}
Code example #3
// Framed protobuf message parser
func (lsi *LogstreamInput) messageProtoParser(ir p.InputRunner, deliver Deliver, stop chan chan bool) (err error) {
	var (
		pack   *p.PipelinePack
		record []byte
		n      int
	)
	for err == nil {
		select {
		case lsi.stopped = <-stop:
			return
		default:
		}
		n, record, err = lsi.parser.Parse(lsi.stream)
		if n > 0 {
			lsi.stream.FlushBuffer(n)
		}
		if len(record) > 0 {
			pack = <-ir.InChan()
			headerLen := int(record[1]) + 3 // recsep+len+header+unitsep
			messageLen := len(record) - headerLen
			// ignore authentication headers
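			// Reuse the pack's byte buffer, growing it only when the record does not fit.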
			if messageLen > cap(pack.MsgBytes) {
				pack.MsgBytes = make([]byte, messageLen)
			}
			pack.MsgBytes = pack.MsgBytes[:messageLen]
			copy(pack.MsgBytes, record[headerLen:])
			deliver(pack)
			lsi.countRecord()
		}
	}
	return
}
Code example #4
File: redis_input.go Project: swan-go/heka-redis
func (rli *RedisInput) InsertMessage(ir pipeline.InputRunner, decoder pipeline.Decoder, msg string) {
	var (
		pack *pipeline.PipelinePack
		e    error
	)
	// Get the InputRunner's chan to receive empty PipelinePacks
	packSupply := ir.InChan()

	pack = <-packSupply
	pack.Message.SetType(rli.conf.Key)
	pack.Message.SetLogger("Redis")
	pack.Message.SetPayload(msg)
	pack.Message.SetTimestamp(time.Now().UnixNano())

	var packs []*pipeline.PipelinePack
	if decoder == nil {
		packs = []*pipeline.PipelinePack{pack}
	} else {
		packs, e = decoder.Decode(pack)
	}

	if packs != nil {
		for _, p := range packs {
			ir.Inject(p)
		}
	} else {
		if e != nil {
			ir.LogError(fmt.Errorf("Couldn't parse %s", msg))
			pack.Recycle(e)
		} else {
			pack.Recycle(nil)
			fmt.Println("pack recycle!")
		}
	}
}
Code example #5
File: input.go Project: liuyangc3/heka-redis-plugin
func (rli *RedisListInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		pack  *pipeline.PipelinePack
		packs []*pipeline.PipelinePack
	)

	// Get the InputRunner's chan to receive empty PipelinePacks
	inChan := ir.InChan()

	for {
		message, err := rli.conn.Do("RPOP", rli.conf.ListName)
		if err != nil {
			ir.LogError(fmt.Errorf("Redis RPOP error: %s", err))
			// TODO: should reconnect redis rather than close it
			rli.Stop()
			break
		}
		if message != nil {
			pack = <-inChan
			pack.Message.SetType("redis_list")
			pack.Message.SetPayload(string(message.([]uint8)))
			packs = []*pipeline.PipelinePack{pack}
			if packs != nil {
				for _, p := range packs {
					ir.Inject(p)
				}
			} else {
				pack.Recycle(nil)
			}
		} else {
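			// The list was empty; back off for a second before polling again.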
			time.Sleep(time.Second)
		}
	}
	return nil
}
Code example #6
File: scamp_requester.go Project: gudTECH/heka-scamp
func (sop *SCAMPOutputPlugin) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var pack *pipeline.PipelinePack

	// We have no default encoder
	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	for pack = range or.InChan() {
		scamp.Info.Printf("received pipeline pack")
		encoded, err := or.Encode(pack) // pack.Message.GetPayload()

		if err == nil {
			scamp.Info.Printf("payload: %s", encoded)
			msg := scamp.NewMessage()
			msg.SetEnvelope(scamp.ENVELOPE_JSON)
			msg.SetAction(sop.conf.Action)
			msg.SetVersion(1)
			msg.Write(encoded)
		}

		pack.Recycle(err)
	}
	fmt.Println("sup from end of for loop in Run")
	return
}
Code example #7
File: irc_output.go Project: salekseev/heka
func (output *IrcOutput) Run(runner pipeline.OutputRunner,
	helper pipeline.PluginHelper) error {
	if runner.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	output.runner = runner

	// Register callbacks to handle events
	registerCallbacks(output)

	var err error

	// Connect to the Irc Server
	err = output.Conn.Connect(output.Server)
	if err != nil {
		return fmt.Errorf("Unable to connect to irc server %s: %s",
			output.Server, err)
	}

	// Start a goroutine for receiving messages, and throttling before sending
	// to the IRC server
	output.wg.Add(1)
	go processOutQueue(output)

	var outgoing []byte
	ok := true
	inChan := runner.InChan()
	var pack *pipeline.PipelinePack
	for ok {
		select {
		case pack, ok = <-inChan:
		case <-output.die:
			ok = false
		}
		if !ok {
			break
		}
		outgoing, err = runner.Encode(pack)
		if err != nil {
			output.runner.LogError(err)
		} else if outgoing != nil {
			// Send the message to each irc channel. If the out queue is full,
			// then we need to drop the message and log an error.
			for i, ircChannel := range output.Channels {
				ircMsg := IrcMsg{outgoing, ircChannel, i}
				select {
				case output.OutQueue <- ircMsg:
				default:
					output.runner.LogError(ErrOutQueueFull)
				}
			}
		}
		pack.Recycle()
	}
	output.cleanup()
	return nil
}
Code example #8
func (clo *CloudLoggingOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack       *pipeline.PipelinePack
		e          error
		k          string
		m          *logging.LogEntry
		exist      bool
		ok         = true
		inChan     = or.InChan()
		groupBatch = make(map[string]*LogBatch)
		outBatch   *LogBatch
		ticker     = time.Tick(time.Duration(clo.conf.FlushInterval) * time.Millisecond)
	)
	clo.or = or
	go clo.committer()
	for ok {
		select {
		case pack, ok = <-inChan:
			// Closed inChan => we're shutting down, flush data.
			if !ok {
				clo.sendGroupBatch(groupBatch)
				close(clo.batchChan)
				<-clo.outputExit
				break
			}

			k, m, e = clo.Encode(pack)
			pack.Recycle()
			if e != nil {
				or.LogError(e)
				continue
			}

			if k != "" && m != nil {
				outBatch, exist = groupBatch[k]
				if !exist {
					outBatch = &LogBatch{count: 0, batch: make([]*logging.LogEntry, 0, 100), name: k}
					groupBatch[k] = outBatch
				}

				outBatch.batch = append(outBatch.batch, m)
				if outBatch.count++; clo.CheckFlush(int(outBatch.count), len(outBatch.batch)) {
					if len(outBatch.batch) > 0 {
						outBatch.batch = clo.sendBatch(k, outBatch.batch, outBatch.count)
						outBatch.count = 0
					}
				}
			}
		case <-ticker:
			clo.sendGroupBatch(groupBatch)
		case err = <-clo.outputExit:
			ok = false
		}
	}
	return
}
Code example #9
func (cmo *CloudMonitoringOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack     *pipeline.PipelinePack
		e        error
		m        *cloudmonitoring.TimeseriesPoint
		ok       = true
		count    int64
		inChan   = or.InChan()
		outBatch = make([]*cloudmonitoring.TimeseriesPoint, 0, 200)
		ticker   = time.Tick(time.Duration(cmo.conf.FlushInterval) * time.Millisecond)
	)
	cmo.or = or
	go cmo.committer()
	for ok {
		select {
		case pack, ok = <-inChan:
			// Closed inChan => we're shutting down, flush data.
			if !ok {
				if len(outBatch) > 0 {
					cmo.sendBatch(outBatch, count)
				}
				close(cmo.batchChan)
				<-cmo.outputExit
				break
			}

			m, e = cmo.Encode(pack)
			pack.Recycle()
			if e != nil {
				or.LogError(e)
				continue
			}

			if m != nil {
				outBatch = append(outBatch, m)

				if count++; cmo.CheckFlush(int(count), len(outBatch)) {
					if len(outBatch) > 0 {
						outBatch = cmo.sendBatch(outBatch, count)
						count = 0
					}
				}
			}
		case <-ticker:
			if len(outBatch) > 0 {
				outBatch = cmo.sendBatch(outBatch, count)
			}
			count = 0
		case err = <-cmo.outputExit:
			ok = false
		}
	}
	return
}
Code example #10
func (cef *CefOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	var (
		facility, priority syslog.Priority
		ident              string
		ok                 bool
		p                  syslog.Priority
		e                  error
		pack               *pipeline.PipelinePack
	)
	syslogMsg := new(SyslogMsg)
	for pack = range or.InChan() {

		// default values
		facility, priority = syslog.LOG_LOCAL4, syslog.LOG_INFO
		ident = "heka_no_ident"

		priField := pack.Message.FindFirstField("cef_meta.syslog_priority")
		if priField != nil {
			priStr := priField.ValueString[0]
			if p, ok = SYSLOG_PRIORITY[priStr]; ok {
				priority = p
			}
		}

		facField := pack.Message.FindFirstField("cef_meta.syslog_facility")
		if facField != nil {
			facStr := facField.ValueString[0]
			if p, ok = SYSLOG_FACILITY[facStr]; ok {
				facility = p
			}
		}

		idField := pack.Message.FindFirstField("cef_meta.syslog_ident")
		if idField != nil {
			ident = idField.ValueString[0]
		}

		syslogMsg.priority = priority | facility
		syslogMsg.prefix = ident
		syslogMsg.payload = pack.Message.GetPayload()
		pack.Recycle()

		_, e = cef.syslogWriter.WriteString(syslogMsg.priority, syslogMsg.prefix,
			syslogMsg.payload)

		if e != nil {
			or.LogError(e)
		}
	}

	cef.syslogWriter.Close()
	return
}
Code example #11
func (cwo *CloudwatchOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	inChan := or.InChan()

	payloads := make(chan CloudwatchDatapoints, cwo.backlog)
	go cwo.Submitter(payloads, or)

	var (
		pack          *pipeline.PipelinePack
		msg           *message.Message
		rawDataPoints *CloudwatchDatapointPayload
		dataPoints    *CloudwatchDatapoints
	)
	dataPoints = new(CloudwatchDatapoints)
	dataPoints.Datapoints = make([]cloudwatch.MetricDatum, 0, 0)

	for pack = range inChan {
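		// Each pack's payload is expected to be JSON describing a set of raw datapoints.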
		rawDataPoints = new(CloudwatchDatapointPayload)
		msg = pack.Message
		err = json.Unmarshal([]byte(msg.GetPayload()), rawDataPoints)
		if err != nil {
			or.LogMessage(fmt.Sprintf("warning, unable to parse payload: %s", err))
			err = nil
			continue
		}
		// Run through the list and convert them to CloudwatchDatapoints
		for _, rawDatum := range rawDataPoints.Datapoints {
			datum := cloudwatch.MetricDatum{
				Dimensions:      rawDatum.Dimensions,
				MetricName:      rawDatum.MetricName,
				Unit:            rawDatum.Unit,
				Value:           rawDatum.Value,
				StatisticValues: rawDatum.StatisticValues,
			}
			if rawDatum.Timestamp != "" {
				parsedTime, err := message.ForgivingTimeParse("", rawDatum.Timestamp, cwo.tzLocation)
				if err != nil {
					or.LogMessage(fmt.Sprintf("unable to parse timestamp for datum: %s", rawDatum))
					continue
				}
				datum.Timestamp = parsedTime
			}
			dataPoints.Datapoints = append(dataPoints.Datapoints, datum)
		}
		payloads <- *dataPoints
		dataPoints.Datapoints = dataPoints.Datapoints[:0]
		rawDataPoints.Datapoints = rawDataPoints.Datapoints[:0]
		pack.Recycle()
	}
	or.LogMessage("shutting down AWS Cloudwatch submitter")
	cwo.stopChan <- true
	<-cwo.stopChan
	return
}
Code example #12
File: kafka_output.go Project: orangemi/heka
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		if msgBytes, err := or.Encode(pack); err == nil {
			if msgBytes != nil {
				err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
				if err != nil {
					atomic.AddInt64(&k.processMessageFailures, 1)
					or.LogError(err)
				}
			} else {
				atomic.AddInt64(&k.processMessageDiscards, 1)
			}
		} else {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
		}
		pack.Recycle()
	}
	errChan <- Shutdown
	wg.Wait()
	return
}
Code example #13
File: input.go Project: benjaminws/heka-zeromq
func (zi *ZeroMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	// Get the InputRunner's chan to receive empty PipelinePacks
	packs := ir.InChan()

	var decoding chan<- *pipeline.PipelinePack
	if zi.conf.Decoder != "" {
		// Fetch specified decoder
		decoder, ok := h.DecoderSet().ByName(zi.conf.Decoder)
		if !ok {
			err := fmt.Errorf("Could not find decoder", zi.conf.Decoder)
			return err
		}

		// Get the decoder's receiving chan
		decoding = decoder.InChan()
	}

	var pack *pipeline.PipelinePack
	var count int
	var b []byte
	var err error

	// Read data from the zeromq socket
	for {
		b, err = zi.socket.Recv(0)
		if err != nil {
			ir.LogError(err)
			continue
		}

		// Grab an empty PipelinePack from the InputRunner
		pack = <-packs

		// Trim the excess empty bytes
		count = len(b)
		pack.MsgBytes = pack.MsgBytes[:count]

		// Copy the received bytes into the pack's buffer
		copy(pack.MsgBytes, b)

		if decoding != nil {
			// Send pack onto decoder
			decoding <- pack
		} else {
			// Send pack into Heka pipeline
			ir.Inject(pack)
		}
	}

	return nil
}
Code example #14
func (this *SandboxManagerFilter) Run(fr pipeline.FilterRunner,
	h pipeline.PluginHelper) (err error) {

	inChan := fr.InChan()

	var ok = true
	var pack *pipeline.PipelinePack
	var delta int64

	this.restoreSandboxes(fr, h, this.workingDirectory)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			delta = time.Now().UnixNano() - pack.Message.GetTimestamp()
			if math.Abs(float64(delta)) >= 5e9 {
				fr.LogError(fmt.Errorf("Discarded control message: %d seconds skew",
					delta/1e9))
				pack.Recycle()
				break
			}
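			// Dispatch on the control message's "action" field: load a new sandbox filter or unload an existing one.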
			action, _ := pack.Message.GetFieldValue("action")
			switch action {
			case "load":
				current := int(atomic.LoadInt32(&this.currentFilters))
				if current < this.maxFilters {
					err := this.loadSandbox(fr, h, this.workingDirectory, pack.Message)
					if err != nil {
						fr.LogError(err)
					}
				} else {
					fr.LogError(fmt.Errorf("%s attempted to load more than %d filters",
						fr.Name(), this.maxFilters))
				}
			case "unload":
				fv, _ := pack.Message.GetFieldValue("name")
				if name, ok := fv.(string); ok {
					name = getSandboxName(fr.Name(), name)
					if this.pConfig.RemoveFilterRunner(name) {
						removeAll(this.workingDirectory, fmt.Sprintf("%s.*", name))
					}
				}
			}
			pack.Recycle()
		}
	}
	return
}
Code example #15
File: sandbox_encoder.go Project: Nitro/heka
func (s *SandboxEncoder) Encode(pack *pipeline.PipelinePack) (output []byte, err error) {
	if s.sb == nil {
		err = errors.New("No sandbox.")
		return
	}
	atomic.AddInt64(&s.processMessageCount, 1)
	s.injected = false

	var startTime time.Time
	if s.sample {
		startTime = time.Now()
	}
	cowpack := new(pipeline.PipelinePack)
	cowpack.Message = pack.Message   // the actual copy will happen if write_message is called
	cowpack.MsgBytes = pack.MsgBytes // no copying is necessary since we don't change it
	retval := s.sb.ProcessMessage(cowpack)
	if retval == 0 && !s.injected {
		// `inject_message` was never called, protobuf encode the copy on write
		// message.
		if s.output, err = s.cEncoder.EncodeMessage(cowpack.Message); err != nil {
			return
		}
	}

	if s.sample {
		duration := time.Since(startTime).Nanoseconds()
		s.reportLock.Lock()
		s.processMessageDuration += duration
		s.processMessageSamples++
		s.reportLock.Unlock()
	}
	s.sample = 0 == rand.Intn(s.sampleDenominator)

	if retval > 0 {
		err = fmt.Errorf("FATAL: %s", s.sb.LastError())
		return
	}
	if retval == -2 {
		// Encoder has nothing to return.
		return nil, nil
	}
	if retval < 0 {
		atomic.AddInt64(&s.processMessageFailures, 1)
		err = fmt.Errorf("Failed serializing: %s", s.sb.LastError())
		return
	}
	return s.output, nil
}
Code example #16
func (k *KinesisOutput) HandlePackage(or pipeline.OutputRunner, pack *pipeline.PipelinePack) error {

	// If we are flushing, wait until we have finished.
	k.flushLock.Lock()
	defer k.flushLock.Unlock()

	// encode the packages.
	msg, err := or.Encode(pack)
	if err != nil {
		errOut := fmt.Errorf("Error encoding message: %s", err)
		or.LogError(errOut)
		pack.Recycle(nil)
		return errOut
	}

	// If we only care about the Payload...
	if k.config.PayloadOnly {
		msg = []byte(pack.Message.GetPayload())
	}

	var tmp []byte
	// if we already have data then we should append.
	if len(k.batchedData) > 0 {
		tmp = append(append(k.batchedData, []byte(",")...), msg...)
	} else {
		tmp = msg
	}

	// if we can't fit the data in this record
	if len(tmp) > k.KINESIS_RECORD_SIZE {
		// add the existing data to the output batch
		array := append(append([]byte("["), k.batchedData...), []byte("]")...)
		k.AddToRecordBatch(or, array)

		// update the batched data to only contain the current message.
		k.batchedData = msg
	} else {
		// otherwise we add the existing data to a batch
		k.batchedData = tmp
	}

	// do reporting and tidy up
	atomic.AddInt64(&k.processMessageCount, 1)
	pack.Recycle(nil)

	return nil
}
Code example #17
func (so *SentryOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		udpAddrStr string
		udpAddr    *net.UDPAddr
		socket     net.Conn
		e          error
		ok         bool
		pack       *pipeline.PipelinePack
	)

	sentryMsg := &SentryMsg{
		dataPacket: make([]byte, 0, so.config.MaxSentryBytes),
	}

	for pack = range or.InChan() {
		e = so.prepSentryMsg(pack, sentryMsg)
		pack.Recycle()
		if e != nil {
			or.LogError(e)
			continue
		}

		udpAddrStr = sentryMsg.parsedDsn.Host
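		// Reuse a cached UDP socket for this DSN host; dial a new one if needed, capped at MaxUdpSockets.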
		if socket, ok = so.udpMap[udpAddrStr]; !ok {
			if len(so.udpMap) > so.config.MaxUdpSockets {
				or.LogError(fmt.Errorf("Max # of UDP sockets [%d] reached.",
					so.config.MaxUdpSockets))
				continue
			}

			if udpAddr, e = net.ResolveUDPAddr("udp", udpAddrStr); e != nil {
				or.LogError(fmt.Errorf("can't resolve UDP address %s: %s",
					udpAddrStr, e))
				continue
			}

			if socket, e = net.DialUDP("udp", nil, udpAddr); e != nil {
				or.LogError(fmt.Errorf("can't dial UDP socket: %s", e))
				continue
			}
			so.udpMap[sentryMsg.parsedDsn.Host] = socket
		}
		socket.Write(sentryMsg.dataPacket)
	}
	return
}
Code example #18
File: nsq_output.go Project: jbli/jbli_svn
func (no *NsqOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		encoder client.Encoder
		msg     *message.Message
		msgBody []byte = make([]byte, 0, 1024)
		pack    *pipeline.PipelinePack
	)

	conf := no.conf
	encoder = client.NewProtobufEncoder(nil)

	for pack = range or.InChan() {
		if conf.Serialize {
			msg = pack.Message
			if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
				or.LogError(err)
				err = nil
				pack.Recycle()
				continue
			}
			//err := no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			//err = no.nsqwriter.PublishAsync(conf.Topic, msgBody, nil)
			_, _, err = no.nsqwriter.Publish(conf.Topic, msgBody)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.PublishAsync"))
			}
			msgBody = msgBody[:0]
		} else {
			err = no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.PublishAsync"))
			}
		}
		pack.Recycle()
	}

	return
}
Code example #19
File: kinesis_output.go Project: slank/heka-plugins
func (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {
	var (
		pack   *pipeline.PipelinePack
		msg    []byte
		pk     string
		err    error
		params *kin.PutRecordInput
	)

	if or.Encoder() == nil {
		return fmt.Errorf("Encoder required.")
	}

	for pack = range or.InChan() {
		msg, err = or.Encode(pack)
		if err != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", err))
			pack.Recycle(nil)
			continue
		}
		pk = fmt.Sprintf("%d-%s", pack.Message.GetTimestamp(), pack.Message.GetHostname())
		if k.config.PayloadOnly {
			msg = []byte(pack.Message.GetPayload())
		}
		params = &kin.PutRecordInput{
			Data:         msg,
			PartitionKey: aws.String(pk),
			StreamName:   aws.String(k.config.Stream),
		}
		_, err = k.Client.PutRecord(params)
		if err != nil {
			or.LogError(fmt.Errorf("Error pushing message to Kinesis: %s", err))
			pack.Recycle(nil)
			continue
		}
		pack.Recycle(nil)
	}

	return nil
}
Code example #20
File: kafka_output.go Project: intoximeters/heka
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	pInChan := k.producer.Input()
	shutdownChan := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, shutdownChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, err := or.Encode(pack)
		if err != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
			// Don't retry encoding errors.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
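		// Hand the encoded message to the async producer's input channel; delivery errors are handled by the error goroutine.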
		pMessage := &sarama.ProducerMessage{
			Topic: topic,
			Key:   key,
			Value: sarama.ByteEncoder(msgBytes),
		}
		pInChan <- pMessage
		pack.Recycle(nil)
	}

	close(shutdownChan)
	wg.Wait()
	return
}
Code example #21
File: sandbox_output.go Project: orangemi/heka
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack      *pipeline.PipelinePack
		retval    int
		inChan    = or.InChan()
		duration  int64
		startTime time.Time
		ok        = true
		ticker    = or.Ticker()
	)

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			if s.sample {
				startTime = time.Now()
			}
			retval = s.sb.ProcessMessage(pack)
			if s.sample {
				duration = time.Since(startTime).Nanoseconds()
				s.reportLock.Lock()
				s.processMessageDuration += duration
				s.processMessageSamples++
				s.reportLock.Unlock()
			}
			s.sample = 0 == rand.Intn(s.sampleDenominator)
			pack.Recycle()

			if retval == 0 {
				atomic.AddInt64(&s.processMessageCount, 1)
			} else if retval < 0 {
				atomic.AddInt64(&s.processMessageFailures, 1)
				em := s.sb.LastError()
				if len(em) > 0 {
					or.LogError(errors.New(em))
				}
			} else {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}

		case t := <-ticker:
			startTime = time.Now()
			if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}
			duration = time.Since(startTime).Nanoseconds()
			s.reportLock.Lock()
			s.timerEventDuration += duration
			s.timerEventSamples++
			s.reportLock.Unlock()
		}
	}

	s.reportLock.Lock()
	var destroyErr error
	if s.sbc.PreserveData {
		destroyErr = s.sb.Destroy(s.preservationFile)
	} else {
		destroyErr = s.sb.Destroy("")
	}
	if destroyErr != nil {
		err = destroyErr
	}

	s.sb = nil
	s.reportLock.Unlock()
	return
}
Code example #22
File: output.go Project: imgix/heka-nsq
func (output *NsqOutput) Run(runner pipeline.OutputRunner,
	helper pipeline.PluginHelper) (err error) {
	if runner.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	var (
		pack     *pipeline.PipelinePack
		outgoing []byte
		msg      RetryMsg
	)

	output.runner = runner
	inChan := runner.InChan()
	ok := true

	defer output.cleanup()

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				return nil
			}
			outgoing, err = output.runner.Encode(pack)
			if err != nil {
				runner.LogError(err)
			} else {
				err = output.sendMessage(outgoing)
				if err != nil {
					output.runner.LogError(err)
					err = output.retryHelper.Wait()
					if err != nil {
						return
					}
					// Create a retry msg, and requeue it
					msg := RetryMsg{Body: outgoing, retryChan: output.retryChan, maxCount: output.MaxMsgRetries}
					err = msg.Retry()
					if err != nil {
						output.runner.LogError(err)
					}
				} else {
					output.retryHelper.Reset()
				}
			}
			pack.Recycle()
		case msg, ok = <-output.retryChan:
			if !ok {
				return nil
			}
			err = output.sendMessage(msg.Body)
			if err != nil {
				output.runner.LogError(err)
				err = output.retryHelper.Wait()
				if err != nil {
					return
				}
				// requeue the message
				err = msg.Retry()
				if err != nil {
					output.runner.LogError(err)
				}
			} else {
				output.retryHelper.Reset()
			}
		}
	}

	return nil
}
Code example #23
File: sandbox_input.go Project: Nitro/heka
func (s *SandboxInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) {
	abortChan := s.pConfig.Globals.AbortChan()
	s.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		var pack *pipeline.PipelinePack
		select {
		case pack = <-ir.InChan():
		case <-abortChan:
			// Shutting down before a pack was obtained; nothing to recycle.
			return 5
		}
		if err := proto.Unmarshal([]byte(payload), pack.Message); err != nil {
			pack.Recycle(nil)
			return 1
		}
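		// Re-interpret the decoded timestamp's wall-clock time as being in the configured timezone.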
		if s.tz != time.UTC {
			const layout = "2006-01-02T15:04:05.999999999" // remove the incorrect UTC tz info
			t := time.Unix(0, pack.Message.GetTimestamp())
			t = t.In(time.UTC)
			ct, _ := time.ParseInLocation(layout, t.Format(layout), s.tz)
			pack.Message.SetTimestamp(ct.UnixNano())
		}
		if err := ir.Inject(pack); err != nil {
			pack.Recycle(nil)
			return 5
		}
		atomic.AddInt64(&s.processMessageCount, 1)
		atomic.AddInt64(&s.processMessageBytes, int64(len(payload)))
		return 0
	})

	ticker := ir.Ticker()

	ok := true
	for ok {
		retval := s.sb.ProcessMessage(nil)
		if retval <= 0 { // Sandbox is in polling mode
			if retval < 0 {
				atomic.AddInt64(&s.processMessageFailures, 1)
				em := s.sb.LastError()
				if len(em) > 0 {
					ir.LogError(errors.New(em))
				}
			}
			if ticker == nil {
				ir.LogMessage("single run completed")
				break
			}
			select { // block until stop or poll interval
			case _, ok = <-s.stopChan:
			case <-ticker:
			}
		} else { // Sandbox is shutting down
			em := s.sb.LastError()
			if !strings.HasSuffix(em, "shutting down") {
				ir.LogError(errors.New(em))
			}
			break
		}
	}

	return s.destroy()
}
Code example #24
File: sandbox_filter.go Project: orangemi/heka
func (this *SandboxFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	inChan := fr.InChan()
	ticker := fr.Ticker()

	var (
		ok             = true
		terminated     = false
		sample         = true
		blocking       = false
		backpressure   = false
		pack           *pipeline.PipelinePack
		retval         int
		msgLoopCount   uint
		injectionCount uint
		startTime      time.Time
		slowDuration   int64 = int64(this.pConfig.Globals.MaxMsgProcessDuration)
		duration       int64
		capacity       = cap(inChan) - 1
	)

	// We assign to the return value of Run() for errors in the closure so that
	// the plugin runner can determine what caused the SandboxFilter to return.
	this.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		if injectionCount == 0 {
			err = pipeline.TerminatedError("exceeded InjectMessage count")
			return 2
		}
		injectionCount--
		pack := h.PipelinePack(msgLoopCount)
		if pack == nil {
			err = pipeline.TerminatedError(fmt.Sprintf("exceeded MaxMsgLoops = %d",
				this.pConfig.Globals.MaxMsgLoops))
			return 3
		}
		if len(payload_type) == 0 { // heka protobuf message
			hostname := pack.Message.GetHostname()
			err := proto.Unmarshal([]byte(payload), pack.Message)
			if err == nil {
				// do not allow filters to override the following
				pack.Message.SetType("heka.sandbox." + pack.Message.GetType())
				pack.Message.SetLogger(fr.Name())
				pack.Message.SetHostname(hostname)
			} else {
				return 1
			}
		} else {
			pack.Message.SetType("heka.sandbox-output")
			pack.Message.SetLogger(fr.Name())
			pack.Message.SetPayload(payload)
			ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
			pack.Message.AddField(ptype)
			pname, _ := message.NewField("payload_name", payload_name, "")
			pack.Message.AddField(pname)
		}
		if !fr.Inject(pack) {
			return 4
		}
		atomic.AddInt64(&this.injectMessageCount, 1)
		return 0
	})

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			injectionCount = this.pConfig.Globals.MaxMsgProcessInject
			msgLoopCount = pack.MsgLoopCount

			if this.manager != nil { // only check for backpressure on dynamic plugins
				// reading a channel length is generally fast ~1ns
				// we need to check the entire chain back to the router
				backpressure = len(inChan) >= capacity ||
					fr.MatchRunner().InChanLen() >= capacity ||
					len(h.PipelineConfig().Router().InChan()) >= capacity
			}

			// performing the timing is expensive ~40ns but if we are
			// backpressured we need a decent sample set before triggering
			// termination
			if sample ||
				(backpressure && this.processMessageSamples < int64(capacity)) ||
				this.sbc.Profile {
				startTime = time.Now()
				sample = true
			}
			retval = this.sb.ProcessMessage(pack)
			if sample {
				duration = time.Since(startTime).Nanoseconds()
				this.reportLock.Lock()
				this.processMessageDuration += duration
				this.processMessageSamples++
				if this.sbc.Profile {
					this.profileMessageDuration = this.processMessageDuration
					this.profileMessageSamples = this.processMessageSamples
					if this.profileMessageSamples == int64(capacity)*10 {
						this.sbc.Profile = false
						// reset the normal sampling so it isn't heavily skewed by the profile values
						// i.e. process messages fast during profiling and then switch to malicious code
						this.processMessageDuration = this.profileMessageDuration / this.profileMessageSamples
						this.processMessageSamples = 1
					}
				}
				this.reportLock.Unlock()
			}
			if retval <= 0 {
				if backpressure && this.processMessageSamples >= int64(capacity) {
					if this.processMessageDuration/this.processMessageSamples > slowDuration ||
						fr.MatchRunner().GetAvgDuration() > slowDuration/5 {
						terminated = true
						blocking = true
					}
				}
				if retval < 0 {
					atomic.AddInt64(&this.processMessageFailures, 1)
					em := this.sb.LastError()
					if len(em) > 0 {
						fr.LogError(errors.New(em))
					}
				}
				sample = 0 == rand.Intn(this.sampleDenominator)
			} else {
				terminated = true
			}
			pack.Recycle()

		case t := <-ticker:
			injectionCount = this.pConfig.Globals.MaxMsgTimerInject
			startTime = time.Now()
			if retval = this.sb.TimerEvent(t.UnixNano()); retval != 0 {
				terminated = true
			}
			duration = time.Since(startTime).Nanoseconds()
			this.reportLock.Lock()
			this.timerEventDuration += duration
			this.timerEventSamples++
			this.reportLock.Unlock()
		}

		if terminated {
			pack := h.PipelinePack(0)
			pack.Message.SetType("heka.sandbox-terminated")
			pack.Message.SetLogger(pipeline.HEKA_DAEMON)
			message.NewStringField(pack.Message, "plugin", fr.Name())
			if blocking {
				pack.Message.SetPayload("sandbox is running slowly and blocking the router")
				// no lock on the ProcessMessage variables here because there are no active writers
				message.NewInt64Field(pack.Message, "ProcessMessageCount", this.processMessageCount, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageFailures", this.processMessageFailures, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageSamples", this.processMessageSamples, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageAvgDuration",
					this.processMessageDuration/this.processMessageSamples, "ns")
				message.NewInt64Field(pack.Message, "MatchAvgDuration", fr.MatchRunner().GetAvgDuration(), "ns")
				message.NewIntField(pack.Message, "FilterChanLength", len(inChan), "count")
				message.NewIntField(pack.Message, "MatchChanLength", fr.MatchRunner().InChanLen(), "count")
				message.NewIntField(pack.Message, "RouterChanLength", len(h.PipelineConfig().Router().InChan()), "count")
			} else {
				pack.Message.SetPayload(this.sb.LastError())
			}
			fr.Inject(pack)
			break
		}
	}

	if this.manager != nil {
		this.manager.PluginExited()
	}

	this.reportLock.Lock()
	var destroyErr error
	if this.sbc.PreserveData {
		destroyErr = this.sb.Destroy(this.preservationFile)
	} else {
		destroyErr = this.sb.Destroy("")
	}
	if destroyErr != nil {
		err = destroyErr
	}

	this.sb = nil
	this.reportLock.Unlock()
	return
}
Code example #25
File: readfile.go Project: wxdublin/heka-plugins
// Run runs the FileReadFilter filter, which inspects each message, and appends
// the content of the file named as the executed template to the existing payload.
// The resulting message will be injected back, and have newType type.
func (fr FileReadFilter) Run(r pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	if fr.tmpl == nil {
		return errors.New("FileReadFilter: empty template")
	}
	var (
		fh           *os.File
		inp          io.Reader
		npack, opack *pipeline.PipelinePack
	)
	out := bytes.NewBuffer(make([]byte, 0, 4096))
	log.Printf("FileReadFilter: Starting with template %s", fr.tmpl)
	for opack = range r.InChan() {
		//log.Printf("opack=%v", opack)
		//if opack.Decoded {
		out.Reset()
		if err = fr.tmpl.Execute(out, extendedMessage{opack.Message}); err != nil {
			opack.Recycle()
			return fmt.Errorf("FileReadFilter: error executing template %v with message %v: %v",
				fr.tmpl, opack.Message, err)
		}
		//log.Printf("out=%q", out)
		if fh, err = os.Open(out.String()); err != nil {
			log.Printf("FileReadFilter: cannot read %q: %v", out, err)
			opack.Recycle()
			continue
		}
		out.Reset()
		//if _, err = io.Copy(out, io.LimitedReader{R: fh, N: 65000}); err != nil && err != io.EOF {
		inp = fh
		if fr.decoder != nil {
			inp = transform.NewReader(fh, fr.decoder)
		}
		if _, err = io.Copy(out, inp); err != nil && err != io.EOF {
			log.Printf("FileReadFilter: error reading %q: %v", fh.Name(), err)
			opack.Recycle()
			fh.Close()
			continue
		}
		fh.Close()

		npack = h.PipelinePack(opack.MsgLoopCount)
		if npack == nil {
			opack.Recycle()
			return errors.New("FileReadFilter: no output pack - infinite loop?")
		}
		npack.Decoded = true
		npack.Message = message.CopyMessage(opack.Message)
		npack.Message.SetType(fr.newType)
		npack.Message.SetPayload(npack.Message.GetPayload() + "\n" + out.String())
		if !r.Inject(npack) {
			log.Printf("FileReadFilter: cannot inject new pack %v", npack)
		}
		//}
		opack.Recycle()
	}
	return nil
}
Code example #26
File: sandbox_output.go Project: Nitro/heka
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack      *pipeline.PipelinePack
		retval    int
		inChan    = or.InChan()
		duration  int64
		startTime time.Time
		ok        = true
		ticker    = or.Ticker()
	)

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			if s.sample {
				startTime = time.Now()
			}
			retval = s.sb.ProcessMessage(pack)
			if s.sample {
				duration = time.Since(startTime).Nanoseconds()
				s.reportLock.Lock()
				s.processMessageDuration += duration
				s.processMessageSamples++
				s.reportLock.Unlock()
			}
			s.sample = 0 == rand.Intn(s.sampleDenominator)

			or.UpdateCursor(pack.QueueCursor) // TODO: support retries?
			if retval == 0 {
				atomic.AddInt64(&s.processMessageCount, 1)
				pack.Recycle(nil)
			} else if retval < 0 {
				atomic.AddInt64(&s.processMessageFailures, 1)
				var e error
				em := s.sb.LastError()
				if len(em) > 0 {
					e = errors.New(em)
				}
				pack.Recycle(e)
			} else {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				pack.Recycle(err)
				ok = false
			}

		case t := <-ticker:
			startTime = time.Now()
			if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}
			duration = time.Since(startTime).Nanoseconds()
			s.reportLock.Lock()
			s.timerEventDuration += duration
			s.timerEventSamples++
			s.reportLock.Unlock()
		}
	}

	if err == nil && s.sbc.TimerEventOnShutdown {
		if retval = s.sb.TimerEvent(time.Now().UnixNano()); retval != 0 {
			err = fmt.Errorf("FATAL: %s", s.sb.LastError())
		}
	}

	destroyErr := s.destroy()
	if destroyErr != nil {
		if err != nil {
			or.LogError(err)
		}
		err = destroyErr
	}
	return err
}
Code example #27
File: redis_Input.go Project: jbli/jbli_svn
func (ri *RedisMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	// Get the InputRunner's chan to receive empty PipelinePacks
	packs := ir.InChan()

	var decoding chan<- *pipeline.PipelinePack
	if ri.conf.Decoder != "" {
		// Fetch specified decoder
		decoder, ok := h.DecoderRunner(ri.conf.Decoder)
		if !ok {
			err := fmt.Errorf("Could not find decoder", ri.conf.Decoder)
			return err
		}

		// Get the decoder's receiving chan
		decoding = decoder.InChan()
	}

	var pack *pipeline.PipelinePack
	//var p []*redismq.Package
	var p *redismq.Package
	var count int
	var b []byte
	var err error

	for {
		p, err = ri.rdconsumer.Get()
		if err != nil {
			ir.LogError(err)
			continue
		}
		err = p.Ack()
		if err != nil {
			ir.LogError(err)
		}
		b = []byte(p.Payload)
		// Grab an empty PipelinePack from the InputRunner
		pack = <-packs

		// Trim the excess empty bytes
		count = len(b)
		pack.MsgBytes = pack.MsgBytes[:count]

		// Copy the payload bytes into the pack's buffer
		copy(pack.MsgBytes, b)

		if decoding != nil {
			// Send pack onto decoder
			decoding <- pack
		} else {
			// Send pack into Heka pipeline
			ir.Inject(pack)
		}
	}
	/*
	           checkStat := time.Tick(ri.statInterval)
	           ok := true
	           for ok {
	               select {
	   		case _, ok = <-ri.stopChan:
	   			break
	   		case <-checkStat:
	                       p, err = ri.rdconsumer.MultiGet(500)
	                       if err != nil {
	                           ir.LogError(err)
	                           continue
	                       }
	                       err = p[len(p)-1].MultiAck()
	                       if err != nil {
	                           ir.LogError(err)
	                       }
	                       for _, v := range p {
	                         b = []byte(v.Payload)
	                         // Grab an empty PipelinePack from the InputRunner
	                         pack = <-packs

	                         // Trim the excess empty bytes
	                         count = len(b)
	                         pack.MsgBytes = pack.MsgBytes[:count]

	                         // Copy ws bytes into pack's bytes
	                         copy(pack.MsgBytes, b)

	                         if decoding != nil {
	                           // Send pack onto decoder
	                           decoding <- pack
	                         } else {
	                           // Send pack into Heka pipeline
	                           ir.Inject(pack)
	                         }
	                       }
	                   }
	           }
	*/
	return nil
}
Code example #28
File: nsq_input.go Project: jbli/jbli_svn
func (ni *NsqInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	// Get the InputRunner's chan to receive empty PipelinePacks
	var pack *pipeline.PipelinePack
	var err error
	var dRunner pipeline.DecoderRunner
	var decoder pipeline.Decoder
	var ok bool
	var e error

	//pos := 0
	//output := make([]*Message, 2)
	packSupply := ir.InChan()

	if ni.conf.Decoder != "" {
		if dRunner, ok = h.DecoderRunner(ni.conf.Decoder); !ok {
			return fmt.Errorf("Decoder not found: %s", ni.conf.Decoder)
		}
		decoder = dRunner.Decoder()
	}

	err = ni.nsqReader.ConnectToLookupd(ni.conf.Address)
	if err != nil {
		ir.LogError(errors.New("ConnectToLookupd failed."))
	}

	header := &message.Header{}

	stopped := false
	//readLoop:
	for !stopped {
		//stopped = true
		select {
		case <-ni.stopChan:
			ir.LogError(errors.New("get ni.stopChan, set stopped=true"))
			stopped = true
		default:
			pack = <-packSupply
			m, ok1 := <-ni.handler.logChan
			if !ok1 {
				stopped = true
				break
			}

			if ni.conf.Serialize {
				if dRunner == nil {
					pack.Recycle()
					ir.LogError(errors.New("Serialize messages require a decoder."))
				}
				//header := &message.Header{}
				_, msgOk := findMessage(m.msg.Body, header, &(pack.MsgBytes))
				if msgOk {
					dRunner.InChan() <- pack
				} else {
					pack.Recycle()
					ir.LogError(errors.New("Can't find Heka message."))
				}
				header.Reset()
			} else {
				//ir.LogError(fmt.Errorf("message body: %s", m.msg.Body))
				pack.Message.SetType("nsq")
				pack.Message.SetPayload(string(m.msg.Body))
				pack.Message.SetTimestamp(time.Now().UnixNano())
				var packs []*pipeline.PipelinePack
				if decoder == nil {
					packs = []*pipeline.PipelinePack{pack}
				} else {
					packs, e = decoder.Decode(pack)
				}
				if packs != nil {
					for _, p := range packs {
						ir.Inject(p)
					}
				} else {
					if e != nil {
						ir.LogError(fmt.Errorf("Couldn't parse Nsq message: %s", m.msg.Body))
					}
					pack.Recycle()
				}
			}
			m.returnChannel <- &nsq.FinishedMessage{m.msg.Id, 0, true}
			/*
			   output[pos] = m
			   pos++
			   if pos == 2 {
			           for pos > 0 {
			                   pos--
			                   m1 := output[pos]
			                   m1.returnChannel <- &nsq.FinishedMessage{m1.msg.Id, 0, true}
			                   output[pos] = nil
			           }
			   }
			*/
		}
	}
	return nil
}