// Standard text log file parser func (lsi *LogstreamInput) payloadParser(ir p.InputRunner, deliver Deliver, stop chan chan bool) (err error) { var ( pack *p.PipelinePack record []byte n int ) for err == nil { select { case lsi.stopped = <-stop: return default: } n, record, err = lsi.parser.Parse(lsi.stream) if err == io.ErrShortBuffer { ir.LogError(fmt.Errorf("record exceeded MAX_RECORD_SIZE %d", message.MAX_RECORD_SIZE)) err = nil // non-fatal, keep going } if n > 0 { lsi.stream.FlushBuffer(n) } if len(record) > 0 { payload := string(record) pack = <-ir.InChan() pack.Message.SetUuid(uuid.NewRandom()) pack.Message.SetTimestamp(time.Now().UnixNano()) pack.Message.SetType("logfile") pack.Message.SetHostname(lsi.hostName) pack.Message.SetLogger(lsi.loggerIdent) pack.Message.SetPayload(payload) deliver(pack) lsi.countRecord() } } return }
func (input *NsqInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) (err error) { var ( dRunner pipeline.DecoderRunner ok bool ) if input.DecoderName != "" { if dRunner, ok = helper.DecoderRunner(input.DecoderName, fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok { return fmt.Errorf("Decoder not found: %s", input.DecoderName) } input.decoderChan = dRunner.InChan() } input.runner = runner input.packSupply = runner.InChan() input.consumer.AddHandler(input) err = input.consumer.ConnectToNSQDs(input.NsqdAddrs) if err != nil { return err } err = input.consumer.ConnectToNSQLookupds(input.LookupdAddrs) if err != nil { return err } <-input.consumer.StoppedChan() return nil }
func (rli *RedisListInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { var ( pack *pipeline.PipelinePack packs []*pipeline.PipelinePack ) // Get the InputRunner's chan to receive empty PipelinePacks inChan := ir.InChan() for { message, err := rli.conn.Do("RPOP", rli.conf.ListName) if err != nil { ir.LogError(fmt.Errorf("Redis RPOP error: %s", err)) // TODO: should reconnect redis rather than close it rli.Stop() break } if message != nil { pack = <-inChan pack.Message.SetType("redis_list") pack.Message.SetPayload(string(message.([]uint8))) packs = []*pipeline.PipelinePack{pack} if packs != nil { for _, p := range packs { ir.Inject(p) } } else { pack.Recycle(nil) } } else { time.Sleep(time.Second) } } return nil }
func (rpsi *RedisPubSubInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { var ( dRunner pipeline.DecoderRunner decoder pipeline.Decoder pack *pipeline.PipelinePack e error ok bool ) // Get the InputRunner's chan to receive empty PipelinePacks packSupply := ir.InChan() if rpsi.conf.DecoderName != "" { if dRunner, ok = h.DecoderRunner(rpsi.conf.DecoderName, fmt.Sprintf("%s-%s", ir.Name(), rpsi.conf.DecoderName)); !ok { return fmt.Errorf("Decoder not found: %s", rpsi.conf.DecoderName) } decoder = dRunner.Decoder() } //Connect to the channel psc := redis.PubSubConn{Conn: rpsi.conn} psc.PSubscribe(rpsi.conf.Channel) for { switch n := psc.Receive().(type) { case redis.PMessage: // Grab an empty PipelinePack from the InputRunner pack = <-packSupply pack.Message.SetType("redis_pub_sub") pack.Message.SetLogger(n.Channel) pack.Message.SetPayload(string(n.Data)) pack.Message.SetTimestamp(time.Now().UnixNano()) var packs []*pipeline.PipelinePack if decoder == nil { packs = []*pipeline.PipelinePack{pack} } else { packs, e = decoder.Decode(pack) } if packs != nil { for _, p := range packs { ir.Inject(p) } } else { if e != nil { ir.LogError(fmt.Errorf("Couldn't parse Redis message: %s", n.Data)) } pack.Recycle(nil) } case redis.Subscription: ir.LogMessage(fmt.Sprintf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count)) if n.Count == 0 { return errors.New("No channel to subscribe") } case error: fmt.Printf("error: %v\n", n) return n } } return nil }
func (rli *RedisInput) InsertMessage(ir pipeline.InputRunner, decoder pipeline.Decoder, msg string) { var ( pack *pipeline.PipelinePack e error ) // Get the InputRunner's chan to receive empty PipelinePacks packSupply := ir.InChan() pack = <-packSupply pack.Message.SetType(rli.conf.Key) pack.Message.SetLogger("Redis") pack.Message.SetPayload(msg) pack.Message.SetTimestamp(time.Now().UnixNano()) var packs []*pipeline.PipelinePack if decoder == nil { packs = []*pipeline.PipelinePack{pack} } else { packs, e = decoder.Decode(pack) } if packs != nil { for _, p := range packs { ir.Inject(p) } } else { if e != nil { ir.LogError(fmt.Errorf("Couldn't parse %s", msg)) pack.Recycle(e) } else { pack.Recycle(nil) fmt.Println("pack recycle!") } } }
// Run is the main loop which listens for incoming requests and injects the
// messages read into the heka machinery
func (hsi *HTTPSimpleInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) {
	hsi.stop = make(chan bool)
	hsi.input = make(chan *pipeline.PipelinePack)
	// errch is buffered (cap 1) so the listener can report one error
	// without blocking.
	hsi.errch = make(chan error, 1)
	hsi.packs = ir.InChan()
	hsi.DecoderRunner = h.DecoderRunner
	// The listener goroutine feeds hsi.input (packs) and hsi.errch (errors).
	go hsi.listen()
	var pack *pipeline.PipelinePack
INPUT:
	for {
		select {
		case err = <-hsi.errch:
			// A nil error on errch is ignored; any real error aborts Run.
			if err != nil {
				return
			}
		case pack = <-hsi.input:
			ir.Inject(pack)
		case _ = <-hsi.stop:
			// Stop requested: shut the listener down and leave the loop.
			if hsi.listener != nil {
				hsi.listener.Close()
				hsi.packs = nil
			}
			break INPUT
		}
	}
	// After stopping, drain a pending listener error if there is one;
	// otherwise close and clear the error channel.
	select {
	case err = <-hsi.errch:
		return
	default:
		close(hsi.errch)
		hsi.errch = nil
	}
	return nil
}
// Framed protobuf message parser
// Reads framed records from the logstream and delivers the protobuf body
// of each record (header stripped) via the supplied Deliver callback.
func (lsi *LogstreamInput) messageProtoParser(ir p.InputRunner, deliver Deliver, stop chan chan bool) (err error) {
	var (
		pack   *p.PipelinePack
		record []byte
		n      int
	)
	for err == nil {
		// Non-blocking check for a stop request.
		select {
		case lsi.stopped = <-stop:
			return
		default:
		}
		n, record, err = lsi.parser.Parse(lsi.stream)
		if n > 0 {
			lsi.stream.FlushBuffer(n)
		}
		if len(record) > 0 {
			pack = <-ir.InChan()
			// Frame layout: record separator byte, header-length byte
			// (record[1]), the header itself, then a unit separator; the
			// protobuf message body follows.
			headerLen := int(record[1]) + 3 // recsep+len+header+unitsep
			messageLen := len(record) - headerLen
			// ignore authentication headers
			// Grow the pack's buffer only when the message doesn't fit.
			if messageLen > cap(pack.MsgBytes) {
				pack.MsgBytes = make([]byte, messageLen)
			}
			pack.MsgBytes = pack.MsgBytes[:messageLen]
			copy(pack.MsgBytes, record[headerLen:])
			deliver(pack)
			lsi.countRecord()
		}
	}
	return
}
func (s *SandboxInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) { s.sb.InjectMessage(func(payload, payload_type, payload_name string) int { pack := <-ir.InChan() if err := proto.Unmarshal([]byte(payload), pack.Message); err != nil { pack.Recycle() return 1 } if s.tz != time.UTC { const layout = "2006-01-02T15:04:05.999999999" // remove the incorrect UTC tz info t := time.Unix(0, pack.Message.GetTimestamp()) t = t.In(time.UTC) ct, _ := time.ParseInLocation(layout, t.Format(layout), s.tz) pack.Message.SetTimestamp(ct.UnixNano()) } ir.Inject(pack) atomic.AddInt64(&s.processMessageCount, 1) atomic.AddInt64(&s.processMessageBytes, int64(len(payload))) return 0 }) ticker := ir.Ticker() for true { retval := s.sb.ProcessMessage(nil) if retval <= 0 { // Sandbox is in polling mode if retval < 0 { atomic.AddInt64(&s.processMessageFailures, 1) em := s.sb.LastError() if len(em) > 0 { ir.LogError(errors.New(em)) } } if ticker == nil { ir.LogMessage("single run completed") break } select { // block until stop or poll interval case <-s.stopChan: case <-ticker: } } else { // Sandbox is shutting down em := s.sb.LastError() if !strings.HasSuffix(em, "shutting down") { ir.LogError(errors.New(em)) } break } } s.reportLock.Lock() if s.sbc.PreserveData { err = s.sb.Destroy(s.preservationFile) } else { err = s.sb.Destroy("") } s.sb = nil s.reportLock.Unlock() return }
func (input *FilePollingInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error { var ( data []byte pack *pipeline.PipelinePack dRunner pipeline.DecoderRunner ok bool err error ) if input.DecoderName != "" { if dRunner, ok = helper.DecoderRunner(input.DecoderName, fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok { return fmt.Errorf("Decoder not found: %s", input.DecoderName) } input.decoderChan = dRunner.InChan() } input.runner = runner hostname := helper.PipelineConfig().Hostname() packSupply := runner.InChan() tickChan := runner.Ticker() for { select { case <-input.stop: return nil case <-tickChan: } data, err = ioutil.ReadFile(input.FilePath) if err != nil { runner.LogError(fmt.Errorf("Error reading file: %s", err)) continue } pack = <-packSupply pack.Message.SetUuid(uuid.NewRandom()) pack.Message.SetTimestamp(time.Now().UnixNano()) pack.Message.SetType("heka.file.polling") pack.Message.SetHostname(hostname) pack.Message.SetPayload(string(data)) if field, err := message.NewField("TickerInterval", int(input.TickerInterval), ""); err != nil { runner.LogError(err) } else { pack.Message.AddField(field) } if field, err := message.NewField("FilePath", input.FilePath, ""); err != nil { runner.LogError(err) } else { pack.Message.AddField(field) } input.sendPack(pack) } return nil }
func (cwi *CloudwatchInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) { cwi.stopChan = make(chan bool) cwi.req.StartTime = time.Now() ticker := time.NewTicker(cwi.pollInterval) ok := true var ( resp *cloudwatch.GetMetricStatisticsResponse point cloudwatch.Datapoint pack *pipeline.PipelinePack dim cloudwatch.Dimension ) metricLoop: for ok { select { case _, ok = <-cwi.stopChan: continue case <-ticker.C: cwi.req.EndTime = time.Now() resp, err = cwi.cw.GetMetricStatistics(cwi.req) if err != nil { ir.LogError(err) err = nil continue } for _, point = range resp.GetMetricStatisticsResult.Datapoints { pack, ok = <-ir.InChan() if !ok { break metricLoop } pack.Message.SetType("cloudwatch") for _, dim = range cwi.req.Dimensions { newField(pack, "Dimension."+dim.Name, dim.Value) } newField(pack, "Period", cwi.req.Period) newField(pack, "Average", point.Average) newField(pack, "Maximum", point.Maximum) newField(pack, "Minimum", point.Minimum) newField(pack, "SampleCount", point.SampleCount) newField(pack, "Unit", point.Unit) newField(pack, "Sum", point.Sum) pack.Message.SetUuid(uuid.NewRandom()) pack.Message.SetTimestamp(point.Timestamp.UTC().UnixNano()) pack.Message.SetLogger(cwi.namespace) pack.Message.SetPayload(cwi.req.MetricName) ir.Inject(pack) } cwi.req.StartTime = cwi.req.EndTime.Add(time.Duration(1) * time.Nanosecond) } } return nil }
func (zi *ZeroMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { // Get the InputRunner's chan to receive empty PipelinePacks packs := ir.InChan() var decoding chan<- *pipeline.PipelinePack if zi.conf.Decoder != "" { // Fetch specified decoder decoder, ok := h.DecoderSet().ByName(zi.conf.Decoder) if !ok { err := fmt.Errorf("Could not find decoder", zi.conf.Decoder) return err } // Get the decoder's receiving chan decoding = decoder.InChan() } var pack *pipeline.PipelinePack var count int var b []byte var err error // Read data from websocket broadcast chan for { b, err = zi.socket.Recv(0) if err != nil { ir.LogError(err) continue } // Grab an empty PipelinePack from the InputRunner pack = <-packs // Trim the excess empty bytes count = len(b) pack.MsgBytes = pack.MsgBytes[:count] // Copy ws bytes into pack's bytes copy(pack.MsgBytes, b) if decoding != nil { // Send pack onto decoder decoding <- pack } else { // Send pack into Heka pipeline ir.Inject(pack) } } return nil }
func (sip *SCAMPInputPlugin) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) { sip.service, err = scamp.NewService(sip.conf.Service, sip.conf.Name) if err != nil { return } announcer, err := scamp.NewDiscoveryAnnouncer() if err != nil { scamp.Error.Printf("failed to create announcer: `%s`", err) return } announcer.Track(sip.service) go announcer.AnnounceLoop() var handlerConfig SCAMPInputHandlerConfig for _, handlerConfig = range sip.conf.Handlers { scamp.Trace.Printf("registering handler: `%s`", handlerConfig) sip.service.Register(handlerConfig.Action, func(msg *scamp.Message, client *scamp.Client) { var pack *pipeline.PipelinePack pack = <-ir.InChan() pack.Message.SetUuid(uuid.NewRandom()) pack.Message.SetTimestamp(time.Now().UnixNano()) pack.Message.SetPayload(string(msg.Bytes()[:])) pack.Message.SetSeverity(int32(handlerConfig.Severity)) pack.Message.SetLogger(handlerConfig.Logger) // TODO not sure what this means ir.Deliver(pack) reply := scamp.NewMessage() reply.SetMessageType(scamp.MESSAGE_TYPE_REPLY) reply.SetEnvelope(scamp.ENVELOPE_JSON) reply.SetRequestId(msg.RequestId) reply.Write([]byte("{}")) scamp.Trace.Printf("sending msg: {requestId: %d, type: `%s`, envelope: `%s`, body: `%s`}", reply.RequestId, reply.MessageType, reply.Envelope, reply.Bytes()) _, err = client.Send(reply) if err != nil { scamp.Error.Printf("could not reply to message: `%s`", err) client.Close() return } }) } sip.service.Run() return }
// Run attaches to the Docker log stream and converts each received log line
// into a "DockerLog" message until stopChan fires or the attacher's error
// channel closes.
func (di *DockerLogInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		pack *pipeline.PipelinePack
		ok   bool
	)
	hostname := h.Hostname()

	// The attach manager feeds log lines into di.logstream.
	go di.attachMgr.Listen(di.logstream, di.closer)

	// Get the InputRunner's chan to receive empty PipelinePacks
	packSupply := ir.InChan()

	ok = true
	var err error
	for ok {
		select {
		case logline := <-di.logstream:
			pack = <-packSupply
			pack.Message.SetType("DockerLog")
			pack.Message.SetLogger(logline.Type) // stderr or stdout
			pack.Message.SetHostname(hostname)   // Use the host's hostname
			pack.Message.SetPayload(logline.Data)
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetUuid(uuid.NewRandom())
			for k, v := range logline.Fields {
				message.NewStringField(pack.Message, k, v)
			}
			ir.Deliver(pack)
		case err, ok = <-di.attachErrors:
			// A closed error channel means the attacher is gone; report it.
			// NOTE(review): `break` here only exits the select, so the loop
			// actually ends via ok == false — confirm this is intended.
			if !ok {
				err = errors.New("Docker event channel closed")
				break
			}
			ir.LogError(fmt.Errorf("Attacher error: %s", err))
		case err = <-di.stopChan:
			ok = false
		}
	}

	// Signal the attach manager to shut down, then close the stream.
	di.closer <- struct{}{}
	close(di.logstream)
	return err
}
func (dei *DockerEventInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { defer dei.dockerClient.RemoveEventListener(dei.eventStream) defer close(dei.eventStream) var ( ok bool err error pack *pipeline.PipelinePack ) hostname := h.Hostname() // Provides empty PipelinePacks packSupply := ir.InChan() ok = true for ok { select { case event := <-dei.eventStream: pack = <-packSupply pack.Message.SetType("DockerEvent") pack.Message.SetLogger(event.ID) pack.Message.SetHostname(hostname) payload := fmt.Sprintf("id:%s status:%s from:%s time:%d", event.ID, event.Status, event.From, event.Time) pack.Message.SetPayload(payload) pack.Message.SetTimestamp(time.Now().UnixNano()) pack.Message.SetUuid(uuid.NewRandom()) message.NewStringField(pack.Message, "ID", event.ID) message.NewStringField(pack.Message, "Status", event.Status) message.NewStringField(pack.Message, "From", event.From) message.NewInt64Field(pack.Message, "Time", event.Time, "ts") ir.Deliver(pack) case err = <-dei.stopChan: ok = false } } return err }
func (ri *RedisMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { // Get the InputRunner's chan to receive empty PipelinePacks packs := ir.InChan() var decoding chan<- *pipeline.PipelinePack if ri.conf.Decoder != "" { // Fetch specified decoder decoder, ok := h.DecoderRunner(ri.conf.Decoder) if !ok { err := fmt.Errorf("Could not find decoder", ri.conf.Decoder) return err } // Get the decoder's receiving chan decoding = decoder.InChan() } var pack *pipeline.PipelinePack //var p []*redismq.Package var p *redismq.Package var count int var b []byte var err error for { p, err = ri.rdconsumer.Get() if err != nil { ir.LogError(err) continue } err = p.Ack() if err != nil { ir.LogError(err) } b = []byte(p.Payload) // Grab an empty PipelinePack from the InputRunner pack = <-packs // Trim the excess empty bytes count = len(b) pack.MsgBytes = pack.MsgBytes[:count] // Copy ws bytes into pack's bytes copy(pack.MsgBytes, b) if decoding != nil { // Send pack onto decoder decoding <- pack } else { // Send pack into Heka pipeline ir.Inject(pack) } } /* checkStat := time.Tick(ri.statInterval) ok := true for ok { select { case _, ok = <-ri.stopChan: break case <-checkStat: p, err = ri.rdconsumer.MultiGet(500) if err != nil { ir.LogError(err) continue } err = p[len(p)-1].MultiAck() if err != nil { ir.LogError(err) } for _, v := range p { b = []byte(v.Payload) // Grab an empty PipelinePack from the InputRunner pack = <-packs // Trim the excess empty bytes count = len(b) pack.MsgBytes = pack.MsgBytes[:count] // Copy ws bytes into pack's bytes copy(pack.MsgBytes, b) if decoding != nil { // Send pack onto decoder decoding <- pack } else { // Send pack into Heka pipeline ir.Inject(pack) } } } } */ return nil }
func (input *DockerStatsInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error { var pack *pipeline.PipelinePack input.runner = runner packSupply := runner.InChan() tickChan := runner.Ticker() hostname := helper.PipelineConfig().Hostname() for { select { case <-input.stop: return nil case <-tickChan: } var ( // test chan bool //err error previousCPU, previousSystem uint64 mstats *dockerStat preCPUStats, stats *docker.Stats ) endpoint := "unix:///var/run/docker.sock" client, _ := docker.NewClient(endpoint) containers, _ := client.ListContainers(docker.ListContainersOptions{Filters: map[string][]string{"status": {"running"}}}) for _, container := range containers { if containerName, exists := input.cacheHostnames[container.ID]; !exists { containerName = strings.Replace(container.Names[0], "/", "", -1) input.cacheHostnames[container.ID] = containerName if input.NameFromEnv != "" { con, _ := client.InspectContainer(container.ID) for _, value := range con.Config.Env { parts := strings.SplitN(value, "=", 2) if len(parts) == 2 { if input.NameFromEnv == parts[0] { containerName = parts[1] input.cacheHostnames[container.ID] = containerName break } } } } } opts := docker.StatsStaticOptions{ID: container.ID, Stream: false} preCPUStats, _ = client.StatsStatic(opts) if preCPUStats == nil { continue } previousCPU = preCPUStats.CPUStats.CPUUsage.TotalUsage previousSystem = preCPUStats.CPUStats.SystemCPUUsage stats, _ = client.StatsStatic(opts) if stats == nil { continue } mstats = &dockerStat{} mstats.CPUPercent = calculateCPUPercent(previousCPU, previousSystem, stats) mstats.MemPercent = calculateMemPercent(stats) mstats.MemUsage = stats.MemoryStats.Usage mstats.MemLimit = stats.MemoryStats.Limit mstats.BlockRead, mstats.BlockWrite = calculateBlockIO(stats) for _, networkstat := range stats.Networks { mstats.NetworkRx = networkstat.RxBytes mstats.NetworkTx = networkstat.TxBytes } pack = <-packSupply pack.Message.SetUuid(uuid.NewRandom()) 
pack.Message.SetTimestamp(time.Now().Unix()) pack.Message.SetType("DockerStats") pack.Message.SetHostname(hostname) containerName, _ := message.NewField("ContainerName", string(strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1)), "") pack.Message.AddField(containerName) cpuPercent, _ := message.NewField("CPUPercent", float64(mstats.CPUPercent), "") pack.Message.AddField(cpuPercent) memPercent, _ := message.NewField("MemoryPercent", float64(mstats.MemPercent), "") pack.Message.AddField(memPercent) memLimit, _ := message.NewField("MemoryLimit", int64(mstats.MemLimit), "") pack.Message.AddField(memLimit) memUsage, _ := message.NewField("MemoryUsage", int64(mstats.MemUsage), "") pack.Message.AddField(memUsage) netInput, _ := message.NewField("NetworkInput", int64(mstats.NetworkRx), "") pack.Message.AddField(netInput) netOutput, _ := message.NewField("NetworkOutput", int64(mstats.NetworkTx), "") pack.Message.AddField(netOutput) blockInput, _ := message.NewField("BlockInput", int64(mstats.BlockRead), "") pack.Message.AddField(blockInput) blockOutput, _ := message.NewField("BlockOutput", int64(mstats.BlockWrite), "") pack.Message.AddField(blockOutput) pack.Message.SetPayload(fmt.Sprintf("container_name %s\ncpu %.2f\nmem_usage %d\nmem_limit %d\nmem %.2f\nnet_input %d\nnet_output %d\nblock_input %d\nblock_output %d", strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1), mstats.CPUPercent, mstats.MemUsage, mstats.MemLimit, mstats.MemPercent, mstats.NetworkRx, mstats.NetworkTx, mstats.BlockRead, mstats.BlockWrite)) runner.Deliver(pack) } } return nil }
func (ni *NsqInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error { // Get the InputRunner's chan to receive empty PipelinePacks var pack *pipeline.PipelinePack var err error var dRunner pipeline.DecoderRunner var decoder pipeline.Decoder var ok bool var e error //pos := 0 //output := make([]*Message, 2) packSupply := ir.InChan() if ni.conf.Decoder != "" { if dRunner, ok = h.DecoderRunner(ni.conf.Decoder); !ok { return fmt.Errorf("Decoder not found: %s", ni.conf.Decoder) } decoder = dRunner.Decoder() } err = ni.nsqReader.ConnectToLookupd(ni.conf.Address) if err != nil { ir.LogError(errors.New("ConnectToLookupd failed.")) } header := &message.Header{} stopped := false //readLoop: for !stopped { //stopped = true select { case <-ni.stopChan: ir.LogError(errors.New("get ni.stopChan, set stopped=true")) stopped = true default: pack = <-packSupply m, ok1 := <-ni.handler.logChan if !ok1 { stopped = true break } if ni.conf.Serialize { if dRunner == nil { pack.Recycle() ir.LogError(errors.New("Serialize messages require a decoder.")) } //header := &message.Header{} _, msgOk := findMessage(m.msg.Body, header, &(pack.MsgBytes)) if msgOk { dRunner.InChan() <- pack } else { pack.Recycle() ir.LogError(errors.New("Can't find Heka message.")) } header.Reset() } else { //ir.LogError(fmt.Errorf("message body: %s", m.msg.Body)) pack.Message.SetType("nsq") pack.Message.SetPayload(string(m.msg.Body)) pack.Message.SetTimestamp(time.Now().UnixNano()) var packs []*pipeline.PipelinePack if decoder == nil { packs = []*pipeline.PipelinePack{pack} } else { packs, e = decoder.Decode(pack) } if packs != nil { for _, p := range packs { ir.Inject(p) } } else { if e != nil { ir.LogError(fmt.Errorf("Couldn't parse Nsq message: %s", m.msg.Body)) } pack.Recycle() } } m.returnChannel <- &nsq.FinishedMessage{m.msg.Id, 0, true} /* output[pos] = m pos++ if pos == 2 { for pos > 0 { pos-- m1 := output[pos] m1.returnChannel <- &nsq.FinishedMessage{m1.msg.Id, 0, true} output[pos] = nil } } */ } 
} return nil }