func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	useBuffering := or.UsesBuffering()

	// Watch the producer's async error channel in a separate goroutine.
	errChan := k.producer.Errors()
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		// Topic and partition key can be pulled from message fields.
		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, err := or.Encode(pack)
		if err != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
			// Don't retry encoding errors.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			// Encoder chose to skip this message.
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}

		err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
		if err != nil {
			if !useBuffering {
				atomic.AddInt64(&k.processMessageFailures, 1)
			}
			or.LogError(err)
		}
		// Recycling with a non-nil error lets the buffering machinery
		// decide whether the message should be retried.
		pack.Recycle(err)
	}

	errChan <- Shutdown
	wg.Wait()
	return
}
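
// The error-watcher goroutine launched by Run is not shown in this excerpt.
// Below is a minimal sketch of what processKafkaErrors might look like. The
// Shutdown sentinel is implied by the `errChan <- Shutdown` handoff at the end
// of Run, but its definition here and the kafkaDroppedMessages counter are
// assumptions for illustration, not the plugin's actual implementation.

// Shutdown is a hypothetical sentinel error; Run sends it on errChan to tell
// the watcher goroutine to exit once the input channel has drained.
var Shutdown = errors.New("shutdown")

func (k *KafkaOutput) processKafkaErrors(or pipeline.OutputRunner, errChan chan error,
	wg *sync.WaitGroup) {

	defer wg.Done()
	for err := range errChan {
		if err == Shutdown {
			// Run has finished; stop watching for producer errors.
			break
		}
		if err != nil {
			// Hypothetical counter; the real plugin may account for
			// asynchronous producer failures differently.
			atomic.AddInt64(&k.kafkaDroppedMessages, 1)
			or.LogError(err)
		}
	}
}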