Example No. 1
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		if e != nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(fmt.Errorf("can't encode: %s", e))
			continue
		}
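		// A nil result with no error means the encoder skipped this message; just acknowledge it.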
		if outBytes == nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
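		// Deliver the encoded payload via o.request; on failure, wrap the error so the message can be retried.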
		if e = o.request(or, outBytes); e != nil {
			e = pipeline.NewRetryMessageError(e.Error())
			pack.Recycle(e)
		} else {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
		}
	}

	return
}
Example No. 2
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	var (
		outBytes []byte
		e        error
	)

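	// Encode each pack, enforce the size limit, and send the result as a single UDP datagram.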
	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			or.UpdateCursor(pack.QueueCursor)
			e = fmt.Errorf("Error encoding message: %s", e.Error())
			pack.Recycle(e)
			continue
		} else if outBytes != nil {
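			// Refuse to send messages larger than the configured maximum UDP payload size.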
			msgSize := len(outBytes)
			if msgSize > o.UdpOutputConfig.MaxMessageSize {
				or.UpdateCursor(pack.QueueCursor)
				e = fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d",
					msgSize, o.UdpOutputConfig.MaxMessageSize)
				pack.Recycle(e)
				continue
			} else {
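				// Send the datagram; the write error is not checked here.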
				o.conn.Write(outBytes)
			}
		}
		or.UpdateCursor(pack.QueueCursor)
		pack.Recycle(nil)
	}
	return
}
Example No. 3
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	useBuffering := or.UsesBuffering()
	errChan := k.producer.Errors()
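	// Drain producer errors in a background goroutine until shutdown is signaled.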
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, err := or.Encode(pack)
		if err != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
			// Don't retry encoding errors.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
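		// Queue the encoded message on the producer; without buffering, a queueing error counts as a failure.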
		err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
		if err != nil {
			if !useBuffering {
				atomic.AddInt64(&k.processMessageFailures, 1)
			}
			or.LogError(err)
		}
		pack.Recycle(err)
	}

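	// Tell the error-handling goroutine to shut down and wait for it to finish.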
	errChan <- Shutdown
	wg.Wait()
	return
}
Example No. 4
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack      *pipeline.PipelinePack
		retval    int
		inChan    = or.InChan()
		duration  int64
		startTime time.Time
		ok        = true
		ticker    = or.Ticker()
	)

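	// Run until the input channel closes or the sandbox reports a fatal error.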
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			if s.sample {
				startTime = time.Now()
			}
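			// Hand the pack to the sandbox: 0 = success, < 0 = per-message failure, > 0 = fatal.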
			retval = s.sb.ProcessMessage(pack)
			if s.sample {
				duration = time.Since(startTime).Nanoseconds()
				s.reportLock.Lock()
				s.processMessageDuration += duration
				s.processMessageSamples++
				s.reportLock.Unlock()
			}
			s.sample = 0 == rand.Intn(s.sampleDenominator)

			or.UpdateCursor(pack.QueueCursor) // TODO: support retries?
			if retval == 0 {
				atomic.AddInt64(&s.processMessageCount, 1)
				pack.Recycle(nil)
			} else if retval < 0 {
				atomic.AddInt64(&s.processMessageFailures, 1)
				var e error
				em := s.sb.LastError()
				if len(em) > 0 {
					e = errors.New(em)
				}
				pack.Recycle(e)
			} else {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				pack.Recycle(err)
				ok = false
			}

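		// Periodic timer event; a non-zero return value from the sandbox is fatal.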
		case t := <-ticker:
			startTime = time.Now()
			if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}
			duration = time.Since(startTime).Nanoseconds()
			s.reportLock.Lock()
			s.timerEventDuration += duration
			s.timerEventSamples++
			s.reportLock.Unlock()
		}
	}

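	// Optionally fire a final timer event on shutdown, then tear down the sandbox.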
	if err == nil && s.sbc.TimerEventOnShutdown {
		if retval = s.sb.TimerEvent(time.Now().UnixNano()); retval != 0 {
			err = fmt.Errorf("FATAL: %s", s.sb.LastError())
		}
	}

	destroyErr := s.destroy()
	if destroyErr != nil {
		if err != nil {
			or.LogError(err)
		}
		err = destroyErr
	}
	return err
}
Example No. 5
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	pInChan := k.producer.Input()
	shutdownChan := make(chan struct{})
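	// Handle asynchronous producer errors in a background goroutine until shutdownChan is closed.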
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, shutdownChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, err := or.Encode(pack)
		if err != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
			// Don't retry encoding errors.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
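		// Build the producer message and hand it to sarama's asynchronous input channel.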
		pMessage := &sarama.ProducerMessage{
			Topic: topic,
			Key:   key,
			Value: sarama.ByteEncoder(msgBytes),
		}
		pInChan <- pMessage
		pack.Recycle(nil)
	}

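	// Signal shutdown to the error-handling goroutine and wait for it to finish.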
	close(shutdownChan)
	wg.Wait()
	return
}