Example #1
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	var (
		outBytes []byte
		e        error
	)

	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
		} else if outBytes != nil {
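			// Enforce the configured maximum UDP payload size before writing.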
			msgSize := len(outBytes)
			if msgSize > o.UdpOutputConfig.MaxMessageSize {
				or.LogError(fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d", msgSize, o.UdpOutputConfig.MaxMessageSize))
			} else {
				o.conn.Write(outBytes)
			}
		}
		pack.Recycle()
	}
	return
}
Example #2
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
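		// The pack can be recycled as soon as encoding is done; only
		// outBytes is used from here on.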
		pack.Recycle()
		if e != nil {
			or.LogError(e)
			continue
		}
		if outBytes == nil {
			continue
		}
		if e = o.request(or, outBytes); e != nil {
			or.LogError(e)
		}
	}

	return
}
Example #3
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		if e != nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(fmt.Errorf("can't encode: %s", e))
			continue
		}
		if outBytes == nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
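		// On request failure, recycle with a retry error so Heka redelivers
		// the message; the queue cursor advances only on success.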
		if e = o.request(or, outBytes); e != nil {
			e = pipeline.NewRetryMessageError(e.Error())
			pack.Recycle(e)
		} else {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
		}
	}

	return
}
Example #4
func (sop *SCAMPOutputPlugin) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var pack *pipeline.PipelinePack

	// We have no default encoder
	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	for pack = range or.InChan() {
		scamp.Info.Printf("received pipeline pack")
		encoded, err := or.Encode(pack) // pack.Message.GetPayload()

		if err == nil {
			scamp.Info.Printf("payload: %s", encoded)
			msg := scamp.NewMessage()
			msg.SetEnvelope(scamp.ENVELOPE_JSON)
			msg.SetAction(sop.conf.Action)
			msg.SetVersion(1)
			msg.Write(encoded)
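			// Note: the SCAMP message is built and its payload written, but
			// this snippet never sends it; presumably that happens elsewhere
			// in the original plugin.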
		}

		pack.Recycle(err)
	}
	fmt.Println("sup from end of for loop in Run")
	return
}
Example #5
func (output *IrcOutput) Run(runner pipeline.OutputRunner,
	helper pipeline.PluginHelper) error {
	if runner.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	output.runner = runner

	// Register callbacks to handle events
	registerCallbacks(output)

	var err error

	// Connect to the Irc Server
	err = output.Conn.Connect(output.Server)
	if err != nil {
		return fmt.Errorf("Unable to connect to irc server %s: %s",
			output.Server, err)
	}

	// Start a goroutine for receiving messages, and throttling before sending
	// to the Irc Server
	output.wg.Add(1)
	go processOutQueue(output)

	var outgoing []byte
	ok := true
	inChan := runner.InChan()
	var pack *pipeline.PipelinePack
	for ok {
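		// Wait for either the next pack or a signal on the die channel.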
		select {
		case pack, ok = <-inChan:
		case <-output.die:
			ok = false
		}
		if !ok {
			break
		}
		outgoing, err = runner.Encode(pack)
		if err != nil {
			output.runner.LogError(err)
		} else if outgoing != nil {
			// Send the message to each irc channel. If the out queue is full,
			// then we need to drop the message and log an error.
			for i, ircChannel := range output.Channels {
				ircMsg := IrcMsg{outgoing, ircChannel, i}
				select {
				case output.OutQueue <- ircMsg:
				default:
					output.runner.LogError(ErrOutQueueFull)
				}
			}
		}
		pack.Recycle()
	}
	output.cleanup()
	return nil
}
Example #6
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

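		// Note: the ':=' below shadows the named return 'err'; encode and
		// queue failures are counted and logged, not returned.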
		if msgBytes, err := or.Encode(pack); err == nil {
			if msgBytes != nil {
				err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
				if err != nil {
					atomic.AddInt64(&k.processMessageFailures, 1)
					or.LogError(err)
				}
			} else {
				atomic.AddInt64(&k.processMessageDiscards, 1)
			}
		} else {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
		}
		pack.Recycle()
	}
	errChan <- Shutdown
	wg.Wait()
	return
}
Example #7
func (k *KinesisOutput) HandlePackage(or pipeline.OutputRunner, pack *pipeline.PipelinePack) error {

	// If we are flushing, wait until we have finished.
	k.flushLock.Lock()
	defer k.flushLock.Unlock()

	// encode the packages.
	msg, err := or.Encode(pack)
	if err != nil {
		errOut := fmt.Errorf("Error encoding message: %s", err)
		or.LogError(errOut)
		pack.Recycle(nil)
		return errOut
	}

	// If we only care about the Payload...
	if k.config.PayloadOnly {
		msg = []byte(pack.Message.GetPayload())
	}

	var tmp []byte
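	// Entries are joined with commas here; the enclosing '[' and ']' are
	// added when the batch is handed to AddToRecordBatch below.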
	// if we already have data then we should append.
	if len(k.batchedData) > 0 {
		tmp = append(append(k.batchedData, []byte(",")...), msg...)
	} else {
		tmp = msg
	}

	// if we can't fit the data in this record
	if len(tmp) > k.KINESIS_RECORD_SIZE {
		// add the existing data to the output batch
		array := append(append([]byte("["), k.batchedData...), []byte("]")...)
		k.AddToRecordBatch(or, array)

		// update the batched data to only contain the current message.
		k.batchedData = msg
	} else {
		// otherwise we add the existing data to a batch
		k.batchedData = tmp
	}

	// do reporting and tidy up
	atomic.AddInt64(&k.processMessageCount, 1)
	pack.Recycle(nil)

	return nil
}
Example #8
func (o *UnixOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}
	var (
		outBytes []byte
		e        error
	)
	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
		} else if outBytes != nil {
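			// Note: the Write error is discarded; failed writes are silent.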
			o.conn.Write(outBytes)
		}
		pack.Recycle()
	}
	return
}
Example #9
func (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {
	var (
		pack   *pipeline.PipelinePack
		msg    []byte
		pk     string
		err    error
		params *kin.PutRecordInput
	)

	if or.Encoder() == nil {
		return fmt.Errorf("Encoder required.")
	}

	for pack = range or.InChan() {
		msg, err = or.Encode(pack)
		if err != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", err))
			pack.Recycle(nil)
			continue
		}
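		// Build a partition key from timestamp and hostname to spread
		// records across shards.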
		pk = fmt.Sprintf("%d-%s", pack.Message.Timestamp, pack.Message.Hostname)
		if k.config.PayloadOnly {
			msg = []byte(pack.Message.GetPayload())
		}
		params = &kin.PutRecordInput{
			Data:         msg,
			PartitionKey: aws.String(pk),
			StreamName:   aws.String(k.config.Stream),
		}
		_, err = k.Client.PutRecord(params)
		if err != nil {
			or.LogError(fmt.Errorf("Error pushing message to Kinesis: %s", err))
			pack.Recycle(nil)
			continue
		}
		pack.Recycle(nil)
	}

	return nil
}
Example #10
func (o *OpenTsdbOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for i := 0; i < o.TsdbWriterCount; i++ {
		go WriteDataToOpenTSDB(o)
	}
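	// The writer goroutines spawned above presumably drain o.logMsgChan and
	// ship the encoded data to OpenTSDB (see WriteDataToOpenTSDB).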

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		pack.Recycle(e)
		if e != nil {
			or.LogError(e)
			continue
		}
		if outBytes == nil {
			continue
		}

		o.logMsgChan <- outBytes
	}

	return
}
Example #11
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	pInChan := k.producer.Input()
	shutdownChan := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, shutdownChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, err := or.Encode(pack)
		if err != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
			// Don't retry encoding errors.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
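		// Hand the encoded message to sarama's async producer; delivery
		// errors come back on errChan and are handled by processKafkaErrors.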
		pMessage := &sarama.ProducerMessage{
			Topic: topic,
			Key:   key,
			Value: sarama.ByteEncoder(msgBytes),
		}
		pInChan <- pMessage
		pack.Recycle(nil)
	}

	close(shutdownChan)
	wg.Wait()
	return
}