Example #1
// Run is the plugin's main loop: it iterates over received messages, builds a
// subject line from the message metadata plus a truncated copy of the payload,
// appends the full payload as the body, and hands the result to sendMail.
func (o *EmailOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
	err error) {

	var (
		payload string
	)
	body := bytes.NewBuffer(nil)

	for pack := range runner.InChan() {
		payload = pack.Message.GetPayload()
		if len(payload) > 100 {
			payload = payload[:100]
		}
		body.WriteString(fmt.Sprintf("Subject: %s [%d] %s@%s: ",
			utils.TsTime(pack.Message.GetTimestamp()).Format(time.RFC3339),
			pack.Message.GetSeverity(), pack.Message.GetLogger(),
			pack.Message.GetHostname()))
		body.WriteString(payload)
		body.WriteString("\r\n\r\n")
		body.WriteString(pack.Message.GetPayload())
		pack.Recycle()
		err = o.sendMail(body.Bytes())
		body.Reset()
		if err != nil {
			return fmt.Errorf("error sending email: %s", err)
		}

	}
	return
}
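The sendMail helper used above is not part of the example. As a point of reference, here is a minimal sketch of such a helper built on the standard library's net/smtp; the emailSender type and its fields are illustrative assumptions, not Heka's actual EmailOutput configuration.

import "net/smtp"

// emailSender is an illustrative stand-in for the state a sendMail helper
// needs; the real EmailOutput keeps equivalent settings in its config.
type emailSender struct {
	addr string    // SMTP server, "host:port"
	auth smtp.Auth // nil for an unauthenticated relay
	from string
	to   []string
}

// sendMail delivers one already-formatted message (the "Subject:" line and
// body assembled by Run above).
func (s *emailSender) sendMail(body []byte) error {
	return smtp.SendMail(s.addr, s.auth, s.from, s.to, body)
}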
Example #2
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		pack.Recycle()
		if e != nil {
			or.LogError(e)
			continue
		}
		if outBytes == nil {
			continue
		}
		if e = o.request(or, outBytes); e != nil {
			or.LogError(e)
		}
	}

	return
}
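The request helper called above is not shown. A hedged sketch of what it might look like with net/http follows; the httpPoster type, its fields, and the content type are assumptions, and the real HttpOutput handles methods, auth, and status codes this sketch omits.

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/mozilla-services/heka/pipeline"
)

// httpPoster is an illustrative receiver; the real HttpOutput's fields differ.
type httpPoster struct {
	client *http.Client
	url    string
}

// request POSTs one encoded message and treats any non-2xx status as an error.
// The OutputRunner parameter mirrors the call site above but is unused here.
func (o *httpPoster) request(or pipeline.OutputRunner, body []byte) error {
	resp, err := o.client.Post(o.url, "application/octet-stream", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	io.Copy(ioutil.Discard, resp.Body) // drain so the connection can be reused
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected HTTP response: %s", resp.Status)
	}
	return nil
}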
Example #3
func (sop *SCAMPOutputPlugin) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var pack *pipeline.PipelinePack

	// We have no default encoder
	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	for pack = range or.InChan() {
		scamp.Info.Printf("received pipeline pack")
		encoded, err := or.Encode(pack) // pack.Message.GetPayload()

		if err == nil {
			scamp.Info.Printf("payload: %s", encoded)
			msg := scamp.NewMessage()
			msg.SetEnvelope(scamp.ENVELOPE_JSON)
			msg.SetAction(sop.conf.Action)
			msg.SetVersion(1)
			msg.Write(encoded)
		}

		pack.Recycle(err)
	}
	fmt.Println("sup from end of for loop in Run")
	return
}
Example #4
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	var (
		outBytes []byte
		e        error
	)

	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
		} else if outBytes != nil {
			msgSize := len(outBytes)
			if msgSize > o.UdpOutputConfig.MaxMessageSize {
				or.LogError(fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d", msgSize, o.UdpOutputConfig.MaxMessageSize))
			} else {
				o.conn.Write(outBytes)
			}
		}
		pack.Recycle()
	}
	return
}
Example #5
func (f *FirehoseOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	for pack := range or.InChan() {
		payload := pack.Message.GetPayload()
		timestamp := time.Unix(0, pack.Message.GetTimestamp()).Format("2006-01-02 15:04:05.000")
		pack.Recycle(nil)

		// Verify input is valid json
		object := make(map[string]interface{})
		err := json.Unmarshal([]byte(payload), &object)
		if err != nil {
			or.LogError(err)
			continue
		}

		if f.timestampColumn != "" {
			// add Heka message's timestamp to column named in timestampColumn
			object[f.timestampColumn] = timestamp
		}

		record, err := json.Marshal(object)
		if err != nil {
			or.LogError(err)
			continue
		}

		// Send data to the firehose
		err = f.client.PutRecord(record)
		if err != nil {
			or.LogError(err)
			continue
		}
	}
	return nil
}
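The f.client.PutRecord call above goes through a wrapper the example does not show. Assuming the plugin wraps the aws-sdk-go Firehose client with a fixed delivery stream, a minimal sketch could look like this; the firehoseClient type and its fields are illustrative.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// firehoseClient is an assumed wrapper shape, not the plugin's real client.
type firehoseClient struct {
	svc    *firehose.Firehose
	stream string // delivery stream name
}

// PutRecord sends one encoded record to the configured delivery stream.
func (c *firehoseClient) PutRecord(data []byte) error {
	_, err := c.svc.PutRecord(&firehose.PutRecordInput{
		DeliveryStreamName: aws.String(c.stream),
		Record:             &firehose.Record{Data: data},
	})
	return err
}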
Example #6
func (wso *WebSocketsOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	for pc := range or.InChan() {
		wso.broadcast <- pc.Pack.Message
		pc.Pack.Recycle()
	}
	return nil
}
Example #7
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		if e != nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(fmt.Errorf("can't encode: %s", e))
			continue
		}
		if outBytes == nil {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if e = o.request(or, outBytes); e != nil {
			e = pipeline.NewRetryMessageError(e.Error())
			pack.Recycle(e)
		} else {
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
		}
	}

	return
}
Example #8
func (output *IrcOutput) Run(runner pipeline.OutputRunner,
	helper pipeline.PluginHelper) error {
	if runner.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	output.runner = runner

	// Register callbacks to handle events
	registerCallbacks(output)

	var err error

	// Connect to the Irc Server
	err = output.Conn.Connect(output.Server)
	if err != nil {
		return fmt.Errorf("Unable to connect to irc server %s: %s",
			output.Server, err)
	}

	// Start a goroutine for receiving messages and throttling them before
	// sending to the IRC server.
	output.wg.Add(1)
	go processOutQueue(output)

	var outgoing []byte
	ok := true
	inChan := runner.InChan()
	var pack *pipeline.PipelinePack
	for ok {
		select {
		case pack, ok = <-inChan:
		case <-output.die:
			ok = false
		}
		if !ok {
			break
		}
		outgoing, err = runner.Encode(pack)
		if err != nil {
			output.runner.LogError(err)
		} else if outgoing != nil {
			// Send the message to each irc channel. If the out queue is full,
			// then we need to drop the message and log an error.
			for i, ircChannel := range output.Channels {
				ircMsg := IrcMsg{outgoing, ircChannel, i}
				select {
				case output.OutQueue <- ircMsg:
				default:
					output.runner.LogError(ErrOutQueueFull)
				}
			}
		}
		pack.Recycle()
	}
	output.cleanup()
	return nil
}
Example #9
func (clo *CloudLoggingOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack       *pipeline.PipelinePack
		e          error
		k          string
		m          *logging.LogEntry
		exist      bool
		ok         = true
		inChan     = or.InChan()
		groupBatch = make(map[string]*LogBatch)
		outBatch   *LogBatch
		ticker     = time.Tick(time.Duration(clo.conf.FlushInterval) * time.Millisecond)
	)
	clo.or = or
	go clo.committer()
	for ok {
		select {
		case pack, ok = <-inChan:
			// Closed inChan => we're shutting down, flush data.
			if !ok {
				clo.sendGroupBatch(groupBatch)
				close(clo.batchChan)
				<-clo.outputExit
				break
			}

			k, m, e = clo.Encode(pack)
			pack.Recycle()
			if e != nil {
				or.LogError(e)
				continue
			}

			if k != "" && m != nil {
				outBatch, exist = groupBatch[k]
				if !exist {
					outBatch = &LogBatch{count: 0, batch: make([]*logging.LogEntry, 0, 100), name: k}
					groupBatch[k] = outBatch
				}

				outBatch.batch = append(outBatch.batch, m)
				if outBatch.count++; clo.CheckFlush(int(outBatch.count), len(outBatch.batch)) {
					if len(outBatch.batch) > 0 {
						outBatch.batch = clo.sendBatch(k, outBatch.batch, outBatch.count)
						outBatch.count = 0
					}
				}
			}
		case <-ticker:
			clo.sendGroupBatch(groupBatch)
		case err = <-clo.outputExit:
			ok = false
		}
	}
	return
}
Example #10
func (ro *RedisMQOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	var outgoing string
	for pack := range or.InChan() {
		outgoing = fmt.Sprintf("%s", pack.Message.GetPayload())
		ro.rdqueue.Put(outgoing)
		pack.Recycle()
	}

	return nil
}
Example #11
func (cmo *CloudMonitoringOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack     *pipeline.PipelinePack
		e        error
		m        *cloudmonitoring.TimeseriesPoint
		ok       = true
		count    int64
		inChan   = or.InChan()
		outBatch = make([]*cloudmonitoring.TimeseriesPoint, 0, 200)
		ticker   = time.Tick(time.Duration(cmo.conf.FlushInterval) * time.Millisecond)
	)
	cmo.or = or
	go cmo.committer()
	for ok {
		select {
		case pack, ok = <-inChan:
			// Closed inChan => we're shutting down, flush data.
			if !ok {
				if len(outBatch) > 0 {
					cmo.sendBatch(outBatch, count)
				}
				close(cmo.batchChan)
				<-cmo.outputExit
				break
			}

			m, e = cmo.Encode(pack)
			pack.Recycle()
			if e != nil {
				or.LogError(e)
				continue
			}

			if m != nil {
				outBatch = append(outBatch, m)

				if count++; cmo.CheckFlush(int(count), len(outBatch)) {
					if len(outBatch) > 0 {
						outBatch = cmo.sendBatch(outBatch, count)
						count = 0
					}
				}
			}
		case <-ticker:
			if len(outBatch) > 0 {
				outBatch = cmo.sendBatch(outBatch, count)
			}
			count = 0
		case err = <-cmo.outputExit:
			ok = false
		}
	}
	return
}
Example #12
func (cef *CefOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	var (
		facility, priority syslog.Priority
		ident              string
		ok                 bool
		p                  syslog.Priority
		e                  error
		pack               *pipeline.PipelinePack
	)
	syslogMsg := new(SyslogMsg)
	for pack = range or.InChan() {

		// default values
		facility, priority = syslog.LOG_LOCAL4, syslog.LOG_INFO
		ident = "heka_no_ident"

		priField := pack.Message.FindFirstField("cef_meta.syslog_priority")
		if priField != nil {
			priStr := priField.ValueString[0]
			if p, ok = SYSLOG_PRIORITY[priStr]; ok {
				priority = p
			}
		}

		facField := pack.Message.FindFirstField("cef_meta.syslog_facility")
		if facField != nil {
			facStr := facField.ValueString[0]
			if p, ok = SYSLOG_FACILITY[facStr]; ok {
				facility = p
			}
		}

		idField := pack.Message.FindFirstField("cef_meta.syslog_ident")
		if idField != nil {
			ident = idField.ValueString[0]
		}

		syslogMsg.priority = priority | facility
		syslogMsg.prefix = ident
		syslogMsg.payload = pack.Message.GetPayload()
		pack.Recycle()

		_, e = cef.syslogWriter.WriteString(syslogMsg.priority, syslogMsg.prefix,
			syslogMsg.payload)

		if e != nil {
			or.LogError(e)
		}
	}

	cef.syslogWriter.Close()
	return
}
Example #13
func (o *UdpOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
	err error) {

	var outgoing string
	for pack := range runner.InChan() {
		outgoing = fmt.Sprintf("%s\n", pack.Message.GetPayload())
		o.conn.Write([]byte(outgoing))
		pack.Recycle()
	}
	return
}
Example #14
func (cwo *CloudwatchOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	inChan := or.InChan()

	payloads := make(chan CloudwatchDatapoints, cwo.backlog)
	go cwo.Submitter(payloads, or)

	var (
		pack          *pipeline.PipelinePack
		msg           *message.Message
		rawDataPoints *CloudwatchDatapointPayload
		dataPoints    *CloudwatchDatapoints
	)
	dataPoints = new(CloudwatchDatapoints)
	dataPoints.Datapoints = make([]cloudwatch.MetricDatum, 0, 0)

	for pack = range inChan {
		rawDataPoints = new(CloudwatchDatapointPayload)
		msg = pack.Message
		err = json.Unmarshal([]byte(msg.GetPayload()), rawDataPoints)
		if err != nil {
			or.LogMessage(fmt.Sprintf("warning, unable to parse payload: %s", err))
			err = nil
			continue
		}
		// Run through the list and convert them to CloudwatchDatapoints
		for _, rawDatum := range rawDataPoints.Datapoints {
			datum := cloudwatch.MetricDatum{
				Dimensions:      rawDatum.Dimensions,
				MetricName:      rawDatum.MetricName,
				Unit:            rawDatum.Unit,
				Value:           rawDatum.Value,
				StatisticValues: rawDatum.StatisticValues,
			}
			if rawDatum.Timestamp != "" {
				parsedTime, err := message.ForgivingTimeParse("", rawDatum.Timestamp, cwo.tzLocation)
				if err != nil {
					or.LogMessage(fmt.Sprintf("unable to parse timestamp for datum: %s", rawDatum))
					continue
				}
				datum.Timestamp = parsedTime
			}
			dataPoints.Datapoints = append(dataPoints.Datapoints, datum)
		}
		payloads <- *dataPoints
		dataPoints.Datapoints = dataPoints.Datapoints[:0]
		rawDataPoints.Datapoints = rawDataPoints.Datapoints[:0]
		pack.Recycle()
	}
	or.LogMessage("shutting down AWS Cloudwatch submitter")
	cwo.stopChan <- true
	<-cwo.stopChan
	return
}
Example #15
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	inChan := or.InChan()
	errChan := k.producer.Errors()
	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	var (
		pack  *pipeline.PipelinePack
		topic = k.config.Topic
		key   sarama.Encoder
	)

	for pack = range inChan {
		atomic.AddInt64(&k.processMessageCount, 1)

		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		if msgBytes, err := or.Encode(pack); err == nil {
			if msgBytes != nil {
				err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
				if err != nil {
					atomic.AddInt64(&k.processMessageFailures, 1)
					or.LogError(err)
				}
			} else {
				atomic.AddInt64(&k.processMessageDiscards, 1)
			}
		} else {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(err)
		}
		pack.Recycle()
	}
	errChan <- Shutdown
	wg.Wait()
	return
}
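getMessageVariable and the parsed-variable type it takes are not shown above. A rough sketch under assumed types follows: the idea is to resolve either a standard message header or a dynamic field into a string usable as the topic or partition key. The messageVar type and the set of headers handled here are assumptions; the real KafkaOutput resolves more cases.

import "github.com/mozilla-services/heka/message"

// messageVar is an illustrative stand-in for the plugin's parsed variable
// spec: either a well-known header or the name of a dynamic message field.
type messageVar struct {
	header string // e.g. "Logger", "Hostname", "Type"
	field  string // dynamic field name, if set
}

// getMessageVariable resolves the variable against a message, returning ""
// when nothing matches.
func getMessageVariable(msg *message.Message, v *messageVar) string {
	if v.field != "" {
		if f := msg.FindFirstField(v.field); f != nil && len(f.ValueString) > 0 {
			return f.ValueString[0]
		}
		return ""
	}
	switch v.header {
	case "Logger":
		return msg.GetLogger()
	case "Hostname":
		return msg.GetHostname()
	case "Type":
		return msg.GetType()
	}
	return ""
}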
Example #16
func (rlo *RedisListOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	inChan := or.InChan()
	for pack := range inChan {
		payload := pack.Message.GetPayload()
		_, err := rlo.conn.Do("LPUSH", rlo.conf.ListName, payload)
		if err != nil {
			or.LogError(fmt.Errorf("Redis LPUSH error: %s", err))
		}
		// Recycle on both paths so the pack pool isn't starved.
		pack.Recycle(nil)
	}
	return nil
}
Example #17
func (rop *RedisOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	inChan := or.InChan()
	for pack := range inChan {
		payload := pack.Message.GetPayload()
		_, err := rop.conn.Do("LPUSH", rop.conf.Key, payload)
		if err != nil {
			or.LogError(err)
		}
		// Recycle on both paths so the pack pool isn't starved.
		pack.Recycle()
	}
	return nil
}
Example #18
func (so *SentryOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		udpAddrStr string
		udpAddr    *net.UDPAddr
		socket     net.Conn
		e          error
		ok         bool
		pack       *pipeline.PipelinePack
	)

	sentryMsg := &SentryMsg{
		dataPacket: make([]byte, 0, so.config.MaxSentryBytes),
	}

	for pack = range or.InChan() {
		e = so.prepSentryMsg(pack, sentryMsg)
		pack.Recycle()
		if e != nil {
			or.LogError(e)
			continue
		}

		udpAddrStr = sentryMsg.parsedDsn.Host
		if socket, ok = so.udpMap[udpAddrStr]; !ok {
			if len(so.udpMap) > so.config.MaxUdpSockets {
				or.LogError(fmt.Errorf("Max # of UDP sockets [%d] reached.",
					so.config.MaxUdpSockets))
				continue
			}

			if udpAddr, e = net.ResolveUDPAddr("udp", udpAddrStr); e != nil {
				or.LogError(fmt.Errorf("can't resolve UDP address %s: %s",
					udpAddrStr, e))
				continue
			}

			if socket, e = net.DialUDP("udp", nil, udpAddr); e != nil {
				or.LogError(fmt.Errorf("can't dial UDP socket: %s", e))
				continue
			}
			so.udpMap[sentryMsg.parsedDsn.Host] = socket
		}
		socket.Write(sentryMsg.dataPacket)
	}
	return
}
Example #19
func (zo *ZeroMQOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	defer func() {
		zo.socket.Close()
		zo.context.Close()
	}()

	var b []byte
	var p [][]byte
	for pc := range or.InChan() {
		b = pc.Pack.MsgBytes
		p = [][]byte{nil, b}
		zo.socket.SendMultipart(p, 0)
		pc.Pack.Recycle()
	}

	return nil
}
Example #20
func (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {
	var pack *pipeline.PipelinePack

	if or.Encoder() == nil {
		return fmt.Errorf("Encoder required.")
	}

	// Handle the ticks from the Ticker asynchronously
	go k.HandleTick(or.Ticker())

	// handle packages
	for pack = range or.InChan() {
		k.HandlePackage(or, pack)
	}

	return nil
}
Example #21
func (o *UnixOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}
	var (
		outBytes []byte
		e        error
	)
	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
		} else if outBytes != nil {
			o.conn.Write(outBytes)
		}
		pack.Recycle()
	}
	return
}
Example #22
func (ko *KeenOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
	for pack := range or.InChan() {
		payload := pack.Message.GetPayload()
		pack.Recycle()

		event := make(map[string]interface{})
		err := json.Unmarshal([]byte(payload), &event)
		if err != nil {
			or.LogError(err)
			continue
		}
		err = ko.client.AddEvent(ko.collection, event)
		if err != nil {
			or.LogError(err)
			continue
		}
	}
	return nil
}
Example #23
func (k *KinesisOutput) Run(or pipeline.OutputRunner, helper pipeline.PluginHelper) error {
	var (
		pack   *pipeline.PipelinePack
		msg    []byte
		pk     string
		err    error
		params *kin.PutRecordInput
	)

	if or.Encoder() == nil {
		return fmt.Errorf("Encoder required.")
	}

	for pack = range or.InChan() {
		msg, err = or.Encode(pack)
		if err != nil {
			or.LogError(fmt.Errorf("Error encoding message: %s", err))
			pack.Recycle(nil)
			continue
		}
		pk = fmt.Sprintf("%d-%s", pack.Message.GetTimestamp(), pack.Message.GetHostname())
		if k.config.PayloadOnly {
			msg = []byte(pack.Message.GetPayload())
		}
		params = &kin.PutRecordInput{
			Data:         msg,
			PartitionKey: aws.String(pk),
			StreamName:   aws.String(k.config.Stream),
		}
		_, err = k.Client.PutRecord(params)
		if err != nil {
			or.LogError(fmt.Errorf("Error pushing message to Kinesis: %s", err))
			pack.Recycle(nil)
			continue
		}
		pack.Recycle(nil)
	}

	return nil
}
Example #24
// Run is the plugin's main loop: it iterates over received messages, builds a
// short summary line from the message metadata and payload, and submits the
// summary along with the full payload to the configured Mantis sender.
func (o *MantisOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
	err error) {

	var (
		short, long string
		//issue       int
	)

	for pack := range runner.InChan() {
		long = pack.Message.GetPayload()
		short = fmt.Sprintf("%s [%d] %s@%s: %s",
			utils.TsTime(pack.Message.GetTimestamp()).Format(time.RFC3339),
			pack.Message.GetSeverity(), pack.Message.GetLogger(),
			pack.Message.GetHostname(), long)
		pack.Recycle()
		if _, err = o.sender.Send(short, long); err != nil {
			return fmt.Errorf("error sending to %s: %s", o.sender.URL, err)
		}

	}
	return
}
Example #25
// Fetch correct output and iterate over received messages, checking against
// message hostname and delivering to the output if hostname is in our config.
func (f *HostFilter) Run(runner pipeline.FilterRunner, helper pipeline.PluginHelper) (
	err error) {

	var (
		hostname string
		output   pipeline.OutputRunner
		ok       bool
	)

	if output, ok = helper.Output(f.output); !ok {
		return fmt.Errorf("No output: %s", output)
	}
	for pack := range runner.InChan() {
		hostname = pack.Message.GetHostname()
		if f.hosts[hostname] {
			output.InChan() <- pack
		} else {
			pack.Recycle()
		}
	}
	return
}
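For context, the f.output name and f.hosts set used above are typically populated in the filter's Init from its TOML configuration. A sketch along those lines follows; the HostFilterConfig struct and its field names are assumptions rather than the original example's exact config.

// HostFilterConfig is an assumed TOML shape: the output to route to and the
// hostnames to match.
type HostFilterConfig struct {
	Output string   `toml:"output"`
	Hosts  []string `toml:"hosts"`
}

func (f *HostFilter) ConfigStruct() interface{} {
	return new(HostFilterConfig)
}

func (f *HostFilter) Init(config interface{}) error {
	conf := config.(*HostFilterConfig)
	f.output = conf.Output
	f.hosts = make(map[string]bool)
	for _, host := range conf.Hosts {
		f.hosts[host] = true
	}
	return nil
}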
Example #26
func (o *OpenTsdbOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}

	var (
		e        error
		outBytes []byte
	)
	inChan := or.InChan()

	for i := 0; i < o.TsdbWriterCount; i++ {
		go WriteDataToOpenTSDB(o)
	}

	for pack := range inChan {
		outBytes, e = or.Encode(pack)
		pack.Recycle(e)
		if e != nil {
			or.LogError(e)
			continue
		}
		if outBytes == nil {
			continue
		}

		o.logMsgChan <- outBytes
	}

	return
}
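WriteDataToOpenTSDB is not shown. Conceptually, each worker drains o.logMsgChan and writes the encoded lines to OpenTSDB's telnet-style listener. The sketch below uses an assumed tsdbWriter type and field names; the real plugin's connection and retry handling are more involved.

import (
	"log"
	"net"
)

// tsdbWriter is an illustrative stand-in for the fields a writer goroutine
// needs from OpenTsdbOutput.
type tsdbWriter struct {
	addr       string      // "host:port" of the OpenTSDB telnet listener
	logMsgChan chan []byte // encoded lines produced by Run above
}

// run writes each queued line to OpenTSDB until the channel closes or the
// connection fails.
func (w *tsdbWriter) run() {
	conn, err := net.Dial("tcp", w.addr)
	if err != nil {
		log.Printf("OpenTsdbOutput: dial failed: %s", err)
		return
	}
	defer conn.Close()
	for line := range w.logMsgChan {
		if _, err := conn.Write(line); err != nil {
			log.Printf("OpenTsdbOutput: write failed: %s", err)
			return
		}
	}
}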
Example #27
func (no *NsqOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		encoder client.Encoder
		msg     *message.Message
		msgBody []byte = make([]byte, 0, 1024)
		pack    *pipeline.PipelinePack
	)

	conf := no.conf
	encoder = client.NewProtobufEncoder(nil)

	for pack = range or.InChan() {
		if conf.Serialize {
			msg = pack.Message
			if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
				or.LogError(err)
				err = nil
				pack.Recycle()
				continue
			}
			//err := no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			//err = no.nsqwriter.PublishAsync(conf.Topic, msgBody, nil)
			_, _, err = no.nsqwriter.Publish(conf.Topic, msgBody)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.Publish: %s", err))
			}
			msgBody = msgBody[:0]
		} else {
			err = no.nsqwriter.PublishAsync(conf.Topic, []byte(pack.Message.GetPayload()), nil)
			if err != nil {
				or.LogError(fmt.Errorf("error in writer.PublishAsync: %s", err))
			}
		}
		pack.Recycle()
	}

	return
}
Example #28
// Run is the plugin's main loop: it iterates over received messages, formats
// each one as an SMS (timestamp, severity, logger, hostname, payload), and
// sends it to every configured recipient via the Twilio client.
func (o *TwilioOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
	err error) {

	var (
		to, sms string
		exc     *gotwilio.Exception
	)

	for pack := range runner.InChan() {
		sms = fmt.Sprintf("%s [%d] %s@%s: %s",
			utils.TsTime(pack.Message.GetTimestamp()).Format(time.RFC3339),
			pack.Message.GetSeverity(), pack.Message.GetLogger(),
			pack.Message.GetHostname(), pack.Message.GetPayload())
		pack.Recycle()
		for _, to = range o.To {
			_, exc, err = o.client.SendSMS(o.From, to, sms, "", "")
			if err == nil && exc != nil {
				return fmt.Errorf("%s: %d\n%s", exc.Message, exc.Code, exc.MoreInfo)
			}
		}

	}
	return
}
Example #29
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack      *pipeline.PipelinePack
		retval    int
		inChan    = or.InChan()
		duration  int64
		startTime time.Time
		ok        = true
		ticker    = or.Ticker()
	)

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			if s.sample {
				startTime = time.Now()
			}
			retval = s.sb.ProcessMessage(pack)
			if s.sample {
				duration = time.Since(startTime).Nanoseconds()
				s.reportLock.Lock()
				s.processMessageDuration += duration
				s.processMessageSamples++
				s.reportLock.Unlock()
			}
			s.sample = 0 == rand.Intn(s.sampleDenominator)
			pack.Recycle()

			if retval == 0 {
				atomic.AddInt64(&s.processMessageCount, 1)
			} else if retval < 0 {
				atomic.AddInt64(&s.processMessageFailures, 1)
				em := s.sb.LastError()
				if len(em) > 0 {
					or.LogError(errors.New(em))
				}
			} else {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}

		case t := <-ticker:
			startTime = time.Now()
			if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}
			duration = time.Since(startTime).Nanoseconds()
			s.reportLock.Lock()
			s.timerEventDuration += duration
			s.timerEventSamples++
			s.reportLock.Unlock()
		}
	}

	s.reportLock.Lock()
	var destroyErr error
	if s.sbc.PreserveData {
		destroyErr = s.sb.Destroy(s.preservationFile)
	} else {
		destroyErr = s.sb.Destroy("")
	}
	if destroyErr != nil {
		err = destroyErr
	}

	s.sb = nil
	s.reportLock.Unlock()
	return
}
Example #30
func (output *NsqOutput) Run(runner pipeline.OutputRunner,
	helper pipeline.PluginHelper) (err error) {
	if runner.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	var (
		pack     *pipeline.PipelinePack
		outgoing []byte
		msg      RetryMsg
	)

	output.runner = runner
	inChan := runner.InChan()
	ok := true

	defer output.cleanup()

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				return nil
			}
			outgoing, err = output.runner.Encode(pack)
			if err != nil {
				runner.LogError(err)
			} else {
				err = output.sendMessage(outgoing)
				if err != nil {
					output.runner.LogError(err)
					err = output.retryHelper.Wait()
					if err != nil {
						return
					}
					// Create a retry msg, and requeue it
					msg := RetryMsg{Body: outgoing, retryChan: output.retryChan, maxCount: output.MaxMsgRetries}
					err = msg.Retry()
					if err != nil {
						output.runner.LogError(err)
					}
				} else {
					output.retryHelper.Reset()
				}
			}
			pack.Recycle()
		case msg, ok = <-output.retryChan:
			if !ok {
				return nil
			}
			err = output.sendMessage(msg.Body)
			if err != nil {
				output.runner.LogError(err)
				err = output.retryHelper.Wait()
				if err != nil {
					return
				}
				// requeue the message
				err = msg.Retry()
				if err != nil {
					output.runner.LogError(err)
				}
			} else {
				output.retryHelper.Reset()
			}
		}
	}

	return nil
}
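The RetryMsg type is inferred from the fields set in Run above (Body, retryChan, maxCount). A plausible sketch follows; the retry-count field and the error text are assumptions about how the real plugin bounds requeues.

import "errors"

// RetryMsg carries a message body that failed to send, the channel it should
// be requeued on, and a cap on how many times that may happen.
type RetryMsg struct {
	Body      []byte
	count     int // assumed: number of requeues so far
	maxCount  int
	retryChan chan RetryMsg
}

// Retry requeues the message unless it has already been retried maxCount times.
func (m RetryMsg) Retry() error {
	if m.count > m.maxCount {
		return errors.New("max retries exceeded, dropping message")
	}
	m.count++
	m.retryChan <- m
	return nil
}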