// convertMessageToValues reads a Heka Message and returns a slice of field values
func (po *PostgresOutput) convertMessageToValues(m *message.Message, insertFields []string) (fieldValues []interface{}, err error) {
	fieldValues = []interface{}{}
	missingFields := []string{}
	for _, field := range insertFields {
		// Special case: get "Timestamp" from Heka message
		if field == "Timestamp" {
			// Convert Heka time (Unix timestamp in nanoseconds) to Golang time
			v := time.Unix(0, m.GetTimestamp())
			fieldValues = append(fieldValues, v)
		} else {
			v, ok := m.GetFieldValue(field)
			if !ok {
				// If configured to do so, write NULL when a FieldValue isn't found in the Heka message
				if po.allowMissingMessageFields {
					v = nil
				} else {
					missingFields = append(missingFields, field)
					continue
				}
			}
			fieldValues = append(fieldValues, v)
		}
	}

	if len(missingFields) > 0 {
		return []interface{}{}, fmt.Errorf("message is missing expected fields: %s", strings.Join(missingFields, ", "))
	}

	return fieldValues, nil
}
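A hedged usage sketch: feeding the returned values into a parameterized INSERT via database/sql. This assumes the code lives in the same package as the output plugin; the buildInsertQuery helper and the po.insertFields config field are hypothetical.
func (po *PostgresOutput) insertMessage(db *sql.DB, m *message.Message) error {
	// One value per configured column, in insertFields order.
	values, err := po.convertMessageToValues(m, po.insertFields) // po.insertFields: assumed config field
	if err != nil {
		return err
	}
	// buildInsertQuery is hypothetical; it would render something like
	// `INSERT INTO logs (...) VALUES ($1, $2, ...)` for the configured columns.
	_, err = db.Exec(buildInsertQuery(po.insertFields), values...)
	return err
}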
Example #2
// Convenience function for creating a new int64 field on a message object.
func newInt64Field(msg *message.Message, name string, val int64, representation string) {
	if f, err := message.NewField(name, val, representation); err == nil {
		msg.AddField(f)
	} else {
		fmt.Println("Report error adding int64 field: ", err)
	}
}
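A brief sketch of the helper in use while assembling a report message; the field names and values here are illustrative only.
func reportMatchStats(msg *message.Message, count, avgNs int64) {
	// Attach two int64 fields using the helper above.
	newInt64Field(msg, "ProcessMessageCount", count, "count")
	newInt64Field(msg, "MatchAvgDuration", avgNs, "ns")
}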
Example #3
// Given a PluginRunner and a Message struct, this function will populate the
// Message struct's field values with the plugin's input channel length and
// capacity, plus any additional data that the plugin might provide through
// implementation of the `ReportingPlugin` interface defined above.
func PopulateReportMsg(pr PluginRunner, msg *message.Message) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("'%s' `PopulateReportMsg` panic: %s", pr.Name(), r)
		}
	}()

	if reporter, ok := pr.Plugin().(ReportingPlugin); ok {
		if err = reporter.ReportMsg(msg); err != nil {
			return
		}
	}

	if fRunner, ok := pr.(FilterRunner); ok {
		newIntField(msg, "InChanCapacity", cap(fRunner.InChan()), "count")
		newIntField(msg, "InChanLength", len(fRunner.InChan()), "count")
		newIntField(msg, "MatchChanCapacity", cap(fRunner.MatchRunner().inChan), "count")
		newIntField(msg, "MatchChanLength", len(fRunner.MatchRunner().inChan), "count")
		var tmp int64 = 0
		fRunner.MatchRunner().reportLock.Lock()
		if fRunner.MatchRunner().matchSamples > 0 {
			tmp = fRunner.MatchRunner().matchDuration / fRunner.MatchRunner().matchSamples
		}
		fRunner.MatchRunner().reportLock.Unlock()
		newInt64Field(msg, "MatchAvgDuration", tmp, "ns")
	} else if dRunner, ok := pr.(DecoderRunner); ok {
		newIntField(msg, "InChanCapacity", cap(dRunner.InChan()), "count")
		newIntField(msg, "InChanLength", len(dRunner.InChan()), "count")
	}
	msg.SetType("heka.plugin-report")
	return
}
Example #4
// Convenience function for creating and setting a string field with the
// given name on a message object.
func newStringField(msg *message.Message, name string, val string) {
	if f, err := message.NewField(name, val, ""); err == nil {
		msg.AddField(f)
	} else {
		fmt.Println("Report error adding string field: ", err)
	}
}
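For symmetry, a sketch of the string helper in use; the "name"/"key" pairing mirrors the reporting code in Example #21 below.
func tagReport(msg *message.Message, pluginName string) {
	// Identify which plugin and section a report message belongs to.
	newStringField(msg, "name", pluginName)
	newStringField(msg, "key", "filters")
}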
Example #5
// Given a PluginRunner and a Message struct, this function will populate the
// Message struct's field values with the plugin's input channel length and
// capacity, plus any additional data that the plugin might provide through
// implementation of the `ReportingPlugin` interface defined above.
func PopulateReportMsg(pr PluginRunner, msg *message.Message) (err error) {
	if reporter, ok := pr.Plugin().(ReportingPlugin); ok {
		if err = reporter.ReportMsg(msg); err != nil {
			return
		}
	}

	if fRunner, ok := pr.(FilterRunner); ok {
		message.NewIntField(msg, "InChanCapacity", cap(fRunner.InChan()), "count")
		message.NewIntField(msg, "InChanLength", len(fRunner.InChan()), "count")
		message.NewIntField(msg, "MatchChanCapacity", cap(fRunner.MatchRunner().inChan), "count")
		message.NewIntField(msg, "MatchChanLength", len(fRunner.MatchRunner().inChan), "count")
		message.NewIntField(msg, "LeakCount", fRunner.LeakCount(), "count")
		var tmp int64 = 0
		fRunner.MatchRunner().reportLock.Lock()
		if fRunner.MatchRunner().matchSamples > 0 {
			tmp = fRunner.MatchRunner().matchDuration / fRunner.MatchRunner().matchSamples
		}
		fRunner.MatchRunner().reportLock.Unlock()
		message.NewInt64Field(msg, "MatchAvgDuration", tmp, "ns")
	} else if dRunner, ok := pr.(DecoderRunner); ok {
		message.NewIntField(msg, "InChanCapacity", cap(dRunner.InChan()), "count")
		message.NewIntField(msg, "InChanLength", len(dRunner.InChan()), "count")
	}
	msg.SetType("heka.plugin-report")
	return
}
Example #6
// Given a PluginRunner and a Message struct, this function will populate the
// Message struct's field values with the plugin's input channel length and
// capacity, plus any additional data that the plugin might provide through
// implementation of the `ReportingPlugin` interface defined above.
func PopulateReportMsg(pr PluginRunner, msg *message.Message) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("'%s' `PopulateReportMsg` panic: %s", pr.Name(), r)
		}
	}()

	if reporter, ok := pr.Plugin().(ReportingPlugin); ok {
		if err = reporter.ReportMsg(msg); err != nil {
			return
		}
	}

	if fRunner, ok := pr.(FilterRunner); ok {
		newIntField(msg, "InChanCapacity", cap(fRunner.InChan()))
		newIntField(msg, "InChanLength", len(fRunner.InChan()))
		newIntField(msg, "MatchChanCapacity", cap(fRunner.MatchRunner().inChan))
		newIntField(msg, "MatchChanLength", len(fRunner.MatchRunner().inChan))
	} else if dRunner, ok := pr.(DecoderRunner); ok {
		newIntField(msg, "InChanCapacity", cap(dRunner.InChan()))
		newIntField(msg, "InChanLength", len(dRunner.InChan()))
	}
	msg.SetType("heka.plugin-report")
	return
}
Example #7
func getField(msg *message.Message, name string) interface{} {
	switch name {
	case "Uuid":
		if msg.Uuid == nil {
			return nil
		}
		return msg.GetUuidString()
	case "Timestamp":
		return msg.Timestamp
	case "Type":
		return msg.Type
	case "Logger":
		return msg.Logger
	case "Severity":
		return msg.Severity
	case "Payload":
		return msg.Payload
	case "EnvVersion":
		return msg.EnvVersion
	case "Pid":
		return msg.Pid
	case "Hostname":
		return msg.Hostname
	case "_hekaTimestampMicro":
		if msg.Timestamp != nil {
			return *msg.Timestamp / 1000 // nano -> micro
		}
		return nil
	default:
		val, _ := msg.GetFieldValue(name)
		return val
	}
}
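A hedged sketch of getField in use: collecting a mixed set of header and dynamic fields into a map, e.g. ahead of serialization. The field list is caller-supplied and illustrative.
func extractFields(msg *message.Message, names []string) map[string]interface{} {
	out := make(map[string]interface{}, len(names))
	for _, n := range names {
		// Header names such as "Type" resolve to message struct members;
		// anything else falls through to the dynamic field lookup.
		out[n] = getField(msg, n)
	}
	return out
}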
Example #8
func (so *S3Output) WriteToBuffer(buffer *bytes.Buffer, msg *message.Message, or OutputRunner) (err error) {
	_, err = buffer.Write([]byte(msg.GetPayload()))
	if err != nil {
		return
	}
	if buffer.Len() > so.config.BufferChunkLimit {
		err = so.SaveToDisk(buffer, or)
	}
	return
}
Example #9
func (cwo *CloudwatchOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	inChan := or.InChan()

	payloads := make(chan CloudwatchDatapoints, cwo.backlog)
	go cwo.Submitter(payloads, or)

	var (
		pack          *pipeline.PipelinePack
		msg           *message.Message
		rawDataPoints *CloudwatchDatapointPayload
		dataPoints    *CloudwatchDatapoints
	)
	dataPoints = new(CloudwatchDatapoints)
	dataPoints.Datapoints = make([]cloudwatch.MetricDatum, 0)

	for pack = range inChan {
		rawDataPoints = new(CloudwatchDatapointPayload)
		msg = pack.Message
		err = json.Unmarshal([]byte(msg.GetPayload()), rawDataPoints)
		if err != nil {
			or.LogMessage(fmt.Sprintf("warning, unable to parse payload: %s", err))
			err = nil
			continue
		}
		// Run through the list and convert them to CloudwatchDatapoints
		for _, rawDatum := range rawDataPoints.Datapoints {
			datum := cloudwatch.MetricDatum{
				Dimensions:      rawDatum.Dimensions,
				MetricName:      rawDatum.MetricName,
				Unit:            rawDatum.Unit,
				Value:           rawDatum.Value,
				StatisticValues: rawDatum.StatisticValues,
			}
			if rawDatum.Timestamp != "" {
				parsedTime, err := message.ForgivingTimeParse("", rawDatum.Timestamp, cwo.tzLocation)
				if err != nil {
					or.LogMessage(fmt.Sprintf("unable to parse timestamp for datum: %s", rawDatum))
					continue
				}
				datum.Timestamp = parsedTime
			}
			dataPoints.Datapoints = append(dataPoints.Datapoints, datum)
		}
		payloads <- *dataPoints
		dataPoints.Datapoints = dataPoints.Datapoints[:0]
		rawDataPoints.Datapoints = rawDataPoints.Datapoints[:0]
		pack.Recycle()
	}
	or.LogMessage("shutting down AWS Cloudwatch submitter")
	cwo.stopChan <- true
	<-cwo.stopChan
	return
}
Example #10
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		} else {
			for name, conf := range configFile {
				name = getSandboxName(fr.Name(), name)
				if _, ok := h.Filter(name); ok {
					// todo support reload
					return fmt.Errorf("loadSandbox failed: %s is already running", name)
				}
				fr.LogMessage(fmt.Sprintf("Loading: %s", name))
				confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
				err = ioutil.WriteFile(confFile, []byte(config), 0600)
				if err != nil {
					return
				}
				var sbc SandboxConfig
				if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
					return fmt.Errorf("loadSandbox failed: %s\n", err)
				}
				scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
				err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				// check/clear the old state preservation file
				// this avoids issues with changes to the data model since the last load
				// and prevents holes in the graph from looking like anomalies
				os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))
				var runner pipeline.FilterRunner
				runner, err = this.createRunner(dir, name, conf)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				err = h.PipelineConfig().AddFilterRunner(runner)
				if err == nil {
					this.currentFilters++
				}
				break // only interested in the first item
			}
		}
	}
	return
}
Example #11
func (s *SmtpOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack       *PipelinePack
		msg        *message.Message
		contents   []byte
		subject    string
		msgText    string
		headerText string
	)

	if s.conf.Subject == "" {
		subject = "Heka [" + or.Name() + "]"
	} else {
		subject = s.conf.Subject
	}

	header := make(map[string]string)
	header["From"] = s.conf.SendFrom
	header["Subject"] = subject
	header["MIME-Version"] = "1.0"
	header["Content-Type"] = "text/plain; charset=\"utf-8\""
	header["Content-Transfer-Encoding"] = "base64"

	for k, v := range header {
		headerText += fmt.Sprintf("%s: %s\r\n", k, v)
	}

	for pack = range inChan {
		msg = pack.Message
		msgText = headerText
		if s.conf.PayloadOnly {
			message += "\r\n" + base64.StdEncoding.EncodeToString([]byte(msg.GetPayload()))
			err = s.sendFunction(s.conf.Host, s.auth, s.conf.SendFrom, s.conf.SendTo, []byte(message))
		} else {
			if contents, err = json.Marshal(msg); err == nil {
				message += "\r\n" + base64.StdEncoding.EncodeToString(contents)
				err = s.sendFunction(s.conf.Host, s.auth, s.conf.SendFrom, s.conf.SendTo, []byte(message))
			} else {
				or.LogError(err)
			}
		}
		if err != nil {
			or.LogError(err)
		}
		pack.Recycle()
	}
	return
}
Example #12
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {

	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}

		for name, conf := range configFile {
			name = getSandboxName(fr.Name(), name)
			if _, ok := h.Filter(name); ok {
				// todo support reload
				return fmt.Errorf("loadSandbox failed: %s is already running", name)
			}
			fr.LogMessage(fmt.Sprintf("Loading: %s", name))
			confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
			err = ioutil.WriteFile(confFile, []byte(config), 0600)
			if err != nil {
				return
			}
			var sbc SandboxConfig
			// Default, will get overwritten if necessary
			sbc.ScriptType = "lua"
			if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
				return fmt.Errorf("loadSandbox failed: %s\n", err)
			}
			scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
			err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			var runner pipeline.FilterRunner
			runner, err = this.createRunner(dir, name, conf)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			err = this.pConfig.AddFilterRunner(runner)
			if err == nil {
				atomic.AddInt32(&this.currentFilters, 1)
			}
			break // only interested in the first item
		}
	}
	return
}
Example #13
func lookup_field(msg *message.Message, fn string, fi int, ai int) (int,
	unsafe.Pointer, int) {

	var field *message.Field
	if fi != 0 {
		fields := msg.FindAllFields(fn)
		if fi >= len(fields) {
			return 0, unsafe.Pointer(nil), 0
		}
		field = fields[fi]
	} else {
		if field = msg.FindFirstField(fn); field == nil {
			return 0, unsafe.Pointer(nil), 0
		}
	}
	fieldType := int(field.GetValueType())
	switch field.GetValueType() {
	case message.Field_STRING:
		if ai >= len(field.ValueString) {
			return fieldType, unsafe.Pointer(nil), 0
		}
		value := field.ValueString[ai]
		cs := C.CString(value) // freed by the caller
		return fieldType, unsafe.Pointer(cs), len(value)
	case message.Field_BYTES:
		if ai >= len(field.ValueBytes) {
			return fieldType, unsafe.Pointer(nil), 0
		}
		value := field.ValueBytes[ai]
		return fieldType, unsafe.Pointer(&field.ValueBytes[ai][0]), len(value)
	case message.Field_INTEGER:
		if ai >= len(field.ValueInteger) {
			return fieldType, unsafe.Pointer(nil), 0
		}
		return fieldType, unsafe.Pointer(&field.ValueInteger[ai]), 0
	case message.Field_DOUBLE:
		if ai >= len(field.ValueDouble) {
			return fieldType, unsafe.Pointer(nil), 0
		}
		return fieldType, unsafe.Pointer(&field.ValueDouble[ai]), 0
	case message.Field_BOOL:
		if ai >= len(field.ValueBool) {
			return fieldType, unsafe.Pointer(nil), 0
		}
		return fieldType, unsafe.Pointer(&field.ValueBool[ai]), 0
	}
	return 0, unsafe.Pointer(nil), 0
}
Example #14
// Fields are additional logging data passed to Heka. They are technically
// undefined, but searchable and actionable.
func addFields(msg *message.Message, fields Fields) (err error) {
	for key, ival := range fields {
		var field *message.Field
		if ival == "" {
			ival = "*empty*"
		}
		if key == "" {
			continue
		}
		field, err = message.NewField(key, ival, ival)
		if err != nil {
			return err
		}
		msg.AddField(field)
	}
	return err
}
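A usage sketch, assuming Fields is a map[string]string (consistent with the `ival == ""` comparison above); the keys and values are illustrative.
func logWithFields(msg *message.Message) error {
	return addFields(msg, Fields{
		"request_id": "abc123",
		"user_agent": "", // stored as "*empty*" by addFields
	})
}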
Example #15
func (this *SandboxManagerFilter) loadSandbox(fr FilterRunner,
	h PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		} else {
			for name, conf := range configFile {
				name = getSandboxName(fr.Name(), name)
				if _, ok := h.Filter(name); ok {
					// todo support reload
					return fmt.Errorf("loadSandbox failed: %s is already running", name)
				}
				fr.LogMessage(fmt.Sprintf("Loading: %s", name))
				confFile := path.Join(dir, fmt.Sprintf("%s.toml", name))
				err = ioutil.WriteFile(confFile, []byte(config), 0600)
				if err != nil {
					return
				}
				var sbfc SandboxFilterConfig
				if err = toml.PrimitiveDecode(conf, &sbfc); err != nil {
					return fmt.Errorf("loadSandbox failed: %s\n", err)
				}
				scriptFile := path.Join(dir, fmt.Sprintf("%s.%s", name, sbfc.Sbc.ScriptType))
				err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				var runner FilterRunner
				runner, err = createRunner(dir, name, conf)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				err = h.PipelineConfig().AddFilterRunner(runner)
				if err == nil {
					this.currentFilters++
				}
				break // only interested in the first item
			}
		}
	}
	return
}
Example #16
// AddDecodeFailureFields adds two fields to the provided message object. The
// first field is a boolean field called `decode_failure`, set to true. The
// second is a string field called `decode_error` which will contain the
// provided error message, truncated to 500 bytes if necessary.
func AddDecodeFailureFields(m *message.Message, errMsg string) error {
	field0, err := message.NewField("decode_failure", true, "")
	if err != nil {
		err = fmt.Errorf("field creation error: %s", err.Error())
		return err
	}
	if len(errMsg) > 500 {
		errMsg = errMsg[:500]
	}
	field1, err := message.NewField("decode_error", errMsg, "")
	if err != nil {
		err = fmt.Errorf("field creation error: %s", err.Error())
		return err
	}
	m.AddField(field0)
	m.AddField(field1)
	return nil
}
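A hedged sketch of a decoder falling back to this helper when decoding fails, so the message can still be routed downstream; the type name set here is illustrative.
func markDecodeFailure(m *message.Message, decodeErr error) error {
	// Tag the message rather than dropping it; AddDecodeFailureFields
	// truncates the error text to 500 bytes itself.
	if err := AddDecodeFailureFields(m, decodeErr.Error()); err != nil {
		return err
	}
	m.SetType("decode.failure") // illustrative type name
	return nil
}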
Example #17
func getFieldAsString(msg *message.Message, mvar *messageVariable) string {
	var field *message.Field
	if mvar.fi != 0 {
		fields := msg.FindAllFields(mvar.name)
		if mvar.fi >= len(fields) {
			return ""
		}
		field = fields[mvar.fi]
	} else {
		if field = msg.FindFirstField(mvar.name); field == nil {
			return ""
		}
	}
	switch field.GetValueType() {
	case message.Field_STRING:
		if mvar.ai >= len(field.ValueString) {
			return ""
		}
		return field.ValueString[mvar.ai]
	case message.Field_BYTES:
		if mvar.ai >= len(field.ValueBytes) {
			return ""
		}
		return string(field.ValueBytes[mvar.ai])
	case message.Field_INTEGER:
		if mvar.ai >= len(field.ValueInteger) {
			return ""
		}
		return fmt.Sprintf("%d", field.ValueInteger[mvar.ai])
	case message.Field_DOUBLE:
		if mvar.ai >= len(field.ValueDouble) {
			return ""
		}
		return fmt.Sprintf("%g", field.ValueDouble[mvar.ai])
	case message.Field_BOOL:
		if mvar.ai >= len(field.ValueBool) {
			return ""
		}
		return fmt.Sprintf("%t", field.ValueBool[mvar.ai])
	}
	return ""
}
Example #18
func getMessageVariable(msg *message.Message, mvar *messageVariable) string {
	if mvar.header {
		switch mvar.name {
		case "Type":
			return msg.GetType()
		case "Logger":
			return msg.GetLogger()
		case "Hostname":
			return msg.GetHostname()
		case "Payload":
			return msg.GetPayload()
		default:
			return ""
		}
	} else {
		return getFieldAsString(msg, mvar)
	}
}
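A small sketch of resolving a variable against a message. Since messageVariable's fields are unexported, this assumes the code lives in the same package; the literal below presumes the header/name/fi/ai fields shown in use above.
func resolveHostname(msg *message.Message) string {
	hostVar := &messageVariable{header: true, name: "Hostname"}
	return getMessageVariable(msg, hostVar)
}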
Example #19
func (n *NagiosOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack    *PipelinePack
		msg     *message.Message
		payload string
	)

	for pack = range inChan {
		msg = pack.Message
		payload = msg.GetPayload()
		pos := strings.Index(payload, ":")
		state := "3" // UNKNOWN
		if pos != -1 {
			switch payload[:pos] {
			case "OK":
				state = "0"
			case "WARNING":
				state = "1"
			case "CRITICAL":
				state = "2"
			}
		}

		data := url.Values{
			"cmd_typ":          {"30"}, // PROCESS_SERVICE_CHECK_RESULT
			"cmd_mod":          {"2"},  // CMDMODE_COMMIT
			"host":             {msg.GetHostname()},
			"service":          {msg.GetLogger()},
			"plugin_state":     {state},
			"plugin_output":    {payload[pos+1:]},
			"performance_data": {""}}
		req, err := http.NewRequest("POST", n.conf.Url,
			strings.NewReader(data.Encode()))
		if err == nil {
			req.SetBasicAuth(n.conf.Username, n.conf.Password)
			if resp, err := n.client.Do(req); err == nil {
				resp.Body.Close()
			} else {
				or.LogError(err)
			}
		} else {
			or.LogError(err)
		}
		pack.Recycle()
	}
	return
}
Example #20
func (n *NagiosOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack    *PipelinePack
		msg     *message.Message
		payload string
	)

	for pack = range inChan {
		msg = pack.Message
		payload = msg.GetPayload()
		pos := strings.Index(payload, ":")
		state := "3" // UNKNOWN
		if pos != -1 {
			switch payload[:pos] {
			case "OK":
				state = "0"
			case "WARNING":
				state = "1"
			case "CRITICAL":
				state = "2"
			}
		}

		host := n.conf.NagiosHost
		if host == "" {
			host = msg.GetHostname()
		}
		serviceDescription := n.conf.NagiosServiceDescription
		if serviceDescription == "" {
			serviceDescription = msg.GetLogger()
		}
		payload = payload[pos+1:]

		err = n.submitter(host, serviceDescription, state, payload)
		if err != nil {
			err = NewRetryMessageError(err.Error())
			pack.Recycle(err)
			continue
		}
		or.UpdateCursor(pack.QueueCursor)
		pack.Recycle(nil)
	}
	return
}
Example #21
// Generate recycle channel and plugin report messages and put them on the
// provided channel as they're ready.
func (pc *PipelineConfig) reports(reportChan chan *PipelinePack) {
	var (
		f      *message.Field
		pack   *PipelinePack
		msg    *message.Message
		err, e error
	)

	pack = <-pc.reportRecycleChan
	msg = pack.Message
	message.NewIntField(msg, "InChanCapacity", cap(pc.inputRecycleChan), "count")
	message.NewIntField(msg, "InChanLength", len(pc.inputRecycleChan), "count")
	msg.SetType("heka.input-report")
	message.NewStringField(msg, "name", "inputRecycleChan")
	message.NewStringField(msg, "key", "globals")
	reportChan <- pack

	pack = <-pc.reportRecycleChan
	msg = pack.Message
	message.NewIntField(msg, "InChanCapacity", cap(pc.injectRecycleChan), "count")
	message.NewIntField(msg, "InChanLength", len(pc.injectRecycleChan), "count")
	msg.SetType("heka.inject-report")
	message.NewStringField(msg, "name", "injectRecycleChan")
	message.NewStringField(msg, "key", "globals")
	reportChan <- pack

	pack = <-pc.reportRecycleChan
	msg = pack.Message
	message.NewIntField(msg, "InChanCapacity", cap(pc.router.InChan()), "count")
	message.NewIntField(msg, "InChanLength", len(pc.router.InChan()), "count")
	message.NewInt64Field(msg, "ProcessMessageCount", atomic.LoadInt64(&pc.router.processMessageCount), "count")
	msg.SetType("heka.router-report")
	message.NewStringField(msg, "name", "Router")
	message.NewStringField(msg, "key", "globals")
	reportChan <- pack

	getReport := func(runner PluginRunner) (pack *PipelinePack) {
		pack = <-pc.reportRecycleChan
		if err = PopulateReportMsg(runner, pack.Message); err != nil {
			msg = pack.Message
			f, e = message.NewField("Error", err.Error(), "")
			if e == nil {
				msg.AddField(f)
			}
			msg.SetType("heka.plugin-report")
		}
		return
	}

	pc.inputsLock.Lock()
	for name, runner := range pc.InputRunners {
		if runner.Transient() {
			continue
		}
		pack = getReport(runner)
		message.NewStringField(pack.Message, "name", name)
		message.NewStringField(pack.Message, "key", "inputs")
		reportChan <- pack
	}
	pc.inputsLock.Unlock()

	for _, runner := range pc.allDecoders {
		pack = getReport(runner)
		message.NewStringField(pack.Message, "name", runner.Name())
		message.NewStringField(pack.Message, "key", "decoders")
		reportChan <- pack
	}

	pc.filtersLock.Lock()
	for name, runner := range pc.FilterRunners {
		pack = getReport(runner)
		message.NewStringField(pack.Message, "name", name)
		message.NewStringField(pack.Message, "key", "filters")
		reportChan <- pack
	}
	pc.filtersLock.Unlock()

	for name, runner := range pc.OutputRunners {
		pack = getReport(runner)
		message.NewStringField(pack.Message, "name", name)
		message.NewStringField(pack.Message, "key", "outputs")
		reportChan <- pack
	}
	close(reportChan)
}
Example #22
func EncoderSpec(c gs.Context) {
	t := new(ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewPipelineConfig sets up Globals, which is needed so the
	// pipeline.Prepend*Dir functions don't fail during plugin Init().
	_ = pipeline.NewPipelineConfig(nil)

	c.Specify("A SandboxEncoder", func() {

		encoder := new(SandboxEncoder)
		conf := encoder.ConfigStruct().(*SandboxEncoderConfig)
		supply := make(chan *pipeline.PipelinePack, 1)
		pack := pipeline.NewPipelinePack(supply)
		pack.Message.SetPayload("original")
		pack.Message.SetType("my_type")
		pack.Message.SetPid(12345)
		pack.Message.SetSeverity(4)
		pack.Message.SetHostname("hostname")
		pack.Message.SetTimestamp(54321)
		pack.Message.SetUuid(uuid.NewRandom())
		var (
			result []byte
			err    error
		)

		c.Specify("emits JSON correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_json.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			msg := new(message.Message)
			err = json.Unmarshal(result, msg)
			c.Expect(err, gs.IsNil)
			c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
			c.Expect(msg.GetPid(), gs.Equals, int32(12345))
			c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
			c.Expect(msg.GetHostname(), gs.Equals, "hostname")
			c.Expect(msg.GetPayload(), gs.Equals, "original")
			c.Expect(msg.GetType(), gs.Equals, "my_type")
		})

		c.Specify("emits text correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_text.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			c.Expect(string(result), gs.Equals, "Prefixed original")
		})

		c.Specify("emits protobuf correctly", func() {

			c.Specify("when inject_message is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_protobuf.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
				c.Expect(msg.GetPid(), gs.Equals, int32(12345))
				c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
				c.Expect(msg.GetHostname(), gs.Equals, "hostname")
				c.Expect(msg.GetPayload(), gs.Equals, "mutated")
				c.Expect(msg.GetType(), gs.Equals, "after")
			})

			c.Specify("when `write_message` is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_writemessage.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetPayload(), gs.Equals, "mutated payload")
				c.Expect(pack.Message.GetPayload(), gs.Equals, "original")
			})
		})

	})
}
Example #23
func AMQPPluginSpec(c gs.Context) {
	t := &pipeline_ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	config := NewPipelineConfig(nil)

	// Our two user/conn waitgroups
	ug := new(sync.WaitGroup)
	cg := new(sync.WaitGroup)

	// Setup the mock channel
	mch := NewMockAMQPChannel(ctrl)

	// Setup the mock amqpHub with the mock chan return
	aqh := NewMockAMQPConnectionHub(ctrl)
	aqh.EXPECT().GetChannel("", AMQPDialer{}).Return(mch, ug, cg, nil)

	errChan := make(chan error, 1)

	c.Specify("An amqp input", func() {
		// Setup all the mock calls for Init
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)
		mch.EXPECT().QueueDeclare("", false, true, false, false,
			gomock.Any()).Return(amqp.Queue{}, nil)
		mch.EXPECT().QueueBind("", "test", "", false, gomock.Any()).Return(nil)
		mch.EXPECT().Qos(2, 0, false).Return(nil)

		ith := new(plugins_ts.InputTestHelper)
		ith.Msg = pipeline_ts.GetTestMessage()
		ith.Pack = NewPipelinePack(config.InputRecycleChan())

		// set up mock helper, decoder set, and packSupply channel
		ith.MockHelper = NewMockPluginHelper(ctrl)
		ith.MockInputRunner = NewMockInputRunner(ctrl)
		mockDRunner := NewMockDecoderRunner(ctrl)
		ith.PackSupply = make(chan *PipelinePack, 1)
		ith.DecodeChan = make(chan *PipelinePack)

		ith.MockInputRunner.EXPECT().InChan().Return(ith.PackSupply)

		amqpInput := new(AMQPInput)
		amqpInput.amqpHub = aqh
		config := amqpInput.ConfigStruct().(*AMQPInputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"
		config.QueueTTL = 300000

		c.Specify("with a valid setup and no decoder", func() {
			err := amqpInput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			c.Specify("consumes a message", func() {

				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(err, gs.IsNil)
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
			})
		})

		c.Specify("with a valid setup using a decoder", func() {
			decoderName := "defaultDecoder"
			config.Decoder = decoderName
			err := amqpInput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpInput.ch, gs.Equals, mch)

			// Mock up our default decoder runner and decoder.
			ith.MockInputRunner.EXPECT().Name().Return("AMQPInput")
			decCall := ith.MockHelper.EXPECT().DecoderRunner(decoderName, "AMQPInput-defaultDecoder")
			decCall.Return(mockDRunner, true)
			mockDecoder := NewMockDecoder(ctrl)
			mockDRunner.EXPECT().Decoder().Return(mockDecoder)

			c.Specify("consumes a message", func() {
				packs := []*PipelinePack{ith.Pack}
				mockDecoder.EXPECT().Decode(ith.Pack).Return(packs, nil)

				// Create a channel to send data to the input
				// Drop a message on there and close the channel
				streamChan := make(chan amqp.Delivery, 1)
				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)
				streamChan <- amqp.Delivery{
					ContentType:  "text/plain",
					Body:         []byte("This is a message"),
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the injected packet
				ith.MockInputRunner.EXPECT().Inject(gomock.Any())

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(ith.Pack.Message.GetType(), gs.Equals, "amqp")
				c.Expect(ith.Pack.Message.GetPayload(), gs.Equals, "This is a message")
			})

			c.Specify("consumes a serialized message", func() {
				encoder := client.NewProtobufEncoder(nil)
				streamChan := make(chan amqp.Delivery, 1)

				msg := new(message.Message)
				msg.SetUuid(uuid.NewRandom())
				msg.SetTimestamp(time.Now().UnixNano())
				msg.SetType("logfile")
				msg.SetLogger("/a/nice/path")
				msg.SetSeverity(int32(0))
				msg.SetEnvVersion("0.2")
				msg.SetPid(0)
				msg.SetPayload("This is a message")
				msg.SetHostname("TestHost")

				msgBody := make([]byte, 0, 500)
				_ = encoder.EncodeMessageStream(msg, &msgBody)

				ack := plugins_ts.NewMockAcknowledger(ctrl)
				ack.EXPECT().Ack(gomock.Any(), false)

				streamChan <- amqp.Delivery{
					ContentType:  "application/hekad",
					Body:         msgBody,
					Timestamp:    time.Now(),
					Acknowledger: ack,
				}
				mch.EXPECT().Consume("", "", false, false, false, false,
					gomock.Any()).Return(streamChan, nil)

				// Expect the decoded pack
				mockDRunner.EXPECT().InChan().Return(ith.DecodeChan)

				// Increase the usage since Run decrements it on close
				ug.Add(1)

				ith.PackSupply <- ith.Pack
				go func() {
					err := amqpInput.Run(ith.MockInputRunner, ith.MockHelper)
					errChan <- err
				}()
				packRef := <-ith.DecodeChan
				c.Expect(ith.Pack, gs.Equals, packRef)
				// Ignore the leading 5 bytes of the encoded message, as that's the header.
				c.Expect(string(packRef.MsgBytes), gs.Equals, string(msgBody[5:]))
				ith.PackSupply <- ith.Pack
				close(streamChan)
				err = <-errChan
				c.Expect(err, gs.IsNil)
			})
		})
	})

	c.Specify("An amqp output", func() {
		oth := plugins_ts.NewOutputTestHelper(ctrl)
		pConfig := NewPipelineConfig(nil)

		amqpOutput := new(AMQPOutput)
		amqpOutput.amqpHub = aqh
		config := amqpOutput.ConfigStruct().(*AMQPOutputConfig)
		config.URL = ""
		config.Exchange = ""
		config.ExchangeType = ""
		config.RoutingKey = "test"

		closeChan := make(chan *amqp.Error)

		inChan := make(chan *PipelinePack, 1)

		mch.EXPECT().NotifyClose(gomock.Any()).Return(closeChan)
		mch.EXPECT().ExchangeDeclare("", "", false, true, false, false,
			gomock.Any()).Return(nil)

		// Increase the usage since Run decrements it on close
		ug.Add(1)

		// Expect the close and the InChan calls
		aqh.EXPECT().Close("", cg)
		oth.MockOutputRunner.EXPECT().InChan().Return(inChan)

		msg := pipeline_ts.GetTestMessage()
		pack := NewPipelinePack(pConfig.InputRecycleChan())
		pack.Message = msg
		pack.Decoded = true

		c.Specify("publishes a plain message", func() {
			encoder := new(plugins.PayloadEncoder)
			econfig := encoder.ConfigStruct().(*plugins.PayloadEncoderConfig)
			econfig.AppendNewlines = false
			encoder.Init(econfig)
			payloadBytes, err := encoder.Encode(pack)

			config.Encoder = "PayloadEncoder"
			config.ContentType = "text/plain"
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(payloadBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()

			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})

		c.Specify("publishes a serialized message", func() {
			encoder := new(ProtobufEncoder)
			encoder.SetPipelineConfig(pConfig)
			encoder.Init(nil)
			protoBytes, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
			oth.MockOutputRunner.EXPECT().Encode(pack).Return(protoBytes, nil)

			err = amqpOutput.Init(config)
			c.Assume(err, gs.IsNil)
			c.Expect(amqpOutput.ch, gs.Equals, mch)

			mch.EXPECT().Publish("", "test", false, false, gomock.Any()).Return(nil)
			inChan <- pack
			close(inChan)
			close(closeChan)

			go func() {
				err := amqpOutput.Run(oth.MockOutputRunner, oth.MockHelper)
				errChan <- err
			}()
			ug.Wait()
			err = <-errChan
			c.Expect(err, gs.IsNil)
		})
	})
}
Example #24
func (i *StatAccumInput) ReportMsg(msg *message.Message) (err error) {
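	// f0 and f1 are assumed to be package-level *message.Field values
	// prepared elsewhere (e.g. during Init); they are not defined in this
	// snippet.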
	msg.AddField(f0)
	msg.AddField(f1)
	return
}
Example #25
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()
	go self.starterFunc(self)

	var (
		ok   = true
		pack *PipelinePack
		msg  *message.Message
	)

	// Maps sandbox names to plugin list items used to generate the
	// sandboxes.json file.
	sandboxes := make(map[string]*DashPluginListItem)
	sbxsLock := new(sync.Mutex)
	reNotWord := regexp.MustCompile(`\W`)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				fn := filepath.Join(self.dataDirectory, "heka_report.json")
				overwriteFile(fn, msg.GetPayload())
				sbxsLock.Lock()
				if err := overwritePluginListFile(self.dataDirectory, sandboxes); err != nil {
					or.LogError(fmt.Errorf("Can't write plugin list file to '%s': %s",
						self.dataDirectory, err))
				}
				sbxsLock.Unlock()
			case "heka.sandbox-output":
				tmp, _ := msg.GetFieldValue("payload_type")
				if payloadType, ok := tmp.(string); ok {
					var payloadName, nameExt string
					tmp, _ := msg.GetFieldValue("payload_name")
					if payloadName, ok = tmp.(string); ok {
						nameExt = reNotWord.ReplaceAllString(payloadName, "")
					}
					if len(nameExt) > 64 {
						nameExt = nameExt[:64]
					}
					nameExt = "." + nameExt

					payloadType = reNotWord.ReplaceAllString(payloadType, "")
					filterName := msg.GetLogger()
					fn := filterName + nameExt + "." + payloadType
					ofn := filepath.Join(self.dataDirectory, fn)
					relPath := path.Join(self.relDataPath, fn) // Used for generating HTTP URLs.
					overwriteFile(ofn, msg.GetPayload())
					sbxsLock.Lock()
					if listItem, ok := sandboxes[filterName]; !ok {
						// First time we've seen this sandbox, add it to the set.
						output := &DashPluginOutput{
							Name:     payloadName,
							Filename: relPath,
						}
						sandboxes[filterName] = &DashPluginListItem{
							Name:    filterName,
							Outputs: []*DashPluginOutput{output},
						}
					} else {
						// We've seen the sandbox, see if we already have this output.
						found := false
						for _, output := range listItem.Outputs {
							if output.Name == payloadName {
								found = true
								break
							}
						}
						if !found {
							output := &DashPluginOutput{
								Name:     payloadName,
								Filename: relPath,
							}
							listItem.Outputs = append(listItem.Outputs, output)
						}
					}
					sbxsLock.Unlock()
				}
			case "heka.sandbox-terminated":
				fn := filepath.Join(self.dataDirectory, "heka_sandbox_termination.tsv")
				filterName := msg.GetLogger()
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					var line string
					if _, ok := msg.GetFieldValue("ProcessMessageCount"); !ok {
						line = fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9,
							msg.GetLogger(), msg.GetPayload())
					} else {
						pmc, _ := msg.GetFieldValue("ProcessMessageCount")
						pms, _ := msg.GetFieldValue("ProcessMessageSamples")
						pmd, _ := msg.GetFieldValue("ProcessMessageAvgDuration")
						mad, _ := msg.GetFieldValue("MatchAvgDuration")
						fcl, _ := msg.GetFieldValue("FilterChanLength")
						mcl, _ := msg.GetFieldValue("MatchChanLength")
						rcl, _ := msg.GetFieldValue("RouterChanLength")
						line = fmt.Sprintf("%d\t%s\t%v"+
							" ProcessMessageCount:%v"+
							" ProcessMessageSamples:%v"+
							" ProcessMessageAvgDuration:%v"+
							" MatchAvgDuration:%v"+
							" FilterChanLength:%v"+
							" MatchChanLength:%v"+
							" RouterChanLength:%v\n",
							msg.GetTimestamp()/1e9,
							filterName, msg.GetPayload(), pmc, pms, pmd,
							mad, fcl, mcl, rcl)
					}
					file.WriteString(line)
					file.Close()
				}
				sbxsLock.Lock()
				delete(sandboxes, filterName)
				sbxsLock.Unlock()
			}
			pack.Recycle()
		case <-ticker:
			go h.PipelineConfig().AllReportsMsg()
		}
	}
	return
}
Example #26
func (f *CounterFilter) ReportMsg(msg *message.Message) (err error) {
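	// As in Example #24, f0 and f1 are assumed to be pre-built package-level
	// *message.Field values defined outside this snippet.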
	msg.AddField(f0)
	msg.AddField(f1)
	return
}
Example #27
func (s *SandboxDecoder) SetDecoderRunner(dr pipeline.DecoderRunner) {
	if s.sb != nil {
		return // no-op already initialized
	}

	s.dRunner = dr
	var original *message.Message

	switch s.sbc.ScriptType {
	case "lua":
		s.sb, s.err = lua.CreateLuaSandbox(s.sbc)
	default:
		s.err = fmt.Errorf("unsupported script type: %s", s.sbc.ScriptType)
	}

	if s.err == nil {
		s.preservationFile = filepath.Join(s.pConfig.Globals.PrependBaseDir(DATA_DIR),
			dr.Name()+DATA_EXT)
		if s.sbc.PreserveData && fileExists(s.preservationFile) {
			s.err = s.sb.Init(s.preservationFile, "decoder")
		} else {
			s.err = s.sb.Init("", "decoder")
		}
	}
	if s.err != nil {
		dr.LogError(s.err)
		if s.sb != nil {
			s.sb.Destroy("")
			s.sb = nil
		}
		s.pConfig.Globals.ShutDown()
		return
	}

	s.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		if s.pack == nil {
			s.pack = dr.NewPack()
			if original == nil && len(s.packs) > 0 {
				original = s.packs[0].Message // payload injections have the original header data in the first pack
			}
		} else {
			original = nil // processing a new message, clear the old message
		}
		if len(payload_type) == 0 { // heka protobuf message
			if original == nil {
				original = new(message.Message)
				copyMessageHeaders(original, s.pack.Message) // save off the header values since unmarshal will wipe them out
			}
			if proto.Unmarshal([]byte(payload), s.pack.Message) != nil {
				return 1
			}
			if s.tz != time.UTC {
				const layout = "2006-01-02T15:04:05.999999999" // remove the incorrect UTC tz info
				t := time.Unix(0, s.pack.Message.GetTimestamp())
				t = t.In(time.UTC)
				ct, _ := time.ParseInLocation(layout, t.Format(layout), s.tz)
				s.pack.Message.SetTimestamp(ct.UnixNano())
			}
		} else {
			s.pack.Message.SetPayload(payload)
			ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
			s.pack.Message.AddField(ptype)
			pname, _ := message.NewField("payload_name", payload_name, "")
			s.pack.Message.AddField(pname)
		}
		if original != nil {
			// if future injections fail to set the standard headers, use the values
			// from the original message.
			if s.pack.Message.Uuid == nil {
				s.pack.Message.SetUuid(original.GetUuid())
			}
			if s.pack.Message.Timestamp == nil {
				s.pack.Message.SetTimestamp(original.GetTimestamp())
			}
			if s.pack.Message.Type == nil {
				s.pack.Message.SetType(original.GetType())
			}
			if s.pack.Message.Hostname == nil {
				s.pack.Message.SetHostname(original.GetHostname())
			}
			if s.pack.Message.Logger == nil {
				s.pack.Message.SetLogger(original.GetLogger())
			}
			if s.pack.Message.Severity == nil {
				s.pack.Message.SetSeverity(original.GetSeverity())
			}
			if s.pack.Message.Pid == nil {
				s.pack.Message.SetPid(original.GetPid())
			}
		}
		s.packs = append(s.packs, s.pack)
		s.pack = nil
		return 0
	})
}
Example #28
func copyMessageHeaders(dst *message.Message, src *message.Message) {
	if src == nil || dst == nil || src == dst {
		return
	}

	if cap(src.Uuid) > 0 {
		dst.SetUuid(src.Uuid)
	} else {
		dst.Uuid = nil
	}
	if src.Timestamp != nil {
		dst.SetTimestamp(*src.Timestamp)
	} else {
		dst.Timestamp = nil
	}
	if src.Type != nil {
		dst.SetType(*src.Type)
	} else {
		dst.Type = nil
	}
	if src.Logger != nil {
		dst.SetLogger(*src.Logger)
	} else {
		dst.Logger = nil
	}
	if src.Severity != nil {
		dst.SetSeverity(*src.Severity)
	} else {
		dst.Severity = nil
	}
	if src.Pid != nil {
		dst.SetPid(*src.Pid)
	} else {
		dst.Pid = nil
	}
	if src.Hostname != nil {
		dst.SetHostname(*src.Hostname)
	} else {
		dst.Hostname = nil
	}
}
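A hedged sketch of the copy helper in use: preserving header values across a destructive proto.Unmarshal, as the sandbox decoder in Example #27 does. The pipeline and proto imports are assumed from the snippets above.
func decodePreservingHeaders(pack *pipeline.PipelinePack, raw []byte) error {
	saved := new(message.Message)
	copyMessageHeaders(saved, pack.Message) // save headers before Unmarshal wipes them
	if err := proto.Unmarshal(raw, pack.Message); err != nil {
		return err
	}
	// Restore any standard header the payload did not set.
	if pack.Message.Hostname == nil {
		pack.Message.SetHostname(saved.GetHostname())
	}
	if pack.Message.Timestamp == nil {
		pack.Message.SetTimestamp(saved.GetTimestamp())
	}
	return nil
}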
Example #29
func (self *LogOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack *PipelinePack
		msg  *message.Message
	)
	for plc := range inChan {
		pack = plc.Pack
		msg = pack.Message
		if self.payloadOnly {
			log.Print(msg.GetPayload())
		} else {
			log.Printf("<\n\tTimestamp: %s\n"+
				"\tType: %s\n"+
				"\tHostname: %s\n"+
				"\tPid: %d\n"+
				"\tUUID: %s\n"+
				"\tLogger: %s\n"+
				"\tPayload: %s\n"+
				"\tEnvVersion: %s\n"+
				"\tSeverity: %d\n"+
				"\tFields: %+v\n"+
				"\tCaptures: %v\n>\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(),
				msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
				msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
				msg.GetSeverity(), msg.Fields, plc.Captures)
		}
		pack.Recycle()
	}
	return
}
Example #30
func main() {
	flagMatch := flag.String("match", "TRUE", "message_matcher filter expression")
	flagFormat := flag.String("format", "txt", "output format [txt|json|heka|count]")
	flagOutput := flag.String("output", "", "output filename, defaults to stdout")
	flagTail := flag.Bool("tail", false, "don't exit on EOF")
	flagOffset := flag.Int64("offset", 0, "starting offset for the input file in bytes")
	flagMaxMessageSize := flag.Uint64("max-message-size", 4*1024*1024, "maximum message size in bytes")
	flag.Parse()

	if flag.NArg() != 1 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	if *flagMaxMessageSize < math.MaxUint32 {
		maxSize := uint32(*flagMaxMessageSize)
		message.SetMaxMessageSize(maxSize)
	} else {
		fmt.Printf("Message size is too large: %d\n", flagMaxMessageSize)
		os.Exit(8)
	}

	var err error
	var match *message.MatcherSpecification
	if match, err = message.CreateMatcherSpecification(*flagMatch); err != nil {
		fmt.Printf("Match specification - %s\n", err)
		os.Exit(2)
	}

	var file *os.File
	if file, err = os.Open(flag.Arg(0)); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(3)
	}
	defer file.Close()

	var out *os.File
	if "" == *flagOutput {
		out = os.Stdout
	} else {
		if out, err = os.OpenFile(*flagOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
			fmt.Printf("%s\n", err)
			os.Exit(4)
		}
		defer out.Close()
	}

	var offset int64
	if offset, err = file.Seek(*flagOffset, 0); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(5)
	}

	sRunner, err := makeSplitterRunner()
	if err != nil {
		fmt.Println(err)
		os.Exit(7)
	}
	msg := new(message.Message)
	var processed, matched int64

	fmt.Printf("Input:%s  Offset:%d  Match:%s  Format:%s  Tail:%t  Output:%s\n",
		flag.Arg(0), *flagOffset, *flagMatch, *flagFormat, *flagTail, *flagOutput)
	for {
		n, record, err := sRunner.GetRecordFromStream(file)
		if n > 0 && n != len(record) {
			fmt.Printf("Corruption detected at offset: %d bytes: %d\n", offset, n-len(record))
		}
		if err != nil {
			if err == io.EOF {
				if !*flagTail || *flagFormat == "count" {
					break
				}
				time.Sleep(time.Duration(500) * time.Millisecond)
			} else {
				break
			}
		} else {
			if len(record) > 0 {
				processed++
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				if err = proto.Unmarshal(record[headerLen:], msg); err != nil {
					fmt.Printf("Error unmarshalling message at offset: %d error: %s\n", offset, err)
					continue
				}

				if !match.Match(msg) {
					continue
				}
				matched++

				switch *flagFormat {
				case "count":
					// no op
				case "json":
					contents, _ := json.Marshal(msg)
					fmt.Fprintf(out, "%s\n", contents)
				case "heka":
					fmt.Fprintf(out, "%s", record)
				default:
					fmt.Fprintf(out, "Timestamp: %s\n"+
						"Type: %s\n"+
						"Hostname: %s\n"+
						"Pid: %d\n"+
						"UUID: %s\n"+
						"Logger: %s\n"+
						"Payload: %s\n"+
						"EnvVersion: %s\n"+
						"Severity: %d\n"+
						"Fields: %+v\n\n",
						time.Unix(0, msg.GetTimestamp()), msg.GetType(),
						msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
						msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
						msg.GetSeverity(), msg.Fields)
				}
			}
		}
		offset += int64(n)
	}
	fmt.Printf("Processed: %d, matched: %d messages\n", processed, matched)
	if err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(6)
	}
}