Example #1
func (self *LogOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack *PipelinePack
		msg  *message.Message
	)
	for plc := range inChan {
		pack = plc.Pack
		msg = pack.Message
		if self.payloadOnly {
			// Print, not Printf: the payload may contain % verbs.
			log.Print(msg.GetPayload())
		} else {
			log.Printf("<\n\tTimestamp: %s\n"+
				"\tType: %s\n"+
				"\tHostname: %s\n"+
				"\tPid: %d\n"+
				"\tUUID: %s\n"+
				"\tLogger: %s\n"+
				"\tPayload: %s\n"+
				"\tEnvVersion: %s\n"+
				"\tSeverity: %d\n"+
				"\tFields: %+v\n"+
				"\tCaptures: %v\n>\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(),
				msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
				msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
				msg.GetSeverity(), msg.Fields, plc.Captures)
		}
		pack.Recycle()
	}
	return
}
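The payloadOnly flag above comes from the plugin's TOML configuration. A minimal sketch of how that wiring usually looks in a Heka output plugin (the config struct and the payload_only option name here are assumptions, not taken verbatim from the Heka source):

// Hypothetical config/Init wiring for the LogOutput above.
type LogOutputConfig struct {
	// When true, only the message payload is logged.
	PayloadOnly bool `toml:"payload_only"`
}

func (self *LogOutput) ConfigStruct() interface{} {
	return new(LogOutputConfig)
}

func (self *LogOutput) Init(config interface{}) error {
	self.payloadOnly = config.(*LogOutputConfig).PayloadOnly
	return nil
}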
Example #2
func (so *S3Output) WriteToBuffer(buffer *bytes.Buffer, msg *message.Message, or OutputRunner) (err error) {
	_, err = buffer.Write([]byte(msg.GetPayload()))
	if err != nil {
		return
	}
	if buffer.Len() > so.config.BufferChunkLimit {
		err = so.SaveToDisk(buffer, or)
	}
	return
}
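A hedged sketch of how WriteToBuffer might be driven: once per consumed message, with a final flush when the input channel closes. The loop below is illustrative only; so, or, and inChan are assumed to be set up as in the Run methods elsewhere in this listing.

// Illustrative consumer loop for WriteToBuffer (not from the Heka source).
var buf bytes.Buffer
for pack := range inChan {
	if err := so.WriteToBuffer(&buf, pack.Message, or); err != nil {
		or.LogError(err)
	}
	pack.Recycle()
}
if buf.Len() > 0 {
	// Flush whatever is left once the channel closes.
	if err := so.SaveToDisk(&buf, or); err != nil {
		or.LogError(err)
	}
}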
Example #3
func (c *CleanMessageFormatter) Format(m *message.Message) (doc []byte, err error) {
	buf := bytes.Buffer{}
	buf.WriteString(`{`)
	// Iterate over the fields configured for clean formatting.
	for _, f := range c.fields {
		switch strings.ToLower(f) {
		case "uuid":
			writeField(&buf, f, strconv.Quote(m.GetUuidString()))
		case "timestamp":
			t := time.Unix(0, m.GetTimestamp()).UTC()
			writeField(&buf, f, strconv.Quote(t.Format(c.timestampFormat)))
		case "type":
			writeField(&buf, f, strconv.Quote(m.GetType()))
		case "logger":
			writeField(&buf, f, strconv.Quote(m.GetLogger()))
		case "severity":
			writeField(&buf, f, strconv.Itoa(int(m.GetSeverity())))
		case "payload":
			if utf8.ValidString(m.GetPayload()) {
				writeField(&buf, f, strconv.Quote(m.GetPayload()))
			}
		case "envversion":
			writeField(&buf, f, strconv.Quote(m.GetEnvVersion()))
		case "pid":
			writeField(&buf, f, strconv.Itoa(int(m.GetPid())))
		case "hostname":
			writeField(&buf, f, strconv.Quote(m.GetHostname()))
		case "fields":
			for _, field := range m.Fields {
				switch field.GetValueType() {
				case message.Field_STRING:
					writeField(&buf, *field.Name, strconv.Quote(field.GetValue().(string)))
				case message.Field_BYTES:
					data := field.GetValue().([]byte)[:]
					writeField(&buf, *field.Name, strconv.Quote(base64.StdEncoding.EncodeToString(data)))
				case message.Field_INTEGER:
					writeField(&buf, *field.Name, strconv.FormatInt(field.GetValue().(int64), 10))
				case message.Field_DOUBLE:
					writeField(&buf, *field.Name, strconv.FormatFloat(field.GetValue().(float64),
						'g', -1, 64))
				case message.Field_BOOL:
					writeField(&buf, *field.Name, strconv.FormatBool(field.GetValue().(bool)))
				}
			}
		default:
			// Unknown field name in the configured list.
			err = fmt.Errorf("Unable to find field: %s", f)
			return
		}
	}
	buf.WriteString(`}`)
	doc = buf.Bytes()
	return
}
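The writeField helper this formatter leans on is not shown in the snippet. Judging from the call sites, it takes a buffer, a field name, and a value that is already valid JSON; a plausible sketch (the comma handling is an assumption):

// Hypothetical writeField matching the call sites above: appends
// "name":value to buf, comma-separated after the first field.
func writeField(b *bytes.Buffer, name, value string) {
	if b.Len() > 1 { // buffer already holds `{` plus at least one field
		b.WriteString(",")
	}
	b.WriteString(strconv.Quote(name))
	b.WriteString(":")
	b.WriteString(value)
}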
Example #4
func (cwo *CloudwatchOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	inChan := or.InChan()

	payloads := make(chan CloudwatchDatapoints, cwo.backlog)
	go cwo.Submitter(payloads, or)

	var (
		pack          *pipeline.PipelinePack
		msg           *message.Message
		rawDataPoints *CloudwatchDatapointPayload
		dataPoints    *CloudwatchDatapoints
	)
	dataPoints = new(CloudwatchDatapoints)
	dataPoints.Datapoints = make([]cloudwatch.MetricDatum, 0, 0)

	for pack = range inChan {
		rawDataPoints = new(CloudwatchDatapointPayload)
		msg = pack.Message
		err = json.Unmarshal([]byte(msg.GetPayload()), rawDataPoints)
		if err != nil {
			or.LogMessage(fmt.Sprintf("warning, unable to parse payload: %s", err))
			err = nil
			continue
		}
		// Run through the list and convert each raw entry to a cloudwatch.MetricDatum.
		for _, rawDatum := range rawDataPoints.Datapoints {
			datum := cloudwatch.MetricDatum{
				Dimensions:      rawDatum.Dimensions,
				MetricName:      rawDatum.MetricName,
				Unit:            rawDatum.Unit,
				Value:           rawDatum.Value,
				StatisticValues: rawDatum.StatisticValues,
			}
			if rawDatum.Timestamp != "" {
				parsedTime, err := message.ForgivingTimeParse("", rawDatum.Timestamp, cwo.tzLocation)
				if err != nil {
					or.LogMessage(fmt.Sprintf("unable to parse timestamp for datum: %s", rawDatum))
					continue
				}
				datum.Timestamp = parsedTime
			}
			dataPoints.Datapoints = append(dataPoints.Datapoints, datum)
		}
		payloads <- *dataPoints
		dataPoints.Datapoints = dataPoints.Datapoints[:0]
		rawDataPoints.Datapoints = rawDataPoints.Datapoints[:0]
		pack.Recycle()
	}
	or.LogMessage("shutting down AWS Cloudwatch submitter")
	cwo.stopChan <- true
	<-cwo.stopChan
	return
}
Example #5
func (s *SmtpOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack       *PipelinePack
		msg        *message.Message
		contents   []byte
		subject    string
		message    string
		headerText string
	)

	if s.conf.Subject == "" {
		subject = "Heka [" + or.Name() + "]"
	} else {
		subject = s.conf.Subject
	}

	header := make(map[string]string)
	header["From"] = s.conf.SendFrom
	header["Subject"] = subject
	header["MIME-Version"] = "1.0"
	header["Content-Type"] = "text/plain; charset=\"utf-8\""
	header["Content-Transfer-Encoding"] = "base64"

	for k, v := range header {
		headerText += fmt.Sprintf("%s: %s\r\n", k, v)
	}

	for pack = range inChan {
		msg = pack.Message
		message = headerText
		if s.conf.PayloadOnly {
			message += "\r\n" + base64.StdEncoding.EncodeToString([]byte(msg.GetPayload()))
			err = s.sendFunction(s.conf.Host, s.auth, s.conf.SendFrom, s.conf.SendTo, []byte(message))
		} else {
			if contents, err = json.Marshal(msg); err == nil {
				message += "\r\n" + base64.StdEncoding.EncodeToString(contents)
				err = s.sendFunction(s.conf.Host, s.auth, s.conf.SendFrom, s.conf.SendTo, []byte(message))
			} else {
				or.LogError(err)
			}
		}
		if err != nil {
			or.LogError(err)
		}
		pack.Recycle()
	}
	return
}
Example #6
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		} else {
			for name, conf := range configFile {
				name = getSandboxName(fr.Name(), name)
				if _, ok := h.Filter(name); ok {
					// TODO: support reload.
					return fmt.Errorf("loadSandbox failed: %s is already running", name)
				}
				fr.LogMessage(fmt.Sprintf("Loading: %s", name))
				confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
				err = ioutil.WriteFile(confFile, []byte(config), 0600)
				if err != nil {
					return
				}
				var sbc SandboxConfig
				if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
					return fmt.Errorf("loadSandbox failed: %s\n", err)
				}
				scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
				err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				// check/clear the old state preservation file
				// this avoids issues with changes to the data model since the last load
				// and prevents holes in the graph from looking like anomalies
				os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))
				var runner pipeline.FilterRunner
				runner, err = this.createRunner(dir, name, conf)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				err = h.PipelineConfig().AddFilterRunner(runner)
				if err == nil {
					this.currentFilters++
				}
				break // only interested in the first item
			}
		}
	}
	return
}
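The message driving this loader carries two things: a TOML configuration in the "config" field and the sandbox script source in the payload. A minimal illustration of a control message that would satisfy it (the TOML keys are assumptions based on the SandboxConfig fields used above):

// Hypothetical construction of a message loadSandbox would accept.
func makeLoadMessage() *message.Message {
	msg := new(message.Message)
	conf := "[MyCounter]\n" +
		"type = \"SandboxFilter\"\n" +
		"script_type = \"lua\"\n"
	f, _ := message.NewField("config", conf, "")
	msg.AddField(f)
	msg.SetPayload("-- Lua sandbox source goes here")
	return msg
}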
Example #7
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {

	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}

		for name, conf := range configFile {
			name = getSandboxName(fr.Name(), name)
			if _, ok := h.Filter(name); ok {
				// TODO: support reload.
				return fmt.Errorf("loadSandbox failed: %s is already running", name)
			}
			fr.LogMessage(fmt.Sprintf("Loading: %s", name))
			confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
			err = ioutil.WriteFile(confFile, []byte(config), 0600)
			if err != nil {
				return
			}
			var sbc SandboxConfig
			// Default, will get overwritten if necessary
			sbc.ScriptType = "lua"
			if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
				return fmt.Errorf("loadSandbox failed: %s\n", err)
			}
			scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
			err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			var runner pipeline.FilterRunner
			runner, err = this.createRunner(dir, name, conf)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			err = this.pConfig.AddFilterRunner(runner)
			if err == nil {
				atomic.AddInt32(&this.currentFilters, 1)
			}
			break // only interested in the first item
		}
	}
	return
}
Example #8
func (n *NagiosOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack    *PipelinePack
		msg     *message.Message
		payload string
	)

	for pack = range inChan {
		msg = pack.Message
		payload = msg.GetPayload()
		pos := strings.IndexAny(payload, ":")
		state := "3" // UNKNOWN
		if pos != -1 {
			switch payload[:pos] {
			case "OK":
				state = "0"
			case "WARNING":
				state = "1"
			case "CRITICAL":
				state = "2"
			}
		}

		data := url.Values{
			"cmd_typ":          {"30"}, // PROCESS_SERVICE_CHECK_RESULT
			"cmd_mod":          {"2"},  // CMDMODE_COMMIT
			"host":             {msg.GetHostname()},
			"service":          {msg.GetLogger()},
			"plugin_state":     {state},
			"plugin_output":    {payload[pos+1:]},
			"performance_data": {""}}
		req, err := http.NewRequest("POST", n.conf.Url,
			strings.NewReader(data.Encode()))
		if err == nil {
			req.SetBasicAuth(n.conf.Username, n.conf.Password)
			if resp, err := n.client.Do(req); err == nil {
				resp.Body.Close()
			} else {
				or.LogError(err)
			}
		} else {
			or.LogError(err)
		}
		pack.Recycle()
	}
	return
}
Example #9
func (this *SandboxManagerFilter) loadSandbox(fr FilterRunner,
	h PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		} else {
			for name, conf := range configFile {
				name = getSandboxName(fr.Name(), name)
				if _, ok := h.Filter(name); ok {
					// TODO: support reload.
					return fmt.Errorf("loadSandbox failed: %s is already running", name)
				}
				fr.LogMessage(fmt.Sprintf("Loading: %s", name))
				confFile := path.Join(dir, fmt.Sprintf("%s.toml", name))
				err = ioutil.WriteFile(confFile, []byte(config), 0600)
				if err != nil {
					return
				}
				var sbfc SandboxFilterConfig
				if err = toml.PrimitiveDecode(conf, &sbfc); err != nil {
					return fmt.Errorf("loadSandbox failed: %s\n", err)
				}
				scriptFile := path.Join(dir, fmt.Sprintf("%s.%s", name, sbfc.Sbc.ScriptType))
				err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				var runner FilterRunner
				runner, err = createRunner(dir, name, conf)
				if err != nil {
					removeAll(dir, fmt.Sprintf("%s.*", name))
					return
				}
				err = h.PipelineConfig().AddFilterRunner(runner)
				if err == nil {
					this.currentFilters++
				}
				break // only interested in the first item
			}
		}
	}
	return
}
Example #10
func (n *NagiosOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()

	var (
		pack    *PipelinePack
		msg     *message.Message
		payload string
	)

	for pack = range inChan {
		msg = pack.Message
		payload = msg.GetPayload()
		pos := strings.IndexAny(payload, ":")
		state := "3" // UNKNOWN
		if pos != -1 {
			switch payload[:pos] {
			case "OK":
				state = "0"
			case "WARNING":
				state = "1"
			case "CRITICAL":
				state = "2"
			}
		}

		host := n.conf.NagiosHost
		if host == "" {
			host = msg.GetHostname()
		}
		service_description := n.conf.NagiosServiceDescription
		if service_description == "" {
			service_description = msg.GetLogger()
		}
		payload = payload[pos+1:]

		err = n.submitter(host, service_description, state, payload)
		if err != nil {
			err = NewRetryMessageError(err.Error())
			pack.Recycle(err)
			continue
		}
		or.UpdateCursor(pack.QueueCursor)
		pack.Recycle(nil)
	}
	return
}
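Both Nagios examples rely on the same payload convention: a Nagios state keyword, a colon, then the plugin output. When no colon is present, pos stays -1, the state defaults to "3" (UNKNOWN), and payload[pos+1:] degrades gracefully to the whole payload. The convention, distilled into a standalone helper (a sketch for clarity, not part of either plugin):

// parseNagiosPayload splits "STATE:output" the way the examples above expect.
func parseNagiosPayload(payload string) (state, output string) {
	state = "3" // UNKNOWN
	pos := strings.Index(payload, ":")
	if pos == -1 {
		return state, payload
	}
	switch payload[:pos] {
	case "OK":
		state = "0"
	case "WARNING":
		state = "1"
	case "CRITICAL":
		state = "2"
	}
	return state, payload[pos+1:]
}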
Example #11
func getMessageVariable(msg *message.Message, mvar *messageVariable) string {
	if mvar.header {
		switch mvar.name {
		case "Type":
			return msg.GetType()
		case "Logger":
			return msg.GetLogger()
		case "Hostname":
			return msg.GetHostname()
		case "Payload":
			return msg.GetPayload()
		default:
			return ""
		}
	} else {
		return getFieldAsString(msg, mvar)
	}
}
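The messageVariable type is not shown in this listing; the two branches above only need a header flag and a name. A hypothetical shape and call, purely for illustration:

// Hypothetical struct matching what getMessageVariable reads.
type messageVariable struct {
	header bool   // true: resolve a well-known header; false: a dynamic field
	name   string // header name ("Type", "Logger", ...) or field name
}

func hostnameOf(msg *message.Message) string {
	return getMessageVariable(msg, &messageVariable{header: true, name: "Hostname"})
}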
Example #12
func (c *KibanaFormatter) Format(m *message.Message) (doc []byte, err error) {
	buf := bytes.Buffer{}
	buf.WriteString(`{`)

	writeStringField(true, &buf, `@uuid`, m.GetUuidString())
	t := time.Unix(0, m.GetTimestamp()) // time.Unix gives local time back
	writeStringField(false, &buf, `@timestamp`, t.UTC().Format("2006-01-02T15:04:05.000Z"))
	writeStringField(false, &buf, `@type`, m.GetType())
	writeStringField(false, &buf, `@logger`, m.GetLogger())
	writeRawField(false, &buf, `@severity`, strconv.Itoa(int(m.GetSeverity())))
	writeStringField(false, &buf, `@message`, m.GetPayload())
	writeRawField(false, &buf, `@envversion`, strconv.Quote(m.GetEnvVersion()))
	writeRawField(false, &buf, `@pid`, strconv.Itoa(int(m.GetPid())))
	writeStringField(false, &buf, `@source_host`, m.GetHostname())

	buf.WriteString(`,"@fields":{`)
	first := true
	for _, field := range m.Fields {
		switch field.GetValueType() {
		case message.Field_STRING:
			writeStringField(first, &buf, *field.Name, field.GetValue().(string))
			first = false
		case message.Field_BYTES:
			data := field.GetValue().([]byte)[:]
			writeStringField(first, &buf, *field.Name, base64.StdEncoding.EncodeToString(data))
			first = false
		case message.Field_INTEGER:
			writeRawField(first, &buf, *field.Name, strconv.FormatInt(field.GetValue().(int64), 10))
			first = false
		case message.Field_DOUBLE:
			writeRawField(first, &buf, *field.Name, strconv.FormatFloat(field.GetValue().(float64),
				'g', -1, 64))
			first = false
		case message.Field_BOOL:
			writeRawField(first, &buf, *field.Name, strconv.FormatBool(field.GetValue().(bool)))
			first = false
		}
	}
	buf.WriteString(`}`) // end of fields
	buf.WriteString(`}`)
	doc = buf.Bytes()
	return
}
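The two helpers this formatter uses are easy to infer from the call sites: writeRawField emits the value verbatim (it is already valid JSON) while writeStringField quotes it first, and the first flag suppresses the leading comma. A sketch under those assumptions:

// Hypothetical helpers matching the call sites above.
func writeRawField(first bool, b *bytes.Buffer, name, value string) {
	if !first {
		b.WriteString(",")
	}
	b.WriteString(strconv.Quote(name))
	b.WriteString(":")
	b.WriteString(value)
}

func writeStringField(first bool, b *bytes.Buffer, name, value string) {
	writeRawField(first, b, name, strconv.Quote(value))
}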
Example #13
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()

	var (
		ok   = true
		plc  *PipelineCapture
		pack *PipelinePack
		msg  *message.Message
	)
	for ok {
		select {
		case plc, ok = <-inChan:
			if !ok {
				break
			}
			pack = plc.Pack
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				fn := path.Join(self.workingDirectory, "heka_report.json")
				overwriteFile(fn, msg.GetPayload())
			case "heka.sandbox-output":
				tmp, ok := msg.GetFieldValue("payload_type")
				if ok {
					if pt, ok := tmp.(string); ok && pt == "cbuf" {
						html := path.Join(self.workingDirectory, msg.GetLogger()+".html")
						_, err := os.Stat(html)
						if err != nil {
							overwriteFile(html, fmt.Sprintf(getCbufTemplate(), msg.GetLogger(), msg.GetLogger()))
						}
						fn := path.Join(self.workingDirectory, msg.GetLogger()+"."+pt)
						overwriteFile(fn, msg.GetPayload())
					}
				}
			case "heka.sandbox-terminated":
				fn := path.Join(self.workingDirectory, self.terminationFile)
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					line := fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9, msg.GetLogger(), msg.GetPayload())
					file.WriteString(line)
					file.Close()
				}
			}
			plc.Pack.Recycle()
		case <-ticker:
			go h.PipelineConfig().allReportsMsg()
		}
	}
	return
}
Example #14
func main() {
	flagMatch := flag.String("match", "TRUE", "message_matcher filter expression")
	flagFormat := flag.String("format", "txt", "output format [txt|json|heka|count]")
	flagOutput := flag.String("output", "", "output filename, defaults to stdout")
	flagTail := flag.Bool("tail", false, "don't exit on EOF")
	flagOffset := flag.Int64("offset", 0, "starting offset for the input file in bytes")
	flagMaxMessageSize := flag.Uint64("max-message-size", 4*1024*1024, "maximum message size in bytes")
	flag.Parse()

	if flag.NArg() != 1 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	if *flagMaxMessageSize < math.MaxUint32 {
		maxSize := uint32(*flagMaxMessageSize)
		message.SetMaxMessageSize(maxSize)
	} else {
		fmt.Printf("Message size is too large: %d\n", flagMaxMessageSize)
		os.Exit(8)
	}

	var err error
	var match *message.MatcherSpecification
	if match, err = message.CreateMatcherSpecification(*flagMatch); err != nil {
		fmt.Printf("Match specification - %s\n", err)
		os.Exit(2)
	}

	var file *os.File
	if file, err = os.Open(flag.Arg(0)); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(3)
	}
	defer file.Close()

	var out *os.File
	if "" == *flagOutput {
		out = os.Stdout
	} else {
		if out, err = os.OpenFile(*flagOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
			fmt.Printf("%s\n", err)
			os.Exit(4)
		}
		defer out.Close()
	}

	var offset int64
	if offset, err = file.Seek(*flagOffset, 0); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(5)
	}

	sRunner, err := makeSplitterRunner()
	if err != nil {
		fmt.Println(err)
		os.Exit(7)
	}
	msg := new(message.Message)
	var processed, matched int64

	fmt.Printf("Input:%s  Offset:%d  Match:%s  Format:%s  Tail:%t  Output:%s\n",
		flag.Arg(0), *flagOffset, *flagMatch, *flagFormat, *flagTail, *flagOutput)
	for {
		n, record, err := sRunner.GetRecordFromStream(file)
		if n > 0 && n != len(record) {
			fmt.Printf("Corruption detected at offset: %d bytes: %d\n", offset, n-len(record))
		}
		if err != nil {
			if err == io.EOF {
				if !*flagTail || "count" == *flagFormat {
					break
				}
				time.Sleep(time.Duration(500) * time.Millisecond)
			} else {
				break
			}
		} else {
			if len(record) > 0 {
				processed += 1
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				if err = proto.Unmarshal(record[headerLen:], msg); err != nil {
					fmt.Printf("Error unmarshalling message at offset: %d error: %s\n", offset, err)
					continue
				}

				if !match.Match(msg) {
					continue
				}
				matched += 1

				switch *flagFormat {
				case "count":
					// no op
				case "json":
					contents, _ := json.Marshal(msg)
					fmt.Fprintf(out, "%s\n", contents)
				case "heka":
					fmt.Fprintf(out, "%s", record)
				default:
					fmt.Fprintf(out, "Timestamp: %s\n"+
						"Type: %s\n"+
						"Hostname: %s\n"+
						"Pid: %d\n"+
						"UUID: %s\n"+
						"Logger: %s\n"+
						"Payload: %s\n"+
						"EnvVersion: %s\n"+
						"Severity: %d\n"+
						"Fields: %+v\n\n",
						time.Unix(0, msg.GetTimestamp()), msg.GetType(),
						msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
						msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
						msg.GetSeverity(), msg.Fields)
				}
			}
		}
		offset += int64(n)
	}
	fmt.Printf("Processed: %d, matched: %d messages\n", processed, matched)
	if err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(6)
	}
}
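Several examples in this listing compute int(record[1]) + message.HEADER_FRAMING_SIZE before unmarshalling. In Heka's framed stream format, record[1] holds the length of the protobuf header and the framing constant accounts for the surrounding separator bytes, so everything from that offset onward is the protobuf-encoded message. A small helper capturing the arithmetic (a sketch for clarity, not part of the Heka API):

// messageBytes returns the protobuf payload of a framed Heka record,
// assuming the framing layout described above. No bounds checking, for brevity.
func messageBytes(record []byte) []byte {
	headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
	return record[headerLen:]
}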
Example #15
func EncoderSpec(c gs.Context) {
	t := new(ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewPipelineConfig sets up the Globals that the pipeline.Prepend*Dir
	// functions need so they don't die during plugin Init().
	_ = pipeline.NewPipelineConfig(nil)

	c.Specify("A SandboxEncoder", func() {

		encoder := new(SandboxEncoder)
		conf := encoder.ConfigStruct().(*SandboxEncoderConfig)
		supply := make(chan *pipeline.PipelinePack, 1)
		pack := pipeline.NewPipelinePack(supply)
		pack.Message.SetPayload("original")
		pack.Message.SetType("my_type")
		pack.Message.SetPid(12345)
		pack.Message.SetSeverity(4)
		pack.Message.SetHostname("hostname")
		pack.Message.SetTimestamp(54321)
		pack.Message.SetUuid(uuid.NewRandom())
		var (
			result []byte
			err    error
		)

		c.Specify("emits JSON correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_json.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			msg := new(message.Message)
			err = json.Unmarshal(result, msg)
			c.Expect(err, gs.IsNil)
			c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
			c.Expect(msg.GetPid(), gs.Equals, int32(12345))
			c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
			c.Expect(msg.GetHostname(), gs.Equals, "hostname")
			c.Expect(msg.GetPayload(), gs.Equals, "original")
			c.Expect(msg.GetType(), gs.Equals, "my_type")
		})

		c.Specify("emits text correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_text.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			c.Expect(string(result), gs.Equals, "Prefixed original")
		})

		c.Specify("emits protobuf correctly", func() {

			c.Specify("when inject_message is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_protobuf.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
				c.Expect(msg.GetPid(), gs.Equals, int32(12345))
				c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
				c.Expect(msg.GetHostname(), gs.Equals, "hostname")
				c.Expect(msg.GetPayload(), gs.Equals, "mutated")
				c.Expect(msg.GetType(), gs.Equals, "after")
			})

			c.Specify("when `write_message` is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_writemessage.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetPayload(), gs.Equals, "mutated payload")
				c.Expect(pack.Message.GetPayload(), gs.Equals, "original")
			})
		})

	})
}
Example #16
func EncoderSpec(c gs.Context) {
	t := new(ts.SimpleT)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewPipelineConfig sets up the Globals that the pipeline.Prepend*Dir
	// functions need so they don't die during plugin Init().
	pConfig := pipeline.NewPipelineConfig(nil)

	c.Specify("A SandboxEncoder", func() {

		encoder := new(SandboxEncoder)
		encoder.SetPipelineConfig(pConfig)
		conf := encoder.ConfigStruct().(*SandboxEncoderConfig)
		supply := make(chan *pipeline.PipelinePack, 1)
		pack := pipeline.NewPipelinePack(supply)
		pack.Message.SetPayload("original")
		pack.Message.SetType("my_type")
		pack.Message.SetPid(12345)
		pack.Message.SetSeverity(4)
		pack.Message.SetHostname("hostname")
		pack.Message.SetTimestamp(54321)
		pack.Message.SetUuid(uuid.NewRandom())
		var (
			result []byte
			err    error
		)

		c.Specify("emits JSON correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_json.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			msg := new(message.Message)
			err = json.Unmarshal(result, msg)
			c.Expect(err, gs.IsNil)
			c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
			c.Expect(msg.GetPid(), gs.Equals, int32(12345))
			c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
			c.Expect(msg.GetHostname(), gs.Equals, "hostname")
			c.Expect(msg.GetPayload(), gs.Equals, "original")
			c.Expect(msg.GetType(), gs.Equals, "my_type")
		})

		c.Specify("emits text correctly", func() {
			conf.ScriptFilename = "../lua/testsupport/encoder_text.lua"
			err = encoder.Init(conf)
			c.Expect(err, gs.IsNil)

			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			c.Expect(string(result), gs.Equals, "Prefixed original")
		})

		c.Specify("emits protobuf correctly", func() {

			c.Specify("when inject_message is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_protobuf.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetTimestamp(), gs.Equals, int64(54321))
				c.Expect(msg.GetPid(), gs.Equals, int32(12345))
				c.Expect(msg.GetSeverity(), gs.Equals, int32(4))
				c.Expect(msg.GetHostname(), gs.Equals, "hostname")
				c.Expect(msg.GetPayload(), gs.Equals, "mutated")
				c.Expect(msg.GetType(), gs.Equals, "after")
			})

			c.Specify("when `write_message` is used", func() {
				conf.ScriptFilename = "../lua/testsupport/encoder_writemessage.lua"
				err = encoder.Init(conf)
				c.Expect(err, gs.IsNil)

				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)

				msg := new(message.Message)
				err = proto.Unmarshal(result, msg)
				c.Expect(err, gs.IsNil)
				c.Expect(msg.GetPayload(), gs.Equals, "mutated payload")
				c.Expect(pack.Message.GetPayload(), gs.Equals, "original")
			})
		})
	})

	c.Specify("cbuf librato encoder", func() {
		encoder := new(SandboxEncoder)
		encoder.SetPipelineConfig(pConfig)
		conf := encoder.ConfigStruct().(*SandboxEncoderConfig)
		supply := make(chan *pipeline.PipelinePack, 1)
		pack := pipeline.NewPipelinePack(supply)
		pack.Message.SetType("my_type")
		pack.Message.SetPid(12345)
		pack.Message.SetSeverity(4)
		pack.Message.SetHostname("hostname")
		pack.Message.SetTimestamp(54321)
		pack.Message.SetUuid(uuid.NewRandom())
		var (
			result []byte
			err    error
		)
		conf.ScriptFilename = "../lua/encoders/cbuf_librato.lua"
		conf.ModuleDirectory = "../../../../../../modules"
		conf.Config = make(map[string]interface{})
		err = encoder.Init(conf)
		c.Assume(err, gs.IsNil)

		c.Specify("encodes cbuf data", func() {
			payload := `{"time":1410823460,"rows":5,"columns":5,"seconds_per_row":5,"column_info":[{"name":"HTTP_200","unit":"count","aggregation":"sum"},{"name":"HTTP_300","unit":"count","aggregation":"sum"},{"name":"HTTP_400","unit":"count","aggregation":"sum"},{"name":"HTTP_500","unit":"count","aggregation":"sum"},{"name":"HTTP_UNKNOWN","unit":"count","aggregation":"sum"}]}
1	2	3	4	5
6	7	8	9	10
11	12	13	14	15
16	17	18	19	20
21	22	23	24	25
`
			pack.Message.SetPayload(payload)
			result, err = encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			expected := `{"gauges":[{"value":1,"measure_time":1410823460,"name":"HTTP_200","source":"hostname"},{"value":2,"measure_time":1410823460,"name":"HTTP_300","source":"hostname"},{"value":3,"measure_time":1410823460,"name":"HTTP_400","source":"hostname"},{"value":4,"measure_time":1410823460,"name":"HTTP_500","source":"hostname"},{"value":5,"measure_time":1410823460,"name":"HTTP_UNKNOWN","source":"hostname"},{"value":6,"measure_time":1410823465,"name":"HTTP_200","source":"hostname"},{"value":7,"measure_time":1410823465,"name":"HTTP_300","source":"hostname"},{"value":8,"measure_time":1410823465,"name":"HTTP_400","source":"hostname"},{"value":9,"measure_time":1410823465,"name":"HTTP_500","source":"hostname"},{"value":10,"measure_time":1410823465,"name":"HTTP_UNKNOWN","source":"hostname"},{"value":11,"measure_time":1410823470,"name":"HTTP_200","source":"hostname"},{"value":12,"measure_time":1410823470,"name":"HTTP_300","source":"hostname"},{"value":13,"measure_time":1410823470,"name":"HTTP_400","source":"hostname"},{"value":14,"measure_time":1410823470,"name":"HTTP_500","source":"hostname"},{"value":15,"measure_time":1410823470,"name":"HTTP_UNKNOWN","source":"hostname"},{"value":16,"measure_time":1410823475,"name":"HTTP_200","source":"hostname"},{"value":17,"measure_time":1410823475,"name":"HTTP_300","source":"hostname"},{"value":18,"measure_time":1410823475,"name":"HTTP_400","source":"hostname"},{"value":19,"measure_time":1410823475,"name":"HTTP_500","source":"hostname"},{"value":20,"measure_time":1410823475,"name":"HTTP_UNKNOWN","source":"hostname"}]}`
			c.Expect(string(result), gs.Equals, expected)

			c.Specify("and correctly advances", func() {
				payload := `{"time":1410823475,"rows":5,"columns":5,"seconds_per_row":5,"column_info":[{"name":"HTTP_200","unit":"count","aggregation":"sum"},{"name":"HTTP_300","unit":"count","aggregation":"sum"},{"name":"HTTP_400","unit":"count","aggregation":"sum"},{"name":"HTTP_500","unit":"count","aggregation":"sum"},{"name":"HTTP_UNKNOWN","unit":"count","aggregation":"sum"}]}
16	17	18	19	20
21	22	23	24	25
1	2	3	4	5
6	nan	8	nan	10
5	4	3	2	1
`
				pack.Message.SetPayload(payload)
				result, err = encoder.Encode(pack)
				c.Expect(err, gs.IsNil)
				expected := `{"gauges":[{"value":21,"measure_time":1410823480,"name":"HTTP_200","source":"hostname"},{"value":22,"measure_time":1410823480,"name":"HTTP_300","source":"hostname"},{"value":23,"measure_time":1410823480,"name":"HTTP_400","source":"hostname"},{"value":24,"measure_time":1410823480,"name":"HTTP_500","source":"hostname"},{"value":25,"measure_time":1410823480,"name":"HTTP_UNKNOWN","source":"hostname"},{"value":1,"measure_time":1410823485,"name":"HTTP_200","source":"hostname"},{"value":2,"measure_time":1410823485,"name":"HTTP_300","source":"hostname"},{"value":3,"measure_time":1410823485,"name":"HTTP_400","source":"hostname"},{"value":4,"measure_time":1410823485,"name":"HTTP_500","source":"hostname"},{"value":5,"measure_time":1410823485,"name":"HTTP_UNKNOWN","source":"hostname"},{"value":6,"measure_time":1410823490,"name":"HTTP_200","source":"hostname"},{"value":8,"measure_time":1410823490,"name":"HTTP_400","source":"hostname"},{"value":10,"measure_time":1410823490,"name":"HTTP_UNKNOWN","source":"hostname"}]}`
				c.Expect(string(result), gs.Equals, expected)
			})
		})
	})

	c.Specify("schema influx encoder", func() {
		encoder := new(SandboxEncoder)
		encoder.SetPipelineConfig(pConfig)
		conf := encoder.ConfigStruct().(*SandboxEncoderConfig)
		supply := make(chan *pipeline.PipelinePack, 1)
		pack := pipeline.NewPipelinePack(supply)
		pack.Message.SetType("my_type")
		pack.Message.SetPid(12345)
		pack.Message.SetSeverity(4)
		pack.Message.SetHostname("hostname")
		pack.Message.SetTimestamp(54321 * 1e9)
		pack.Message.SetLogger("Logger")
		pack.Message.SetPayload("Payload value lorem ipsum")

		f, err := message.NewField("intField", 123, "")
		c.Assume(err, gs.IsNil)
		err = f.AddValue(456)
		c.Assume(err, gs.IsNil)
		pack.Message.AddField(f)

		f, err = message.NewField("strField", "0_first", "")
		c.Assume(err, gs.IsNil)
		err = f.AddValue("0_second")
		c.Assume(err, gs.IsNil)
		pack.Message.AddField(f)

		f, err = message.NewField("strField", "1_first", "")
		c.Assume(err, gs.IsNil)
		err = f.AddValue("1_second")
		c.Assume(err, gs.IsNil)
		pack.Message.AddField(f)

		f, err = message.NewField("byteField", []byte("first"), "")
		c.Assume(err, gs.IsNil)
		err = f.AddValue([]byte("second"))
		c.Assume(err, gs.IsNil)
		pack.Message.AddField(f)

		conf.ScriptFilename = "../lua/encoders/schema_influx.lua"
		conf.ModuleDirectory = "../../../../../../modules"
		conf.Config = make(map[string]interface{})

		c.Specify("encodes a basic message", func() {
			err = encoder.Init(conf)
			c.Assume(err, gs.IsNil)
			result, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			expected := `[{"points":[[54321000,"my_type","Payload value lorem ipsum","hostname",12345,"Logger",4,"",[123,456],["0_first","0_second"],["1_first","1_second"]]],"name":"series","columns":["time","Type","Payload","Hostname","Pid","Logger","Severity","EnvVersion","intField","strField","strField2"]}]`
			c.Expect(string(result), gs.Equals, expected)
		})

		c.Specify("interpolates series name correctly", func() {
			conf.Config["series"] = "series.%{Pid}.%{Type}.%{strField}.%{intField}"
			err = encoder.Init(conf)
			c.Assume(err, gs.IsNil)
			result, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			expected := `[{"points":[[54321000,"my_type","Payload value lorem ipsum","hostname",12345,"Logger",4,"",[123,456],["0_first","0_second"],["1_first","1_second"]]],"name":"series.12345.my_type.0_first.123","columns":["time","Type","Payload","Hostname","Pid","Logger","Severity","EnvVersion","intField","strField","strField2"]}]`
			c.Expect(string(result), gs.Equals, expected)
		})

		c.Specify("skips specified correctly", func() {
			conf.Config["skip_fields"] = "Payload strField Type"
			err = encoder.Init(conf)
			c.Assume(err, gs.IsNil)
			result, err := encoder.Encode(pack)
			c.Expect(err, gs.IsNil)
			expected := `[{"points":[[54321000,"hostname",12345,"Logger",4,"",[123,456]]],"name":"series","columns":["time","Hostname","Pid","Logger","Severity","EnvVersion","intField"]}]`
			c.Expect(string(result), gs.Equals, expected)
		})
	})
}
Example #17
// Save matching client records locally to the given output file in the given
// format.
func save(recordChannel <-chan s3splitfile.S3Record, match *message.MatcherSpecification, format string, out *os.File, done chan<- int) {
	processed := 0
	matched := 0
	bytes := 0
	msg := new(message.Message)
	for {
		r, ok := <-recordChannel
		if !ok {
			// Channel is closed
			done <- bytes
			break
		}

		bytes += len(r.Record)

		processed += 1
		headerLen := int(r.Record[1]) + message.HEADER_FRAMING_SIZE
		messageBytes := r.Record[headerLen:]
		unsnappy, decodeErr := snappy.Decode(nil, messageBytes)
		if decodeErr == nil {
			messageBytes = unsnappy
		}
		if err := proto.Unmarshal(messageBytes, msg); err != nil {
			fmt.Fprintf(os.Stderr, "Error unmarshalling message %d in %s, error: %s\n", processed, r.Key, err)
			continue
		}

		if !match.Match(msg) {
			continue
		}

		matched += 1

		switch format {
		case "count":
			// no op
		case "json":
			contents, _ := json.Marshal(msg)
			fmt.Fprintf(out, "%s\n", contents)
		case "heka":
			fmt.Fprintf(out, "%s", r.Record)
		case "offsets":
			// Use offsets mode for indexing the S3 files by clientId
			clientId, ok := msg.GetFieldValue("clientId")
			recordLength := len(r.Record) - headerLen
			if ok {
				fmt.Fprintf(out, "%s\t%s\t%d\t%d\n", r.Key, clientId, (r.Offset + uint64(headerLen)), recordLength)
			} else {
				fmt.Fprintf(os.Stderr, "Missing client id in %s @ %d+%d\n", r.Key, r.Offset, recordLength)
			}
		default:
			fmt.Fprintf(out, "Timestamp: %s\n"+
				"Type: %s\n"+
				"Hostname: %s\n"+
				"Pid: %d\n"+
				"UUID: %s\n"+
				"Logger: %s\n"+
				"Payload: %s\n"+
				"EnvVersion: %s\n"+
				"Severity: %d\n"+
				"Fields: %+v\n\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(),
				msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
				msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
				msg.GetSeverity(), msg.Fields)
		}
	}
	fmt.Fprintf(os.Stderr, "Processed: %d, matched: %d messages (%.2f MB)\n", processed, matched, (float64(bytes) / 1024.0 / 1024.0))
}
Example #18
func (ao *AMQPOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	conf := ao.config

	var (
		pack    *PipelinePack
		msg     *message.Message
		persist uint8
		ok      bool = true
		amqpMsg amqp.Publishing
		encoder client.Encoder
		msgBody []byte = make([]byte, 0, 500)
	)
	if conf.Persistent {
		persist = uint8(1)
	} else {
		persist = uint8(0)
	}
	encoder = client.NewProtobufEncoder(nil)

	for ok {
		select {
		case <-ao.closeChan:
			ok = false
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			msg = pack.Message
			if conf.Serialize {
				if err = encoder.EncodeMessageStream(msg, &msgBody); err != nil {
					or.LogError(err)
					err = nil
					pack.Recycle()
					continue
				}
				amqpMsg = amqp.Publishing{
					DeliveryMode: persist,
					Timestamp:    time.Now(),
					ContentType:  "application/hekad",
					Body:         msgBody,
				}
			} else {
				amqpMsg = amqp.Publishing{
					DeliveryMode: persist,
					Timestamp:    time.Now(),
					ContentType:  "text/plain",
					Body:         []byte(msg.GetPayload()),
				}
			}
			err = ao.ch.Publish(conf.Exchange, conf.RoutingKey,
				false, false, amqpMsg)
			if err != nil {
				ok = false
			} else {
				pack.Recycle()
			}
			msgBody = msgBody[:0]
		}
	}
	ao.usageWg.Done()
	amqpHub.Close(conf.URL, ao.connWg)
	ao.connWg.Wait()
	return
}
Example #19
func (pf *PayloadFormatter) Format(m *message.Message) (doc []byte, err error) {
	return []byte(m.GetPayload()), nil
}
Example #20
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()

	var (
		ok   = true
		pack *PipelinePack
		msg  *message.Message
	)

	reNotWord := regexp.MustCompile(`\W`)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				fn := path.Join(self.workingDirectory, "heka_report.json")
				createPluginPages(self.workingDirectory, msg.GetPayload())
				overwriteFile(fn, msg.GetPayload())
			case "heka.sandbox-output":
				tmp, _ := msg.GetFieldValue("payload_type")
				if payloadType, ok := tmp.(string); ok {
					var payloadName, nameExt string
					tmp, _ := msg.GetFieldValue("payload_name")
					if payloadName, ok = tmp.(string); ok {
						nameExt = reNotWord.ReplaceAllString(payloadName, "")
					}
					if len(nameExt) > 64 {
						nameExt = nameExt[:64]
					}
					nameExt = "." + nameExt

					payloadType = reNotWord.ReplaceAllString(payloadType, "")
					fn := msg.GetLogger() + nameExt + "." + payloadType
					ofn := path.Join(self.workingDirectory, fn)
					if payloadType == "cbuf" {
						html := msg.GetLogger() + nameExt + ".html"
						ohtml := path.Join(self.workingDirectory, html)
						_, err := os.Stat(ohtml)
						if err != nil {
							overwriteFile(ohtml, fmt.Sprintf(getCbufTemplate(),
								msg.GetLogger(),
								payloadName,
								fn))
						}
						overwriteFile(ofn, msg.GetPayload())
						updatePluginMetadata(self.workingDirectory, msg.GetLogger(), html, payloadName)
					} else {
						overwriteFile(ofn, msg.GetPayload())
						updatePluginMetadata(self.workingDirectory, msg.GetLogger(), fn, payloadName)
					}
				}
			case "heka.sandbox-terminated":
				fn := path.Join(self.workingDirectory, "heka_sandbox_termination.tsv")
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					var line string
					if _, ok := msg.GetFieldValue("ProcessMessageCount"); !ok {
						line = fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9, msg.GetLogger(), msg.GetPayload())
					} else {
						pmc, _ := msg.GetFieldValue("ProcessMessageCount")
						pms, _ := msg.GetFieldValue("ProcessMessageSamples")
						pmd, _ := msg.GetFieldValue("ProcessMessageAvgDuration")
						ms, _ := msg.GetFieldValue("MatchSamples")
						mad, _ := msg.GetFieldValue("MatchAvgDuration")
						fcl, _ := msg.GetFieldValue("FilterChanLength")
						mcl, _ := msg.GetFieldValue("MatchChanLength")
						rcl, _ := msg.GetFieldValue("RouterChanLength")
						line = fmt.Sprintf("%d\t%s\t%v"+
							" ProcessMessageCount:%v"+
							" ProcessMessageSamples:%v"+
							" ProcessMessageAvgDuration:%v"+
							" MatchSamples:%v"+
							" MatchAvgDuration:%v"+
							" FilterChanLength:%v"+
							" MatchChanLength:%v"+
							" RouterChanLength:%v\n",
							msg.GetTimestamp()/1e9,
							msg.GetLogger(), msg.GetPayload(), pmc, pms, pmd,
							ms, mad, fcl, mcl, rcl)
					}
					file.WriteString(line)
					file.Close()
				}
			}
			pack.Recycle()
		case <-ticker:
			go h.PipelineConfig().allReportsMsg()
		}
	}
	return
}
Example #21
func BufferedOutputSpec(c gs.Context) {
	tmpDir, tmpErr := ioutil.TempDir("", "bufferedout-tests")

	defer func() {
		tmpErr = os.RemoveAll(tmpDir)
		c.Expect(tmpErr, gs.Equals, nil)
	}()

	t := &ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	c.Specify("BufferedOutput Internals", func() {
		encoder := new(ProtobufEncoder)
		encoder.sample = false
		encoder.sampleDenominator = 1000
		or := NewMockOutputRunner(ctrl)

		bufferedOutput, err := NewBufferedOutput(tmpDir, "test", or)
		c.Expect(err, gs.IsNil)
		msg := ts.GetTestMessage()

		c.Specify("fileExists", func() {
			c.Expect(fileExists(tmpDir), gs.IsTrue)
			c.Expect(fileExists(filepath.Join(tmpDir, "test.log")), gs.IsFalse)
		})

		c.Specify("extractBufferId", func() {
			id, err := extractBufferId("555.log")
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(555))
			id, err = extractBufferId("")
			c.Expect(err, gs.Not(gs.IsNil))
			id, err = extractBufferId("a.log")
			c.Expect(err, gs.Not(gs.IsNil))
		})

		c.Specify("findBufferId", func() {
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(0))
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(0))
			fd, err := os.Create(filepath.Join(tmpDir, "4.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "5.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "6a.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(4))
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(5))
		})

		c.Specify("writeCheckpoint", func() {
			bufferedOutput.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			err := bufferedOutput.writeCheckpoint(43, 99999)
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(bufferedOutput.checkpointFilename), gs.IsTrue)

			id, offset, err := readCheckpoint(bufferedOutput.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(99999))

			err = bufferedOutput.writeCheckpoint(43, 1)
			c.Expect(err, gs.IsNil)
			id, offset, err = readCheckpoint(bufferedOutput.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(1))
			bufferedOutput.checkpointFile.Close()
		})

		c.Specify("readCheckpoint", func() {
			cp := filepath.Join(tmpDir, "cp.txt")
			file, err := os.OpenFile(cp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
			c.Expect(err, gs.IsNil)
			id, offset, err := readCheckpoint(cp)
			c.Expect(err, gs.Not(gs.IsNil))

			file.WriteString("22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint format")

			file.Seek(0, 0)
			file.WriteString("aa 22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint id")

			file.Seek(0, 0)
			file.WriteString("43 aa")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint offset")

			file.Seek(0, 0)
			file.WriteString("43 22")
			file.Close()

			id, offset, err = readCheckpoint(cp)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(22))
		})

		c.Specify("RollQueue", func() {
			bufferedOutput.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			bufferedOutput.queue = tmpDir
			err := bufferedOutput.RollQueue()
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(getQueueFilename(bufferedOutput.queue,
				bufferedOutput.writeId)), gs.IsTrue)
			bufferedOutput.writeFile.WriteString("this is a test item")
			bufferedOutput.writeFile.Close()
			bufferedOutput.writeCheckpoint(bufferedOutput.writeId, 10)
			bufferedOutput.checkpointFile.Close()
			err = bufferedOutput.readFromNextFile()
			buf := make([]byte, 4)
			n, err := bufferedOutput.readFile.Read(buf)
			c.Expect(n, gs.Equals, 4)
			c.Expect("test", gs.Equals, string(buf))
			bufferedOutput.writeFile.Close()
			bufferedOutput.readFile.Close()
		})

		c.Specify("QueueRecord", func() {
			bufferedOutput.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			bufferedOutput.queue = tmpDir
			newpack := NewPipelinePack(nil)
			newpack.Message = msg
			newpack.Decoded = true
			payload := "Write me out to the network"
			newpack.Message.SetPayload(payload)
			protoBytes, err := encoder.Encode(newpack)
			expectedLen := 115

			c.Specify("adds framing when necessary", func() {
				or.EXPECT().Encode(newpack).Return(protoBytes, err)
				or.EXPECT().UsesFraming().Return(false)
				err = bufferedOutput.RollQueue()
				c.Expect(err, gs.IsNil)
				err = bufferedOutput.QueueRecord(newpack)
				fName := getQueueFilename(bufferedOutput.queue, bufferedOutput.writeId)
				c.Expect(fileExists(fName), gs.IsTrue)
				c.Expect(err, gs.IsNil)
				bufferedOutput.writeFile.Close()

				f, err := os.Open(fName)
				c.Expect(err, gs.IsNil)

				n, record, err := bufferedOutput.parser.Parse(f)
				f.Close()
				c.Expect(n, gs.Equals, expectedLen)
				c.Expect(err, gs.IsNil)
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				record = record[headerLen:]
				outMsg := new(message.Message)
				proto.Unmarshal(record, outMsg)
				c.Expect(outMsg.GetPayload(), gs.Equals, payload)
			})

			c.Specify("doesn't add framing if it's already there", func() {
				var framed []byte
				client.CreateHekaStream(protoBytes, &framed, nil)
				or.EXPECT().Encode(newpack).Return(framed, err)
				or.EXPECT().UsesFraming().Return(true)
				err = bufferedOutput.RollQueue()
				c.Expect(err, gs.IsNil)
				err = bufferedOutput.QueueRecord(newpack)
				fName := getQueueFilename(bufferedOutput.queue, bufferedOutput.writeId)
				c.Expect(fileExists(fName), gs.IsTrue)
				c.Expect(err, gs.IsNil)
				bufferedOutput.writeFile.Close()

				f, err := os.Open(fName)
				c.Expect(err, gs.IsNil)

				n, record, err := bufferedOutput.parser.Parse(f)
				f.Close()
				c.Expect(n, gs.Equals, expectedLen)
				c.Expect(err, gs.IsNil)
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				record = record[headerLen:]
				outMsg := new(message.Message)
				proto.Unmarshal(record, outMsg)
				c.Expect(outMsg.GetPayload(), gs.Equals, payload)
			})

		})
	})
}
Example #22
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()
	go self.starterFunc(self)

	var (
		ok   = true
		pack *PipelinePack
		msg  *message.Message
	)

	// Maps sandbox names to plugin list items used to generate the
	// sandboxes.json file.
	sandboxes := make(map[string]*DashPluginListItem)
	sbxsLock := new(sync.Mutex)
	reNotWord := regexp.MustCompile(`\W`)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				fn := filepath.Join(self.dataDirectory, "heka_report.json")
				overwriteFile(fn, msg.GetPayload())
				sbxsLock.Lock()
				if err := overwritePluginListFile(self.dataDirectory, sandboxes); err != nil {
					or.LogError(fmt.Errorf("Can't write plugin list file to '%s': %s",
						self.dataDirectory, err))
				}
				sbxsLock.Unlock()
			case "heka.sandbox-output":
				tmp, _ := msg.GetFieldValue("payload_type")
				if payloadType, ok := tmp.(string); ok {
					var payloadName, nameExt string
					tmp, _ := msg.GetFieldValue("payload_name")
					if payloadName, ok = tmp.(string); ok {
						nameExt = reNotWord.ReplaceAllString(payloadName, "")
					}
					if len(nameExt) > 64 {
						nameExt = nameExt[:64]
					}
					nameExt = "." + nameExt

					payloadType = reNotWord.ReplaceAllString(payloadType, "")
					filterName := msg.GetLogger()
					fn := filterName + nameExt + "." + payloadType
					ofn := filepath.Join(self.dataDirectory, fn)
					relPath := path.Join(self.relDataPath, fn) // Used for generating HTTP URLs.
					overwriteFile(ofn, msg.GetPayload())
					sbxsLock.Lock()
					if listItem, ok := sandboxes[filterName]; !ok {
						// First time we've seen this sandbox, add it to the set.
						output := &DashPluginOutput{
							Name:     payloadName,
							Filename: relPath,
						}
						sandboxes[filterName] = &DashPluginListItem{
							Name:    filterName,
							Outputs: []*DashPluginOutput{output},
						}
					} else {
						// We've seen the sandbox, see if we already have this output.
						found := false
						for _, output := range listItem.Outputs {
							if output.Name == payloadName {
								found = true
								break
							}
						}
						if !found {
							output := &DashPluginOutput{
								Name:     payloadName,
								Filename: relPath,
							}
							listItem.Outputs = append(listItem.Outputs, output)
						}
					}
					sbxsLock.Unlock()
				}
			case "heka.sandbox-terminated":
				fn := filepath.Join(self.dataDirectory, "heka_sandbox_termination.tsv")
				filterName := msg.GetLogger()
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					var line string
					if _, ok := msg.GetFieldValue("ProcessMessageCount"); !ok {
						line = fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9,
							msg.GetLogger(), msg.GetPayload())
					} else {
						pmc, _ := msg.GetFieldValue("ProcessMessageCount")
						pms, _ := msg.GetFieldValue("ProcessMessageSamples")
						pmd, _ := msg.GetFieldValue("ProcessMessageAvgDuration")
						mad, _ := msg.GetFieldValue("MatchAvgDuration")
						fcl, _ := msg.GetFieldValue("FilterChanLength")
						mcl, _ := msg.GetFieldValue("MatchChanLength")
						rcl, _ := msg.GetFieldValue("RouterChanLength")
						line = fmt.Sprintf("%d\t%s\t%v"+
							" ProcessMessageCount:%v"+
							" ProcessMessageSamples:%v"+
							" ProcessMessageAvgDuration:%v"+
							" MatchAvgDuration:%v"+
							" FilterChanLength:%v"+
							" MatchChanLength:%v"+
							" RouterChanLength:%v\n",
							msg.GetTimestamp()/1e9,
							filterName, msg.GetPayload(), pmc, pms, pmd,
							mad, fcl, mcl, rcl)
					}
					file.WriteString(line)
					file.Close()
				}
				sbxsLock.Lock()
				delete(sandboxes, filterName)
				sbxsLock.Unlock()
			}
			pack.Recycle()
		case <-ticker:
			go h.PipelineConfig().AllReportsMsg()
		}
	}
	return
}
Example #23
func QueueBufferSpec(c gs.Context) {
	tmpDir, tmpErr := ioutil.TempDir("", "queuebuffer-tests")

	defer func() {
		tmpErr = os.RemoveAll(tmpDir)
		c.Expect(tmpErr, gs.Equals, nil)
	}()

	t := &ts.SimpleT{}
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	c.Specify("QueueBuffer Internals", func() {
		pConfig := NewPipelineConfig(nil)

		err := pConfig.RegisterDefault("HekaFramingSplitter")
		c.Assume(err, gs.IsNil)
		RegisterPlugin("FooOutput", func() interface{} {
			return &FooOutput{}
		})

		outputToml := `[FooOutput]
        message_matcher = "TRUE"
        `

		var configFile ConfigFile
		_, err = toml.Decode(outputToml, &configFile)
		c.Assume(err, gs.IsNil)
		section, ok := configFile["FooOutput"]
		c.Assume(ok, gs.IsTrue)
		maker, err := NewPluginMaker("FooOutput", pConfig, section)
		c.Assume(err, gs.IsNil)

		orTmp, err := maker.MakeRunner("FooOutput")
		c.Assume(err, gs.IsNil)
		or := orTmp.(*foRunner)

		// h := NewMockPluginHelper(ctrl)
		// h.EXPECT().PipelineConfig().Return(pConfig)

		qConfig := &QueueBufferConfig{
			FullAction:        "block",
			CursorUpdateCount: 1,
			MaxFileSize:       66000,
		}
		feeder, reader, err := NewBufferSet(tmpDir, "test", qConfig, or, pConfig)
		// bufferedOutput, err := NewBufferedOutput(tmpDir, "test", or, h, uint64(0))
		c.Assume(err, gs.IsNil)
		msg := ts.GetTestMessage()

		c.Specify("fileExists", func() {
			c.Expect(fileExists(tmpDir), gs.IsTrue)
			c.Expect(fileExists(filepath.Join(tmpDir, "test.log")), gs.IsFalse)
		})

		c.Specify("extractBufferId", func() {
			id, err := extractBufferId("555.log")
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(555))
			id, err = extractBufferId("")
			c.Expect(err, gs.Not(gs.IsNil))
			id, err = extractBufferId("a.log")
			c.Expect(err, gs.Not(gs.IsNil))
		})

		c.Specify("findBufferId", func() {
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(0))
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(0))
			fd, err := os.Create(filepath.Join(tmpDir, "4.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "5.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			fd, err = os.Create(filepath.Join(tmpDir, "6a.log"))
			c.Expect(err, gs.IsNil)
			fd.Close()
			c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(4))
			c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(5))
		})

		c.Specify("writeCheckpoint", func() {
			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			err := reader.writeCheckpoint("43 99999")
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(reader.checkpointFilename), gs.IsTrue)

			id, offset, err := readCheckpoint(reader.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(99999))

			err = reader.writeCheckpoint("43 1")
			c.Expect(err, gs.IsNil)
			id, offset, err = readCheckpoint(reader.checkpointFilename)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(1))
			reader.checkpointFile.Close()
		})

		c.Specify("readCheckpoint", func() {
			cp := filepath.Join(tmpDir, "cp.txt")
			file, err := os.OpenFile(cp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
			c.Expect(err, gs.IsNil)
			id, offset, err := readCheckpoint(cp)
			c.Expect(err, gs.Not(gs.IsNil))

			file.WriteString("22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint format")

			file.Seek(0, 0)
			file.WriteString("aa 22")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint id")

			file.Seek(0, 0)
			file.WriteString("43 aa")
			id, offset, err = readCheckpoint(cp)
			c.Expect(err.Error(), gs.Equals, "invalid checkpoint offset")

			file.Seek(0, 0)
			file.WriteString("43 22")
			file.Close()

			id, offset, err = readCheckpoint(cp)
			c.Expect(err, gs.IsNil)
			c.Expect(id, gs.Equals, uint(43))
			c.Expect(offset, gs.Equals, int64(22))
		})

		c.Specify("RollQueue", func() {
			feeder.queue = tmpDir
			err := feeder.RollQueue()
			c.Expect(err, gs.IsNil)
			c.Expect(fileExists(getQueueFilename(feeder.queue,
				feeder.writeId)), gs.IsTrue)
			feeder.writeFile.WriteString("this is a test item")
			feeder.writeFile.Close()

			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			reader.queue = tmpDir
			reader.writeCheckpoint(fmt.Sprintf("%d 10", feeder.writeId))
			reader.checkpointFile.Close()
			err = reader.initReadFile()
			c.Assume(err, gs.IsNil)
			buf := make([]byte, 4)
			n, err := reader.readFile.Read(buf)
			c.Expect(n, gs.Equals, 4)
			c.Expect(string(buf), gs.Equals, "test")
			feeder.writeFile.Close()
			reader.readFile.Close()
		})

		c.Specify("QueueRecord", func() {
			reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
			reader.queue = tmpDir
			newpack := NewPipelinePack(nil)
			newpack.Message = msg
			payload := "Write me out to the network"
			newpack.Message.SetPayload(payload)
			encoder := client.NewProtobufEncoder(nil)
			protoBytes, err := encoder.EncodeMessage(newpack.Message)
			newpack.MsgBytes = protoBytes
			expectedLen := 115

			c.Specify("adds framing", func() {
				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)
				err = feeder.QueueRecord(newpack)
				fName := getQueueFilename(feeder.queue, feeder.writeId)
				c.Expect(fileExists(fName), gs.IsTrue)
				c.Expect(err, gs.IsNil)
				feeder.writeFile.Close()

				f, err := os.Open(fName)
				c.Expect(err, gs.IsNil)

				n, record, err := reader.sRunner.GetRecordFromStream(f)
				f.Close()
				c.Expect(n, gs.Equals, expectedLen)
				c.Expect(err, gs.IsNil)
				headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
				record = record[headerLen:]
				outMsg := new(message.Message)
				proto.Unmarshal(record, outMsg)
				c.Expect(outMsg.GetPayload(), gs.Equals, payload)
			})

			c.Specify("when queue has limit", func() {
				feeder.Config.MaxBufferSize = uint64(200)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))

				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)

				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(expectedLen))
			})

			c.Specify("when queue has limit and is full", func() {
				feeder.Config.MaxBufferSize = uint64(50)
				feeder.Config.MaxFileSize = uint64(50)

				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))
				err = feeder.RollQueue()
				c.Expect(err, gs.IsNil)
				queueFiles, err := ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				numFiles := len(queueFiles)

				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.Equals, QueueIsFull)
				c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))

				// Bump the max queue size so it will accept a record.
				feeder.Config.MaxBufferSize = uint64(120)
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)

				// Queue should have rolled.
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, numFiles+1)

				// Try to queue one last time, it should fail.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.Equals, QueueIsFull)

				// Ensure queue didn't roll twice.
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, numFiles+1)
			})

			c.Specify("rolls when queue file hits max size", func() {
				feeder.Config.MaxFileSize = uint64(300)
				c.Assume(feeder.writeFileSize, gs.Equals, uint64(0))

				// First two shouldn't trigger roll.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen*2))
				queueFiles, err := ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, 1)

				// Third one should.
				err = feeder.QueueRecord(newpack)
				c.Expect(err, gs.IsNil)
				c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen))
				queueFiles, err = ioutil.ReadDir(feeder.queue)
				c.Expect(err, gs.IsNil)
				c.Expect(len(queueFiles), gs.Equals, 2)
			})
		})

		c.Specify("getQueueBufferSize", func() {
			c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(0))

			fd, _ := os.Create(filepath.Join(tmpDir, "4.log"))
			fd.WriteString("0123456789")
			fd.Close()

			fd, _ = os.Create(filepath.Join(tmpDir, "5.log"))
			fd.WriteString("0123456789")
			fd.Close()

			// Only the sizes of *.log files should count toward the total.
			fd, _ = os.Create(filepath.Join(tmpDir, "random_file"))
			fd.WriteString("0123456789")
			fd.Close()

			c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(20))
		})
	})
}