func (rpsi *RedisPubSubInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		dRunner pipeline.DecoderRunner
		decoder pipeline.Decoder
		pack    *pipeline.PipelinePack
		e       error
		ok      bool
	)
	// Get the InputRunner's chan to receive empty PipelinePacks.
	packSupply := ir.InChan()

	if rpsi.conf.DecoderName != "" {
		if dRunner, ok = h.DecoderRunner(rpsi.conf.DecoderName,
			fmt.Sprintf("%s-%s", ir.Name(), rpsi.conf.DecoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", rpsi.conf.DecoderName)
		}
		decoder = dRunner.Decoder()
	}

	// Subscribe to the configured channel pattern.
	psc := redis.PubSubConn{Conn: rpsi.conn}
	psc.PSubscribe(rpsi.conf.Channel)

	for {
		switch n := psc.Receive().(type) {
		case redis.PMessage:
			// Grab an empty PipelinePack from the InputRunner.
			pack = <-packSupply
			pack.Message.SetType("redis_pub_sub")
			pack.Message.SetLogger(n.Channel)
			pack.Message.SetPayload(string(n.Data))
			pack.Message.SetTimestamp(time.Now().UnixNano())
			var packs []*pipeline.PipelinePack
			if decoder == nil {
				packs = []*pipeline.PipelinePack{pack}
			} else {
				packs, e = decoder.Decode(pack)
			}
			if packs != nil {
				for _, p := range packs {
					ir.Inject(p)
				}
			} else {
				if e != nil {
					ir.LogError(fmt.Errorf("Couldn't parse Redis message: %s", n.Data))
				}
				pack.Recycle(nil)
			}
		case redis.Subscription:
			ir.LogMessage(fmt.Sprintf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count))
			if n.Count == 0 {
				return errors.New("No channel to subscribe")
			}
		case error:
			// Log through the runner rather than stdout.
			ir.LogError(fmt.Errorf("Redis receive error: %v", n))
			return n
		}
	}
}
func (input *NsqInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) (err error) {
	var (
		dRunner pipeline.DecoderRunner
		ok      bool
	)

	if input.DecoderName != "" {
		if dRunner, ok = helper.DecoderRunner(input.DecoderName,
			fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", input.DecoderName)
		}
		input.decoderChan = dRunner.InChan()
	}
	input.runner = runner
	input.packSupply = runner.InChan()

	input.consumer.AddHandler(input)

	err = input.consumer.ConnectToNSQDs(input.NsqdAddrs)
	if err != nil {
		return err
	}
	err = input.consumer.ConnectToNSQLookupds(input.LookupdAddrs)
	if err != nil {
		return err
	}

	// Block until the consumer is stopped.
	<-input.consumer.StoppedChan()
	return nil
}
func (input *FilePollingInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error {
	input.runner = runner
	input.hostname = helper.PipelineConfig().Hostname()
	tickChan := runner.Ticker()

	sRunner := runner.NewSplitterRunner("")
	if !sRunner.UseMsgBytes() {
		sRunner.SetPackDecorator(input.packDecorator)
	}

	for {
		select {
		case <-input.stop:
			return nil
		case <-tickChan:
		}

		f, err := os.Open(input.FilePath)
		if err != nil {
			runner.LogError(fmt.Errorf("Error opening file: %s", err.Error()))
			continue
		}
		for err == nil {
			err = sRunner.SplitStream(f, nil)
			if err != io.EOF && err != nil {
				runner.LogError(fmt.Errorf("Error reading file: %s", err.Error()))
			}
		}
		// Release the file handle before the next poll.
		f.Close()
	}
}
// On Heka restarts this function reloads all previously running SandboxFilters
// using the script, configuration, and preservation files in the working
// directory.
func (this *SandboxManagerFilter) restoreSandboxes(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string) {
	glob := fmt.Sprintf("%s-*.toml", getNormalizedName(fr.Name()))
	if matches, err := filepath.Glob(filepath.Join(dir, glob)); err == nil {
		for _, fn := range matches {
			var configFile pipeline.ConfigFile
			if _, err = toml.DecodeFile(fn, &configFile); err != nil {
				fr.LogError(fmt.Errorf("restoreSandboxes failed: %s\n", err))
				continue
			}
			for _, conf := range configFile {
				var runner pipeline.FilterRunner
				name := path.Base(fn[:len(fn)-5]) // trim the ".toml" extension
				fr.LogMessage(fmt.Sprintf("Loading: %s", name))
				runner, err = this.createRunner(dir, name, conf)
				if err != nil {
					fr.LogError(fmt.Errorf("createRunner failed: %s\n", err.Error()))
					removeAll(dir, fmt.Sprintf("%s.*", name))
					break
				}
				err = h.PipelineConfig().AddFilterRunner(runner)
				if err != nil {
					fr.LogError(err)
				} else {
					atomic.AddInt32(&this.currentFilters, 1)
				}
				break // only interested in the first item
			}
		}
	}
}
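// The getNormalizedName and removeAll helpers called above (and by the other
// sandbox manager functions below) aren't shown in this section. A minimal
// sketch of what they might look like: the names and call signatures come
// from the call sites, but the bodies are assumptions, not the original
// implementations.
func getNormalizedName(name string) string {
	// Reduce the runner name to a filesystem- and glob-friendly token.
	return strings.Map(func(r rune) rune {
		if r == ' ' || r == '/' {
			return '-'
		}
		return r
	}, name)
}

func removeAll(dir, glob string) {
	// Delete every file in dir matching the glob, ignoring lookup errors.
	if matches, err := filepath.Glob(filepath.Join(dir, glob)); err == nil {
		for _, fn := range matches {
			os.Remove(fn)
		}
	}
}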
func (input *FilePollingInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error {
	var (
		data    []byte
		pack    *pipeline.PipelinePack
		dRunner pipeline.DecoderRunner
		ok      bool
		err     error
	)

	if input.DecoderName != "" {
		if dRunner, ok = helper.DecoderRunner(input.DecoderName,
			fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", input.DecoderName)
		}
		input.decoderChan = dRunner.InChan()
	}
	input.runner = runner

	hostname := helper.PipelineConfig().Hostname()
	packSupply := runner.InChan()
	tickChan := runner.Ticker()

	for {
		select {
		case <-input.stop:
			return nil
		case <-tickChan:
		}

		data, err = ioutil.ReadFile(input.FilePath)
		if err != nil {
			runner.LogError(fmt.Errorf("Error reading file: %s", err))
			continue
		}

		pack = <-packSupply
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetTimestamp(time.Now().UnixNano())
		pack.Message.SetType("heka.file.polling")
		pack.Message.SetHostname(hostname)
		pack.Message.SetPayload(string(data))
		if field, err := message.NewField("TickerInterval", int(input.TickerInterval), ""); err != nil {
			runner.LogError(err)
		} else {
			pack.Message.AddField(field)
		}
		if field, err := message.NewField("FilePath", input.FilePath, ""); err != nil {
			runner.LogError(err)
		} else {
			pack.Message.AddField(field)
		}
		input.sendPack(pack)
	}
}
// Run runs the FileReadFilter filter, which inspects each message and appends
// the content of the file named by the executed template to the existing
// payload. The resulting message is injected back into the pipeline with type
// newType.
func (fr FileReadFilter) Run(r pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	if fr.tmpl == nil {
		return errors.New("FileReadFilter: empty template")
	}
	var (
		fh           *os.File
		inp          io.Reader
		npack, opack *pipeline.PipelinePack
	)
	out := bytes.NewBuffer(make([]byte, 0, 4096))
	log.Printf("FileReadFilter: Starting with template %s", fr.tmpl)
	for opack = range r.InChan() {
		// Execute the template against the message to get the file name.
		out.Reset()
		if err = fr.tmpl.Execute(out, extendedMessage{opack.Message}); err != nil {
			opack.Recycle()
			return fmt.Errorf("FileReadFilter: error executing template %v with message %v: %v",
				fr.tmpl, opack.Message, err)
		}
		if fh, err = os.Open(out.String()); err != nil {
			log.Printf("FileReadFilter: cannot read %q: %v", out, err)
			opack.Recycle()
			continue
		}
		out.Reset()
		inp = fh
		if fr.decoder != nil {
			inp = transform.NewReader(fh, fr.decoder)
		}
		if _, err = io.Copy(out, inp); err != nil && err != io.EOF {
			log.Printf("FileReadFilter: error reading %q: %v", fh.Name(), err)
			opack.Recycle()
			fh.Close()
			continue
		}
		fh.Close()

		npack = h.PipelinePack(opack.MsgLoopCount)
		if npack == nil {
			opack.Recycle()
			return errors.New("FileReadFilter: no output pack - infinite loop?")
		}
		npack.Decoded = true
		npack.Message = message.CopyMessage(opack.Message)
		npack.Message.SetType(fr.newType)
		npack.Message.SetPayload(npack.Message.GetPayload() + "\n" + out.String())
		if !r.Inject(npack) {
			log.Printf("FileReadFilter: cannot inject new pack %v", npack)
		}
		opack.Recycle()
	}
	return nil
}
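// The extendedMessage wrapper passed to tmpl.Execute above isn't defined in
// this section. A minimal sketch, assuming it only exists to give templates
// convenient access to message fields; the Field helper and its usage are
// assumptions, not part of the original plugin.
type extendedMessage struct {
	*message.Message
}

// Field returns the first value of the named field rendered as a string, so a
// template could write something like {{.Field "path"}} (hypothetical usage).
func (m extendedMessage) Field(name string) string {
	if v, ok := m.GetFieldValue(name); ok {
		return fmt.Sprintf("%v", v)
	}
	return ""
}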
// Creates a DecoderRunner and stop channel, and starts the provided
// LogstreamInput plugin.
func (li *LogstreamerInput) startLogstreamInput(logstream *LogstreamInput, i int,
	ir p.InputRunner, h p.PluginHelper) {
	fullName := fmt.Sprintf("%s-%s-%d", li.pluginName, li.decoderName, i)
	dRunner, _ := h.DecoderRunner(li.decoderName, fullName)
	stop := make(chan chan bool, 1)
	li.stopLogstreamChans = append(li.stopLogstreamChans, stop)
	go logstream.Run(ir, h, stop, dRunner)
}
func (zi *ZeroMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	// Get the InputRunner's chan to receive empty PipelinePacks.
	packs := ir.InChan()

	var decoding chan<- *pipeline.PipelinePack
	if zi.conf.Decoder != "" {
		// Fetch specified decoder.
		decoder, ok := h.DecoderSet().ByName(zi.conf.Decoder)
		if !ok {
			return fmt.Errorf("Could not find decoder: %s", zi.conf.Decoder)
		}
		// Get the decoder's receiving chan.
		decoding = decoder.InChan()
	}

	var pack *pipeline.PipelinePack
	var count int
	var b []byte
	var err error
	// Read data from the ZeroMQ socket.
	for {
		b, err = zi.socket.Recv(0)
		if err != nil {
			ir.LogError(err)
			continue
		}
		// Grab an empty PipelinePack from the InputRunner.
		pack = <-packs
		// Trim the excess empty bytes.
		count = len(b)
		pack.MsgBytes = pack.MsgBytes[:count]
		// Copy the received bytes into the pack's buffer.
		copy(pack.MsgBytes, b)
		if decoding != nil {
			// Send pack onto decoder.
			decoding <- pack
		} else {
			// Send pack into the Heka pipeline.
			ir.Inject(pack)
		}
	}
}
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}
		for name, conf := range configFile {
			name = getSandboxName(fr.Name(), name)
			if _, ok := h.Filter(name); ok {
				// todo support reload
				return fmt.Errorf("loadSandbox failed: %s is already running", name)
			}
			fr.LogMessage(fmt.Sprintf("Loading: %s", name))

			confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
			err = ioutil.WriteFile(confFile, []byte(config), 0600)
			if err != nil {
				return
			}

			var sbc SandboxConfig
			if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
				return fmt.Errorf("loadSandbox failed: %s\n", err)
			}
			scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
			err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}

			// Check/clear the old state preservation file. This avoids issues
			// with changes to the data model since the last load and prevents
			// holes in the graph from looking like anomalies.
			os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))

			var runner pipeline.FilterRunner
			runner, err = this.createRunner(dir, name, conf)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			err = h.PipelineConfig().AddFilterRunner(runner)
			if err == nil {
				this.currentFilters++
			}
			break // only interested in the first item
		}
	}
	return
}
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}
		for name, conf := range configFile {
			name = getSandboxName(fr.Name(), name)
			if _, ok := h.Filter(name); ok {
				// todo support reload
				return fmt.Errorf("loadSandbox failed: %s is already running", name)
			}
			fr.LogMessage(fmt.Sprintf("Loading: %s", name))

			confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
			err = ioutil.WriteFile(confFile, []byte(config), 0600)
			if err != nil {
				return
			}

			var sbc SandboxConfig
			// Default, will get overwritten if necessary.
			sbc.ScriptType = "lua"
			if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
				return fmt.Errorf("loadSandbox failed: %s\n", err)
			}

			scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
			err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}

			var runner pipeline.FilterRunner
			runner, err = this.createRunner(dir, name, conf)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			err = this.pConfig.AddFilterRunner(runner)
			if err == nil {
				atomic.AddInt32(&this.currentFilters, 1)
			}
			break // only interested in the first item
		}
	}
	return
}
func (rli *RedisInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	ir.LogMessage(fmt.Sprintf("Addr: %s, key: %s, batch_count: %v, decoder: %s",
		rli.conf.Address, rli.conf.Key, rli.conf.Batch_count, rli.conf.Decoder))

	var (
		dRunner pipeline.DecoderRunner
		decoder pipeline.Decoder
		ok      bool
		e       error
		reply   interface{}
		vals    []string
		msg     string
	)

	if rli.conf.Decoder != "" {
		if dRunner, ok = h.DecoderRunner(rli.conf.Decoder,
			fmt.Sprintf("%s-%s", ir.Name(), rli.conf.Decoder)); !ok {
			return fmt.Errorf("Decoder not found: %s", rli.conf.Decoder)
		}
		decoder = dRunner.Decoder()
	}

	for {
		// Block until a single message is available, then drain a batch.
		reply, e = rli.conn.Do("BLPOP", rli.conf.Key, "0")
		if e == nil {
			vals, e = redis.Strings(reply, nil)
			if e == nil {
				msg = vals[1] // BLPOP replies with [key, value]
				rli.InsertMessage(ir, decoder, msg)
			}
		}
		reply, e = rli.batchlpop.Do(rli.conn, rli.conf.Key, rli.conf.Batch_count)
		if e == nil {
			vals, e = redis.Strings(reply, nil)
			if e == nil {
				for _, msg = range vals {
					rli.InsertMessage(ir, decoder, msg)
				}
			} else {
				ir.LogError(e)
			}
		} else {
			ir.LogError(fmt.Errorf("type: %T, error: %v", reply, e))
			return e
		}
	}
}
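// rli.batchlpop above is invoked like a redigo redis.Script. The script
// itself isn't shown in this section; a plausible sketch follows, assuming it
// atomically pops up to ARGV[1] items with LRANGE + LTRIM. Only the
// (keyCount=1, key, count) calling convention is taken from the call site;
// the Lua body is an assumption.
var batchlpop = redis.NewScript(1, `
	local vals = redis.call('LRANGE', KEYS[1], 0, ARGV[1] - 1)
	redis.call('LTRIM', KEYS[1], ARGV[1], -1)
	return vals
`)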
func (di *DockerLogInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		pack *pipeline.PipelinePack
		ok   bool
	)
	hostname := h.Hostname()

	go di.attachMgr.Listen(di.logstream, di.closer)

	// Get the InputRunner's chan to receive empty PipelinePacks.
	packSupply := ir.InChan()

	ok = true
	var err error
	for ok {
		select {
		case logline := <-di.logstream:
			pack = <-packSupply
			pack.Message.SetType("DockerLog")
			pack.Message.SetLogger(logline.Type) // stderr or stdout
			pack.Message.SetHostname(hostname)   // use the host's hostname
			pack.Message.SetPayload(logline.Data)
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetUuid(uuid.NewRandom())
			for k, v := range logline.Fields {
				message.NewStringField(pack.Message, k, v)
			}
			ir.Deliver(pack)
		case err, ok = <-di.attachErrors:
			if !ok {
				err = errors.New("Docker event channel closed")
				break
			}
			ir.LogError(fmt.Errorf("Attacher error: %s", err))
		case err = <-di.stopChan:
			ok = false
		}
	}
	di.closer <- struct{}{}
	close(di.logstream)
	return err
}
func (this *SandboxManagerFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	inChan := fr.InChan()

	var ok = true
	var pack *pipeline.PipelinePack
	var delta int64

	this.restoreSandboxes(fr, h, this.workingDirectory)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			delta = time.Now().UnixNano() - pack.Message.GetTimestamp()
			if math.Abs(float64(delta)) >= 5e9 {
				fr.LogError(fmt.Errorf("Discarded control message: %d seconds skew", delta/1e9))
				pack.Recycle()
				break
			}
			action, _ := pack.Message.GetFieldValue("action")
			switch action {
			case "load":
				current := int(atomic.LoadInt32(&this.currentFilters))
				if current < this.maxFilters {
					err := this.loadSandbox(fr, h, this.workingDirectory, pack.Message)
					if err != nil {
						fr.LogError(err)
					}
				} else {
					fr.LogError(fmt.Errorf("%s attempted to load more than %d filters",
						fr.Name(), this.maxFilters))
				}
			case "unload":
				fv, _ := pack.Message.GetFieldValue("name")
				if name, ok := fv.(string); ok {
					name = getSandboxName(fr.Name(), name)
					if h.PipelineConfig().RemoveFilterRunner(name) {
						removeAll(this.workingDirectory, fmt.Sprintf("%s.*", name))
					}
				}
			}
			pack.Recycle()
		}
	}
	return
}
// Fetch the configured output and iterate over received messages, checking
// each message's hostname and delivering it to the output if the hostname is
// in our config.
func (f *HostFilter) Run(runner pipeline.FilterRunner, helper pipeline.PluginHelper) (err error) {
	var (
		hostname string
		output   pipeline.OutputRunner
		ok       bool
	)
	if output, ok = helper.Output(f.output); !ok {
		return fmt.Errorf("No output: %s", f.output)
	}
	for pack := range runner.InChan() {
		hostname = pack.Message.GetHostname()
		if f.hosts[hostname] {
			output.InChan() <- pack
		} else {
			pack.Recycle()
		}
	}
	return
}
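// f.hosts above is a set keyed by hostname; how it gets populated isn't shown
// in this section. A minimal sketch of a matching Init, assuming a config
// struct with an output name and a host list; the HostFilterConfig shape and
// field names are assumptions.
type HostFilterConfig struct {
	Output string   `toml:"output"`
	Hosts  []string `toml:"hosts"`
}

func (f *HostFilter) Init(config interface{}) error {
	conf := config.(*HostFilterConfig)
	f.output = conf.Output
	// Build the hostname set used for the per-message lookup in Run.
	f.hosts = make(map[string]bool, len(conf.Hosts))
	for _, host := range conf.Hosts {
		f.hosts[host] = true
	}
	return nil
}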
func (dei *DockerEventInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	defer dei.dockerClient.RemoveEventListener(dei.eventStream)
	defer close(dei.eventStream)
	var (
		ok   bool
		err  error
		pack *pipeline.PipelinePack
	)
	hostname := h.Hostname()

	// Provides empty PipelinePacks.
	packSupply := ir.InChan()

	ok = true
	for ok {
		select {
		case event := <-dei.eventStream:
			pack = <-packSupply
			pack.Message.SetType("DockerEvent")
			pack.Message.SetLogger(event.ID)
			pack.Message.SetHostname(hostname)

			payload := fmt.Sprintf("id:%s status:%s from:%s time:%d",
				event.ID, event.Status, event.From, event.Time)
			pack.Message.SetPayload(payload)
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetUuid(uuid.NewRandom())
			message.NewStringField(pack.Message, "ID", event.ID)
			message.NewStringField(pack.Message, "Status", event.Status)
			message.NewStringField(pack.Message, "From", event.From)
			message.NewInt64Field(pack.Message, "Time", event.Time, "ts")
			ir.Deliver(pack)
		case err = <-dei.stopChan:
			ok = false
		}
	}
	return err
}
func (input *DockerStatsInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error {
	var pack *pipeline.PipelinePack
	input.runner = runner
	packSupply := runner.InChan()
	tickChan := runner.Ticker()
	hostname := helper.PipelineConfig().Hostname()

	for {
		select {
		case <-input.stop:
			return nil
		case <-tickChan:
		}

		var (
			previousCPU, previousSystem uint64
			mstats                      *dockerStat
			preCPUStats, stats          *docker.Stats
		)
		endpoint := "unix:///var/run/docker.sock"
		client, err := docker.NewClient(endpoint)
		if err != nil {
			runner.LogError(fmt.Errorf("Error connecting to Docker: %s", err))
			continue
		}
		containers, err := client.ListContainers(docker.ListContainersOptions{
			Filters: map[string][]string{"status": {"running"}},
		})
		if err != nil {
			runner.LogError(fmt.Errorf("Error listing containers: %s", err))
			continue
		}
		for _, container := range containers {
			// Resolve and cache a friendly name for the container, optionally
			// taken from an environment variable.
			if containerName, exists := input.cacheHostnames[container.ID]; !exists {
				containerName = strings.Replace(container.Names[0], "/", "", -1)
				input.cacheHostnames[container.ID] = containerName
				if input.NameFromEnv != "" {
					if con, err := client.InspectContainer(container.ID); err == nil {
						for _, value := range con.Config.Env {
							parts := strings.SplitN(value, "=", 2)
							if len(parts) == 2 && input.NameFromEnv == parts[0] {
								containerName = parts[1]
								input.cacheHostnames[container.ID] = containerName
								break
							}
						}
					}
				}
			}

			// Take two point-in-time samples so a CPU delta can be computed.
			opts := docker.StatsStaticOptions{ID: container.ID, Stream: false}
			preCPUStats, _ = client.StatsStatic(opts)
			if preCPUStats == nil {
				continue
			}
			previousCPU = preCPUStats.CPUStats.CPUUsage.TotalUsage
			previousSystem = preCPUStats.CPUStats.SystemCPUUsage
			stats, _ = client.StatsStatic(opts)
			if stats == nil {
				continue
			}

			mstats = &dockerStat{}
			mstats.CPUPercent = calculateCPUPercent(previousCPU, previousSystem, stats)
			mstats.MemPercent = calculateMemPercent(stats)
			mstats.MemUsage = stats.MemoryStats.Usage
			mstats.MemLimit = stats.MemoryStats.Limit
			mstats.BlockRead, mstats.BlockWrite = calculateBlockIO(stats)
			for _, networkstat := range stats.Networks {
				mstats.NetworkRx = networkstat.RxBytes
				mstats.NetworkTx = networkstat.TxBytes
			}

			pack = <-packSupply
			pack.Message.SetUuid(uuid.NewRandom())
			pack.Message.SetTimestamp(time.Now().UnixNano()) // Heka timestamps are nanoseconds
			pack.Message.SetType("DockerStats")
			pack.Message.SetHostname(hostname)

			containerName, _ := message.NewField("ContainerName",
				strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1), "")
			pack.Message.AddField(containerName)
			cpuPercent, _ := message.NewField("CPUPercent", float64(mstats.CPUPercent), "")
			pack.Message.AddField(cpuPercent)
			memPercent, _ := message.NewField("MemoryPercent", float64(mstats.MemPercent), "")
			pack.Message.AddField(memPercent)
			memLimit, _ := message.NewField("MemoryLimit", int64(mstats.MemLimit), "")
			pack.Message.AddField(memLimit)
			memUsage, _ := message.NewField("MemoryUsage", int64(mstats.MemUsage), "")
			pack.Message.AddField(memUsage)
			netInput, _ := message.NewField("NetworkInput", int64(mstats.NetworkRx), "")
			pack.Message.AddField(netInput)
			netOutput, _ := message.NewField("NetworkOutput", int64(mstats.NetworkTx), "")
			pack.Message.AddField(netOutput)
			blockInput, _ := message.NewField("BlockInput", int64(mstats.BlockRead), "")
			pack.Message.AddField(blockInput)
			blockOutput, _ := message.NewField("BlockOutput", int64(mstats.BlockWrite), "")
			pack.Message.AddField(blockOutput)

			pack.Message.SetPayload(fmt.Sprintf(
				"container_name %s\ncpu %.2f\nmem_usage %d\nmem_limit %d\nmem %.2f\nnet_input %d\nnet_output %d\nblock_input %d\nblock_output %d",
				strings.Replace(input.cacheHostnames[container.ID], "-", "_", -1),
				mstats.CPUPercent, mstats.MemUsage, mstats.MemLimit, mstats.MemPercent,
				mstats.NetworkRx, mstats.NetworkTx, mstats.BlockRead, mstats.BlockWrite))

			runner.Deliver(pack)
		}
	}
}
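// The calculate* helpers used above aren't shown in this section. A sketch of
// plausible implementations, assuming the formulas Docker's own CLI uses for
// its stats output; treat the bodies as assumptions, not the originals.
func calculateCPUPercent(previousCPU, previousSystem uint64, stats *docker.Stats) float64 {
	cpuDelta := float64(stats.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
	systemDelta := float64(stats.CPUStats.SystemCPUUsage) - float64(previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		// Scale the container's share of system CPU time by the CPU count.
		return (cpuDelta / systemDelta) *
			float64(len(stats.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}

func calculateMemPercent(stats *docker.Stats) float64 {
	if stats.MemoryStats.Limit > 0 {
		return float64(stats.MemoryStats.Usage) / float64(stats.MemoryStats.Limit) * 100.0
	}
	return 0.0
}

func calculateBlockIO(stats *docker.Stats) (read, write uint64) {
	// Sum the per-device read/write byte counters.
	for _, entry := range stats.BlkioStats.IOServiceBytesRecursive {
		switch strings.ToLower(entry.Op) {
		case "read":
			read += entry.Value
		case "write":
			write += entry.Value
		}
	}
	return
}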
func (lsi *LogstreamInput) Run(ir p.InputRunner, h p.PluginHelper, stopChan chan chan bool,
	dRunner p.DecoderRunner) {
	var (
		parser func(ir p.InputRunner, deliver Deliver, stop chan chan bool) error
		err    error
	)

	if lsi.parseFunction == "payload" {
		parser = lsi.payloadParser
	} else if lsi.parseFunction == "messageProto" {
		parser = lsi.messageProtoParser
	}

	// Setup our pack delivery function appropriately for the configuration.
	var deliver func(*p.PipelinePack)
	if dRunner == nil {
		deliver = func(pack *p.PipelinePack) {
			ir.Inject(pack)
		}
	} else {
		inChan := dRunner.InChan()
		deliver = func(pack *p.PipelinePack) {
			inChan <- pack
		}
	}

	// Check for more data at this interval.
	interval, _ := time.ParseDuration("250ms")
	tick := time.Tick(interval)

	ok := true
	for ok {
		// Clear our error.
		err = nil

		// Attempt to read as many records as we can.
		err = parser(ir, deliver, stopChan)

		// Save our position if the stream hasn't done so for us.
		if err != io.EOF {
			lsi.stream.SavePosition()
		}
		lsi.recordCount = 0

		if err != nil && err != io.EOF {
			ir.LogError(err)
		}

		// Did our parser func get stopped?
		if lsi.stopped != nil {
			ok = false
			continue
		}

		// Wait for our next interval, stop if needed.
		select {
		case lsi.stopped = <-stopChan:
			ok = false
		case <-tick:
			continue
		}
	}
	close(lsi.stopped)
	if dRunner != nil {
		h.StopDecoderRunner(dRunner)
	}
}
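// The Deliver type in the parser signature above isn't defined in this
// section. Given the two delivery closures assigned to it, it is presumably
// just a named function type; this one-liner is an assumption:
type Deliver func(pack *p.PipelinePack)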
func (this *SandboxManagerFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	inChan := fr.InChan()

	var ok = true
	var pack *pipeline.PipelinePack
	var delta int64

	this.restoreSandboxes(fr, h, this.workingDirectory)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			delta = time.Now().UnixNano() - pack.Message.GetTimestamp()
			if math.Abs(float64(delta)) >= 5e9 {
				fr.UpdateCursor(pack.QueueCursor)
				pack.Recycle(fmt.Errorf("Discarded control message: %d seconds skew", delta/1e9))
				break
			}
			action, _ := pack.Message.GetFieldValue("action")
			switch action {
			case "load":
				current := int(atomic.LoadInt32(&this.currentFilters))
				if current < this.maxFilters {
					err := this.loadSandbox(fr, h, this.workingDirectory, pack.Message)
					if err != nil {
						p, e := h.PipelinePack(0)
						if e != nil {
							fr.LogError(err)
							fr.LogError(fmt.Errorf("can't send termination message: %s", e.Error()))
							break
						}
						p.Message.SetType("heka.sandbox-terminated")
						p.Message.SetLogger(pipeline.HEKA_DAEMON)
						message.NewStringField(p.Message, "plugin", fr.Name())
						p.Message.SetPayload(err.Error())
						fr.Inject(p)
						fr.LogError(err)
					}
				} else {
					fr.LogError(fmt.Errorf("%s attempted to load more than %d filters",
						fr.Name(), this.maxFilters))
				}
			case "unload":
				fv, _ := pack.Message.GetFieldValue("name")
				if name, ok := fv.(string); ok {
					name = getSandboxName(fr.Name(), name)
					if this.pConfig.RemoveFilterRunner(name) {
						removeAll(this.workingDirectory, fmt.Sprintf("%s.*", name))
					}
				}
			}
			pack.Recycle(nil)
		}
	}
	return
}
func (ni *NsqInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	var (
		pack    *pipeline.PipelinePack
		err     error
		dRunner pipeline.DecoderRunner
		decoder pipeline.Decoder
		ok      bool
		e       error
	)
	// Get the InputRunner's chan to receive empty PipelinePacks.
	packSupply := ir.InChan()

	if ni.conf.Decoder != "" {
		if dRunner, ok = h.DecoderRunner(ni.conf.Decoder); !ok {
			return fmt.Errorf("Decoder not found: %s", ni.conf.Decoder)
		}
		decoder = dRunner.Decoder()
	}

	err = ni.nsqReader.ConnectToLookupd(ni.conf.Address)
	if err != nil {
		ir.LogError(errors.New("ConnectToLookupd failed."))
	}

	header := &message.Header{}
	stopped := false
	for !stopped {
		select {
		case <-ni.stopChan:
			ir.LogMessage("received on stopChan, stopping")
			stopped = true
		default:
			pack = <-packSupply
			m, ok1 := <-ni.handler.logChan
			if !ok1 {
				stopped = true
				break
			}
			if ni.conf.Serialize {
				if dRunner == nil {
					// Serialized messages can't be handled without a decoder;
					// recycle the pack, finish the message, and skip it.
					pack.Recycle()
					ir.LogError(errors.New("Serialized messages require a decoder."))
					m.returnChannel <- &nsq.FinishedMessage{m.msg.Id, 0, true}
					break
				}
				_, msgOk := findMessage(m.msg.Body, header, &(pack.MsgBytes))
				if msgOk {
					dRunner.InChan() <- pack
				} else {
					pack.Recycle()
					ir.LogError(errors.New("Can't find Heka message."))
				}
				header.Reset()
			} else {
				pack.Message.SetType("nsq")
				pack.Message.SetPayload(string(m.msg.Body))
				pack.Message.SetTimestamp(time.Now().UnixNano())
				var packs []*pipeline.PipelinePack
				if decoder == nil {
					packs = []*pipeline.PipelinePack{pack}
				} else {
					packs, e = decoder.Decode(pack)
				}
				if packs != nil {
					for _, p := range packs {
						ir.Inject(p)
					}
				} else {
					if e != nil {
						ir.LogError(fmt.Errorf("Couldn't parse Nsq message: %s", m.msg.Body))
					}
					pack.Recycle()
				}
			}
			m.returnChannel <- &nsq.FinishedMessage{m.msg.Id, 0, true}
		}
	}
	return nil
}
func (this *SandboxFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
	inChan := fr.InChan()
	ticker := fr.Ticker()

	var (
		ok             = true
		terminated     = false
		sample         = true
		blocking       = false
		backpressure   = false
		pack           *pipeline.PipelinePack
		retval         int
		msgLoopCount   uint
		injectionCount uint
		startTime      time.Time
		slowDuration   int64 = int64(this.pConfig.Globals.MaxMsgProcessDuration)
		duration       int64
		capacity       = cap(inChan) - 1
	)

	// We assign to the return value of Run() for errors in the closure so that
	// the plugin runner can determine what caused the SandboxFilter to return.
	this.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		if injectionCount == 0 {
			err = pipeline.TerminatedError("exceeded InjectMessage count")
			return 2
		}
		injectionCount--
		pack := h.PipelinePack(msgLoopCount)
		if pack == nil {
			err = pipeline.TerminatedError(fmt.Sprintf("exceeded MaxMsgLoops = %d",
				this.pConfig.Globals.MaxMsgLoops))
			return 3
		}
		if len(payload_type) == 0 { // heka protobuf message
			hostname := pack.Message.GetHostname()
			err := proto.Unmarshal([]byte(payload), pack.Message)
			if err == nil {
				// do not allow filters to override the following
				pack.Message.SetType("heka.sandbox." + pack.Message.GetType())
				pack.Message.SetLogger(fr.Name())
				pack.Message.SetHostname(hostname)
			} else {
				return 1
			}
		} else {
			pack.Message.SetType("heka.sandbox-output")
			pack.Message.SetLogger(fr.Name())
			pack.Message.SetPayload(payload)
			ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
			pack.Message.AddField(ptype)
			pname, _ := message.NewField("payload_name", payload_name, "")
			pack.Message.AddField(pname)
		}
		if !fr.Inject(pack) {
			return 4
		}
		atomic.AddInt64(&this.injectMessageCount, 1)
		return 0
	})

	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				break
			}
			atomic.AddInt64(&this.processMessageCount, 1)
			injectionCount = this.pConfig.Globals.MaxMsgProcessInject
			msgLoopCount = pack.MsgLoopCount

			if this.manager != nil { // only check for backpressure on dynamic plugins
				// reading a channel length is generally fast ~1ns
				// we need to check the entire chain back to the router
				backpressure = len(inChan) >= capacity ||
					fr.MatchRunner().InChanLen() >= capacity ||
					len(h.PipelineConfig().Router().InChan()) >= capacity
			}

			// performing the timing is expensive ~40ns but if we are
			// backpressured we need a decent sample set before triggering
			// termination
			if sample ||
				(backpressure && this.processMessageSamples < int64(capacity)) ||
				this.sbc.Profile {
				startTime = time.Now()
				sample = true
			}
			retval = this.sb.ProcessMessage(pack)
			if sample {
				duration = time.Since(startTime).Nanoseconds()
				this.reportLock.Lock()
				this.processMessageDuration += duration
				this.processMessageSamples++
				if this.sbc.Profile {
					this.profileMessageDuration = this.processMessageDuration
					this.profileMessageSamples = this.processMessageSamples
					if this.profileMessageSamples == int64(capacity)*10 {
						this.sbc.Profile = false
						// reset the normal sampling so it isn't heavily skewed
						// by the profile values, i.e. process messages fast
						// during profiling and then switch to malicious code
						this.processMessageDuration = this.profileMessageDuration /
							this.profileMessageSamples
						this.processMessageSamples = 1
					}
				}
				this.reportLock.Unlock()
			}
			if retval <= 0 {
				if backpressure && this.processMessageSamples >= int64(capacity) {
					if this.processMessageDuration/this.processMessageSamples > slowDuration ||
						fr.MatchRunner().GetAvgDuration() > slowDuration/5 {
						terminated = true
						blocking = true
					}
				}
				if retval < 0 {
					atomic.AddInt64(&this.processMessageFailures, 1)
					em := this.sb.LastError()
					if len(em) > 0 {
						fr.LogError(errors.New(em))
					}
				}
				sample = 0 == rand.Intn(this.sampleDenominator)
			} else {
				terminated = true
			}
			pack.Recycle()

		case t := <-ticker:
			injectionCount = this.pConfig.Globals.MaxMsgTimerInject
			startTime = time.Now()
			if retval = this.sb.TimerEvent(t.UnixNano()); retval != 0 {
				terminated = true
			}
			duration = time.Since(startTime).Nanoseconds()
			this.reportLock.Lock()
			this.timerEventDuration += duration
			this.timerEventSamples++
			this.reportLock.Unlock()
		}

		if terminated {
			pack := h.PipelinePack(0)
			pack.Message.SetType("heka.sandbox-terminated")
			pack.Message.SetLogger(pipeline.HEKA_DAEMON)
			message.NewStringField(pack.Message, "plugin", fr.Name())
			if blocking {
				pack.Message.SetPayload("sandbox is running slowly and blocking the router")
				// no lock on the ProcessMessage variables here because there
				// are no active writers
				message.NewInt64Field(pack.Message, "ProcessMessageCount",
					this.processMessageCount, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageFailures",
					this.processMessageFailures, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageSamples",
					this.processMessageSamples, "count")
				message.NewInt64Field(pack.Message, "ProcessMessageAvgDuration",
					this.processMessageDuration/this.processMessageSamples, "ns")
				message.NewInt64Field(pack.Message, "MatchAvgDuration",
					fr.MatchRunner().GetAvgDuration(), "ns")
				message.NewIntField(pack.Message, "FilterChanLength", len(inChan), "count")
				message.NewIntField(pack.Message, "MatchChanLength",
					fr.MatchRunner().InChanLen(), "count")
				message.NewIntField(pack.Message, "RouterChanLength",
					len(h.PipelineConfig().Router().InChan()), "count")
			} else {
				pack.Message.SetPayload(this.sb.LastError())
			}
			fr.Inject(pack)
			break
		}
	}

	if this.manager != nil {
		this.manager.PluginExited()
	}

	this.reportLock.Lock()
	var destroyErr error
	if this.sbc.PreserveData {
		destroyErr = this.sb.Destroy(this.preservationFile)
	} else {
		destroyErr = this.sb.Destroy("")
	}
	if destroyErr != nil {
		err = destroyErr
	}
	this.sb = nil
	this.reportLock.Unlock()
	return
}
func (ri *RedisMQInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
	// Get the InputRunner's chan to receive empty PipelinePacks.
	packs := ir.InChan()

	var decoding chan<- *pipeline.PipelinePack
	if ri.conf.Decoder != "" {
		// Fetch specified decoder.
		decoder, ok := h.DecoderRunner(ri.conf.Decoder)
		if !ok {
			return fmt.Errorf("Could not find decoder: %s", ri.conf.Decoder)
		}
		// Get the decoder's receiving chan.
		decoding = decoder.InChan()
	}

	var pack *pipeline.PipelinePack
	var p *redismq.Package
	var count int
	var b []byte
	var err error

	for {
		p, err = ri.rdconsumer.Get()
		if err != nil {
			ir.LogError(err)
			continue
		}
		err = p.Ack()
		if err != nil {
			ir.LogError(err)
		}
		b = []byte(p.Payload)
		// Grab an empty PipelinePack from the InputRunner.
		pack = <-packs
		// Trim the excess empty bytes.
		count = len(b)
		pack.MsgBytes = pack.MsgBytes[:count]
		// Copy the payload bytes into the pack's buffer.
		copy(pack.MsgBytes, b)
		if decoding != nil {
			// Send pack onto decoder.
			decoding <- pack
		} else {
			// Send pack into the Heka pipeline.
			ir.Inject(pack)
		}
	}

	/* Batch variant kept from the original source (uses
	   p []*redismq.Package and a stat-interval ticker):

	checkStat := time.Tick(ri.statInterval)
	ok := true
	for ok {
		select {
		case _, ok = <-ri.stopChan:
			break
		case <-checkStat:
			p, err = ri.rdconsumer.MultiGet(500)
			if err != nil {
				ir.LogError(err)
				continue
			}
			err = p[len(p)-1].MultiAck()
			if err != nil {
				ir.LogError(err)
			}
			for _, v := range p {
				b = []byte(v.Payload)
				// Grab an empty PipelinePack from the InputRunner.
				pack = <-packs
				// Trim the excess empty bytes.
				count = len(b)
				pack.MsgBytes = pack.MsgBytes[:count]
				// Copy the payload bytes into the pack's buffer.
				copy(pack.MsgBytes, b)
				if decoding != nil {
					decoding <- pack
				} else {
					ir.Inject(pack)
				}
			}
		}
	}
	*/
}
// Main Logstreamer Input runner. This runner kicks off all the other
// logstream inputs, and handles rescanning for updates to the filesystem that
// might affect file visibility for the logstream inputs.
func (li *LogstreamerInput) Run(ir p.InputRunner, h p.PluginHelper) (err error) {
	var (
		ok         bool
		dRunner    p.DecoderRunner
		errs       *ls.MultipleError
		newstreams []string
	)

	// Setup the decoder runner that will be used.
	if li.decoderName != "" {
		if dRunner, ok = h.DecoderRunner(li.decoderName,
			fmt.Sprintf("%s-%s", li.pluginName, li.decoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", li.decoderName)
		}
	}

	// Kick off all the current logstreams we know of.
	for _, logstream := range li.plugins {
		stop := make(chan chan bool, 1)
		go logstream.Run(ir, h, stop, dRunner)
		li.stopLogstreamChans = append(li.stopLogstreamChans, stop)
	}

	ok = true
	rescan := time.Tick(li.rescanInterval)
	// Our main rescan loop that handles shutting down.
	for ok {
		select {
		case <-li.stopChan:
			ok = false
			returnChans := make([]chan bool, len(li.stopLogstreamChans))
			// Send out all the stop signals.
			for i, ch := range li.stopLogstreamChans {
				ret := make(chan bool)
				ch <- ret
				returnChans[i] = ret
			}
			// Wait for all the stops.
			for _, ch := range returnChans {
				<-ch
			}
			// Close our own stopChan to indicate we shut down.
			close(li.stopChan)
		case <-rescan:
			li.logstreamSetLock.Lock()
			newstreams, errs = li.logstreamSet.ScanForLogstreams()
			if errs.IsError() {
				ir.LogError(errs)
			}
			for _, name := range newstreams {
				stream, ok := li.logstreamSet.GetLogstream(name)
				if !ok {
					ir.LogError(fmt.Errorf("Found new logstream: %s, but couldn't fetch it.", name))
					continue
				}
				// Setup a new logstream input for this logstream and start it
				// running.
				stParser, parserFunc, _ := CreateParser(li.parser, li.delimiter,
					li.delimiterLocation, li.decoderName)
				lsi := NewLogstreamInput(stream, stParser, parserFunc, name, li.hostName)
				li.plugins[name] = lsi
				stop := make(chan chan bool, 1)
				go lsi.Run(ir, h, stop, dRunner)
				li.stopLogstreamChans = append(li.stopLogstreamChans, stop)
			}
			li.logstreamSetLock.Unlock()
		}
	}
	err = nil
	return
}