// LoadFromConfigFile loads a TOML configuration file and stores the
// result in the value pointed to by config. The maps in the config
// will be initialized as needed.
//
// The PipelineConfig should already be initialized via its Init
// function before it is passed in.
func (self *PipelineConfig) LoadFromConfigFile(filename string) (err error) {
    var configFile ConfigFile
    if _, err = toml.DecodeFile(filename, &configFile); err != nil {
        return fmt.Errorf("Error decoding config file: %s", err)
    }

    // Load all the plugins.
    var errcnt uint
    for name, conf := range configFile {
        if name == "hekad" {
            continue
        }
        log.Printf("Loading: [%s]\n", name)
        errcnt += self.loadSection(name, conf)
    }

    // Add a ProtobufDecoder if none was configured.
    var configDefault ConfigFile
    toml.Decode(defaultDecoderTOML, &configDefault)
    dWrappers := self.DecoderWrappers

    if _, ok := dWrappers["ProtobufDecoder"]; !ok {
        log.Println("Loading: [ProtobufDecoder]")
        errcnt += self.loadSection("ProtobufDecoder", configDefault["ProtobufDecoder"])
    }

    if errcnt != 0 {
        return fmt.Errorf("%d errors loading plugins", errcnt)
    }
    return
}
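// The snippet above (and most of those below) relies on a two-phase decode
// pattern from github.com/BurntSushi/toml: first decode the whole document
// into a map of raw toml.Primitive sections, then decode each section into
// its typed config struct once the plugin type is known. The following is a
// minimal, self-contained sketch of that pattern; the TOML content and the
// SampleConfig struct are illustrative assumptions, not Heka code.
package main

import (
    "fmt"
    "log"

    "github.com/BurntSushi/toml"
)

// SampleConfig is a hypothetical per-section config struct.
type SampleConfig struct {
    Type       string `toml:"type"`
    MatchRegex string `toml:"match_regex"`
}

func main() {
    source := `
[StartsWithM]
type = "PayloadRegexDecoder"
match_regex = '^m.*'
`
    // Phase one: decode into raw sections, deferring typed decoding.
    var sections map[string]toml.Primitive
    if _, err := toml.Decode(source, &sections); err != nil {
        log.Fatalf("Error decoding config: %s", err)
    }

    // Phase two: decode each section into its typed struct.
    for name, primitive := range sections {
        var conf SampleConfig
        if err := toml.PrimitiveDecode(primitive, &conf); err != nil {
            log.Fatalf("Error decoding section [%s]: %s", name, err)
        }
        fmt.Printf("[%s] type=%s regex=%s\n", name, conf.Type, conf.MatchRegex)
    }
}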
func BenchmarkMultiDecodeProtobuf(b *testing.B) {
    b.StopTimer()
    pConfig := NewPipelineConfig(nil) // initializes Globals
    msg := pipeline_ts.GetTestMessage()
    msg.SetPayload("This is a test")
    pack := NewPipelinePack(pConfig.InputRecycleChan())
    pack.MsgBytes, _ = proto.Marshal(msg)
    decoder := new(MultiDecoder)
    decoder.SetPipelineConfig(pConfig)
    conf := decoder.ConfigStruct().(*MultiDecoderConfig)
    RegisterPlugin("ProtobufDecoder", func() interface{} {
        return &ProtobufDecoder{}
    })
    defer delete(AvailablePlugins, "ProtobufDecoder")
    var section PluginConfig
    _, err := toml.Decode("", &section)
    if err != nil {
        b.Fatalf("Error decoding empty TOML: %s", err.Error())
    }
    maker, err := NewPluginMaker("ProtobufDecoder", pConfig, section)
    if err != nil {
        b.Fatalf("Error creating plugin maker: %s", err.Error())
    }
    pConfig.DecoderMakers["ProtobufDecoder"] = maker
    conf.CascadeStrategy = "first-wins"
    conf.Subs = []string{"sub"}
    decoder.Init(conf)
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        decoder.Decode(pack)
    }
}
func TestConfigFile(t *testing.T) {
    var configFile ConfigFile
    if _, err := toml.Decode(configSource, &configFile); err != nil {
        t.Fatalf("Error decoding config: %s", err)
    }
    app, err := LoadApplication(configFile, env, 0)
    if err != nil {
        t.Fatalf("Error initializing app: %s", err)
    }
    defer app.Stop()

    hostname := "push.services.mozilla.com"
    if app.hostname != hostname {
        t.Errorf("Mismatched hostname: got %#v; want %#v", app.hostname, hostname)
    }
    origin, _ := url.ParseRequestURI("https://push.services.mozilla.com")
    origins := []*url.URL{origin}
    if !reflect.DeepEqual(origins, app.origins) {
        t.Errorf("Mismatched origins: got %#v; want %#v", app.origins, origins)
    }

    logger := app.Logger()
    if stdOutLogger, ok := logger.Logger.(*StdOutLogger); ok {
        if stdOutLogger.filter != 0 {
            t.Errorf("Mismatched log levels: got %#v; want 0", stdOutLogger.filter)
        }
        emitter := stdOutLogger.LogEmitter
        if _, ok := emitter.(*TextEmitter); !ok {
            t.Errorf("Log emitter type assertion failed: %#v", emitter)
        }
    } else {
        t.Errorf("Logger type assertion failed: %#v", logger)
    }

    store := app.Store()
    if noStore, ok := store.(*NoStore); ok {
        defaultChans := noStore.ConfigStruct().(*NoStoreConfig).MaxChannels
        if noStore.maxChannels != defaultChans {
            t.Errorf("Wrong default channel count for storage: got %d; want %d",
                noStore.maxChannels, defaultChans)
        }
    } else {
        t.Errorf("Storage type assertion failed: %#v", store)
    }

    pinger := app.PropPinger()
    if udpPing, ok := pinger.(*UDPPing); ok {
        if udpPing.app != app {
            t.Errorf("Wrong app instance for pinger: got %#v; want %#v", udpPing.app, app)
        }
        url := "http://push.services.mozilla.com/ping"
        if udpPing.config.URL != url {
            t.Errorf("Wrong pinger URL: got %#v; want %#v", udpPing.config.URL, url)
        }
    } else {
        t.Errorf("Pinger type assertion failed: %#v", pinger)
    }
}
// PreloadFromConfigFile loads all plugin configuration from a TOML
// configuration file, generates a PluginMaker for each loaded section, and
// stores the created PluginMakers in the makersByCategory map. The
// PipelineConfig should already be initialized via the Init function before
// this method is called. PreloadFromConfigFile is not reentrant, so it
// should only be called serially, not from multiple concurrent goroutines.
func (self *PipelineConfig) PreloadFromConfigFile(filename string) error {
    var (
        configFile ConfigFile
        err        error
    )
    contents, err := ReplaceEnvsFile(filename)
    if err != nil {
        return err
    }
    if _, err = toml.Decode(contents, &configFile); err != nil {
        return fmt.Errorf("Error decoding config file: %s", err)
    }
    if self.makersByCategory == nil {
        self.makersByCategory = make(map[string][]PluginMaker)
    }
    if self.defaultConfigs == nil {
        self.defaultConfigs = makeDefaultConfigs()
    }

    // Load all the plugin makers and file them by category.
    for name, conf := range configFile {
        if name == HEKA_DAEMON {
            continue
        }
        if _, ok := self.defaultConfigs[name]; ok {
            self.defaultConfigs[name] = true
        }
        LogInfo.Printf("Pre-loading: [%s]\n", name)
        maker, err := NewPluginMaker(name, self, conf)
        if err != nil {
            self.log(err.Error())
            self.errcnt++
            continue
        }
        if maker.Type() == "MultiDecoder" {
            // Special case MultiDecoders so we can make sure they get
            // registered *after* all possible subdecoders.
            self.makersByCategory["MultiDecoder"] = append(
                self.makersByCategory["MultiDecoder"], maker)
        } else {
            category := maker.Category()
            self.makersByCategory[category] = append(
                self.makersByCategory[category], maker)
        }
    }
    return nil
}
// Parses a Heka message and extracts the information necessary to start a
// new SandboxFilter.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
    h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {

    fv, _ := msg.GetFieldValue("config")
    if config, ok := fv.(string); ok {
        var configFile pipeline.ConfigFile
        if _, err = toml.Decode(config, &configFile); err != nil {
            return fmt.Errorf("loadSandbox failed: %s", err)
        }
        for name, conf := range configFile {
            name = getSandboxName(fr.Name(), name)
            if _, ok := h.Filter(name); ok {
                // TODO: support reload.
                return fmt.Errorf("loadSandbox failed: %s is already running", name)
            }
            fr.LogMessage(fmt.Sprintf("Loading: %s", name))
            confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
            err = ioutil.WriteFile(confFile, []byte(config), 0600)
            if err != nil {
                return
            }
            var sbc SandboxConfig
            if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
                return fmt.Errorf("loadSandbox failed: %s", err)
            }
            scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
            err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            // Check/clear the old state preservation file. This avoids issues
            // with changes to the data model since the last load and prevents
            // holes in the graph from looking like anomalies.
            os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))
            var runner pipeline.FilterRunner
            runner, err = this.createRunner(dir, name, conf)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            err = h.PipelineConfig().AddFilterRunner(runner)
            if err == nil {
                this.currentFilters++
            }
            break // Only interested in the first item.
        }
    }
    return
}
// Parses a Heka message and extracts the information necessary to start a
// new SandboxFilter.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
    h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {

    fv, _ := msg.GetFieldValue("config")
    if config, ok := fv.(string); ok {
        var configFile pipeline.ConfigFile
        if _, err = toml.Decode(config, &configFile); err != nil {
            return fmt.Errorf("loadSandbox failed: %s", err)
        }
        for name, conf := range configFile {
            name = getSandboxName(fr.Name(), name)
            if _, ok := h.Filter(name); ok {
                // TODO: support reload.
                return fmt.Errorf("loadSandbox failed: %s is already running", name)
            }
            fr.LogMessage(fmt.Sprintf("Loading: %s", name))
            confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
            err = ioutil.WriteFile(confFile, []byte(config), 0600)
            if err != nil {
                return
            }
            var sbc SandboxConfig
            // Default, will get overwritten if necessary.
            sbc.ScriptType = "lua"
            if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
                return fmt.Errorf("loadSandbox failed: %s", err)
            }
            scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
            err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            var runner pipeline.FilterRunner
            runner, err = this.createRunner(dir, name, conf)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            err = this.pConfig.AddFilterRunner(runner)
            if err == nil {
                atomic.AddInt32(&this.currentFilters, 1)
            }
            break // Only interested in the first item.
        }
    }
    return
}
// LoadFromConfigFile loads a TOML configuration file and stores the
// result in the value pointed to by config. The maps in the config
// will be initialized as needed.
//
// The PipelineConfig should already be initialized via its Init
// function before it is passed in.
func (self *PipelineConfig) LoadFromConfigFile(filename string) (err error) {
    var configFile ConfigFile
    if _, err = toml.DecodeFile(filename, &configFile); err != nil {
        return fmt.Errorf("Error decoding config file: %s", err)
    }

    // Load all the plugins.
    var errcnt uint
    for name, conf := range configFile {
        log.Println("Loading: ", name)
        errcnt += self.loadSection(name, conf)
    }

    // Add JSON/PROTOCOL_BUFFER decoders if none were configured.
    var configDefault ConfigFile
    toml.Decode(defaultDecoderTOML, &configDefault)
    dWrappers := self.DecoderWrappers

    if _, ok := dWrappers["JsonDecoder"]; !ok {
        log.Println("Loading: JsonDecoder")
        errcnt += self.loadSection("JsonDecoder", configDefault["JsonDecoder"])
    }
    if _, ok := dWrappers["ProtobufDecoder"]; !ok {
        log.Println("Loading: ProtobufDecoder")
        errcnt += self.loadSection("ProtobufDecoder", configDefault["ProtobufDecoder"])
    }

    // Create / prep the DecoderSet pool.
    var dRunner DecoderRunner
    for i := 0; i < Globals().DecoderPoolSize; i++ {
        if self.DecoderSets[i], err = newDecoderSet(dWrappers); err != nil {
            log.Println(err)
            errcnt += 1
        }
        for _, dRunner = range self.DecoderSets[i].AllByName() {
            dRunner.Start(self, &self.decodersWg)
        }
        self.decodersChan <- self.DecoderSets[i]
    }

    if errcnt != 0 {
        return fmt.Errorf("%d errors loading plugins", errcnt)
    }
    return
}
func (this *SandboxManagerFilter) loadSandbox(fr FilterRunner, h PluginHelper,
    dir string, msg *message.Message) (err error) {

    fv, _ := msg.GetFieldValue("config")
    if config, ok := fv.(string); ok {
        var configFile ConfigFile
        if _, err = toml.Decode(config, &configFile); err != nil {
            return fmt.Errorf("loadSandbox failed: %s", err)
        }
        for name, conf := range configFile {
            name = getSandboxName(fr.Name(), name)
            if _, ok := h.Filter(name); ok {
                // TODO: support reload.
                return fmt.Errorf("loadSandbox failed: %s is already running", name)
            }
            fr.LogMessage(fmt.Sprintf("Loading: %s", name))
            confFile := path.Join(dir, fmt.Sprintf("%s.toml", name))
            err = ioutil.WriteFile(confFile, []byte(config), 0600)
            if err != nil {
                return
            }
            var sbfc SandboxFilterConfig
            if err = toml.PrimitiveDecode(conf, &sbfc); err != nil {
                return fmt.Errorf("loadSandbox failed: %s", err)
            }
            scriptFile := path.Join(dir, fmt.Sprintf("%s.%s", name, sbfc.Sbc.ScriptType))
            err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            var runner FilterRunner
            runner, err = createRunner(dir, name, conf)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            err = h.PipelineConfig().AddFilterRunner(runner)
            if err == nil {
                this.currentFilters++
            }
            break // Only interested in the first item.
        }
    }
    return
}
func (self *PipelineConfig) RegisterDefault(name string) error {
    var config ConfigFile
    confStr := fmt.Sprintf("[%s]", name)
    toml.Decode(confStr, &config)
    LogInfo.Printf("Pre-loading: %s\n", confStr)
    maker, err := NewPluginMaker(name, self, config[name])
    if err != nil {
        // This really shouldn't happen.
        return err
    }
    LogInfo.Printf("Loading: [%s]\n", maker.Name())
    if _, err = maker.PrepConfig(); err != nil {
        return err
    }
    category := maker.Category()
    self.makersLock.Lock()
    self.makers[category][name] = maker
    self.makersLock.Unlock()
    // If we ever add a default input, filter, or output we'd need to call
    // maker.MakeRunner() here and store the runner on the PipelineConfig.
    return nil
}
func LoadHekadConfig(configPath string) (config *HekadConfig, err error) {
    idle, _ := time.ParseDuration("2m")
    config = &HekadConfig{
        Maxprocs:              1,
        PoolSize:              100,
        ChanSize:              50,
        CpuProfName:           "",
        MemProfName:           "",
        MaxMsgLoops:           4,
        MaxMsgProcessInject:   1,
        MaxMsgProcessDuration: 100000,
        MaxMsgTimerInject:     10,
        MaxPackIdle:           idle,
        BaseDir:               filepath.FromSlash("/var/cache/hekad"),
        ShareDir:              filepath.FromSlash("/usr/share/heka"),
        SampleDenominator:     1000,
        PidFile:               "",
    }

    var configFile map[string]toml.Primitive
    p, err := os.Open(configPath)
    if err != nil {
        return nil, fmt.Errorf("Error opening config file: %s", err)
    }
    fi, err := p.Stat()
    if err != nil {
        return nil, fmt.Errorf("Error fetching config file info: %s", err)
    }

    if fi.IsDir() {
        files, _ := ioutil.ReadDir(configPath)
        for _, f := range files {
            fName := f.Name()
            if !strings.HasSuffix(fName, ".toml") {
                // Skip non *.toml files in a config dir.
                continue
            }
            fPath := filepath.Join(configPath, fName)
            contents, err := pipeline.ReplaceEnvsFile(fPath)
            if err != nil {
                return nil, err
            }
            if _, err = toml.Decode(contents, &configFile); err != nil {
                return nil, fmt.Errorf("Error decoding config file: %s", err)
            }
        }
    } else {
        contents, err := pipeline.ReplaceEnvsFile(configPath)
        if err != nil {
            return nil, err
        }
        if _, err = toml.Decode(contents, &configFile); err != nil {
            return nil, fmt.Errorf("Error decoding config file: %s", err)
        }
    }

    empty_ignore := map[string]interface{}{}
    parsed_config, ok := configFile[pipeline.HEKA_DAEMON]
    if ok {
        if err = toml.PrimitiveDecodeStrict(parsed_config, config, empty_ignore); err != nil {
            err = fmt.Errorf("Can't unmarshal config: %s", err)
        }
    }
    return
}
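// For reference, the kind of [hekad] section LoadHekadConfig consumes looks
// roughly like the sketch below. The TOML key names are assumptions inferred
// from the HekadConfig defaults above, and note that PrimitiveDecodeStrict
// comes from Heka's fork of the toml package; upstream
// github.com/BurntSushi/toml only provides PrimitiveDecode, which is what
// this self-contained sketch uses.
package main

import (
    "fmt"
    "log"

    "github.com/BurntSushi/toml"
)

// daemonConfig is a hypothetical stand-in for HekadConfig.
type daemonConfig struct {
    Maxprocs int    `toml:"maxprocs"`
    BaseDir  string `toml:"base_dir"`
}

func main() {
    source := `
[hekad]
maxprocs = 4
base_dir = "/var/cache/hekad"

[SomeInput]
type = "UdpInput"
`
    var sections map[string]toml.Primitive
    if _, err := toml.Decode(source, &sections); err != nil {
        log.Fatalf("Error decoding config file: %s", err)
    }
    // Only the [hekad] section configures the daemon itself; plugin
    // sections like [SomeInput] are handled by the plugin loader.
    conf := daemonConfig{Maxprocs: 1} // defaults, overwritten by the decode
    if prim, ok := sections["hekad"]; ok {
        if err := toml.PrimitiveDecode(prim, &conf); err != nil {
            log.Fatalf("Can't unmarshal config: %s", err)
        }
    }
    fmt.Printf("maxprocs=%d base_dir=%s\n", conf.Maxprocs, conf.BaseDir)
}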
func MultiDecoderSpec(c gospec.Context) {
    t := &pipeline_ts.SimpleT{}
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    pConfig := NewPipelineConfig(nil) // initializes Globals()

    c.Specify("A MultiDecoder", func() {
        subsTOML := `[StartsWithM]
        type = "PayloadRegexDecoder"
        match_regex = '^(?P<TheData>m.*)'
        log_errors = true

        [StartsWithM.message_fields]
        StartsWithM = "%TheData%"

        [StartsWithS]
        type = "PayloadRegexDecoder"
        match_regex = '^(?P<TheData>s.*)'
        log_errors = true

        [StartsWithS.message_fields]
        StartsWithS = "%TheData%"

        [StartsWithM2]
        type = "PayloadRegexDecoder"
        match_regex = '^(?P<TheData>m.*)'
        log_errors = true

        [StartsWithM2.message_fields]
        StartsWithM2 = "%TheData%"
        `
        RegisterPlugin("PayloadRegexDecoder", func() interface{} {
            return &PayloadRegexDecoder{}
        })
        defer delete(AvailablePlugins, "PayloadRegexDecoder")

        var configFile ConfigFile
        _, err := toml.Decode(subsTOML, &configFile)
        c.Assume(err, gs.IsNil)

        decoder := new(MultiDecoder)
        decoder.SetName("MyMultiDecoder")
        decoder.SetPipelineConfig(pConfig)
        conf := decoder.ConfigStruct().(*MultiDecoderConfig)

        supply := make(chan *PipelinePack, 1)
        pack := NewPipelinePack(supply)

        mSection, ok := configFile["StartsWithM"]
        c.Assume(ok, gs.IsTrue)
        mMaker, err := NewPluginMaker("StartsWithM", pConfig, mSection)
        c.Assume(err, gs.IsNil)
        pConfig.DecoderMakers["StartsWithM"] = mMaker

        conf.Subs = []string{"StartsWithM"}
        errMsg := "All subdecoders failed."

        dRunner := pipelinemock.NewMockDecoderRunner(ctrl)
        // An error will be spit out b/c there's no real *dRunner in there;
        // doesn't impact the tests.
        dRunner.EXPECT().LogError(gomock.Any())

        c.Specify("decodes simple messages", func() {
            err := decoder.Init(conf)
            c.Assume(err, gs.IsNil)
            decoder.SetDecoderRunner(dRunner)
            regex_data := "matching text"
            pack.Message.SetPayload(regex_data)
            _, err = decoder.Decode(pack)
            c.Assume(err, gs.IsNil)
            value, ok := pack.Message.GetFieldValue("StartsWithM")
            c.Assume(ok, gs.IsTrue)
            c.Expect(value, gs.Equals, regex_data)
        })

        c.Specify("returns an error if all decoders fail", func() {
            err := decoder.Init(conf)
            c.Assume(err, gs.IsNil)
            decoder.SetDecoderRunner(dRunner)
            regex_data := "non-matching text"
            pack.Message.SetPayload(regex_data)
            packs, err := decoder.Decode(pack)
            c.Expect(len(packs), gs.Equals, 0)
            c.Expect(err.Error(), gs.Equals, errMsg)
        })

        c.Specify("logs subdecoder failures when configured to do so", func() {
            conf.LogSubErrors = true
            err := decoder.Init(conf)
            c.Assume(err, gs.IsNil)
            decoder.SetDecoderRunner(dRunner)
            regex_data := "non-matching text"
            pack.Message.SetPayload(regex_data)
            // Expect that we log an error for the undecoded message.
            dRunner.EXPECT().LogError(fmt.Errorf(
                "Subdecoder 'StartsWithM' decode error: No match: %s",
                regex_data)).AnyTimes()
            packs, err := decoder.Decode(pack)
            c.Expect(len(packs), gs.Equals, 0)
            c.Expect(err.Error(), gs.Equals, errMsg)
        })

        c.Specify("sets subdecoder runner correctly", func() {
            err := decoder.Init(conf)
            c.Assume(err, gs.IsNil)
            // Call LogError to appease the angry gomock gods.
            dRunner.LogError(errors.New("foo"))
            // Now create a real *dRunner, pass it in, make sure a wrapper
            // gets handed to the subdecoder.
            dr := NewDecoderRunner(decoder.Name, decoder, 10)
            decoder.SetDecoderRunner(dr)
            sub := decoder.Decoders[0]
            subRunner := sub.(*PayloadRegexDecoder).dRunner
            c.Expect(subRunner.Name(), gs.Equals,
                fmt.Sprintf("%s-StartsWithM", decoder.Name))
            c.Expect(subRunner.Decoder(), gs.Equals, sub)
        })

        c.Specify("with multiple registered decoders", func() {
            sSection, ok := configFile["StartsWithS"]
            c.Assume(ok, gs.IsTrue)
            sMaker, err := NewPluginMaker("StartsWithS", pConfig, sSection)
            c.Assume(err, gs.IsNil)
            pConfig.DecoderMakers["StartsWithS"] = sMaker

            m2Section, ok := configFile["StartsWithM2"]
            c.Assume(ok, gs.IsTrue)
            m2Maker, err := NewPluginMaker("StartsWithM2", pConfig, m2Section)
            c.Assume(err, gs.IsNil)
            pConfig.DecoderMakers["StartsWithM2"] = m2Maker

            conf.Subs = append(conf.Subs, "StartsWithS", "StartsWithM2")
            // Two more subdecoders means two more LogError calls.
            dRunner.EXPECT().LogError(gomock.Any()).Times(2)

            c.Specify("defaults to `first-wins` cascading", func() {
                err := decoder.Init(conf)
                c.Assume(err, gs.IsNil)
                decoder.SetDecoderRunner(dRunner)

                c.Specify("on a first match condition", func() {
                    pack.Message.SetPayload("match first")
                    _, err = decoder.Decode(pack)
                    c.Expect(err, gs.IsNil)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsTrue)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsFalse)
                })

                c.Specify("and a second match condition", func() {
                    pack.Message.SetPayload("second match")
                    _, err = decoder.Decode(pack)
                    c.Expect(err, gs.IsNil)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsTrue)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsFalse)
                })

                c.Specify("returning an error if they all fail", func() {
                    pack.Message.SetPayload("won't match")
                    packs, err := decoder.Decode(pack)
                    c.Expect(len(packs), gs.Equals, 0)
                    c.Expect(err.Error(), gs.Equals, errMsg)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsFalse)
                })
            })

            c.Specify("and using `all` cascading", func() {
                conf.CascadeStrategy = "all"
                err := decoder.Init(conf)
                c.Assume(err, gs.IsNil)
                decoder.SetDecoderRunner(dRunner)

                c.Specify("matches multiples when appropriate", func() {
                    pack.Message.SetPayload("matches twice")
                    _, err = decoder.Decode(pack)
                    c.Expect(err, gs.IsNil)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsTrue)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsTrue)
                })

                c.Specify("matches singles when appropriate", func() {
                    pack.Message.SetPayload("second match")
                    _, err = decoder.Decode(pack)
                    c.Expect(err, gs.IsNil)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsTrue)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsFalse)
                })

                c.Specify("returns an error if they all fail", func() {
                    pack.Message.SetPayload("won't match")
                    packs, err := decoder.Decode(pack)
                    c.Expect(len(packs), gs.Equals, 0)
                    c.Expect(err.Error(), gs.Equals, errMsg)
                    _, ok = pack.Message.GetFieldValue("StartsWithM")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithS")
                    c.Expect(ok, gs.IsFalse)
                    _, ok = pack.Message.GetFieldValue("StartsWithM2")
                    c.Expect(ok, gs.IsFalse)
                })
            })
        })
    })

    c.Specify("A MultiDecoder w/ MultiOutput", func() {
        subsTOML := `[sub0]
        type = "MultiOutputDecoder"

        [sub1]
        type = "MultiOutputDecoder"

        [sub2]
        type = "MultiOutputDecoder"
        `
        RegisterPlugin("MultiOutputDecoder", func() interface{} {
            return &MultiOutputDecoder{}
        })
        defer delete(AvailablePlugins, "MultiOutputDecoder")

        var configFile ConfigFile
        _, err := toml.Decode(subsTOML, &configFile)
        c.Assume(err, gs.IsNil)

        decoder := new(MultiDecoder)
        decoder.SetName("MyMultiDecoder")
        decoder.SetPipelineConfig(pConfig)
        conf := decoder.ConfigStruct().(*MultiDecoderConfig)
        conf.CascadeStrategy = "all"

        supply := make(chan *PipelinePack, 10)
        pack := NewPipelinePack(supply)

        sub0Section, ok := configFile["sub0"]
        c.Assume(ok, gs.IsTrue)
        sub0Maker, err := NewPluginMaker("sub0", pConfig, sub0Section)
        c.Assume(err, gs.IsNil)
        pConfig.DecoderMakers["sub0"] = sub0Maker

        sub1Section, ok := configFile["sub1"]
        c.Assume(ok, gs.IsTrue)
        sub1Maker, err := NewPluginMaker("sub1", pConfig, sub1Section)
        c.Assume(err, gs.IsNil)
        pConfig.DecoderMakers["sub1"] = sub1Maker

        sub2Section, ok := configFile["sub2"]
        c.Assume(ok, gs.IsTrue)
        sub2Maker, err := NewPluginMaker("sub2", pConfig, sub2Section)
        c.Assume(err, gs.IsNil)
        pConfig.DecoderMakers["sub2"] = sub2Maker

        conf.Subs = []string{"sub0", "sub1", "sub2"}
        dRunner := pipelinemock.NewMockDecoderRunner(ctrl)

        c.Specify("always tries to decode all packs", func() {
            err := decoder.Init(conf)
            c.Assume(err, gs.IsNil)
            decoder.SetDecoderRunner(dRunner)
            packs, err := decoder.Decode(pack)
            c.Expect(err, gs.IsNil)
            c.Expect(len(packs), gs.Equals, 15)
        })
    })
}
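// For context, a MultiDecoder and its subdecoders would be wired together in
// a hekad TOML file roughly as sketched below. The key names (subs,
// cascade_strategy, log_sub_errors) mirror the MultiDecoderConfig fields
// exercised in the specs above, but treat the exact TOML spelling as an
// assumption rather than authoritative Heka documentation.
var exampleMultiDecoderTOML = `
[MyMultiDecoder]
type = "MultiDecoder"
subs = ["StartsWithM", "StartsWithS"]
cascade_strategy = "first-wins"
log_sub_errors = true

[StartsWithM]
type = "PayloadRegexDecoder"
match_regex = '^(?P<TheData>m.*)'

[StartsWithS]
type = "PayloadRegexDecoder"
match_regex = '^(?P<TheData>s.*)'
`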
func LoadConfig(configPath *string) *GlobalConfig {
    f, err := os.Open(*configPath)
    if err != nil {
        log.Fatalln("open config file or dir failed: ", *configPath)
    }
    defer f.Close()

    fs, err := os.Stat(*configPath)
    if err != nil {
        log.Fatalln("get stat of file or dir failed: ", *configPath)
    }
    if fs.IsDir() {
        log.Fatalln("config file must be a file: ", *configPath)
    }
    if !strings.HasSuffix(*configPath, ".toml") {
        log.Fatalln("config file must have a .toml suffix")
    }

    data, err := ioutil.ReadAll(f)
    if err != nil {
        log.Fatalln("ioutil.ReadAll config file failed: ", *configPath)
    }

    globals := &GlobalConfig{}
    var cfgs map[string]toml.Primitive
    _, err = toml.Decode(string(data), &cfgs)
    if err != nil {
        log.Fatalln("toml.Decode data failed: ", err)
    }
    log.Println(cfgs)

    base := &BaseConfig{}
    kafkacfg := NewKafkaInputConfig()
    influxcfg := NewInfluxDBOutputConfig()
    empty_ignore := map[string]interface{}{}

    parsed_globals, ok := cfgs["global"]
    if !ok {
        log.Fatalln("global base toml must be set")
    }
    if err = toml.PrimitiveDecodeStrict(parsed_globals, base, empty_ignore); err != nil {
        log.Fatalln("global base decode failed: ", err)
    }
    globals.Base = base
    log.Println(globals.Base)

    parsed_config, ok := cfgs[globals.Base.Input]
    if ok {
        if err = toml.PrimitiveDecodeStrict(parsed_config, kafkacfg, empty_ignore); err != nil {
            log.Fatalln("can't unmarshal config: ", err)
        }
        globals.KafkaConfig = kafkacfg
    }

    parsed_config_influxdb, ok := cfgs[globals.Base.Output]
    if ok {
        if err = toml.PrimitiveDecodeStrict(parsed_config_influxdb, influxcfg, empty_ignore); err != nil {
            log.Fatalln("can't unmarshal config: ", err)
        }
        globals.InfluxDBConfig = influxcfg
    }
    return globals
}
func QueueBufferSpec(c gs.Context) {
    tmpDir, tmpErr := ioutil.TempDir("", "queuebuffer-tests")
    defer func() {
        tmpErr = os.RemoveAll(tmpDir)
        c.Expect(tmpErr, gs.Equals, nil)
    }()

    t := &ts.SimpleT{}
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    c.Specify("QueueBuffer Internals", func() {
        pConfig := NewPipelineConfig(nil)
        err := pConfig.RegisterDefault("HekaFramingSplitter")
        c.Assume(err, gs.IsNil)
        RegisterPlugin("FooOutput", func() interface{} {
            return &FooOutput{}
        })

        outputToml := `[FooOutput]
        message_matcher = "TRUE"
        `
        var configFile ConfigFile
        _, err = toml.Decode(outputToml, &configFile)
        c.Assume(err, gs.IsNil)
        section, ok := configFile["FooOutput"]
        c.Assume(ok, gs.IsTrue)
        maker, err := NewPluginMaker("FooOutput", pConfig, section)
        c.Assume(err, gs.IsNil)
        orTmp, err := maker.MakeRunner("FooOutput")
        c.Assume(err, gs.IsNil)
        or := orTmp.(*foRunner)
        // h := NewMockPluginHelper(ctrl)
        // h.EXPECT().PipelineConfig().Return(pConfig)

        qConfig := &QueueBufferConfig{
            FullAction:        "block",
            CursorUpdateCount: 1,
            MaxFileSize:       66000,
        }
        feeder, reader, err := NewBufferSet(tmpDir, "test", qConfig, or, pConfig)
        // bufferedOutput, err := NewBufferedOutput(tmpDir, "test", or, h, uint64(0))
        c.Assume(err, gs.IsNil)
        msg := ts.GetTestMessage()

        c.Specify("fileExists", func() {
            c.Expect(fileExists(tmpDir), gs.IsTrue)
            c.Expect(fileExists(filepath.Join(tmpDir, "test.log")), gs.IsFalse)
        })

        c.Specify("extractBufferId", func() {
            id, err := extractBufferId("555.log")
            c.Expect(err, gs.IsNil)
            c.Expect(id, gs.Equals, uint(555))
            id, err = extractBufferId("")
            c.Expect(err, gs.Not(gs.IsNil))
            id, err = extractBufferId("a.log")
            c.Expect(err, gs.Not(gs.IsNil))
        })

        c.Specify("findBufferId", func() {
            c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(0))
            c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(0))
            fd, err := os.Create(filepath.Join(tmpDir, "4.log"))
            c.Expect(err, gs.IsNil)
            fd.Close()
            fd, err = os.Create(filepath.Join(tmpDir, "5.log"))
            c.Expect(err, gs.IsNil)
            fd.Close()
            fd, err = os.Create(filepath.Join(tmpDir, "6a.log"))
            c.Expect(err, gs.IsNil)
            fd.Close()
            c.Expect(findBufferId(tmpDir, false), gs.Equals, uint(4))
            c.Expect(findBufferId(tmpDir, true), gs.Equals, uint(5))
        })

        c.Specify("writeCheckpoint", func() {
            reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
            err := reader.writeCheckpoint("43 99999")
            c.Expect(err, gs.IsNil)
            c.Expect(fileExists(reader.checkpointFilename), gs.IsTrue)
            id, offset, err := readCheckpoint(reader.checkpointFilename)
            c.Expect(err, gs.IsNil)
            c.Expect(id, gs.Equals, uint(43))
            c.Expect(offset, gs.Equals, int64(99999))
            err = reader.writeCheckpoint("43 1")
            c.Expect(err, gs.IsNil)
            id, offset, err = readCheckpoint(reader.checkpointFilename)
            c.Expect(err, gs.IsNil)
            c.Expect(id, gs.Equals, uint(43))
            c.Expect(offset, gs.Equals, int64(1))
            reader.checkpointFile.Close()
        })

        c.Specify("readCheckpoint", func() {
            cp := filepath.Join(tmpDir, "cp.txt")
            file, err := os.OpenFile(cp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
            c.Expect(err, gs.IsNil)
            id, offset, err := readCheckpoint(cp)
            c.Expect(err, gs.Not(gs.IsNil))
            file.WriteString("22")
            id, offset, err = readCheckpoint(cp)
            c.Expect(err.Error(), gs.Equals, "invalid checkpoint format")
            file.Seek(0, 0)
            file.WriteString("aa 22")
            id, offset, err = readCheckpoint(cp)
            c.Expect(err.Error(), gs.Equals, "invalid checkpoint id")
            file.Seek(0, 0)
            file.WriteString("43 aa")
            id, offset, err = readCheckpoint(cp)
            c.Expect(err.Error(), gs.Equals, "invalid checkpoint offset")
            file.Seek(0, 0)
            file.WriteString("43 22")
            file.Close()
            id, offset, err = readCheckpoint(cp)
            c.Expect(err, gs.IsNil)
            c.Expect(id, gs.Equals, uint(43))
            c.Expect(offset, gs.Equals, int64(22))
        })

        c.Specify("RollQueue", func() {
            feeder.queue = tmpDir
            err := feeder.RollQueue()
            c.Expect(err, gs.IsNil)
            c.Expect(fileExists(getQueueFilename(feeder.queue, feeder.writeId)), gs.IsTrue)
            feeder.writeFile.WriteString("this is a test item")
            feeder.writeFile.Close()
            reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
            reader.queue = tmpDir
            reader.writeCheckpoint(fmt.Sprintf("%d 10", feeder.writeId))
            reader.checkpointFile.Close()
            err = reader.initReadFile()
            c.Assume(err, gs.IsNil)
            buf := make([]byte, 4)
            n, err := reader.readFile.Read(buf)
            c.Expect(n, gs.Equals, 4)
            c.Expect(string(buf), gs.Equals, "test")
            feeder.writeFile.Close()
            reader.readFile.Close()
        })

        c.Specify("QueueRecord", func() {
            reader.checkpointFilename = filepath.Join(tmpDir, "cp.txt")
            reader.queue = tmpDir
            newpack := NewPipelinePack(nil)
            newpack.Message = msg
            payload := "Write me out to the network"
            newpack.Message.SetPayload(payload)
            encoder := client.NewProtobufEncoder(nil)
            protoBytes, err := encoder.EncodeMessage(newpack.Message)
            newpack.MsgBytes = protoBytes
            expectedLen := 115

            c.Specify("adds framing", func() {
                err = feeder.RollQueue()
                c.Expect(err, gs.IsNil)
                err = feeder.QueueRecord(newpack)
                fName := getQueueFilename(feeder.queue, feeder.writeId)
                c.Expect(fileExists(fName), gs.IsTrue)
                c.Expect(err, gs.IsNil)
                feeder.writeFile.Close()

                f, err := os.Open(fName)
                c.Expect(err, gs.IsNil)
                n, record, err := reader.sRunner.GetRecordFromStream(f)
                f.Close()
                c.Expect(n, gs.Equals, expectedLen)
                c.Expect(err, gs.IsNil)
                headerLen := int(record[1]) + message.HEADER_FRAMING_SIZE
                record = record[headerLen:]
                outMsg := new(message.Message)
                proto.Unmarshal(record, outMsg)
                c.Expect(outMsg.GetPayload(), gs.Equals, payload)
            })

            c.Specify("when queue has limit", func() {
                feeder.Config.MaxBufferSize = uint64(200)
                c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))
                err = feeder.RollQueue()
                c.Expect(err, gs.IsNil)
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.IsNil)
                c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(expectedLen))
            })

            c.Specify("when queue has limit and is full", func() {
                feeder.Config.MaxBufferSize = uint64(50)
                feeder.Config.MaxFileSize = uint64(50)
                c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))
                err = feeder.RollQueue()
                c.Expect(err, gs.IsNil)
                queueFiles, err := ioutil.ReadDir(feeder.queue)
                c.Expect(err, gs.IsNil)
                numFiles := len(queueFiles)
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.Equals, QueueIsFull)
                c.Expect(feeder.queueSize.Get(), gs.Equals, uint64(0))
                // Bump the max queue size so it will accept a record.
                feeder.Config.MaxBufferSize = uint64(120)
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.IsNil)
                // Queue should have rolled.
                queueFiles, err = ioutil.ReadDir(feeder.queue)
                c.Expect(err, gs.IsNil)
                c.Expect(len(queueFiles), gs.Equals, numFiles+1)
                // Try to queue one last time, it should fail.
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.Equals, QueueIsFull)
                // Ensure queue didn't roll twice.
                queueFiles, err = ioutil.ReadDir(feeder.queue)
                c.Expect(err, gs.IsNil)
                c.Expect(len(queueFiles), gs.Equals, numFiles+1)
            })

            c.Specify("rolls when queue file hits max size", func() {
                feeder.Config.MaxFileSize = uint64(300)
                c.Assume(feeder.writeFileSize, gs.Equals, uint64(0))
                // First two shouldn't trigger roll.
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.IsNil)
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.IsNil)
                c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen*2))
                queueFiles, err := ioutil.ReadDir(feeder.queue)
                c.Expect(err, gs.IsNil)
                c.Expect(len(queueFiles), gs.Equals, 1)
                // Third one should.
                err = feeder.QueueRecord(newpack)
                c.Expect(err, gs.IsNil)
                c.Expect(feeder.writeFileSize, gs.Equals, uint64(expectedLen))
                queueFiles, err = ioutil.ReadDir(feeder.queue)
                c.Expect(err, gs.IsNil)
                c.Expect(len(queueFiles), gs.Equals, 2)
            })
        })

        c.Specify("getQueueBufferSize", func() {
            c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(0))
            fd, _ := os.Create(filepath.Join(tmpDir, "4.log"))
            fd.WriteString("0123456789")
            fd.Close()
            fd, _ = os.Create(filepath.Join(tmpDir, "5.log"))
            fd.WriteString("0123456789")
            fd.Close()
            // Only size of *.log files should be taken in calculations.
            fd, _ = os.Create(filepath.Join(tmpDir, "random_file"))
            fd.WriteString("0123456789")
            fd.Close()
            c.Expect(getQueueBufferSize(tmpDir), gs.Equals, uint64(20))
        })
    })
}
// Loads all plugin configuration from a TOML configuration file. The
// PipelineConfig should already be initialized via the Init function before
// this method is called.
func (self *PipelineConfig) LoadFromConfigFile(filename string) (err error) {
    var configFile ConfigFile
    if _, err = toml.DecodeFile(filename, &configFile); err != nil {
        return fmt.Errorf("Error decoding config file: %s", err)
    }

    var (
        errcnt              uint
        protobufDRegistered bool
        protobufERegistered bool
    )
    sectionsByCategory := make(map[string][]*ConfigSection)

    // Load all the plugin globals and file them by category.
    for name, conf := range configFile {
        if name == HEKA_DAEMON {
            continue
        }
        log.Printf("Pre-loading: [%s]\n", name)

        section := &ConfigSection{
            name:        name,
            tomlSection: conf,
        }
        if err = self.loadPluginGlobals(section); err != nil {
            self.log(err.Error())
            errcnt++
            continue
        }

        category := getPluginCategory(section.globals.Typ)
        if category == "" {
            self.log(fmt.Sprintf("Type doesn't contain valid plugin name: %s\n",
                section.globals.Typ))
            errcnt++
            continue
        }
        section.category = category
        if section.globals.Typ == "MultiDecoder" {
            // Special case MultiDecoders so we can make sure they get
            // registered *after* all possible subdecoders.
            sectionsByCategory["MultiDecoder"] = append(
                sectionsByCategory["MultiDecoder"], section)
        } else {
            sectionsByCategory[category] = append(sectionsByCategory[category],
                section)
        }
        if name == "ProtobufDecoder" {
            protobufDRegistered = true
        }
        if name == "ProtobufEncoder" {
            protobufERegistered = true
        }
    }

    // Make sure ProtobufDecoder is registered.
    if !protobufDRegistered {
        var configDefault ConfigFile
        toml.Decode(protobufDecoderToml, &configDefault)
        log.Println("Pre-loading: [ProtobufDecoder]")
        section := &ConfigSection{
            name:        "ProtobufDecoder",
            category:    "Decoder",
            tomlSection: configDefault["ProtobufDecoder"],
        }
        if err = self.loadPluginGlobals(section); err != nil {
            // This really shouldn't happen.
            self.log(err.Error())
            errcnt++
        } else {
            sectionsByCategory["Decoder"] = append(sectionsByCategory["Decoder"],
                section)
        }
    }

    // Make sure ProtobufEncoder is registered.
    if !protobufERegistered {
        var configDefault ConfigFile
        toml.Decode(protobufEncoderToml, &configDefault)
        log.Println("Pre-loading: [ProtobufEncoder]")
        section := &ConfigSection{
            name:        "ProtobufEncoder",
            category:    "Encoder",
            tomlSection: configDefault["ProtobufEncoder"],
        }
        if err = self.loadPluginGlobals(section); err != nil {
            // This really shouldn't happen.
            self.log(err.Error())
            errcnt++
        } else {
            sectionsByCategory["Encoder"] = append(sectionsByCategory["Encoder"],
                section)
        }
    }

    multiDecoders := make([]multiDecoderNode, len(sectionsByCategory["MultiDecoder"]))
    multiConfigs := make(map[string]*ConfigSection)
    for i, section := range sectionsByCategory["MultiDecoder"] {
        multiConfigs[section.name] = section
        multiDecoders[i] = newMultiDecoderNode(section.name,
            subsFromSection(section.tomlSection))
    }
    multiDecoders, err = orderDependencies(multiDecoders)
    if err != nil {
        return err
    }
    for i, d := range multiDecoders {
        sectionsByCategory["MultiDecoder"][i] = multiConfigs[d.name]
    }
    // Append MultiDecoders to the end of the Decoders list.
    sectionsByCategory["Decoder"] = append(sectionsByCategory["Decoder"],
        sectionsByCategory["MultiDecoder"]...)

    // Force decoders and encoders to be registered before the other plugin
    // types are initialized so we know they'll be there for inputs and
    // outputs to use during initialization.
    order := []string{"Decoder", "Encoder", "Input", "Filter", "Output"}
    for _, category := range order {
        for _, section := range sectionsByCategory[category] {
            log.Printf("Loading: [%s]\n", section.name)
            if err = self.loadSection(section); err != nil {
                self.log(err.Error())
                errcnt++
            }
        }
    }

    if errcnt != 0 {
        return fmt.Errorf("%d errors loading plugins", errcnt)
    }
    return
}