func newIncludeFields(c common.Config) (processors.Processor, error) {
	config := struct {
		Fields []string `config:"fields"`
	}{}

	err := c.Unpack(&config)
	if err != nil {
		return nil, fmt.Errorf("fail to unpack the include_fields configuration: %s", err)
	}

	// add read-only fields if they are not yet present
	for _, readOnly := range processors.MandatoryExportedFields {
		found := false
		for _, field := range config.Fields {
			if readOnly == field {
				found = true
				break
			}
		}
		if !found {
			config.Fields = append(config.Fields, readOnly)
		}
	}

	f := includeFields{Fields: config.Fields}
	return &f, nil
}
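// exampleUnpack is a hypothetical, minimal sketch (not from the original
// sources) of the `config:"..."` Unpack pattern shared by the constructors
// in this section. It assumes common.NewConfigFrom from libbeat/common; the
// "fields" value is illustrative. Struct values set before Unpack act as
// defaults and are overwritten only by keys present in the raw config.
func exampleUnpack() error {
	cfg, err := common.NewConfigFrom(map[string]interface{}{
		"fields": []string{"message", "@timestamp"},
	})
	if err != nil {
		return err
	}
	settings := struct {
		Fields []string `config:"fields"`
	}{}
	if err := cfg.Unpack(&settings); err != nil {
		return err // malformed or mistyped config
	}
	// settings.Fields == []string{"message", "@timestamp"}
	return nil
}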
func (lj *logstash) init(cfg *common.Config) error {
	config := defaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return err
	}

	tls, err := outputs.LoadTLSConfig(config.TLS)
	if err != nil {
		return err
	}

	transp := &transport.Config{
		Timeout: config.Timeout,
		Proxy:   &config.Proxy,
		TLS:     tls,
		Stats: &transport.IOStats{
			Read:        statReadBytes,
			Write:       statWriteBytes,
			ReadErrors:  statReadErrors,
			WriteErrors: statWriteErrors,
		},
	}

	logp.Info("Max Retries set to: %v", config.MaxRetries)
	m, err := initConnectionMode(cfg, &config, transp)
	if err != nil {
		return err
	}

	lj.mode = m
	lj.index = config.Index

	return nil
}
// Creates beater
func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
	cfg := config.DefaultConfig
	err := rawConfig.Unpack(&cfg)
	if err != nil {
		return nil, fmt.Errorf("Error reading config file: %v", err)
	}

	done := make(chan struct{})
	workers := []*worker{}
	// loop variables renamed so they do not shadow the generators registry
	// or the unpacked beat config
	for name, genConfig := range cfg.Generators {
		factory, ok := generators[name]
		if !ok {
			return nil, fmt.Errorf("Unknown generator: %v", name)
		}

		gens, err := factory(genConfig)
		if err != nil {
			return nil, err
		}

		for _, gen := range gens {
			workers = append(workers, &worker{
				done: done,
				gen:  gen,
			})
		}
	}

	return &Generatorbeat{
		done:   done,
		worker: workers,
	}, nil
}
func NewHarvester(
	cfg *common.Config,
	state file.State,
	prospectorChan chan *input.Event,
	done chan struct{},
) (*Harvester, error) {
	h := &Harvester{
		config:         defaultConfig,
		state:          state,
		prospectorChan: prospectorChan,
		done:           done,
	}

	if err := cfg.Unpack(&h.config); err != nil {
		return nil, err
	}

	encodingFactory, ok := encoding.FindEncoding(h.config.Encoding)
	if !ok || encodingFactory == nil {
		return nil, fmt.Errorf("unknown encoding('%v')", h.config.Encoding)
	}
	h.encodingFactory = encodingFactory

	return h, nil
}
func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
	config := config.Config{
		Interfaces: config.InterfacesConfig{
			File:       *cmdLineArgs.file,
			Loop:       *cmdLineArgs.loop,
			TopSpeed:   *cmdLineArgs.topSpeed,
			OneAtATime: *cmdLineArgs.oneAtAtime,
			Dumpfile:   *cmdLineArgs.dumpfile,
		},
	}
	err := rawConfig.Unpack(&config)
	if err != nil {
		logp.Err("failed to read the beat config: %v, %v", err, config)
		return nil, err
	}

	pb := &packetbeat{
		config:      config,
		cmdLineArgs: cmdLineArgs,
	}
	err = pb.init(b)
	if err != nil {
		return nil, err
	}

	return pb, nil
}
func newDropFields(c common.Config) (filter.FilterRule, error) {
	f := DropFields{}

	if err := f.CheckConfig(c); err != nil {
		return nil, err
	}

	config := DropFieldsConfig{}
	err := c.Unpack(&config)
	if err != nil {
		return nil, fmt.Errorf("fail to unpack the drop_fields configuration: %s", err)
	}

	// remove read-only fields; break after a match so we never keep ranging
	// over a slice that was just mutated
	for _, readOnly := range filter.MandatoryExportedFields {
		for i, field := range config.Fields {
			if readOnly == field {
				config.Fields = append(config.Fields[:i], config.Fields[i+1:]...)
				break
			}
		}
	}
	f.Fields = config.Fields

	cond, err := filter.NewCondition(config.ConditionConfig)
	if err != nil {
		return nil, err
	}
	f.Cond = cond

	return &f, nil
}
// NewModule creates a new module
func NewModule(cfg *common.Config, moduler func() Moduler) (*Module, error) {
	// Module config defaults
	config := ModuleConfig{
		Period:  "1s",
		Enabled: true,
	}

	err := cfg.Unpack(&config)
	if err != nil {
		return nil, err
	}

	filters, err := filter.New(config.Filters)
	if err != nil {
		return nil, fmt.Errorf("error initializing filters: %v", err)
	}
	logp.Debug("module", "Filters: %+v", filters)

	return &Module{
		name:       config.Module,
		Config:     config,
		cfg:        cfg,
		moduler:    moduler(),
		metricSets: map[string]*MetricSet{},
		Publish:    make(chan common.MapStr), // TODO: What should be size of channel? @ruflin,20160316
		wg:         sync.WaitGroup{},
		done:       make(chan struct{}),
		filters:    filters,
	}, nil
}
func ReadHostList(cfg *common.Config) ([]string, error) {
	config := struct {
		Hosts  []string `config:"hosts"`
		Worker int      `config:"worker"`
	}{
		Worker: 1,
	}

	err := cfg.Unpack(&config)
	if err != nil {
		return nil, err
	}

	lst := config.Hosts
	if len(lst) == 0 || config.Worker <= 1 {
		return lst, nil
	}

	// duplicate entries config.Worker times
	hosts := make([]string, 0, len(lst)*config.Worker)
	for _, entry := range lst {
		for i := 0; i < config.Worker; i++ {
			hosts = append(hosts, entry)
		}
	}
	return hosts, nil
}
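// exampleReadHostList is a hypothetical sketch (not from the original
// sources) showing the fan-out behavior of ReadHostList above: with
// worker > 1 every host is repeated once per worker, so downstream code can
// open one connection per list entry. The addresses are illustrative.
func exampleReadHostList() {
	cfg, _ := common.NewConfigFrom(map[string]interface{}{
		"hosts":  []string{"10.0.0.1:5044", "10.0.0.2:5044"},
		"worker": 3,
	})
	hosts, err := ReadHostList(cfg)
	if err != nil {
		return
	}
	// hosts now holds each address three times:
	// ["10.0.0.1:5044", "10.0.0.1:5044", "10.0.0.1:5044",
	//  "10.0.0.2:5044", "10.0.0.2:5044", "10.0.0.2:5044"]
	_ = hosts
}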
func (k *kafka) init(cfg *common.Config) error { debugf("initialize kafka output") config := defaultConfig if err := cfg.Unpack(&config); err != nil { return err } topic, err := outil.BuildSelectorFromConfig(cfg, outil.Settings{ Key: "topic", MultiKey: "topics", EnableSingleOnly: true, FailEmpty: true, }) if err != nil { return err } partitioner, err := makePartitioner(config.Partition) if err != nil { return err } k.config = config k.partitioner = partitioner k.topic = topic // validate config one more time _, err = k.newKafkaConfig() if err != nil { return err } return nil }
func cfgRoundRobinPartitioner(config *common.Config) (func() partitioner, error) {
	cfg := struct {
		GroupEvents int `config:"group_events" validate:"min=1"`
	}{
		GroupEvents: 1,
	}
	if err := config.Unpack(&cfg); err != nil {
		return nil, err
	}

	return func() partitioner {
		N := cfg.GroupEvents
		count := N
		partition := rand.Int31()

		return func(_ *message, numPartitions int32) (int32, error) {
			if N == count {
				count = 0
				if partition++; partition >= numPartitions {
					partition = 0
				}
			}
			count++
			return partition, nil
		}
	}, nil
}
func cfgRandomPartitioner(config *common.Config) (func() partitioner, error) {
	cfg := struct {
		GroupEvents int `config:"group_events" validate:"min=1"`
	}{
		GroupEvents: 1,
	}
	if err := config.Unpack(&cfg); err != nil {
		return nil, err
	}

	return func() partitioner {
		generator := rand.New(rand.NewSource(rand.Int63()))
		N := cfg.GroupEvents
		count := cfg.GroupEvents
		partition := int32(0)

		return func(_ *message, numPartitions int32) (int32, error) {
			if N == count {
				count = 0
				partition = int32(generator.Intn(int(numPartitions)))
			}
			count++
			return partition, nil
		}
	}, nil
}
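// examplePartitioner is a hypothetical sketch (not from the original
// sources) of how the two partitioner factories above are consumed. Each
// factory call produces one stateful closure per producer. With the default
// group_events: 1 the round-robin variant advances the partition on every
// event; with group_events: N the same partition is reused for N consecutive
// events before advancing (round robin) or re-rolling (random).
func examplePartitioner() error {
	makePartitioner, err := cfgRoundRobinPartitioner(common.NewConfig())
	if err != nil {
		return err
	}
	part := makePartitioner() // one stateful closure per producer
	p0, _ := part(nil, 4)     // some partition in [0, 4)
	p1, _ := part(nil, 4)     // (p0 + 1) % 4, since group_events defaults to 1
	_, _ = p0, p1
	return nil
}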
func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {
	config := config.DefaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return nil, fmt.Errorf("Error reading config file: %v", err)
	}

	limit := config.Scheduler.Limit
	locationName := config.Scheduler.Location
	if locationName == "" {
		locationName = "Local"
	}
	location, err := time.LoadLocation(locationName)
	if err != nil {
		return nil, err
	}

	client := b.Publisher.Connect()
	sched := scheduler.NewWithLocation(limit, location)

	manager, err := newMonitorManager(client, sched, monitors.Registry, config.Monitors)
	if err != nil {
		return nil, err
	}

	bt := &Heartbeat{
		done:      make(chan struct{}),
		client:    client,
		scheduler: sched,
		manager:   manager,
	}
	return bt, nil
}
func (k *kafka) init(cfg *common.Config) error { debugf("initialize kafka output") k.config = defaultConfig if err := cfg.Unpack(&k.config); err != nil { return err } var err error k.topic, err = outil.BuildSelectorFromConfig(cfg, outil.Settings{ Key: "topic", MultiKey: "topics", EnableSingleOnly: true, FailEmpty: true, }) if err != nil { return err } _, err = newKafkaConfig(&k.config) if err != nil { return err } return nil }
func New(config *common.Config, _ int) (outputs.Outputer, error) {
	c := &console{config: defaultConfig}
	err := config.Unpack(&c.config)
	if err != nil {
		return nil, err
	}
	return c, nil
}
// newBaseModuleFromConfig creates a new BaseModule from config.
func newBaseModuleFromConfig(rawConfig *common.Config) (BaseModule, error) {
	baseModule := BaseModule{
		config:    defaultModuleConfig,
		rawConfig: rawConfig,
	}
	err := rawConfig.Unpack(&baseModule.config)
	baseModule.name = strings.ToLower(baseModule.config.Module)
	return baseModule, err
}
func (f *DropEvent) CheckConfig(c common.Config) error {
	for _, field := range c.GetFields() {
		if !filter.AvailableCondition(field) {
			return fmt.Errorf("unexpected %s option in the drop_event configuration", field)
		}
	}
	return nil
}
func (lj *logstash) init(cfg *common.Config) error { config := defaultConfig if err := cfg.Unpack(&config); err != nil { return err } sendRetries := config.MaxRetries maxAttempts := sendRetries + 1 if sendRetries < 0 { maxAttempts = 0 } tls, err := outputs.LoadTLSConfig(config.TLS) if err != nil { return err } transp := &transport.Config{ Timeout: config.Timeout, Proxy: &config.Proxy, TLS: tls, Stats: &transport.IOStats{ Read: statReadBytes, Write: statWriteBytes, ReadErrors: statReadErrors, WriteErrors: statWriteErrors, }, } logp.Info("Max Retries set to: %v", sendRetries) var m mode.ConnectionMode if config.Pipelining == 0 { clients, err := modeutil.MakeClients(cfg, makeClientFactory(&config, transp)) if err == nil { m, err = modeutil.NewConnectionMode(clients, !config.LoadBalance, maxAttempts, defaultWaitRetry, config.Timeout, defaultMaxWaitRetry) } } else { clients, err := modeutil.MakeAsyncClients(cfg, makeAsyncClientFactory(&config, transp)) if err == nil { m, err = modeutil.NewAsyncConnectionMode(clients, !config.LoadBalance, maxAttempts, defaultWaitRetry, config.Timeout, defaultMaxWaitRetry) } } if err != nil { return err } lj.mode = m lj.index = config.Index return nil }
// New instantiates a new output plugin instance publishing to elasticsearch.
func New(cfg *common.Config, topologyExpire int) (outputs.Outputer, error) {
	if !cfg.HasField("bulk_max_size") {
		cfg.SetInt("bulk_max_size", -1, defaultBulkSize)
	}

	output := &elasticsearchOutput{}
	err := output.init(cfg, topologyExpire)
	if err != nil {
		return nil, err
	}
	return output, nil
}
func new(beatName string, cfg *common.Config, _ int) (outputs.Outputer, error) {
	if !cfg.HasField("index") {
		cfg.SetString("index", -1, beatName)
	}

	output := &logstash{}
	if err := output.init(cfg); err != nil {
		return nil, err
	}
	return output, nil
}
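// exampleInjectDefault is a hypothetical sketch (not from the original
// sources) of the default-injection pattern used by the two constructors
// above: a missing setting is written into the raw config itself before
// init runs, so later Unpack calls see it as if the user had set it. The
// -1 index is assumed here to address the scalar (non-array) form of the
// setting, matching its use above; "mybeat" is illustrative.
func exampleInjectDefault() error {
	cfg := common.NewConfig()
	if !cfg.HasField("index") {
		if err := cfg.SetString("index", -1, "mybeat"); err != nil {
			return err
		}
	}
	settings := struct {
		Index string `config:"index"`
	}{}
	if err := cfg.Unpack(&settings); err != nil {
		return err
	}
	_ = settings.Index // "mybeat"
	return nil
}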
func newDecodeJSONFields(c common.Config) (processors.Processor, error) {
	config := defaultConfig

	err := c.Unpack(&config)
	if err != nil {
		logp.Warn("Error unpacking config for decode_json_fields")
		return nil, fmt.Errorf("fail to unpack the decode_json_fields configuration: %s", err)
	}

	f := decodeJSONFields{
		fields:       config.Fields,
		maxDepth:     config.MaxDepth,
		processArray: config.ProcessArray,
	}
	return f, nil
}
func New(testMode bool, results publish.Transactions, cfg *common.Config) (*Icmp, error) {
	p := &Icmp{}
	config := defaultConfig
	if !testMode {
		if err := cfg.Unpack(&config); err != nil {
			return nil, err
		}
	}

	if err := p.init(results, &config); err != nil {
		return nil, err
	}
	return p, nil
}
func (k *kafka) init(cfg *common.Config) error { debugf("initialize kafka output") k.config = defaultConfig if err := cfg.Unpack(&k.config); err != nil { return err } _, err := newKafkaConfig(&k.config) if err != nil { return err } return nil }
// New creates a new Filebeat pointer instance.
func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) {
	config := cfg.DefaultConfig
	if err := rawConfig.Unpack(&config); err != nil {
		return nil, fmt.Errorf("Error reading config file: %v", err)
	}
	if err := config.FetchConfigs(); err != nil {
		return nil, err
	}

	fb := &Filebeat{
		done:   make(chan struct{}),
		config: &config,
	}
	return fb, nil
}
func NewProspector(cfg *common.Config, states file.States, outlet Outlet) (*Prospector, error) {
	prospector := &Prospector{
		cfg:           cfg,
		config:        defaultConfig,
		outlet:        outlet,
		harvesterChan: make(chan *input.Event),
		done:          make(chan struct{}),
		wg:            sync.WaitGroup{},
		states:        &file.States{},
		channelWg:     sync.WaitGroup{},
	}

	if err := cfg.Unpack(&prospector.config); err != nil {
		return nil, err
	}

	if err := prospector.config.Validate(); err != nil {
		return nil, err
	}

	err := prospector.Init(states.GetStates())
	if err != nil {
		return nil, err
	}

	logp.Debug("prospector", "File Configs: %v", prospector.config.Paths)

	return prospector, nil
}
func newCloudMetadata(c common.Config) (processors.Processor, error) {
	config := struct {
		MetadataHostAndPort string        `config:"host"`    // Specifies the host and port of the metadata service (for testing purposes only).
		Timeout             time.Duration `config:"timeout"` // Amount of time to wait for responses from the metadata services.
	}{
		MetadataHostAndPort: metadataHost,
		Timeout:             3 * time.Second,
	}
	err := c.Unpack(&config)
	if err != nil {
		return nil, errors.Wrap(err, "failed to unpack add_cloud_metadata config")
	}

	var (
		doURL  = "http://" + config.MetadataHostAndPort + doMetadataURI
		ec2URL = "http://" + config.MetadataHostAndPort + ec2InstanceIdentityURI
		gceURL = "http://" + config.MetadataHostAndPort + gceMetadataURI
	)

	result := fetchMetadata(doURL, ec2URL, gceURL, config.Timeout)
	if result == nil {
		logp.Info("add_cloud_metadata: hosting provider type not detected.")
		return addCloudMetadata{}, nil
	}

	logp.Info("add_cloud_metadata: hosting provider type detected as %v, metadata=%v",
		result.provider, result.metadata.String())

	return addCloudMetadata{metadata: result.metadata}, nil
}
func NewProspector(cfg *common.Config, states file.States, spoolerChan chan *input.FileEvent) (*Prospector, error) {
	prospector := &Prospector{
		cfg:           cfg,
		config:        defaultConfig,
		spoolerChan:   spoolerChan,
		harvesterChan: make(chan *input.FileEvent),
		done:          make(chan struct{}),
		states:        states.Copy(),
		wg:            sync.WaitGroup{},
	}

	if err := cfg.Unpack(&prospector.config); err != nil {
		return nil, err
	}

	if err := prospector.config.Validate(); err != nil {
		return nil, err
	}

	err := prospector.Init()
	if err != nil {
		return nil, err
	}

	logp.Debug("prospector", "File Configs: %v", prospector.config.Paths)

	return prospector, nil
}
func create(
	info monitors.Info,
	cfg *common.Config,
) ([]monitors.Job, error) {
	config := DefaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return nil, err
	}

	tls, err := outputs.LoadTLSConfig(config.TLS)
	if err != nil {
		return nil, err
	}

	defaultScheme := "tcp"
	if tls != nil {
		defaultScheme = "ssl"
	}

	addrs, err := collectHosts(&config, defaultScheme)
	if err != nil {
		return nil, err
	}

	if config.Socks5.URL != "" && !config.Socks5.LocalResolve {
		var jobs []monitors.Job
		for _, addr := range addrs {
			scheme, host := addr.Scheme, addr.Host
			for _, port := range addr.Ports {
				job, err := newTCPMonitorHostJob(scheme, host, port, tls, &config)
				if err != nil {
					return nil, err
				}
				jobs = append(jobs, job)
			}
		}
		return jobs, nil
	}

	jobs := make([]monitors.Job, len(addrs))
	for i, addr := range addrs {
		jobs[i], err = newTCPMonitorIPsJob(addr, tls, &config)
		if err != nil {
			return nil, err
		}
	}
	return jobs, nil
}
func (out *elasticsearchOutput) init(
	cfg *common.Config,
	topologyExpire int,
) error {
	config := defaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return err
	}

	tlsConfig, err := outputs.LoadTLSConfig(config.TLS)
	if err != nil {
		return err
	}

	err = out.readTemplate(&config.Template)
	if err != nil {
		return err
	}

	clients, err := modeutil.MakeClients(cfg, makeClientFactory(tlsConfig, &config, out))
	if err != nil {
		return err
	}

	maxRetries := config.MaxRetries
	maxAttempts := maxRetries + 1 // maximum number of send attempts (-1 = infinite)
	if maxRetries < 0 {
		maxAttempts = 0
	}

	var waitRetry = time.Duration(1) * time.Second
	var maxWaitRetry = time.Duration(60) * time.Second

	out.clients = clients
	loadBalance := config.LoadBalance
	m, err := modeutil.NewConnectionMode(clients, !loadBalance, maxAttempts,
		waitRetry, config.Timeout, maxWaitRetry)
	if err != nil {
		return err
	}

	out.mode = m
	out.index = config.Index

	return nil
}
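// exampleRetryAttempts is a hypothetical sketch (not from the original
// sources) isolating the retry arithmetic shared by the elasticsearch and
// logstash init functions above: max_retries: 3 yields 4 total send
// attempts, while a negative max_retries maps to 0, which the connection
// mode treats as "retry forever".
func exampleRetryAttempts(maxRetries int) int {
	maxAttempts := maxRetries + 1
	if maxRetries < 0 {
		maxAttempts = 0 // infinite retries
	}
	return maxAttempts
}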
// New creates and initializes a new cassandra protocol analyzer instance.
func New(
	testMode bool,
	results publish.Transactions,
	cfg *common.Config,
) (protos.Plugin, error) {
	p := &cassandra{}
	config := defaultConfig
	if !testMode {
		if err := cfg.Unpack(&config); err != nil {
			return nil, err
		}
	}

	if err := p.init(results, &config); err != nil {
		return nil, err
	}
	return p, nil
}
// newBaseModuleFromConfig creates a new BaseModule from config. The returned
// BaseModule's name will always be lower case.
func newBaseModuleFromConfig(rawConfig *common.Config) (BaseModule, error) {
	baseModule := BaseModule{
		config:    DefaultModuleConfig(),
		rawConfig: rawConfig,
	}
	err := rawConfig.Unpack(&baseModule.config)
	if err != nil {
		return baseModule, err
	}

	baseModule.name = strings.ToLower(baseModule.config.Module)

	err = mustNotContainDuplicates(baseModule.config.Hosts)
	if err != nil {
		return baseModule, errors.Wrapf(err, "invalid hosts for module '%s'", baseModule.name)
	}

	return baseModule, nil
}
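// mustNotContainDuplicates is defined elsewhere in the package; this is a
// hypothetical, minimal sketch (not the original implementation) of the
// behavior newBaseModuleFromConfig relies on: reject a host list that names
// the same endpoint twice.
func exampleMustNotContainDuplicates(hosts []string) error {
	seen := map[string]struct{}{}
	for _, host := range hosts {
		if _, found := seen[host]; found {
			return fmt.Errorf("duplicate host %q found", host)
		}
		seen[host] = struct{}{}
	}
	return nil
}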