// doSerializeEvents writes all events as sequenced data frames to out and
// returns the subset of events that were serialized successfully.
//
// Fast path: when every event encodes cleanly, the original slice is
// returned unchanged (no allocation). On the first encode error control
// jumps to failedLoop, which truncates the result to the frames written so
// far, skips the broken event, and keeps serializing the remainder,
// collecting only the events that encode cleanly. The frame sequence
// number counts successfully written frames only (it is decremented after
// each failure).
//
// NOTE(review): in the slow path okEvents shares its backing array with
// events, so append() overwrites slots of the caller's slice (starting at
// the failed index). That appears intentional here (compaction in place),
// but confirm no caller relies on events being left untouched.
func (p *protocol) doSerializeEvents(out io.Writer, events []common.MapStr) ([]common.MapStr, error) {
	var sequence uint32

	okEvents := events
	for _, event := range events {
		sequence++
		err := p.serializeDataFrame(out, event, sequence)
		if err != nil {
			logp.Critical("failed to encode event: %v", err)
			// undo the increment: this frame was never written
			sequence--
			goto failedLoop
		}
	}
	return okEvents, nil

failedLoop:
	// on serialization error continue serializing remaining events and collect
	// serializable events only
	okEvents = events[:sequence]
	restEvents := events[sequence+1:]
	for _, event := range restEvents {
		sequence++
		err := p.serializeDataFrame(out, event, sequence)
		if err != nil {
			logp.Critical("failed to encode event: %v", err)
			sequence--
			continue
		}
		okEvents = append(okEvents, event)
	}
	return okEvents, nil
}
func (fs *FileSizeBeat) Config(b *beat.Beat) error { err := cfgfile.Read(&fs.config, "") if err != nil { logp.Err("Error reading configuration file: %v", err) return err } if fs.config.Input.Period != nil { fs.period = time.Duration(*fs.config.Input.Period) * time.Second } else { fs.period = 10 * time.Second } logp.Debug("filesizebeat", "Period %v\n", fs.period) if fs.config.Input.Paths != nil { //fs.paths = make([]Path, len(*fs.config.Input.Paths)) for _, path := range *fs.config.Input.Paths { err := fs.AddPath(path) if err != nil { logp.Critical("Error: %v", err) os.Exit(1) } } logp.Debug("filesizebeat", "Paths : %v\n", fs.paths) } else { logp.Critical("Error: no paths specified, cannot continue!") os.Exit(1) } return nil }
func loadCertificateAuthorities(CAs []string) (*x509.CertPool, []error) { errors := []error{} if len(CAs) == 0 { return nil, nil } roots := x509.NewCertPool() for _, path := range CAs { pemData, err := ioutil.ReadFile(path) if err != nil { logp.Critical("Failed reading CA certificate: %v", err) errors = append(errors, fmt.Errorf("%v reading %v", err, path)) continue } if ok := roots.AppendCertsFromPEM(pemData); !ok { logp.Critical("Failed reading CA certificate: %v", err) errors = append(errors, fmt.Errorf("%v adding %v", ErrNotACertificate, path)) continue } } return roots, errors }
// init packetbeat components func (pb *packetbeat) init(b *beat.Beat) error { cfg := &pb.config err := procs.ProcWatcher.Init(cfg.Procs) if err != nil { logp.Critical(err.Error()) return err } // This is required as init Beat is called before the beat publisher is initialised b.Config.Shipper.InitShipperConfig() pb.pub, err = publish.NewPublisher(b.Publisher, *b.Config.Shipper.QueueSize, *b.Config.Shipper.BulkQueueSize, pb.config.IgnoreOutgoing) if err != nil { return fmt.Errorf("Initializing publisher failed: %v", err) } logp.Debug("main", "Initializing protocol plugins") err = protos.Protos.Init(false, pb.pub, cfg.Protocols) if err != nil { return fmt.Errorf("Initializing protocol analyzers failed: %v", err) } logp.Debug("main", "Initializing sniffer") err = pb.setupSniffer() if err != nil { return fmt.Errorf("Initializing sniffer failed: %v", err) } return nil }
func (out *fileOutput) PublishEvent( trans outputs.Signaler, opts outputs.Options, event common.MapStr, ) error { jsonEvent, err := json.Marshal(event) if err != nil { // mark as success so event is not sent again. outputs.SignalCompleted(trans) logp.Err("Fail to convert the event to JSON: %s", err) return err } err = out.rotator.WriteLine(jsonEvent) if err != nil { if opts.Guaranteed { logp.Critical("Unable to write events to file: %s", err) } else { logp.Err("Error when writing line to file: %s", err) } } outputs.Signal(trans, err) return err }
func (c *console) PublishEvent( s outputs.Signaler, opts outputs.Options, event common.MapStr, ) error { var jsonEvent []byte var err error if c.config.Pretty { jsonEvent, err = json.MarshalIndent(event, "", " ") } else { jsonEvent, err = json.Marshal(event) } if err != nil { logp.Err("Fail to convert the event to JSON: %s", err) outputs.SignalCompleted(s) return err } if err = writeBuffer(jsonEvent); err != nil { goto fail } if err = writeBuffer([]byte{'\n'}); err != nil { goto fail } outputs.SignalCompleted(s) return nil fail: if opts.Guaranteed { logp.Critical("Unable to publish events to console: %v", err) } outputs.SignalFailed(s, err) return err }
func (pb *Packetbeat) Run(b *beat.Beat) error { // run the sniffer in background go func() { err := pb.Sniff.Run() if err != nil { logp.Critical("Sniffer main loop failed: %v", err) os.Exit(1) } pb.over <- true }() // Startup successful, disable stderr logging if requested by // cmdline flag logp.SetStderr() logp.Debug("main", "Waiting for the sniffer to finish") // Wait for the goroutines to finish for range pb.over { if !pb.Sniff.IsAlive() { break } } waitShutdown := pb.CmdLineArgs.WaitShutdown if waitShutdown != nil && *waitShutdown > 0 { time.Sleep(time.Duration(*waitShutdown) * time.Second) } return nil }
// Run initiates and runs a new beat object func Run(name string, version string, bt Beater) error { b := NewBeat(name, version, bt) // Runs beat inside a go process go func() { err := b.Start() if err != nil { // TODO: detect if logging was already fully setup or not fmt.Printf("Start error: %v\n", err) logp.Critical("Start error: %v", err) b.error = err } // If start finishes, exit has to be called. This requires start to be blocking // which is currently the default. b.Exit() }() // Waits until beats channel is closed select { case <-b.exit: b.Stop() logp.Info("Exit beat completed") return b.error } }
// Run calls the beater Setup and Run methods. In case of errors // during the setup phase, it exits the process. func (b *Beat) Run() error { // Setup beater object err := b.BT.Setup(b) if err != nil { return fmt.Errorf("setup returned an error: %v", err) } b.setState(SetupState) // Up to here was the initialization, now about running if cfgfile.IsTestConfig() { logp.Info("Testing configuration file") // all good, exit return nil } service.BeforeRun() // Callback is called if the processes is asked to stop. // This needs to be called before the main loop is started so that // it can register the signals that stop or query (on Windows) the loop. service.HandleSignals(b.Exit) logp.Info("%s sucessfully setup. Start running.", b.Name) b.setState(RunState) // Run beater specific stuff err = b.BT.Run(b) if err != nil { logp.Critical("Running the beat returned an error: %v", err) } return err }
// LoadConfig inits the config file and reads the default config information // into Beat.Config. It exists the processes in case of errors. func (b *Beat) LoadConfig() { err := cfgfile.Read(&b.Config, "") if err != nil { // logging not yet initialized, so using fmt.Printf fmt.Printf("Loading config file error: %v\n", err) os.Exit(1) } err = logp.Init(b.Name, &b.Config.Logging) if err != nil { fmt.Printf("Error initializing logging: %v\n", err) os.Exit(1) } // Disable stderr logging if requested by cmdline flag logp.SetStderr() logp.Debug("beat", "Initializing output plugins") pub, err := publisher.New(b.Name, b.Config.Output, b.Config.Shipper) if err != nil { fmt.Printf("Error Initialising publisher: %v\n", err) logp.Critical(err.Error()) os.Exit(1) } b.Events = pub.Client() logp.Info("Init Beat: %s; Version: %s", b.Name, b.Version) }
func (out *fileOutput) PublishEvent( sig op.Signaler, opts outputs.Options, event common.MapStr, ) error { jsonEvent, err := json.Marshal(event) if err != nil { // mark as success so event is not sent again. op.SigCompleted(sig) logp.Err("Fail to json encode event(%v): %#v", err, event) return err } err = out.rotator.WriteLine(jsonEvent) if err != nil { if opts.Guaranteed { logp.Critical("Unable to write events to file: %s", err) } else { logp.Err("Error when writing line to file: %s", err) } } op.Sig(sig, err) return err }
func FiltersRun(config common.MapStr, plugins map[Filter]FilterPlugin, next chan common.MapStr, stopCb func()) (input chan common.MapStr, err error) { logp.Debug("filters", "Initializing filters plugins") for filter, plugin := range plugins { Filters.Register(filter, plugin) } filters_plugins, err := LoadConfiguredFilters(config) if err != nil { return nil, fmt.Errorf("Error loading filters plugins: %v", err) } logp.Debug("filters", "Filters plugins order: %v", filters_plugins) if len(filters_plugins) > 0 { runner := NewFilterRunner(next, filters_plugins) go func() { err := runner.Run() if err != nil { logp.Critical("Filters runner failed: %v", err) // shutting down stopCb() } }() input = runner.FiltersQueue } else { input = next } return input, nil }
func OpenGeoIp2DB(db string) error { var err error GeoIp2Reader, err = geoip2.Open(db) // avoid ":=" so no shadowing of GeoIp2Reader variable if err != nil { logp.Critical("OpenGeoIp2DB: unable to open GeoIP2 database '%v' error: %v!", db, err) return err } return nil }
// handleError handles the given error by logging it and then returning the // error. If the err is nil or is a GracefulExit error then the method will // return nil without logging anything. func handleError(err error) error { if err == nil || err == GracefulExit { return nil } // logp may not be initialized so log the err to stderr too. logp.Critical("Exiting: %v", err) fmt.Fprintf(os.Stderr, "Exiting: %v\n", err) return err }
// Setup packetbeat func (pb *Packetbeat) Setup(b *beat.Beat) error { if err := procs.ProcWatcher.Init(pb.PbConfig.Procs); err != nil { logp.Critical(err.Error()) os.Exit(1) } queueSize := defaultQueueSize if pb.PbConfig.Shipper.QueueSize != nil { queueSize = *pb.PbConfig.Shipper.QueueSize } bulkQueueSize := defaultBulkQueueSize if pb.PbConfig.Shipper.BulkQueueSize != nil { bulkQueueSize = *pb.PbConfig.Shipper.BulkQueueSize } pb.Pub = publish.NewPublisher(b.Publisher, queueSize, bulkQueueSize) pb.Pub.Start() logp.Debug("main", "Initializing protocol plugins") err := protos.Protos.Init(false, pb.Pub, pb.PbConfig.Protocols) if err != nil { logp.Critical("Initializing protocol analyzers failed: %v", err) os.Exit(1) } pb.over = make(chan bool) logp.Debug("main", "Initializing sniffer") if err := pb.setupSniffer(); err != nil { logp.Critical("Initializing sniffer failed: %v", err) os.Exit(1) } // This needs to be after the sniffer Init but before the sniffer Run. if err := droppriv.DropPrivileges(config.ConfigSingleton.RunOptions); err != nil { logp.Critical(err.Error()) os.Exit(1) } return nil }
// Setup packetbeat: registers every enabled protocol plugin, builds the
// ICMP/TCP/UDP layer processors and initializes the sniffer. All failures
// are fatal (logged critical, then os.Exit(1)). Ordering matters:
// privileges are dropped only after the sniffer Init — which may need
// elevated rights to open capture devices — but before the sniffer runs.
func (pb *Packetbeat) Setup(b *beat.Beat) error {
	if err := procs.ProcWatcher.Init(pb.PbConfig.Procs); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	pb.Sniff = new(sniffer.SnifferSetup)

	logp.Debug("main", "Initializing protocol plugins")
	// Register each enabled protocol analyzer; plugins publish through the
	// beat's event channel.
	for proto, plugin := range EnabledProtocolPlugins {
		err := plugin.Init(false, b.Events)
		if err != nil {
			logp.Critical("Initializing plugin %s failed: %v", proto, err)
			os.Exit(1)
		}
		protos.Protos.Register(proto, plugin)
	}

	var err error
	// Layer processors feeding the registered protocol analyzers.
	icmpProc, err := icmp.NewIcmp(false, b.Events)
	if err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	tcpProc, err := tcp.NewTcp(&protos.Protos)
	if err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	udpProc, err := udp.NewUdp(&protos.Protos)
	if err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	pb.over = make(chan bool)

	logp.Debug("main", "Initializing sniffer")
	// icmpProc handles both ICMPv4 and ICMPv6, hence it is passed twice.
	err = pb.Sniff.Init(false, icmpProc, icmpProc, tcpProc, udpProc)
	if err != nil {
		logp.Critical("Initializing sniffer failed: %v", err)
		os.Exit(1)
	}

	// This needs to be after the sniffer Init but before the sniffer Run.
	if err = droppriv.DropPrivileges(config.ConfigSingleton.RunOptions); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	// err is always nil here: every failure above exits the process.
	return err
}
func (l *lumberjackClient) doSerializeEvents(out io.Writer, events []common.MapStr) (uint32, error) { var sequence uint32 for _, event := range events { sequence++ err := l.writeDataFrame(event, sequence, out) if err != nil { logp.Critical("failed to encode event: %v", err) sequence-- //forget this last broken event and continue } } return sequence, nil }
// Run calls the beater Setup and Run methods. In case of errors // during the setup phase, it exits the process. func (b *Beat) Run() error { // Setup beater object err := b.BT.Setup(b) if err != nil { logp.Critical("Setup returned an error: %v", err) os.Exit(1) } // Up to here was the initialization, now about running if cfgfile.IsTestConfig() { // all good, exit with 0 os.Exit(0) } service.BeforeRun() // Callback is called if the processes is asked to stop. // This needs to be called before the main loop is started so that // it can register the signals that stop or query (on Windows) the loop. service.HandleSignals(b.Exit) logp.Info("%s sucessfully setup. Start running.", b.Name) // Run beater specific stuff err = b.BT.Run(b) if err != nil { logp.Critical("Running the beat returned an error: %v", err) } service.Cleanup() logp.Info("Cleaning up %s before shutting down.", b.Name) // Call beater cleanup function err = b.BT.Cleanup(b) if err != nil { logp.Err("Cleanup returned an error: %v", err) } return err }
// Start launches one prospector goroutine per configured prospector and
// then seeds the registrar's in-memory state from the initialization
// events those prospectors emit on the Persist channel. An event with an
// empty Source marks one prospector as finished initializing; when all of
// them have reported, setup is complete. Initialization failures are
// fatal (logged and os.Exit(1)).
func (crawler *Crawler) Start(files []config.ProspectorConfig, eventChan chan *input.FileEvent) {
	pendingProspectorCnt := 0
	crawler.running = true

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, fileconfig := range files {
		logp.Debug("prospector", "File Configs: %v", fileconfig.Paths)

		prospector := &Prospector{
			ProspectorConfig: fileconfig,
			registrar:        crawler.Registrar,
		}
		err := prospector.Init()
		if err != nil {
			logp.Critical("Error in initing prospector: %s", err)
			fmt.Printf("Error in initing prospector: %s", err)
			os.Exit(1)
		}
		go prospector.Run(eventChan)
		pendingProspectorCnt++
	}

	// Now determine which states we need to persist by pulling the events from the prospectors
	// When we hit a nil source a prospector had finished so we decrease the expected events
	logp.Debug("prospector", "Waiting for %d prospectors to initialise", pendingProspectorCnt)

	for event := range crawler.Registrar.Persist {
		if event.Source == "" {
			// Empty source: one prospector finished its initialization.
			pendingProspectorCnt--
			if pendingProspectorCnt == 0 {
				logp.Debug("prospector", "No pending prospectors. Finishing setup")
				break
			}
			continue
		}
		// Record the state that must be persisted for this source.
		crawler.Registrar.state[event.Source] = event
		logp.Debug("prospector", "Registrar will re-save state for %s", event.Source)

		// Bail out early if the crawler was stopped meanwhile.
		if !crawler.running {
			break
		}
	}

	logp.Info("All prospectors initialised with %d states to persist", len(crawler.Registrar.getStateCopy()))
}
func loadCertificate(config *CertificateConfig) (*tls.Certificate, error) { certificate := config.Certificate key := config.Key hasCertificate := certificate != "" hasKey := key != "" switch { case hasCertificate && !hasKey: return nil, ErrCertificateNoKey case !hasCertificate && hasKey: return nil, ErrKeyNoCertificate case !hasCertificate && !hasKey: return nil, nil } certPEM, err := readPEMFile(certificate, config.Passphrase) if err != nil { logp.Critical("Failed reading certificate file %v: %v", certificate, err) return nil, fmt.Errorf("%v %v", err, certificate) } keyPEM, err := readPEMFile(key, config.Passphrase) if err != nil { logp.Critical("Failed reading key file %v: %v", key, err) return nil, fmt.Errorf("%v %v", err, key) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { logp.Critical("Failed loading client certificate", err) return nil, err } return &cert, nil }
// collect collects finished bulk-Events in order and forward processed batches
// to registrar. Reports to registrar are guaranteed to be in same order
// as bulk-Events have been received by the spooler. Returns false when the
// stop signal (p.done) was received while forwarding, true otherwise.
func (p *asyncLogPublisher) collect() bool {
	// Walk the active list head-first so completion is reported in the
	// original receive order.
	for batch := p.active.head; batch != nil; batch = batch.next {
		state := batchStatus(atomic.LoadInt32(&batch.flag))

		// Stop at the first still-pending batch to preserve ordering
		// (unless we are already shutting down).
		if state == batchInProgress && !p.stopping {
			break
		}

		if state == batchFailed {
			// with guaranteed enabled this must not happen.
			msg := "Failed to process batch"
			logp.Critical(msg)
			panic(msg)
		}

		// remove batch from active list
		p.active.head = batch.next
		if batch.next == nil {
			p.active.tail = nil
		}

		// Batches get marked as canceled, if publisher pipeline is shutting down
		// In this case we do not want to send any more batches to the registrar
		if state == batchCanceled {
			p.stopping = true
		}

		if p.stopping {
			logp.Info("Shutting down - No registrar update for potentially published batch.")

			// if in failing state keep cleaning up queue
			continue
		}

		// Tell the registrar that we've successfully publish the last batch events.
		// If registrar queue is blocking (quite unlikely), but stop signal has been
		// received in the meantime (by closing p.done), we do not wait for
		// registrar picking up the current batch. Instead prefer to shut-down and
		// resend the last published batch on next restart, basically taking advantage
		// of send-at-last-once semantics in order to speed up cleanup on shutdown.
		select {
		case <-p.done:
			logp.Info("Shutting down - No registrar update for successfully published batch.")
			return false
		case p.out <- batch.events:
		}
	}
	return true
}
// exitProcess causes the process to exit. If no error is provided then it will // exit with code 0. If an error is provided it will set a non-zero exit code // and log the error logp and to stderr. // // The exit code can controlled if the error is an ExitError. func exitProcess(err error) { code := 0 if ee, ok := err.(ExitError); ok { code = ee.ExitCode } else if err != nil { code = 1 } if err != nil && code != 0 { // logp may not be initialized so log the err to stderr too. logp.Critical("Exiting: %v", err) fmt.Fprintf(os.Stderr, "Exiting: %v\n", err) } os.Exit(code) }
func main() { ab := &ApacheBeat{} beat := beat.NewBeat(Name, Version, ab) beat.CommandLineSetup() beat.LoadConfig() err := ab.Config(beat) if err != nil { logp.Critical("Config error: %v", err) os.Exit(1) } beat.Run() }
func (b *Beat) Start() error { // Additional command line args are used to overwrite config options b.CommandLineSetup() // Loads base config b.LoadConfig() // Configures beat err := b.BT.Config(b) if err != nil { logp.Critical("Config error: %v", err) os.Exit(1) } // Run beat. This calls first beater.Setup, // then beater.Run and beater.Cleanup in the end return b.Run() }
// Check that the file isn't a symlink, mode is regular or file is nil func (f *File) IsRegularFile() bool { if f.File == nil { logp.Critical("Harvester: BUG: f arg is nil") return false } info, e := f.File.Stat() if e != nil { logp.Err("File check fault: stat error: %s", e.Error()) return false } if !info.Mode().IsRegular() { logp.Warn("Harvester: not a regular file: %q %s", info.Mode(), info.Name()) return false } return true }
func (pgsql *Pgsql) pgsqlMessageParser(s *PgsqlStream) (bool, bool) { debugf("pgsqlMessageParser, off=%v", s.parseOffset) var ok, complete bool switch s.parseState { case PgsqlStartState: ok, complete = pgsql.parseMessageStart(s) case PgsqlGetDataState: ok, complete = pgsql.parseMessageData(s) default: logp.Critical("Pgsql invalid parser state") } detailedf("pgsqlMessageParser return: ok=%v, complete=%v, off=%v", ok, complete, s.parseOffset) return ok, complete }
func (c *cmp) PublishEvent( s outputs.Signaler, opts outputs.Options, event common.MapStr, ) error { var err error if err = postToCmp(c, event); err != nil { goto fail } outputs.SignalCompleted(s) return nil fail: if opts.Guaranteed { logp.Critical("Unable to publish events to cmp: %v", err) } outputs.SignalFailed(s, err) return err }
// Initiates and runs a new beat object func Run(name string, version string, bt Beater) *Beat { b := NewBeat(name, version, bt) // Additional command line args are used to overwrite config options b.CommandLineSetup() // Loads base config b.LoadConfig() // Configures beat err := bt.Config(b) if err != nil { logp.Critical("Config error: %v", err) os.Exit(1) } // Run beat. This calls first beater.Setup, // then beater.Run and beater.Cleanup in the end b.Run() return b }
// Setup packetbeat func (pb *Packetbeat) Setup(b *beat.Beat) error { cfg := &pb.PbConfig.Packetbeat if err := procs.ProcWatcher.Init(cfg.Procs); err != nil { logp.Critical(err.Error()) return err } queueSize := defaultQueueSize if b.Config.Shipper.QueueSize != nil { queueSize = *b.Config.Shipper.QueueSize } bulkQueueSize := defaultBulkQueueSize if b.Config.Shipper.BulkQueueSize != nil { bulkQueueSize = *b.Config.Shipper.BulkQueueSize } pb.Pub = publish.NewPublisher(b.Publisher, queueSize, bulkQueueSize) pb.Pub.Start() logp.Debug("main", "Initializing protocol plugins") err := protos.Protos.Init(false, pb.Pub, cfg.Protocols) if err != nil { return fmt.Errorf("Initializing protocol analyzers failed: %v", err) } logp.Debug("main", "Initializing sniffer") if err := pb.setupSniffer(); err != nil { return fmt.Errorf("Initializing sniffer failed: %v", err) } // This needs to be after the sniffer Init but before the sniffer Run. if err := droppriv.DropPrivileges(cfg.RunOptions); err != nil { return err } return nil }
// init packetbeat components func (pb *Packetbeat) init(b *beat.Beat) error { cfg := &pb.Config err := procs.ProcWatcher.Init(cfg.Procs) if err != nil { logp.Critical(err.Error()) return err } queueSize := defaultQueueSize if b.Config.Shipper.QueueSize != nil { queueSize = *b.Config.Shipper.QueueSize } bulkQueueSize := defaultBulkQueueSize if b.Config.Shipper.BulkQueueSize != nil { bulkQueueSize = *b.Config.Shipper.BulkQueueSize } pb.Pub, err = publish.NewPublisher(b.Publisher, queueSize, bulkQueueSize) if err != nil { return fmt.Errorf("Initializing publisher failed: %v", err) } logp.Debug("main", "Initializing protocol plugins") err = protos.Protos.Init(false, pb.Pub, cfg.Protocols) if err != nil { return fmt.Errorf("Initializing protocol analyzers failed: %v", err) } logp.Debug("main", "Initializing sniffer") err = pb.setupSniffer() if err != nil { return fmt.Errorf("Initializing sniffer failed: %v", err) } return nil }