func (publisher *PublisherType) publishEvents(events []common.MapStr) { var ignore []int // indices of events to be removed from events for i, event := range events { // validate some required field if err := filterEvent(event); err != nil { logp.Err("Publishing event failed: %v", err) ignore = append(ignore, i) continue } // update address and geo-ip information. Ignore event // if address is invalid or event is found to be a duplicate ok := updateEventAddresses(publisher, event) if !ok { ignore = append(ignore, i) continue } // add additional meta data event["shipper"] = publisher.name if len(publisher.tags) > 0 { event["tags"] = publisher.tags } if logp.IsDebug("publish") { PrintPublishEvent(event) } } // return if no event is left if len(ignore) == len(events) { return } // remove invalid events. // TODO: is order important? Removal can be turned into O(len(ignore)) by // copying last element into idx and doing // events=events[:len(events)-len(ignore)] afterwards // Alternatively filtering could be implemented like: // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating for i := len(ignore) - 1; i >= 0; i-- { idx := ignore[i] events = append(events[:idx], events[idx+1:]...) } // get timestamp of first event for outputer ts := events[0]["timestamp"].(common.Time) // add transaction if !publisher.disabled { for _, out := range publisher.Output { err := out.BulkPublish(time.Time(ts), events) if err != nil { logp.Err("Fail to publish event type on output %s: %v", out, err) } } } }
func (p *preprocessor) onMessage(m message) { publisher := p.pub single := false events := m.events if m.event != nil { single = true events = []common.MapStr{m.event} } var ignore []int // indices of events to be removed from events debug("preprocessor") for i, event := range events { // validate some required field if err := filterEvent(event); err != nil { logp.Err("Publishing event failed: %v", err) ignore = append(ignore, i) continue } // update address and geo-ip information. Ignore event // if address is invalid or event is found to be a duplicate ok := updateEventAddresses(publisher, event) if !ok { ignore = append(ignore, i) continue } // add additional Beat meta data event["beat"] = common.MapStr{ "name": publisher.name, "hostname": publisher.hostname, "version": publisher.version, } if len(publisher.tags) > 0 { event["tags"] = publisher.tags } if logp.IsDebug("publish") { PrintPublishEvent(event) } } // return if no event is left if len(ignore) == len(events) { debug("no event left, complete send") outputs.SignalCompleted(m.context.signal) return } // remove invalid events. // TODO: is order important? Removal can be turned into O(len(ignore)) by // copying last element into idx and doing // events=events[:len(events)-len(ignore)] afterwards // Alternatively filtering could be implemented like: // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating for i := len(ignore) - 1; i >= 0; i-- { idx := ignore[i] debug("remove event[%v]", idx) events = append(events[:idx], events[idx+1:]...) } if publisher.disabled { debug("publisher disabled") outputs.SignalCompleted(m.context.signal) return } debug("preprocessor forward") if single { p.handler.onMessage(message{context: m.context, event: events[0]}) } else { p.handler.onMessage(message{context: m.context, events: events}) } }
func (publisher *PublisherType) publishEvent(event common.MapStr) error { // the timestamp is mandatory ts, ok := event["timestamp"].(common.Time) if !ok { return errors.New("Missing 'timestamp' field from event.") } // the count is mandatory err := event.EnsureCountField() if err != nil { return err } // the type is mandatory _, ok = event["type"].(string) if !ok { return errors.New("Missing 'type' field from event.") } var src_server, dst_server string src, ok := event["src"].(*common.Endpoint) if ok { src_server = publisher.GetServerName(src.Ip) event["client_ip"] = src.Ip event["client_port"] = src.Port event["client_proc"] = src.Proc event["client_server"] = src_server delete(event, "src") } dst, ok := event["dst"].(*common.Endpoint) if ok { dst_server = publisher.GetServerName(dst.Ip) event["ip"] = dst.Ip event["port"] = dst.Port event["proc"] = dst.Proc event["server"] = dst_server delete(event, "dst") } if publisher.IgnoreOutgoing && dst_server != "" && dst_server != publisher.name { // duplicated transaction -> ignore it logp.Debug("publish", "Ignore duplicated transaction on %s: %s -> %s", publisher.name, src_server, dst_server) return nil } event["shipper"] = publisher.name if len(publisher.tags) > 0 { event["tags"] = publisher.tags } if publisher.GeoLite != nil { real_ip, exists := event["real_ip"] if exists && len(real_ip.(string)) > 0 { loc := publisher.GeoLite.GetLocationByIP(real_ip.(string)) if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 { event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude) } } else { if len(src_server) == 0 && src != nil { // only for external IP addresses loc := publisher.GeoLite.GetLocationByIP(src.Ip) if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 { event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude) } } } } if logp.IsDebug("publish") { PrintPublishEvent(event) } // add transaction has_error := false if !publisher.disabled { for i := 0; i < 
len(publisher.Output); i++ { err := publisher.Output[i].PublishEvent(time.Time(ts), event) if err != nil { logp.Err("Fail to publish event type on output %s: %v", publisher.Output[i], err) has_error = true } } } if has_error { return errors.New("Fail to publish event") } return nil }
// main is the packetbeat entry point: it parses the CLI flags, loads and
// merges the YAML configuration, initializes logging, outputs, process
// watching, protocol/filter plugins and the sniffer, drops privileges, and
// then runs the sniffer until it stops or a SIGINT/SIGTERM arrives.
func main() {
	// Use our own FlagSet, because some libraries pollute the global one
	var cmdLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)

	// configfile := cmdLine.String("c", "packetbeat.dev.yml", "Configuration file")
	configfile := cmdLine.String("c", "/etc/packetbeat/packetbeat.yml", "Configuration file")
	file := cmdLine.String("I", "", "file")
	loop := cmdLine.Int("l", 1, "Loop file. 0 - loop forever")
	debugSelectorsStr := cmdLine.String("d", "", "Enable certain debug selectors")
	oneAtAtime := cmdLine.Bool("O", false, "Read packets one at a time (press Enter)")
	toStderr := cmdLine.Bool("e", false, "Output to stdout instead of syslog")
	topSpeed := cmdLine.Bool("t", false, "Read packets as fast as possible, without sleeping")
	publishDisabled := cmdLine.Bool("N", false, "Disable actual publishing for testing")
	verbose := cmdLine.Bool("v", false, "Log at INFO level")
	printVersion := cmdLine.Bool("version", false, "Print version and exit")
	memprofile := cmdLine.String("memprofile", "", "Write memory profile to this file")
	cpuprofile := cmdLine.String("cpuprofile", "", "Write cpu profile to file")
	dumpfile := cmdLine.String("dump", "", "Write all captured packets to this libpcap file.")
	testConfig := cmdLine.Bool("test", false, "Test configuration and exit.")

	cmdLine.Parse(os.Args[1:])

	sniff := new(sniffer.SnifferSetup)

	if *printVersion {
		fmt.Printf("Packetbeat version %s (%s)\n", Version, runtime.GOARCH)
		return
	}

	// default to errors-only; -v raises to INFO, -d (below) to DEBUG
	logLevel := logp.LOG_ERR
	if *verbose {
		logLevel = logp.LOG_INFO
	}

	debugSelectors := []string{}
	if len(*debugSelectorsStr) > 0 {
		debugSelectors = strings.Split(*debugSelectorsStr, ",")
		logLevel = logp.LOG_DEBUG
	}

	var err error

	// read and parse the YAML configuration into the global singleton
	filecontent, err := ioutil.ReadFile(*configfile)
	if err != nil {
		fmt.Printf("Fail to read %s: %s. Exiting.\n", *configfile, err)
		return
	}
	if err = yaml.Unmarshal(filecontent, &config.ConfigSingleton); err != nil {
		fmt.Printf("YAML config parsing failed on %s: %s. Exiting.\n", *configfile, err)
		return
	}

	// CLI -d selectors win over the ones from the config file
	if len(debugSelectors) == 0 {
		debugSelectors = config.ConfigSingleton.Logging.Selectors
	}
	logp.LogInit(logp.Priority(logLevel), "", !*toStderr, true, debugSelectors)

	if !logp.IsDebug("stdlog") {
		// disable standard logging by default
		log.SetOutput(ioutil.Discard)
	}

	// CLI flags over-riding config
	if *topSpeed {
		config.ConfigSingleton.Interfaces.TopSpeed = true
	}
	if len(*file) > 0 {
		config.ConfigSingleton.Interfaces.File = *file
	}
	config.ConfigSingleton.Interfaces.Loop = *loop
	config.ConfigSingleton.Interfaces.OneAtATime = *oneAtAtime
	if len(*dumpfile) > 0 {
		config.ConfigSingleton.Interfaces.Dumpfile = *dumpfile
	}

	logp.Debug("main", "Configuration %s", config.ConfigSingleton)
	logp.Debug("main", "Initializing output plugins")
	if err = publisher.Publisher.Init(*publishDisabled, config.ConfigSingleton.Output, config.ConfigSingleton.Shipper); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	if err = procs.ProcWatcher.Init(config.ConfigSingleton.Procs); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	logp.Debug("main", "Initializing protocol plugins")
	for proto, plugin := range EnabledProtocolPlugins {
		logp.Debug("protos", proto.String())
		err = plugin.Init(false, publisher.Publisher.Queue)
		if err != nil {
			logp.Critical("Initializing plugin %s failed: %v", proto, err)
			os.Exit(1)
		}
		protos.Protos.Register(proto, plugin)
	}

	if err = tcp.TcpInit(); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	// signaled by the sniffer goroutine when its main loop returns
	over := make(chan bool)

	logp.Debug("main", "Initializing filters plugins")
	for filter, plugin := range EnabledFilterPlugins {
		filters.Filters.Register(filter, plugin)
	}
	filters_plugins, err := LoadConfiguredFilters(config.ConfigSingleton.Filter)
	if err != nil {
		logp.Critical("Error loading filters plugins: %v", err)
		os.Exit(1)
	}
	logp.Debug("main", "Filters plugins order: %v", filters_plugins)
	var afterInputsQueue chan common.MapStr
	if len(filters_plugins) > 0 {
		// run the configured filters between the sniffer and the publisher
		runner := NewFilterRunner(publisher.Publisher.Queue, filters_plugins)
		go func() {
			err := runner.Run()
			if err != nil {
				logp.Critical("Filters runner failed: %v", err)
				// shutting down
				sniff.Stop()
			}
		}()
		afterInputsQueue = runner.FiltersQueue
	} else {
		// short-circuit the runner
		afterInputsQueue = publisher.Publisher.Queue
	}

	logp.Debug("main", "Initializing sniffer")
	err = sniff.Init(false, afterInputsQueue)
	if err != nil {
		logp.Critical("Initializing sniffer failed: %v", err)
		os.Exit(1)
	}

	// This needs to be after the sniffer Init but before the sniffer Run.
	if err = droppriv.DropPrivileges(config.ConfigSingleton.RunOptions); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	// Up to here was the initialization, now about running
	if *testConfig {
		// all good, exit with 0
		os.Exit(0)
	}

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	// run the sniffer in background
	go func() {
		err := sniff.Run()
		if err != nil {
			logp.Critical("Sniffer main loop failed: %v", err)
			os.Exit(1)
		}
		over <- true
	}()

	// On ^C or SIGTERM, gracefully stop the sniffer
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigc
		logp.Debug("signal", "Received sigterm/sigint, stopping")
		sniff.Stop()
	}()

	if !*toStderr {
		logp.Info("Startup successful, sending output only to syslog from now on")
		logp.SetToStderr(false)
	}

	logp.Debug("main", "Waiting for the sniffer to finish")

	// Wait for the goroutines to finish
	for _ = range over {
		if !sniff.IsAlive() {
			break
		}
	}

	logp.Debug("main", "Cleanup")

	if *memprofile != "" {
		// wait for all TCP streams to expire
		time.Sleep(tcp.TCP_STREAM_EXPIRY * 1.2)
		tcp.PrintTcpMap()

		runtime.GC()

		writeHeapProfile(*memprofile)

		debugMemStats()
	}
}