// Run allows the beater to be run as a beat. func (fb *Filebeat) Run(b *beat.Beat) error { var err error config := fb.config // Setup registrar to persist state registrar, err := registrar.New(config.RegistryFile) if err != nil { logp.Err("Could not init registrar: %v", err) return err } // Channel from harvesters to spooler publisherChan := make(chan []*input.FileEvent, 1) // Publishes event to output publisher := publish.New(config.PublishAsync, publisherChan, registrar.Channel, b.Publisher) // Init and Start spooler: Harvesters dump events into the spooler. spooler, err := spooler.New(config, publisherChan) if err != nil { logp.Err("Could not init spooler: %v", err) return err } crawler, err := crawler.New(spooler, config.Prospectors) if err != nil { logp.Err("Could not init crawler: %v", err) return err } // The order of starting and stopping is important. Stopping is inverted to the starting order. // The current order is: registrar, publisher, spooler, crawler // That means, crawler is stopped first. // Start the registrar err = registrar.Start() if err != nil { logp.Err("Could not start registrar: %v", err) } // Stopping registrar will write last state defer registrar.Stop() // Start publisher publisher.Start() // Stopping publisher (might potentially drop items) defer publisher.Stop() // Starting spooler spooler.Start() // Stopping spooler will flush items defer spooler.Stop() err = crawler.Start(registrar.GetStates()) if err != nil { return err } // Stop crawler -> stop prospectors -> stop harvesters defer crawler.Stop() // Blocks progressing. As soon as channel is closed, all defer statements come into play <-fb.done return nil }
// Run allows the beater to be run as a beat. It assembles the pipeline
// (registrar -> publisher -> spooler -> crawler), starts the stages in
// that order, then waits on fb.done (or crawler completion when running
// once) before tearing everything down in reverse via the defers below.
func (fb *Filebeat) Run(b *beat.Beat) error {
	var err error
	config := fb.config

	// waitFinished fires when shutdown should begin; waitEvents gates the
	// deferred spooler shutdown until in-flight events are accounted for.
	waitFinished := newSignalWait()
	waitEvents := newSignalWait()

	// count active events for waiting on shutdown
	wgEvents := &sync.WaitGroup{}
	finishedLogger := newFinishedLogger(wgEvents)

	// Setup registrar to persist state
	registrar, err := registrar.New(config.RegistryFile, finishedLogger)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	// Make sure all events that were published are also forwarded to the
	// registrar, so their state gets persisted.
	registrarChannel := newRegistrarLogger(registrar)

	// Channel from spooler to harvester
	publisherChan := newPublisherChannel()

	// Publishes event to output
	publisher := publisher.New(config.PublishAsync, publisherChan.ch, registrarChannel, b.Publisher)

	// Init and Start spooler: Harvesters dump events into the spooler.
	spooler, err := spooler.New(config, publisherChan)
	if err != nil {
		logp.Err("Could not init spooler: %v", err)
		return err
	}

	// The outlet tracks enqueued events in wgEvents so shutdown can wait
	// for them, and honors fb.done for cancellation.
	crawler, err := crawler.New(newSpoolerOutlet(fb.done, spooler, wgEvents), config.Prospectors)
	if err != nil {
		logp.Err("Could not init crawler: %v", err)
		return err
	}

	// The order of starting and stopping is important. Stopping is inverted to the starting order.
	// The current order is: registrar, publisher, spooler, crawler
	// That means, crawler is stopped first.

	// Start the registrar
	err = registrar.Start()
	if err != nil {
		return fmt.Errorf("Could not start registrar: %v", err)
	}
	// Stopping registrar will write last state
	defer registrar.Stop()

	// Start publisher
	publisher.Start()
	// Stopping publisher (might potentially drop items)
	defer func() {
		// Closes first the registrar logger to make sure not more events arrive at the registrar
		// registrarChannel must be closed first to potentially unblock (pretty unlikely) the publisher
		registrarChannel.Close()
		publisher.Stop()
	}()

	// Starting spooler
	spooler.Start()
	// Stopping spooler will flush items
	defer func() {
		// Wait for all events to be processed or timeout
		waitEvents.Wait()

		// Closes publisher so no further events can be sent
		publisherChan.Close()

		// Stopping spooler
		spooler.Stop()
	}()

	err = crawler.Start(registrar.GetStates(), *once)
	if err != nil {
		return err
	}

	// If run once, add crawler completion check as alternative to done signal
	if *once {
		runOnce := func() {
			logp.Info("Running filebeat once. Waiting for completion ...")
			crawler.WaitForCompletion()
			logp.Info("All data collection completed. Shutting down.")
		}
		waitFinished.Add(runOnce)
	}

	// Add done channel to wait for shutdown signal
	waitFinished.AddChan(fb.done)
	waitFinished.Wait()

	// Stop crawler -> stop prospectors -> stop harvesters
	// Note: waiting for crawlers to stop here in order to install wgEvents.Wait
	//       after all events have been enqueued for publishing. Otherwise wgEvents.Wait
	//       or publisher might panic due to concurrent updates.
	crawler.Stop()

	timeout := fb.config.ShutdownTimeout
	// Checks if on shutdown it should wait for all events to be published
	waitPublished := fb.config.ShutdownTimeout > 0 || *once
	if waitPublished {
		// Wait for registrar to finish writing registry
		waitEvents.Add(withLog(wgEvents.Wait, "Continue shutdown: All enqueued events being published."))
		// Wait for either timeout or all events having been ACKed by outputs.
		if fb.config.ShutdownTimeout > 0 {
			logp.Info("Shutdown output timer started. Waiting for max %v.", timeout)
			waitEvents.Add(withLog(waitDuration(timeout), "Continue shutdown: Time out waiting for events being published."))
		} else {
			waitEvents.AddChan(fb.done)
		}
	}

	// The deferred stops above run now, gated by waitEvents as configured.
	return nil
}
// Run allows the beater to be run as a beat. func (fb *Filebeat) Run(b *beat.Beat) error { var err error config := fb.config var wgEvents *sync.WaitGroup // count active events for waiting on shutdown var finishedLogger publisher.SuccessLogger if fb.config.ShutdownTimeout > 0 { wgEvents = &sync.WaitGroup{} finishedLogger = newFinishedLogger(wgEvents) } // Setup registrar to persist state registrar, err := registrar.New(config.RegistryFile, finishedLogger) if err != nil { logp.Err("Could not init registrar: %v", err) return err } // Channel from harvesters to spooler successLogger := newRegistrarLogger(registrar) publisherChan := newPublisherChannel() // Publishes event to output publisher := publisher.New(config.PublishAsync, publisherChan.ch, successLogger, b.Publisher) // Init and Start spooler: Harvesters dump events into the spooler. spooler, err := spooler.New(config, publisherChan) if err != nil { logp.Err("Could not init spooler: %v", err) return err } crawler, err := crawler.New( newSpoolerOutlet(fb.done, spooler, wgEvents), config.Prospectors) if err != nil { logp.Err("Could not init crawler: %v", err) return err } // The order of starting and stopping is important. Stopping is inverted to the starting order. // The current order is: registrar, publisher, spooler, crawler // That means, crawler is stopped first. // Start the registrar err = registrar.Start() if err != nil { logp.Err("Could not start registrar: %v", err) } // Stopping registrar will write last state defer registrar.Stop() // Start publisher publisher.Start() // Stopping publisher (might potentially drop items) defer publisher.Stop() defer successLogger.Close() // Starting spooler spooler.Start() // Stopping spooler will flush items defer func() { // With harvesters being stopped, optionally wait for all enqueued events being // published and written by registrar before continuing shutdown. 
fb.sigWait.Wait() // continue shutdown publisherChan.Close() spooler.Stop() }() err = crawler.Start(registrar.GetStates()) if err != nil { return err } // Blocks progressing. As soon as channel is closed, all defer statements come into play <-fb.done // Stop crawler -> stop prospectors -> stop harvesters // Note: waiting for crawlers to stop here in order to install wgEvents.Wait // after all events have been enqueued for publishing. Otherwise wgEvents.Wait // or publisher might panic due to concurrent updates. crawler.Stop() timeout := fb.config.ShutdownTimeout if timeout > 0 { logp.Info("Shutdown output timer started. Waiting for max %v.", timeout) // Wait for either timeout or all events having been ACKed by outputs. fb.sigWait.Add(withLog(wgEvents.Wait, "Continue shutdown: All enqueued events being published.")) fb.sigWait.Add(withLog(waitDuration(timeout), "Continue shutdown: Time out waiting for events being published.")) } return nil }