// PublishEvent forwards a single event. On failure PublishEvent tries to reconnect. func (f *FailOverConnectionMode) PublishEvent( signaler outputs.Signaler, event common.MapStr, ) error { fails := 0 var err error for !f.closed && (f.maxAttempts == 0 || fails < f.maxAttempts) { if err := f.connect(f.active); err != nil { logp.Info("Connecting error publishing events (retrying): %s", err) fails++ time.Sleep(f.waitRetry) continue } if err := f.conns[f.active].PublishEvent(event); err != nil { logp.Info("Error publishing events (retrying): %s", err) fails++ continue } outputs.SignalCompleted(signaler) return nil } outputs.SignalFailed(signaler, err) return nil }
func (restart *ProspectorResume) Scan(files []cfg.FileConfig, persist map[string]*FileState, eventChan chan *FileEvent) { pendingProspectorCnt := 0 // Prospect the globs/paths given on the command line and launch harvesters for _, fileconfig := range files { prospector := &Prospector{FileConfig: fileconfig} go prospector.Prospect(restart, eventChan) pendingProspectorCnt++ } // Now determine which states we need to persist by pulling the events from the prospectors // When we hit a nil source a prospector had finished so we decrease the expected events logp.Info("filebeat", "Waiting for %d prospectors to initialise", pendingProspectorCnt) for event := range restart.Persist { if event.Source == nil { pendingProspectorCnt-- if pendingProspectorCnt == 0 { break } continue } persist[*event.Source] = event logp.Info("filebeat", "Registrar will re-save state for %s", *event.Source) } logp.Info("filebeat", "All prospectors initialised with %d states to persist", len(persist)) }
func (r *Registrar) Run() { logp.Info("Starting Registrar") r.running = true // Writes registry on shutdown defer r.writeRegistry() for { select { case <-r.done: logp.Info("Ending Registrar") return // Treats new log files to persist with higher priority then new events case state := <-r.Persist: r.State[*state.Source] = state logp.Debug("prospector", "Registrar will re-save state for %s", *state.Source) case events := <-r.Channel: r.processEvents(events) } if e := r.writeRegistry(); e != nil { // REVU: but we should panic, or something, right? logp.Err("Writing of registry returned error: %v. Continuing..", e) } } }
func (r *Registrar) fetchState(filePath string, fileInfo os.FileInfo) (int64, bool) { // Check if there is a state for this file lastState, isFound := r.GetFileState(filePath) if isFound && input.IsSameFile(filePath, fileInfo) { logp.Debug("registar", "Same file as before found. Fetch the state and persist it.") // We're resuming - throw the last state back downstream so we resave it // And return the offset - also force harvest in case the file is old and we're about to skip it r.Persist <- lastState return lastState.Offset, true } if previous, err := r.getPreviousFile(filePath, fileInfo); err == nil { // File has rotated between shutdown and startup // We return last state downstream, with a modified event source with the new file name // And return the offset - also force harvest in case the file is old and we're about to skip it logp.Info("Detected rename of a previously harvested file: %s -> %s", previous, filePath) lastState, _ := r.GetFileState(previous) lastState.Source = &filePath r.Persist <- lastState return lastState.Offset, true } if isFound { logp.Info("Not resuming rotated file: %s", filePath) } // New file so just start from an automatic position return 0, false }
// PublishEvent forwards a single event. On failure PublishEvent tries to reconnect. func (s *SingleConnectionMode) PublishEvent( signaler outputs.Signaler, event common.MapStr, ) error { fails := 0 var err error for !s.closed && (s.maxAttempts == 0 || fails < s.maxAttempts) { if err = s.connect(); err != nil { logp.Info("Connecting error publishing event (retrying): %s", err) fails++ time.Sleep(s.waitRetry) continue } if err := s.conn.PublishEvent(event); err != nil { logp.Info("Error publishing event (retrying): %s", err) fails++ continue } outputs.SignalCompleted(signaler) return nil } outputs.SignalFailed(signaler, err) return nil }
func (d *Dockerbeat) exportContainerStats(container docker.APIContainers) error { statsC := make(chan *docker.Stats) done := make(chan bool) errC := make(chan error, 1) statsOptions := docker.StatsOptions{container.ID, statsC, false, done, -1} go func() { logp.Info("1") errC <- d.dockerClient.Stats(statsOptions) logp.Info("2") close(errC) }() go func() { stats := <-statsC events := []common.MapStr{ d.getContainerEvent(&container, stats), d.getCpuEvent(&container, stats), d.getMemoryEvent(&container, stats), d.getNetworkEvent(&container, stats), } d.events.PublishEvents(events) }() return nil }
func (d *Dockerbeat) Run(b *beat.Beat) error { d.isAlive = true var err error for d.isAlive { containers, err := d.dockerClient.ListContainers(docker.ListContainersOptions{}) if err == nil { for _, container := range containers { logp.Info("3") d.exportContainerStats(container) logp.Info("4") } logp.Info("running! %s", d.socket) } else { logp.Err("Cannot get container list: %d", err) } time.Sleep(d.period) } return err }
func (p *Prospector) calculateResume(file string, fileinfo os.FileInfo, resume *ProspectorResume) (int64, bool) { last_state, is_found := resume.Files[file] if is_found && IsSameFile(file, fileinfo, last_state) { // We're resuming - throw the last state back downstream so we resave it // And return the offset - also force harvest in case the file is old and we're about to skip it resume.Persist <- last_state return last_state.Offset, true } if previous := p.isFileRenamedResumelist(file, fileinfo, resume.Files); previous != "" { // File has rotated between shutdown and startup // We return last state downstream, with a modified event source with the new file name // And return the offset - also force harvest in case the file is old and we're about to skip it logp.Info("prospector", "Detected rename of a previously harvested file: %s -> %s", previous, file) last_state := resume.Files[previous] last_state.Source = &file resume.Persist <- last_state return last_state.Offset, true } if is_found { logp.Info("prospector", "Not resuming rotated file: %s", file) } // New file so just start from an automatic position return 0, false }
// bulkCollectPublishFails checks per item errors returning all events // to be tried again due to error code returned for that items. If indexing an // event failed due to some error in the event itself (e.g. does not respect mapping), // the event will be dropped. func bulkCollectPublishFails( res *BulkResult, events []common.MapStr, ) []common.MapStr { failed := events[:0] for i, rawItem := range res.Items { status, msg, err := itemStatus(rawItem) if err != nil { logp.Info("Failed to parse bulk reponse for item (%i): %v", i, err) // add index if response parse error as we can not determine success/fail failed = append(failed, events[i]) continue } if status < 300 { continue // ok value } if status < 500 && status != 429 { // hard failure, don't collect logp.Warn("Can not index event (status=%v): %v", status, msg) continue } debug("Failed to insert data(%v): %v", i, events[i]) logp.Info("Bulk item insert failed (i=%v, status=%v): %v", i, status, msg) failed = append(failed, events[i]) } return failed }
func (proc *ProcessesWatcher) Init(config ProcsConfig) error { proc.proc_prefix = "" proc.PortProcMap = make(map[uint16]PortProcMapping) proc.LastMapUpdate = time.Now() proc.ReadFromProc = config.Enabled if proc.ReadFromProc { if runtime.GOOS != "linux" { proc.ReadFromProc = false logp.Info("Disabled /proc/ reading because not on linux") } else { logp.Info("Process matching enabled") } } if config.Max_proc_read_freq == 0 { proc.MaxReadFreq = 10 * time.Millisecond } else { proc.MaxReadFreq = time.Duration(config.Max_proc_read_freq) * time.Millisecond } if config.Refresh_pids_freq == 0 { proc.RefreshPidsFreq = 1 * time.Second } else { proc.RefreshPidsFreq = time.Duration(config.Refresh_pids_freq) * time.Millisecond } // Read the local IP addresses var err error proc.LocalAddrs, err = common.LocalIpAddrs() if err != nil { logp.Err("Error getting local IP addresses: %s", err) proc.LocalAddrs = []net.IP{} } if proc.ReadFromProc { for _, procConfig := range config.Monitored { grepper := procConfig.Cmdline_grep if len(grepper) == 0 { grepper = procConfig.Process } p, err := NewProcess(proc, procConfig.Process, grepper, time.Tick(proc.RefreshPidsFreq)) if err != nil { logp.Err("NewProcess: %s", err) } else { proc.Processes = append(proc.Processes, p) } } } return nil }
// Initialize Elasticsearch as output.
//
// Builds the connection URL from the mothership config, applies defaults for
// the index name ("packetbeat"), topology expiration (15s), flush interval
// (1s) and bulk size (10000), enables the _ttl mapping, and starts the
// background sender goroutine.
func (out *ElasticsearchOutput) Init(config outputs.MothershipConfig, topology_expire int) error {
	if len(config.Protocol) == 0 {
		config.Protocol = "http"
	}

	url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path)

	con := NewElasticsearch(url, config.Username, config.Password)
	out.Conn = con

	if config.Index != "" {
		out.Index = config.Index
	} else {
		out.Index = "packetbeat"
	}

	// Topology expiration is stored in milliseconds; the parameter is seconds.
	out.TopologyExpire = 15000
	if topology_expire != 0 {
		out.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec
	}

	out.FlushInterval = 1000 * time.Millisecond
	if config.Flush_interval != nil {
		out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond
	}
	out.BulkMaxSize = 10000
	if config.Bulk_size != nil {
		out.BulkMaxSize = *config.Bulk_size
	}

	// Install the _ttl mapping so topology entries expire server-side.
	err := out.EnableTTL()
	if err != nil {
		logp.Err("Fail to set _ttl mapping: %s", err)
		return err
	}

	// Buffered queue decouples publishers from the sender goroutine.
	out.sendingQueue = make(chan BulkMsg, 1000)
	go out.SendMessagesGoroutine()

	logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", url)
	logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index)
	logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000)
	if out.FlushInterval > 0 {
		logp.Info("[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.", out.FlushInterval, out.BulkMaxSize)
	} else {
		logp.Info("[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.")
	}

	return nil
}
// initOffset finds the current offset of the file and sets it in the harvester as position func (h *Harvester) initOffset() { // get current offset in file offset, _ := h.file.Seek(0, os.SEEK_CUR) if h.Offset > 0 { logp.Info("harvester", "harvest: %q position:%d (offset snapshot:%d)", h.Path, h.Offset, offset) } else if cfg.CmdlineOptions.TailOnRotate { logp.Info("harvester", "harvest: (tailing) %q (offset snapshot:%d)", h.Path, offset) } else { logp.Info("harvester", "harvest: %q (offset snapshot:%d)", h.Path, offset) } h.Offset = offset }
// publish is used to publish events using the configured protocol client. // It provides general error handling and back off support used on failed // send attempts. To be used by PublishEvent and PublishEvents. // The send callback will try to progress sending traffic and returns kind of // progress made in ok or resetFail. If ok is set to true, send finished // processing events. If ok is false but resetFail is set, send was partially // successful. If send was partially successful, the fail counter is reset thus up // to maxAttempts send attempts without any progress might be executed. func (s *SingleConnectionMode) publish( signaler outputs.Signaler, send func() (ok bool, resetFail bool), ) error { fails := 0 var backoffCount uint var err error for !s.closed && (s.maxAttempts == 0 || fails < s.maxAttempts) { ok := false resetFail := false if err := s.connect(); err != nil { logp.Info("Connecting error publishing events (retrying): %s", err) goto sendFail } ok, resetFail = send() if !ok { goto sendFail } outputs.SignalCompleted(signaler) return nil sendFail: fails++ if resetFail { fails = 0 } if s.maxAttempts > 0 && fails == s.maxAttempts { // max number of attempts reached break } logp.Info("send fail") backoff := time.Duration(int64(s.waitRetry) * (1 << backoffCount)) if backoff > s.maxWaitRetry { backoff = s.maxWaitRetry } else { backoffCount++ } logp.Info("backoff retry: %v", backoff) time.Sleep(backoff) } outputs.SignalFailed(signaler, err) return nil }
// SafeFileRotate safely rotates an existing file under path and replaces it with the tempfile func SafeFileRotate(path, tempfile string) error { old := path + ".old" var e error if e = os.Rename(path, old); e != nil { logp.Info("rotate: rename of %s to %s - %s", path, old, e) return e } if e = os.Rename(tempfile, path); e != nil { logp.Info("rotate: rename of %s to %s - %s", tempfile, path, e) return e } return nil }
func LoadGeoIPData(config Geoip) *libgeo.GeoIP { geoipPaths := []string{} if config.Paths != nil { geoipPaths = *config.Paths } if len(geoipPaths) == 0 { logp.Info("GeoIP disabled: No paths were set under output.geoip.paths") // disabled return nil } // look for the first existing path var geoipPath string for _, path := range geoipPaths { fi, err := os.Lstat(path) if err != nil { logp.Err("GeoIP path could not be loaded: %s", path) continue } if fi.Mode()&os.ModeSymlink == os.ModeSymlink { // follow symlink geoipPath, err = filepath.EvalSymlinks(path) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) return nil } } else { geoipPath = path } break } if len(geoipPath) == 0 { logp.Warn("Couldn't load GeoIP database") return nil } geoLite, err := libgeo.Load(geoipPath) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) } logp.Info("Loaded GeoIP data from: %s", geoipPath) return geoLite }
func (r *Registrar) Init() error { // Init state r.Persist = make(chan *FileState) r.State = make(map[string]*FileState) r.Channel = make(chan []*FileEvent, 1) // Set to default in case it is not set if r.registryFile == "" { r.registryFile = cfg.DefaultRegistryFile } // Make sure the directory where we store the registryFile exists absPath, err := filepath.Abs(r.registryFile) if err != nil { return fmt.Errorf("Failed to get the absolute path of %s: %v", r.registryFile, err) } r.registryFile = absPath // Create directory if it does not already exist. registryPath := filepath.Dir(r.registryFile) err = os.MkdirAll(registryPath, 0755) if err != nil { return fmt.Errorf("Failed to created registry file dir %s: %v", registryPath, err) } logp.Info("Registry file set to: %s", r.registryFile) return nil }
// SafeFileRotate safely rotates an existing file under path and replaces it with the tempfile func SafeFileRotate(path, tempfile string) error { if e := os.Rename(tempfile, path); e != nil { logp.Info("registry rotate: rename of %s to %s - %s", tempfile, path, e) return e } return nil }
func (h *Harvester) open() *os.File { // Special handling that "-" means to read from standard input if h.Path == "-" { h.file = os.Stdin return h.file } for { var err error h.file, err = os.Open(h.Path) if err != nil { // retry on failure. logp.Info("harvester", "Failed opening %s: %s", h.Path, err) time.Sleep(5 * time.Second) } else { break } } file := &File{ File: h.file, } // Check we are not following a rabbit hole (symlinks, etc.) if !file.IsRegularFile() { panic(fmt.Errorf("Harvester file error")) } h.setFileOffset() return h.file }
// Starts scanning through all the file paths and fetch the related files. Start a harvester for each file func (p *Prospector) Run(spoolChan chan *input.FileEvent) { p.running = true logp.Info("Starting prospector of type: %v", p.ProspectorConfig.Harvester.InputType) switch p.ProspectorConfig.Harvester.InputType { case cfg.StdinInputType: p.stdinRun(spoolChan) return case cfg.LogInputType: p.logRun(spoolChan) return } logp.Info("Invalid prospector type: %v") }
func (eb *Winlogbeat) Setup(b *beat.Beat) error { eb.beat = b eb.client = b.Events eb.done = make(chan struct{}) var err error eb.checkpoint, err = checkpoint.NewCheckpoint(eb.config.Winlogbeat.RegistryFile, 10, 5*time.Second) if err != nil { return err } if eb.config.Winlogbeat.Metrics.BindAddress != "" { bindAddress := eb.config.Winlogbeat.Metrics.BindAddress sock, err := net.Listen("tcp", bindAddress) if err != nil { return err } go func() { logp.Info("Metrics hosted at http://%s/debug/vars", bindAddress) err := http.Serve(sock, nil) if err != nil { logp.Warn("Unable to launch HTTP service for metrics. %v", err) return } }() } return nil }
// LoadConfig inits the config file and reads the default config information
// into Beat.Config. It exits the process in case of errors.
func (b *Beat) LoadConfig() {
	err := cfgfile.Read(&b.Config, "")
	if err != nil {
		// logging not yet initialized, so using fmt.Printf
		fmt.Printf("Loading config file error: %v\n", err)
		os.Exit(1)
	}

	err = logp.Init(b.Name, &b.Config.Logging)
	if err != nil {
		fmt.Printf("Error initializing logging: %v\n", err)
		os.Exit(1)
	}

	// Disable stderr logging if requested by cmdline flag
	logp.SetStderr()

	logp.Debug("beat", "Initializing output plugins")

	// Logged via both fmt and logp so the error is visible regardless of
	// logging configuration.
	if err := publisher.Publisher.Init(b.Name, b.Config.Output, b.Config.Shipper); err != nil {
		fmt.Printf("Error Initialising publisher: %v\n", err)
		logp.Critical(err.Error())
		os.Exit(1)
	}

	b.Events = publisher.Publisher.Client()

	logp.Info("Init Beat: %s; Version: %s", b.Name, b.Version)
}
// Open opens the Windows event log named el.name (optionally on the remote
// host el.uncServerPath) and positions the reader at recordNumber.
//
// If recordNumber lies outside the log's current [oldest, newest] range, or
// the log is empty, reading starts at the oldest available record instead.
// ignoreFirst is set when resuming at an already-read record so that record
// is skipped on the first read.
func (el *eventLog) Open(recordNumber uint32) error {
	// If uncServerPath is nil the local computer is used.
	var uncServerPath *uint16
	var err error
	if el.uncServerPath != "" {
		uncServerPath, err = syscall.UTF16PtrFromString(el.uncServerPath)
		if err != nil {
			return err
		}
	}

	providerName, err := syscall.UTF16PtrFromString(el.name)
	if err != nil {
		return err
	}

	detailf("%s Open(recordNumber=%d) calling openEventLog(uncServerPath=%s, providerName=%s)", el.logPrefix, recordNumber, el.uncServerPath, el.name)
	handle, err := openEventLog(uncServerPath, providerName)
	if err != nil {
		return err
	}
	numRecords, err := getNumberOfEventLogRecords(handle)
	if err != nil {
		return err
	}

	var oldestRecord, newestRecord uint32
	if numRecords > 0 {
		// Assume we can resume at the requested record, skipping it since it
		// was already read.
		el.recordNumber = recordNumber
		el.ignoreFirst = true

		oldestRecord, err = getOldestEventLogRecord(handle)
		if err != nil {
			return err
		}
		newestRecord = oldestRecord + numRecords - 1

		// Requested record has been purged or never existed: fall back to the
		// oldest record and do not skip it.
		if recordNumber < oldestRecord || recordNumber > newestRecord {
			el.recordNumber = oldestRecord
			el.ignoreFirst = false
		}
	} else {
		// Empty log: nothing to resume.
		// NOTE(review): el.seek is unconditionally set to true below, so the
		// false written here is immediately overwritten — confirm intent.
		el.recordNumber = 0
		el.seek = false
		el.ignoreFirst = false
	}

	logp.Info("%s contains %d records. Record number range [%d, %d]. Starting "+
		"at %d (ignoringFirst=%t)", el.logPrefix, numRecords, oldestRecord,
		newestRecord, el.recordNumber, el.ignoreFirst)

	el.seek = true
	el.handle = handle
	el.readBuf = make([]byte, maxEventBufferSize)

	// TODO: Start with this buffer smaller and grow it when needed.
	el.formatBuf = make([]byte, maxFormatMessageBufferSize)
	return nil
}
// Fetches and merges all config files given by configDir. All are put into one config object func (config *Config) FetchConfigs() { configDir := config.Filebeat.ConfigDir // If option not set, do nothing if configDir == "" { return } // Check if optional configDir is set to fetch additional config files logp.Info("Additional config files are fetched from: %s", configDir) configFiles, err := getConfigFiles(configDir) if err != nil { log.Fatal("Could not use config_dir of: ", configDir, err) } err = mergeConfigFiles(configFiles, config) if err != nil { log.Fatal("Error merging config files: ", err) } if len(config.Filebeat.Prospectors) == 0 { log.Fatalf("No paths given. What files do you want me to watch?") } }
func (ab *AmqpBeat) Run(b *beat.Beat) error { logp.Info("Running...") serverURI := ab.RbConfig.AmqpInput.ServerURI ab.exposeMetrics() conn, err := amqp.Dial(*serverURI) if err != nil { logp.Err("Failed to connect to RabbitMQ at '%s': %v", *serverURI, err) return err } defer conn.Close() ab.handleDisconnect(conn) ch, err := conn.Channel() if err != nil { logp.Err("Failed to open RabbitMQ channel: %v", err) return err } defer ch.Close() ab.runPipeline(b, ch) return nil }
func (eb *Winlogbeat) Config(b *beat.Beat) error { // Read configuration. err := cfgfile.Read(&eb.config, "") if err != nil { return fmt.Errorf("Error reading configuration file. %v", err) } // Validate configuration. err = eb.config.Winlogbeat.Validate() if err != nil { return fmt.Errorf("Error validating configuration file. %v", err) } debugf("Configuration validated. config=%v", eb.config) // Registry file grooming. if eb.config.Winlogbeat.RegistryFile == "" { eb.config.Winlogbeat.RegistryFile = config.DefaultRegistryFile } eb.config.Winlogbeat.RegistryFile, err = filepath.Abs( eb.config.Winlogbeat.RegistryFile) if err != nil { return fmt.Errorf("Error getting absolute path of registry file %s. %v", eb.config.Winlogbeat.RegistryFile, err) } logp.Info("State will be read from and persisted to %s", eb.config.Winlogbeat.RegistryFile) return nil }
// handleReadlineError handles error which are raised during reading file. // // If error is EOF, it will check for: // * File truncated // * Older then ignore_older // * General file error // // If none of the above cases match, no error will be returned and file is kept open // // In case of a general error, the error itself is returned func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error { if err != io.EOF || !h.file.Continuable() { logp.Err("Unexpected state reading from %s; error: %s", h.Path, err) return err } // Refetch fileinfo to check if the file was truncated or disappeared. // Errors if the file was removed/rotated after reading and before // calling the stat function info, statErr := h.file.Stat() if statErr != nil { logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr) return statErr } // Handle fails if file was truncated if info.Size() < h.Offset { seeker, ok := h.file.(io.Seeker) if !ok { logp.Err("Can not seek source") return err } logp.Debug("harvester", "File was truncated as offset (%s) > size (%s). Begin reading file from offset 0: %s", h.Offset, info.Size(), h.Path) h.Offset = 0 seeker.Seek(h.Offset, os.SEEK_SET) return nil } age := time.Since(lastTimeRead) if age > h.ProspectorConfig.IgnoreOlderDuration { // If the file hasn't change for longer the ignore_older, harvester stops // and file handle will be closed. return fmt.Errorf("Stop harvesting as file is older then ignore_older: %s; Last change was: %s ", h.Path, age) } if h.Config.ForceCloseFiles { // Check if the file name exists (see #93) _, statErr := os.Stat(h.file.Name()) // Error means file does not exist. If no error, check if same file. If not close as rotated. 
if statErr != nil || !input.IsSameFile(h.file.Name(), info) { logp.Info("Force close file: %s; error: %s", h.Path, statErr) // Return directly on windows -> file is closing return fmt.Errorf("Force closing file: %s", h.Path) } } if err != io.EOF { logp.Err("Unexpected state reading from %s; error: %s", h.Path, err) } logp.Debug("harvester", "End of file reached: %s; Backoff now.", h.Path) // Do nothing in case it is just EOF, keep reading the file after backing off h.backOff() return nil }
func (m *LoadBalancerMode) onFail(msg eventsMessage, err error) { logp.Info("Error publishing events (retrying): %s", err) if ok := m.forwardEvent(m.retries, msg); !ok { outputs.SignalFailed(msg.signaler, err) } }
// loadState fetches the previous reading state from the configure RegistryFile file // The default file is .filebeat file which is stored in the same path as the binary is running func (r *Registrar) LoadState() { if existing, e := os.Open(r.registryFile); e == nil { defer existing.Close() logp.Info("Loading registrar data from %s", r.registryFile) decoder := json.NewDecoder(existing) decoder.Decode(&r.State) } }
// Stop initiates shutdown of Winlogbeat by setting the shared atomic stop
// flag; the run loop polls this flag and exits. Safe to call more than once.
func (eb *Winlogbeat) Stop() {
	logp.Info("Initiating shutdown, please wait.")
	// TODO: Remove atomic bool and use a channel to signal shutdown. Caution:
	// Stop() can be invoked more than once on Windows when you Ctrl+C (one
	// callback for svc shutdown and one for the Ctrl+C) which might cause a
	// double golang channel close bug.
	eb.stop.Set(true)
}
// PublishEvents tries to publish the events with retries if connection becomes // unavailable. On failure PublishEvents tries to reconnect. func (s *SingleConnectionMode) PublishEvents( signaler outputs.Signaler, events []common.MapStr, ) error { fails := 0 var backoffCount uint var err error for !s.closed && (s.maxAttempts == 0 || fails < s.maxAttempts) { if err := s.connect(); err != nil { logp.Info("Connecting error publishing events (retrying): %s", err) goto sendFail } for len(events) > 0 { var err error events, err = s.conn.PublishEvents(events) if err != nil { logp.Info("Error publishing events (retrying): %s", err) break } fails = 0 } if len(events) == 0 { outputs.SignalCompleted(signaler) return nil } sendFail: logp.Info("send fail") backoff := time.Duration(int64(s.waitRetry) * (1 << backoffCount)) if backoff > s.maxWaitRetry { backoff = s.maxWaitRetry } else { backoffCount++ } logp.Info("backoff retry: %v", backoff) time.Sleep(backoff) fails++ } outputs.SignalFailed(signaler, err) return nil }