func (b *Beat) launch(bt Creator) error {
	err := b.handleFlags()
	if err != nil {
		return err
	}

	svc.BeforeRun()
	defer svc.Cleanup()

	if err := b.configure(); err != nil {
		return err
	}

	// load the beats config section
	var sub *common.Config
	configName := strings.ToLower(b.Name)
	if b.RawConfig.HasField(configName) {
		sub, err = b.RawConfig.Child(configName, -1)
		if err != nil {
			return err
		}
	} else {
		sub = common.NewConfig()
	}

	logp.Info("Setup Beat: %s; Version: %s", b.Name, b.Version)
	processors, err := processors.New(b.Config.Processors)
	if err != nil {
		return fmt.Errorf("error initializing processors: %v", err)
	}

	debugf("Initializing output plugins")
	publisher, err := publisher.New(b.Name, b.Config.Output, b.Config.Shipper, processors)
	if err != nil {
		return fmt.Errorf("error initializing publisher: %v", err)
	}

	// TODO: some beats race on shutdown with publisher.Stop -> do not call Stop yet,
	//       but refine publisher to disconnect clients on stop automatically
	// defer publisher.Stop()

	b.Publisher = publisher
	beater, err := bt(b, sub)
	if err != nil {
		return err
	}

	// If -configtest was specified, exit now prior to run.
	if cfgfile.IsTestConfig() {
		fmt.Println("Config OK")
		return GracefulExit
	}

	svc.HandleSignals(beater.Stop)

	logp.Info("%s start running.", b.Name)
	defer logp.Info("%s stopped.", b.Name)

	return beater.Run(b)
}
func (c *Crawler) Start(states file.States) error {
	logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs))

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, prospectorConfig := range c.prospectorConfigs {
		prospector, err := prospector.NewProspector(prospectorConfig, states, c.spooler.Channel)
		if err != nil {
			return fmt.Errorf("Error initializing prospector: %s", err)
		}
		c.prospectors = append(c.prospectors, prospector)
	}

	logp.Info("Loading Prospectors completed. Number of prospectors: %v", len(c.prospectors))

	for i, p := range c.prospectors {
		c.wg.Add(1)
		go func(id int, prospector *prospector.Prospector) {
			defer func() {
				c.wg.Done()
				logp.Debug("crawler", "Prospector %v stopped", id)
			}()
			logp.Debug("crawler", "Starting prospector %v", id)
			prospector.Run()
		}(i, p)
	}

	logp.Info("All prospectors are initialised and running with %d states to persist", states.Count())

	return nil
}
func (r *Registrar) fetchState(filePath string, fileInfo os.FileInfo) (int64, bool) {
	// Check if there is a state for this file
	lastState, isFound := r.GetFileState(filePath)

	if isFound && input.IsSameFile(filePath, fileInfo) {
		logp.Debug("registrar", "Same file as before found. Fetch the state and persist it.")
		// We're resuming - throw the last state back downstream so we resave it
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		r.Persist <- lastState
		return lastState.Offset, true
	}

	if previous, err := r.getPreviousFile(filePath, fileInfo); err == nil {
		// File has rotated between shutdown and startup
		// We return the last state downstream, with a modified event source pointing at the new file name
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		logp.Info("Detected rename of a previously harvested file: %s -> %s", previous, filePath)

		lastState, _ := r.GetFileState(previous)
		lastState.Source = &filePath
		r.Persist <- lastState
		return lastState.Offset, true
	}

	if isFound {
		logp.Info("Not resuming rotated file: %s", filePath)
	}

	logp.Info("New file. Start reading from the beginning: %s", filePath)
	// New file so just start from the beginning
	return 0, false
}
func (p *ProspectorLog) Init() {
	logp.Debug("prospector", "exclude_files: %s", p.config.ExcludeFiles)

	logp.Info("Load previous states from registry into memory")
	fileStates := p.Prospector.states.GetStates()

	// Make sure all states are set as finished
	for _, state := range fileStates {
		// Check if the state source belongs to this prospector. If yes, update the state.
		if p.hasFile(state.Source) {
			// Set all states again to infinite TTL to make sure they are only removed if the config is still the same
			// clean_inactive / clean_removed could have been changed between restarts
			state.TTL = -1

			// Update prospector states and send new states to the registry
			err := p.Prospector.updateState(input.NewEvent(state))
			if err != nil {
				logp.Err("Problem putting initial state: %+v", err)
			}
		} else {
			// Only update the internal state, do not report it to the registry
			// Keeping all states could be useful if a file is later moved into this prospector
			// TODO: Think about if this is expected or unexpected
			p.Prospector.states.Update(state)
		}
	}

	p.lastClean = time.Now()

	logp.Info("Previous states loaded: %v", p.Prospector.states.Count())
}
// Stop is called on exit, before Cleanup
// why isn't the flow Cleanup and then Stop?
func (ub *Unifiedbeat) Stop() {
	startStopping := time.Now()
	logp.Info("Stop: is spooling and publishing running? '%v'", ub.isSpooling)

	if ub.isSpooling {
		ub.isSpooling = false
		logp.Info("Stop: waiting %v for spool/publish to shutdown.", ub.spoolTimeout)
		// lame, but might work
		time.Sleep(ub.spoolTimeout)

		// // tell "U2SpoolAndPublish" to shutdown:
		// quit <- true
		// // block/wait for "U2SpoolAndPublish" to close the quit channel:
		// <-quit

		err := ub.registrar.WriteRegistry()
		if err != nil {
			logp.Info("Stop: failed to update registry file; error: %v", err)
		} else {
			logp.Info("Stop: successfully updated registry file.")
		}
	}

	elapsed := time.Since(startStopping)
	logp.Info("Stop: done after waiting %v.", elapsed)
}
func (r *Registrar) Run() {
	logp.Info("Starting Registrar")

	// Writes registry on shutdown
	defer r.writeRegistry()

	for {
		select {
		case <-r.done:
			logp.Info("Ending Registrar")
			return
		// Treats new log files to persist with higher priority than new events
		case state := <-r.Persist:
			r.State[*state.Source] = state
			logp.Debug("prospector", "Registrar will re-save state for %s", *state.Source)
		case events := <-r.Channel:
			r.processEvents(events)
		}

		if e := r.writeRegistry(); e != nil {
			// REVU: but we should panic, or something, right?
			logp.Err("Writing of registry returned error: %v. Continuing..", e)
		}
	}
}
func newCloudMetadata(c common.Config) (processors.Processor, error) {
	config := struct {
		MetadataHostAndPort string        `config:"host"`    // Specifies the host and port of the metadata service (for testing purposes only).
		Timeout             time.Duration `config:"timeout"` // Amount of time to wait for responses from the metadata services.
	}{
		MetadataHostAndPort: metadataHost,
		Timeout:             3 * time.Second,
	}
	err := c.Unpack(&config)
	if err != nil {
		return nil, errors.Wrap(err, "failed to unpack add_cloud_metadata config")
	}

	var (
		doURL  = "http://" + config.MetadataHostAndPort + doMetadataURI
		ec2URL = "http://" + config.MetadataHostAndPort + ec2InstanceIdentityURI
		gceURL = "http://" + config.MetadataHostAndPort + gceMetadataURI
	)

	result := fetchMetadata(doURL, ec2URL, gceURL, config.Timeout)
	if result == nil {
		logp.Info("add_cloud_metadata: hosting provider type not detected.")
		return addCloudMetadata{}, nil
	}

	logp.Info("add_cloud_metadata: hosting provider type detected as %v, metadata=%v",
		result.provider, result.metadata.String())

	return addCloudMetadata{metadata: result.metadata}, nil
}
// loadTemplate checks if the index mapping template should be loaded.
// If the template is not already loaded or overwriting is enabled, the
// template is written to the index.
func (out *elasticsearchOutput) loadTemplate(config Template, client *Client) error {
	out.templateMutex.Lock()
	defer out.templateMutex.Unlock()

	logp.Info("Trying to load template for client: %s", client)

	// Check if the template already exists or should be overwritten
	exists := client.CheckTemplate(config.Name)
	if !exists || config.Overwrite {
		if config.Overwrite {
			logp.Info("Existing template will be overwritten, as overwrite is enabled.")
		}

		reader := bytes.NewReader(out.templateContents)
		err := client.LoadTemplate(config.Name, reader)
		if err != nil {
			return fmt.Errorf("Could not load template: %v", err)
		}
	} else {
		logp.Info("Template already exists and will not be overwritten.")
	}

	return nil
}
func (d *Dockerbeat) Config(b *beat.Beat) error {
	// Requires Docker 1.5 minimum
	d.minimalDockerVersion = SoftwareVersion{major: 1, minor: 5}

	err := cfgfile.Read(&d.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// init the period
	if d.TbConfig.Input.Period != nil {
		d.period = time.Duration(*d.TbConfig.Input.Period) * time.Second
	} else {
		d.period = 1 * time.Second
	}

	// init the socket
	if d.TbConfig.Input.Socket != nil {
		d.socket = *d.TbConfig.Input.Socket
	} else {
		d.socket = "unix:///var/run/docker.sock" // default docker socket location
	}

	logp.Info("Init dockerbeat")
	logp.Info("Following docker socket %q", d.socket)
	logp.Info("Period %v", d.period)

	return nil
}
func (r *Registrar) WriteRegistry() error {
	r.Lock()
	defer r.Unlock()

	// can't truncate a file that does not exist:
	_, err := os.Stat(r.registryFile)
	if err == nil {
		err := os.Truncate(r.registryFile, 0)
		if err != nil {
			logp.Info("WriteRegistry: os.Truncate: err=%v\n", err)
			return err
		}
	}

	// if "json.Marshal" or "ioutil.WriteFile" fail then most likely
	// unifiedbeat does not have access to the registry file
	jsonState, err := json.Marshal(r.State)
	if err != nil {
		logp.Info("WriteRegistry: json.Marshal: err=%v\n", err)
		return err
	}

	// https://golang.org/pkg/io/ioutil/#WriteFile
	// If the file does not exist, WriteFile creates it with
	// permissions 0644; otherwise it is truncated.
	err = ioutil.WriteFile(r.registryFile, jsonState, 0644)
	if err != nil {
		logp.Info("WriteRegistry: ioutil.WriteFile: err=%v\n", err)
		return err
	}

	return nil
}
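// Illustrative sketch, not part of the original source: after os.Stat, existence is
// signalled by err == nil (or by !os.IsNotExist(err)); os.IsExist reports a different
// condition ("already exists" errors from create-style calls), which is why the check
// above uses err == nil before truncating.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}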
// Run calls the beater Setup and Run methods. In case of errors
// during the setup phase, it exits the process.
func (b *Beat) Run() error {
	// Setup beater object
	err := b.BT.Setup(b)
	if err != nil {
		return fmt.Errorf("setup returned an error: %v", err)
	}
	b.setState(SetupState)

	// Up to here was the initialization, now about running
	if cfgfile.IsTestConfig() {
		logp.Info("Testing configuration file")
		// all good, exit
		return nil
	}
	service.BeforeRun()

	// Callback is called if the process is asked to stop.
	// This needs to be called before the main loop is started so that
	// it can register the signals that stop or query (on Windows) the loop.
	service.HandleSignals(b.Exit)

	logp.Info("%s successfully setup. Start running.", b.Name)
	b.setState(RunState)

	// Run beater specific stuff
	err = b.BT.Run(b)
	if err != nil {
		logp.Critical("Running the beat returned an error: %v", err)
	}

	return err
}
func (c *Crawler) Start(prospectorConfigs []config.ProspectorConfig, eventChan chan *input.FileEvent) error {
	if len(prospectorConfigs) == 0 {
		return fmt.Errorf("No prospectors defined. You must have at least one prospector defined in the config file.")
	}

	logp.Info("Loading Prospectors: %v", len(prospectorConfigs))

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, prospectorConfig := range prospectorConfigs {
		logp.Debug("prospector", "File Configs: %v", prospectorConfig.Paths)

		prospector, err := NewProspector(prospectorConfig, c.Registrar, eventChan)
		if err != nil {
			return fmt.Errorf("Error initializing prospector: %s", err)
		}
		c.prospectors = append(c.prospectors, prospector)
	}

	logp.Info("Loading Prospectors completed. Number of prospectors: %v", len(c.prospectors))

	c.wg = sync.WaitGroup{}
	for _, prospector := range c.prospectors {
		c.wg.Add(1)
		go prospector.Run(&c.wg)
	}

	logp.Info("All prospectors are initialised and running with %d states to persist", len(c.Registrar.State))

	return nil
}
func (out *fileOutput) init(config config) error {
	out.rotator.Path = config.Path
	out.rotator.Name = config.Filename
	if out.rotator.Name == "" {
		out.rotator.Name = config.Index
	}
	logp.Info("File output path set to: %v", out.rotator.Path)
	logp.Info("File output base filename set to: %v", out.rotator.Name)

	rotateeverybytes := uint64(config.RotateEveryKb) * 1024
	logp.Info("Rotate every bytes set to: %v", rotateeverybytes)
	out.rotator.RotateEveryBytes = &rotateeverybytes

	keepfiles := config.NumberOfFiles
	logp.Info("Number of files set to: %v", keepfiles)
	out.rotator.KeepFiles = &keepfiles

	err := out.rotator.CreateDirectory()
	if err != nil {
		return err
	}

	err = out.rotator.CheckIfConfigSane()
	if err != nil {
		return err
	}

	return nil
}
func (p *syncLogPublisher) Start() {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()

		logp.Info("Start sending events to output")

		for {
			var events []*input.FileEvent
			select {
			case <-p.done:
				return
			case events = <-p.in:
			}

			pubEvents := make([]common.MapStr, 0, len(events))
			for _, event := range events {
				pubEvents = append(pubEvents, event.ToMapStr())
			}

			p.client.PublishEvents(pubEvents, publisher.Sync, publisher.Guaranteed)
			logp.Info("Events sent: %d", len(events))

			// Tell the registrar that we've successfully sent these events
			select {
			case <-p.done:
				return
			case p.out <- events:
			}
		}
	}()
}
// Start starts the given module
func (m *Module) Start(b *beat.Beat) error {
	defer logp.Recover(fmt.Sprintf("Module %s panicked and stopped running.", m.name))

	if !m.Config.Enabled {
		logp.Debug("helper", "Not starting module %s with metricsets %s as not enabled.", m.name, m.getMetricSetsList())
		return nil
	}

	logp.Info("Setup moduler: %s", m.name)
	err := m.moduler.Setup(m)
	if err != nil {
		return fmt.Errorf("Error setting up module: %s. Not starting metricsets for this module.", err)
	}

	err = m.loadMetricsets()
	if err != nil {
		return fmt.Errorf("Error loading metricsets: %s", err)
	}

	// Setup period
	period, err := time.ParseDuration(m.Config.Period)
	if err != nil {
		return fmt.Errorf("Error in parsing period of module %s: %v", m.name, err)
	}

	// If no period set, set default
	if period == 0 {
		logp.Info("Setting default period for module %s as not set.", m.name)
		period = 1 * time.Second
	}

	var timeout time.Duration
	if m.Config.Timeout != "" {
		// Setup timeout (assign to the outer variable, do not shadow it)
		timeout, err = time.ParseDuration(m.Config.Timeout)
		if err != nil {
			return fmt.Errorf("Error in parsing timeout of module %s: %v", m.name, err)
		}
		// If no timeout set, set to period as default
		if timeout == 0 {
			logp.Info("Setting default timeout for module %s as not set.", m.name)
			timeout = period
		}
	} else {
		timeout = period
	}
	m.Timeout = timeout

	logp.Info("Start Module %s with metricsets [%s] and period %v", m.name, m.getMetricSetsList(), period)

	m.setupMetricSets()

	m.events = b.Publisher.Connect()

	go m.Run(period, b)

	return nil
}
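// Illustrative sketch, not part of the original source: the period/timeout handling in
// Module.Start boils down to "parse the duration if configured, otherwise fall back to a
// default". A hypothetical helper capturing that pattern:
func durationOrDefault(value string, fallback time.Duration) (time.Duration, error) {
	if value == "" {
		return fallback, nil
	}
	d, err := time.ParseDuration(value)
	if err != nil {
		return 0, err
	}
	if d == 0 {
		return fallback, nil
	}
	return d, nil
}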
func (r *Registrar) fetchState(filePath string, fileInfo os.FileInfo) (int64, bool) {
	// Check if there is a state for this file
	lastState, isFound := r.GetFileState(filePath)

	// Use the OS file state to compare files, as the old fileInfo is not necessarily available
	if isFound && lastState.FileStateOS.IsSame(GetOSFileState(&fileInfo)) {
		logp.Debug("registrar", "Same file as before found. Fetch the state.")
		// We're resuming - throw the last state back downstream so we resave it
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		return lastState.Offset, true
	}

	if previous, err := r.getPreviousFile(filePath, fileInfo); err == nil {
		// File has rotated between shutdown and startup
		// We return the last state downstream, with a modified event source pointing at the new file name
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		logp.Info("Detected rename of a previously harvested file: %s -> %s", previous, filePath)

		lastState, _ := r.GetFileState(previous)
		return lastState.Offset, true
	}

	if isFound {
		logp.Info("Not resuming rotated file: %s", filePath)
	}

	// New file, so just start reading from the beginning
	return 0, false
}
func (r *Registrar) Run() {
	logp.Info("Starting Registrar")

	// Writes registry on shutdown
	defer func() {
		r.writeRegistry()
		r.wg.Done()
	}()

	for {
		select {
		case <-r.done:
			logp.Info("Ending Registrar")
			return
		case events := <-r.Channel:
			r.processEventStates(events)
		}

		beforeCount := r.states.Count()
		cleanedStates := r.states.Cleanup()
		logp.Debug("registrar", "Registrar states cleaned up. Before: %d, After: %d",
			beforeCount, beforeCount-cleanedStates)
		statesCleanup.Add(int64(cleanedStates))

		if err := r.writeRegistry(); err != nil {
			logp.Err("Writing of registry returned error: %v. Continuing...", err)
		}
	}
}
// loadTemplate checks if the index mapping template should be loaded.
// If the template is not already loaded or overwriting is enabled, the
// template is written to the index.
func (out *elasticsearchOutput) loadTemplate(config Template, client *Client) error {
	out.templateMutex.Lock()
	defer out.templateMutex.Unlock()

	logp.Info("Trying to load template for client: %s", client.Connection.URL)

	// Check if the template already exists or should be overwritten
	exists := client.CheckTemplate(config.Name)
	if !exists || config.Overwrite {
		if config.Overwrite {
			logp.Info("Existing template will be overwritten, as overwrite is enabled.")
		}

		template := out.template
		if config.Versions.Es2x.Enabled && strings.HasPrefix(client.Connection.version, "2.") {
			logp.Info("Detected Elasticsearch 2.x. Automatically selecting the 2.x version of the template")
			template = out.template2x
		}

		err := client.LoadTemplate(config.Name, template)
		if err != nil {
			return fmt.Errorf("Could not load template: %v", err)
		}
	} else {
		logp.Info("Template already exists and will not be overwritten.")
	}

	return nil
}
// bulkCollectPublishFails checks per-item errors and returns all events
// that should be retried based on the error code returned for each item.
// If indexing an event failed due to an error in the event itself (e.g. it does
// not respect the mapping), the event is dropped.
func bulkCollectPublishFails(
	res *BulkResult,
	events []common.MapStr,
) []common.MapStr {
	failed := events[:0]
	for i, rawItem := range res.Items {
		status, msg, err := itemStatus(rawItem)
		if err != nil {
			logp.Info("Failed to parse bulk response for item (%v): %v", i, err)
			// retry the event if the response item can not be parsed, as success/failure can not be determined
			failed = append(failed, events[i])
			continue
		}

		if status < 300 {
			continue // ok value
		}

		if status < 500 && status != 429 {
			// hard failure, don't collect
			logp.Warn("Can not index event (status=%v): %v", status, msg)
			continue
		}

		debug("Failed to insert data (%v): %v", i, events[i])
		logp.Info("Bulk item insert failed (i=%v, status=%v): %v", i, status, msg)
		failed = append(failed, events[i])
	}

	return failed
}
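// Illustrative sketch, not part of the original source: failed := events[:0] filters a
// slice in place by reusing its backing array, so collecting the events to retry does not
// allocate a second slice. The same idiom as a hypothetical standalone helper:
func keepRetryable(statuses []int, events []string) []string {
	failed := events[:0]
	for i, status := range statuses {
		if status >= 500 || status == 429 { // transient failures are retried
			failed = append(failed, events[i])
		}
	}
	return failed
}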
// dumpMetrics is used to log metrics on shutdown.
func dumpMetrics() {
	logp.Info("Dumping runtime metrics...")
	expvar.Do(func(kv expvar.KeyValue) {
		if kv.Key != "memstats" {
			logp.Info("%s=%s", kv.Key, kv.Value.String())
		}
	})
}
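// Illustrative sketch, not part of the original source: dumpMetrics only prints values that
// were previously registered with the expvar package. A hypothetical counter that would
// show up in the dump as events_sent=<n>:
var eventsSent = expvar.NewInt("events_sent")

func recordSent(n int) {
	eventsSent.Add(int64(n))
}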
// publish is used to publish events using the configured protocol client.
// It provides general error handling and back-off support used on failed
// send attempts. To be used by PublishEvent and PublishEvents.
// The send callback tries to make progress sending traffic and returns the kind
// of progress made in ok and resetFail. If ok is true, send finished processing
// the events. If ok is false but resetFail is set, send was partially successful.
// If send was partially successful, the fail counter is reset; thus up to
// maxAttempts send attempts without any progress might be executed.
func (s *SingleConnectionMode) publish(
	signaler outputs.Signaler,
	opts outputs.Options,
	send func() (ok bool, resetFail bool),
) error {
	fails := 0
	var backoffCount uint
	var err error

	guaranteed := opts.Guaranteed || s.maxAttempts == 0
	for !s.closed && (guaranteed || fails < s.maxAttempts) {
		ok := false
		resetFail := false

		if err := s.connect(); err != nil {
			logp.Info("Connecting error publishing events (retrying): %s", err)
			goto sendFail
		}

		ok, resetFail = send()
		if !ok {
			goto sendFail
		}

		debug("send completed")
		outputs.SignalCompleted(signaler)
		return nil

	sendFail:
		fails++
		if resetFail {
			debug("reset fails")
			fails = 0
		}

		if !guaranteed && (s.maxAttempts > 0 && fails == s.maxAttempts) {
			// max number of attempts reached
			debug("max number of attempts reached")
			break
		}

		logp.Info("send fail")
		backoff := time.Duration(int64(s.waitRetry) * (1 << backoffCount))
		if backoff > s.maxWaitRetry {
			backoff = s.maxWaitRetry
		} else {
			backoffCount++
		}
		logp.Info("backoff retry: %v", backoff)
		time.Sleep(backoff)
	}

	debug("messages dropped")
	messagesDropped.Add(1)
	outputs.SignalFailed(signaler, err)
	return nil
}
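// Illustrative sketch, not part of the original source: the retry delay above grows as
// waitRetry * 2^backoffCount and is capped at maxWaitRetry; the exponent only grows while
// the cap has not been reached. The same arithmetic as a hypothetical standalone helper:
func nextBackoff(waitRetry, maxWaitRetry time.Duration, backoffCount *uint) time.Duration {
	backoff := time.Duration(int64(waitRetry) * (1 << *backoffCount))
	if backoff > maxWaitRetry {
		return maxWaitRetry
	}
	*backoffCount++
	return backoff
}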
// Stop stops the spooler. Flushes events before stopping
func (s *Spooler) Stop() {
	logp.Info("Stopping spooler")
	close(s.exit)

	// Flush one last time before exiting the spooler and close the channel
	logp.Info("Spooler last flush before exit")
	s.flush()
	close(s.Channel)
}
// Cleanup performs clean-up after Run completes.
func (bt *Metricbeat) Cleanup(b *beat.Beat) error {
	logp.Info("Dumping runtime metrics...")
	expvar.Do(func(kv expvar.KeyValue) {
		if kv.Key != "memstats" {
			logp.Info("%s=%s", kv.Key, kv.Value.String())
		}
	})
	return nil
}
func (proc *ProcessesWatcher) Init(config ProcsConfig) error {
	proc.proc_prefix = ""
	proc.PortProcMap = make(map[uint16]PortProcMapping)
	proc.LastMapUpdate = time.Now()

	proc.ReadFromProc = config.Enabled
	if proc.ReadFromProc {
		if runtime.GOOS != "linux" {
			proc.ReadFromProc = false
			logp.Info("Disabled /proc/ reading because not on linux")
		} else {
			logp.Info("Process matching enabled")
		}
	} else {
		logp.Info("Process matching disabled")
	}

	if config.Max_proc_read_freq == 0 {
		proc.MaxReadFreq = 10 * time.Millisecond
	} else {
		proc.MaxReadFreq = config.Max_proc_read_freq
	}

	if config.Refresh_pids_freq == 0 {
		proc.RefreshPidsFreq = 1 * time.Second
	} else {
		proc.RefreshPidsFreq = config.Refresh_pids_freq
	}

	// Read the local IP addresses
	var err error
	proc.LocalAddrs, err = common.LocalIpAddrs()
	if err != nil {
		logp.Err("Error getting local IP addresses: %s", err)
		proc.LocalAddrs = []net.IP{}
	}

	if proc.ReadFromProc {
		for _, procConfig := range config.Monitored {
			grepper := procConfig.Cmdline_grep
			if len(grepper) == 0 {
				grepper = procConfig.Process
			}

			p, err := NewProcess(proc, procConfig.Process, grepper, time.Tick(proc.RefreshPidsFreq))
			if err != nil {
				logp.Err("NewProcess: %s", err)
			} else {
				proc.Processes = append(proc.Processes, p)
			}
		}
	}

	return nil
}
func (bt *Kafkabeat) Config(b *beat.Beat) error {
	// Load beater beatConfig
	logp.Info("Configuring Kafkabeat...")
	var err error

	err = cfgfile.Read(&bt.beatConfig, "")
	if err != nil {
		return fmt.Errorf("Error reading config file: %v", err)
	}

	bt.zookeepers = bt.beatConfig.Kafkabeat.Zookeepers
	if bt.zookeepers == nil || len(bt.zookeepers) == 0 {
		return KafkabeatError{"At least one zookeeper must be defined"}
	}

	zClient, err = kazoo.NewKazoo(bt.zookeepers, nil)
	if err != nil {
		logp.Err("Unable to connect to Zookeeper")
		return err
	}

	bt.brokers, err = zClient.BrokerList()
	if err != nil {
		logp.Err("Error identifying brokers from zookeeper")
		return err
	}
	if bt.brokers == nil || len(bt.brokers) == 0 {
		return KafkabeatError{"Unable to identify active brokers"}
	}
	logp.Info("Brokers: %v", bt.brokers)

	client, err = sarama.NewClient(bt.brokers, sarama.NewConfig())
	if err != nil {
		return err
	}

	// Setting default period if not set
	if bt.beatConfig.Kafkabeat.Period == "" {
		bt.beatConfig.Kafkabeat.Period = "1s"
	}
	bt.period, err = time.ParseDuration(bt.beatConfig.Kafkabeat.Period)
	if err != nil {
		return err
	}

	bt.topics = bt.beatConfig.Kafkabeat.Topics
	bt.create_topic_docs = true
	if bt.topics == nil || len(bt.topics) == 0 {
		bt.create_topic_docs = bt.topics == nil
		bt.topics, err = client.Topics()
	}
	if err != nil {
		return err
	}
	logp.Info("Monitoring topics: %v", bt.topics)

	bt.groups = bt.beatConfig.Kafkabeat.Groups
	if bt.groups == nil {
		bt.groups, err = getGroups()
	}
	logp.Info("Monitoring groups %v", bt.groups)

	return err
}
func (c *Crawler) Stop() {
	logp.Info("Stopping Crawler")

	logp.Info("Stopping %v prospectors", len(c.prospectors))
	for _, prospector := range c.prospectors {
		prospector.Stop()
	}
	c.wg.Wait()

	logp.Info("Crawler stopped")
}
func (ub *Unifiedbeat) Cleanup(b *beat.Beat) error {
	logp.Info("Cleanup: is spooling and publishing running? '%v'", ub.isSpooling)

	// see "beat/geoip2.go":
	if GeoIp2Reader != nil {
		GeoIp2Reader.Close()
		logp.Info("Cleanup: closed GeoIp2Reader.")
	}

	logp.Info("Cleanup: done.")
	return nil
}
func (c *Crawler) Stop() {
	logp.Info("Stopping Crawler")

	logp.Info("Stopping %v prospectors", len(c.prospectors))
	for _, prospector := range c.prospectors {
		// Stop prospectors in parallel
		c.wg.Add(1)
		go prospector.Stop(&c.wg)
	}
	c.wg.Wait()

	logp.Info("Crawler stopped")
}
// publish is used to publish events using the configured protocol client.
// It provides general error handling and back-off support used on failed
// send attempts. To be used by PublishEvent and PublishEvents.
// The send callback tries to make progress sending traffic and returns the kind
// of progress made in ok and resetFail. If ok is true, send finished processing
// the events. If ok is false but resetFail is set, send was partially successful.
// If send was partially successful, the fail counter is reset; thus up to
// maxAttempts send attempts without any progress might be executed.
func (s *Mode) publish(
	signaler op.Signaler,
	opts outputs.Options,
	send func() (ok bool, resetFail bool),
) error {
	fails := 0
	var err error

	guaranteed := opts.Guaranteed || s.maxAttempts == 0
	for !s.closed && (guaranteed || fails < s.maxAttempts) {
		ok := false
		resetFail := false

		if err := s.connect(); err != nil {
			logp.Info("Connecting error publishing events (retrying): %s", err)
			goto sendFail
		}

		ok, resetFail = send()
		if !ok {
			goto sendFail
		}

		debugf("send completed")
		s.backoff.Reset()
		op.SigCompleted(signaler)
		return nil

	sendFail:
		logp.Info("send fail")

		fails++
		if resetFail {
			debugf("reset fails")
			s.backoff.Reset()
			fails = 0
		}
		s.backoff.Wait()

		if !guaranteed && (s.maxAttempts > 0 && fails == s.maxAttempts) {
			// max number of attempts reached
			debugf("max number of attempts reached")
			break
		}
	}

	debugf("messages dropped")
	mode.Dropped(1)
	op.SigFailed(signaler, err)
	return nil
}
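// Illustrative sketch, not part of the original source: s.backoff above is assumed to
// expose Wait/Reset semantics roughly like the hypothetical type below, where each Wait
// sleeps and doubles the delay up to a maximum, and Reset restores the initial delay.
type backoffSketch struct {
	init, max, current time.Duration
}

func (b *backoffSketch) Wait() {
	time.Sleep(b.current)
	if b.current *= 2; b.current > b.max {
		b.current = b.max
	}
}

func (b *backoffSketch) Reset() {
	b.current = b.init
}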
func (out *redisOutput) Init(config *redisConfig, topology_expire int) error {
	logp.Warn("Redis Output is deprecated. Please use the Redis Output Plugin from Logstash instead.")

	out.Hostname = fmt.Sprintf("%s:%d", config.Host, config.Port)
	out.Password = config.Password
	out.Index = config.Index
	out.Db = config.Db
	out.DbTopology = config.DbTopology

	out.Timeout = 5 * time.Second
	if config.Timeout > 0 {
		out.Timeout = time.Duration(config.Timeout) * time.Second
	}

	out.ReconnectInterval = time.Duration(1) * time.Second
	if config.ReconnectInterval >= 0 {
		out.ReconnectInterval = time.Duration(config.ReconnectInterval) * time.Second
	}
	logp.Info("Reconnect Interval set to: %v", out.ReconnectInterval)

	expSec := 15
	if topology_expire != 0 {
		expSec = topology_expire
	}
	out.TopologyExpire = time.Duration(expSec) * time.Second

	switch config.DataType {
	case "", "list":
		out.DataType = RedisListType
	case "channel":
		out.DataType = RedisChannelType
	default:
		return errors.New("Bad Redis data type")
	}

	logp.Info("[RedisOutput] Using Redis server %s", out.Hostname)
	if out.Password != "" {
		logp.Info("[RedisOutput] Using password to connect to Redis")
	}
	logp.Info("[RedisOutput] Redis connection timeout %s", out.Timeout)
	logp.Info("[RedisOutput] Redis reconnect interval %s", out.ReconnectInterval)
	logp.Info("[RedisOutput] Using index pattern %s", out.Index)
	logp.Info("[RedisOutput] Topology expires after %s", out.TopologyExpire)
	logp.Info("[RedisOutput] Using db %d for storing events", out.Db)
	logp.Info("[RedisOutput] Using db %d for storing topology", out.DbTopology)
	logp.Info("[RedisOutput] Using %d data type", out.DataType)

	out.Reconnect()

	return nil
}