// state_body_chunked_start parses the hexadecimal chunk-size line of an HTTP
// chunked-encoded body. It returns (cont, ok, complete): cont=true means keep
// parsing in the newly set state, ok=false signals a hard parse error, and
// complete=true means the whole message has been consumed.
func state_body_chunked_start(s *HttpStream, m *HttpMessage) (cont bool, ok bool, complete bool) {
	// read hexa length
	i := bytes.Index(s.data[s.parseOffset:], []byte("\r\n"))
	if i == -1 {
		// chunk-size line not fully buffered yet; wait for more data
		return false, true, false
	}
	line := string(s.data[s.parseOffset : s.parseOffset+i])
	_, err := fmt.Sscanf(line, "%x", &m.chunked_length)
	if err != nil {
		logp.Warn("Failed to understand chunked body start line")
		return false, false, false
	}
	s.parseOffset += i + 2 //+ \r\n

	if m.chunked_length == 0 {
		// zero-length chunk terminates the body; a final CRLF must follow
		if len(s.data[s.parseOffset:]) < 2 {
			// final CRLF not buffered yet; park in a dedicated wait state
			s.parseState = BODY_CHUNKED_WAIT_FINAL_CRLF
			return false, true, false
		}
		if s.data[s.parseOffset] != '\r' || s.data[s.parseOffset+1] != '\n' {
			logp.Warn("Expected CRLF sequence at end of message")
			return false, false, false
		}
		s.parseOffset += 2 // skip final CRLF
		m.end = s.parseOffset
		m.Size = uint64(m.end - m.start)
		return false, true, true
	}
	// non-empty chunk follows: switch to chunk-data state
	s.bodyReceived = 0
	s.parseState = BODY_CHUNKED
	return true, true, false
}
func (redis *Redis) receivedRedisResponse(msg *RedisMessage) { tuple := msg.TcpTuple trans := redis.getTransaction(tuple.Hashable()) if trans == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } // check if the request was received if trans.Redis == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } trans.IsError = msg.IsError if msg.IsError { trans.Redis["error"] = msg.Message } else { trans.Redis["return_value"] = msg.Message } trans.BytesOut = msg.Size trans.Response_raw = msg.Message trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds redis.publishTransaction(trans) redis.transactions.Delete(trans.tuple.Hashable()) logp.Debug("redis", "Redis transaction completed: %s", trans.Redis) }
func (mongodb *Mongodb) receivedMongodbResponse(msg *MongodbMessage) { trans := mongodb.getTransaction(msg.TcpTuple.Hashable()) if trans == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } // check if the request was received if trans.Mongodb == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } // Merge request and response events attributes for k, v := range msg.event { trans.event[k] = v } trans.error = msg.error trans.documents = msg.documents trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds trans.BytesOut = msg.messageLength mongodb.publishTransaction(trans) mongodb.transactions.Delete(trans.tuple.Hashable()) logp.Debug("mongodb", "Mongodb transaction completed: %s", trans.Mongodb) }
// correlateTCP merges queued memcache requests with queued responses into
// transactions and reports them via onTCPTrans. Unmatched non-quiet binary
// requests are flagged and reported alone; a response with no request at all
// is annotated as an orphan.
func (mc *Memcache) correlateTCP(conn *connection) error {
	// merge requests with responses into transactions
	for !conn.responses.empty() {
		var requ *message
		resp := conn.responses.pop()
		for !conn.requests.empty() {
			requ = conn.requests.pop()
			if requ.isBinary != resp.isBinary {
				// protocol mix-up on one connection is unrecoverable
				err := ErrMixOfBinaryAndText
				logp.Warn("%v", err)
				return err
			}

			// If requ and response belong to the same transaction, continue
			// merging them into one transaction
			// (text protocol always pairs in order; binary pairs by opaque+opcode)
			sameTransaction := !requ.isBinary ||
				(requ.opaque == resp.opaque &&
					requ.opcode == resp.opcode)
			if sameTransaction {
				break
			}

			// check if we are missing a response or quiet message.
			// Quiet message only MAY get a response -> so we need
			// to clear message list from all quiet messages not having
			// received a response
			if requ.isBinary && !requ.isQuiet {
				note := NoteNonQuietResponseOnly
				logp.Warn("%s", note)
				requ.AddNotes(note)
			}

			// send request
			debug("send single request=%p", requ)
			err := mc.onTCPTrans(requ, nil)
			if err != nil {
				logp.Warn("error processing memcache transaction: %s", err)
			}
			// mark request as consumed so it is not paired with resp below
			requ = nil
		}

		// Check if response without request. This should only happen when a TCP
		// stream is found (or after message gap) when we receive a response
		// without having seen a request.
		if requ == nil {
			debug("found orphan memcached response=%p", resp)
			resp.AddNotes(NoteTransactionNoRequ)
		}

		debug("merge request=%p and response=%p", requ, resp)
		err := mc.onTCPTrans(requ, resp)
		if err != nil {
			logp.Warn("error processing memcache transaction: %s", err)
		}
		// continue processing more transactions (reporting error only)
	}
	return nil
}
// receivedHttpResponse correlates a parsed HTTP response with its pending
// request transaction, records status/headers/timing and publishes the
// completed transaction.
func (http *Http) receivedHttpResponse(msg *HttpMessage) {
	// we need to search the request first.
	tuple := msg.TcpTuple
	logp.Debug("http", "Received response with tuple: %s", tuple)

	trans := http.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Warn("Response from unknown transaction. Ignoring: %v", tuple)
		return
	}

	if trans.Http == nil {
		logp.Warn("Response without a known request. Ignoring.")
		return
	}

	response := common.MapStr{
		"phrase":         msg.StatusPhrase,
		"code":           msg.StatusCode,
		"content_length": msg.ContentLength,
	}

	if http.Send_headers {
		if !http.Split_cookie {
			response["response_headers"] = msg.Headers
		} else {
			// split Set-Cookie headers into structured name/value pairs,
			// copying all other headers through unchanged
			hdrs := common.MapStr{}
			for hdr_name, hdr_val := range msg.Headers {
				if hdr_name == "set-cookie" {
					hdrs[hdr_name] = splitCookiesHeader(hdr_val)
				} else {
					hdrs[hdr_name] = hdr_val
				}
			}

			response["response_headers"] = hdrs
		}
	}

	trans.BytesOut = msg.Size
	trans.Http.Update(response)
	trans.Notes = append(trans.Notes, msg.Notes...)

	trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	// save Raw message
	if http.Send_response {
		trans.Response_raw = string(http.cutMessageBody(msg))
	}

	http.publishTransaction(trans)
	http.transactions.Delete(trans.tuple.Hashable())

	logp.Debug("http", "HTTP transaction completed: %s\n", trans.Http)
}
func (t *Topbeat) exportSystemStats() error { load_stat, err := GetSystemLoad() if err != nil { logp.Warn("Getting load statistics: %v", err) return err } cpu_stat, err := GetCpuTimes() if err != nil { logp.Warn("Getting cpu times: %v", err) return err } t.addCpuPercentage(cpu_stat) cpu_core_stat, err := GetCpuTimesList() if err != nil { logp.Warn("Getting cpu core times: %v", err) return err } t.addCpuPercentageList(cpu_core_stat) mem_stat, err := GetMemory() if err != nil { logp.Warn("Getting memory details: %v", err) return err } t.addMemPercentage(mem_stat) swap_stat, err := GetSwap() if err != nil { logp.Warn("Getting swap details: %v", err) return err } t.addMemPercentage(swap_stat) event := common.MapStr{ "@timestamp": common.Time(time.Now()), "type": "system", "load": load_stat, "cpu": cpu_stat, "mem": mem_stat, "swap": swap_stat, } for coreNumber, core := range cpu_core_stat { event["cpu"+strconv.Itoa(coreNumber)] = core } t.events.PublishEvent(event) return nil }
func (mysql *Mysql) receivedMysqlResponse(msg *MysqlMessage) { tuple := msg.TcpTuple trans := mysql.transactionsMap[tuple.Hashable()] if trans == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } // check if the request was received if trans.Mysql == nil { logp.Warn("Response from unknown transaction. Ignoring.") return } // save json details trans.Mysql.Update(common.MapStr{ "affected_rows": msg.AffectedRows, "insert_id": msg.InsertId, "num_rows": msg.NumberOfRows, "num_fields": msg.NumberOfFields, "iserror": msg.IsError, "error_code": msg.ErrorCode, "error_message": msg.ErrorInfo, }) trans.BytesOut = msg.Size trans.Path = msg.Tables trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds // save Raw message if len(msg.Raw) > 0 { fields, rows := mysql.parseMysqlResponse(msg.Raw) trans.Response_raw = common.DumpInCSVFormat(fields, rows) } trans.Notes = append(trans.Notes, msg.Notes...) mysql.publishTransaction(trans) logp.Debug("mysql", "Mysql transaction completed: %s", trans.Mysql) logp.Debug("mysql", "%s", trans.Response_raw) trans.Notes = append(trans.Notes, msg.Notes...) // remove from map delete(mysql.transactionsMap, trans.tuple.Hashable()) if trans.timer != nil { trans.timer.Stop() } }
func LoadGeoIPData(config Geoip) *libgeo.GeoIP { geoipPaths := []string{} if config.Paths != nil { geoipPaths = *config.Paths } if len(geoipPaths) == 0 { logp.Info("GeoIP disabled: No paths were set under output.geoip.paths") // disabled return nil } // look for the first existing path var geoipPath string for _, path := range geoipPaths { fi, err := os.Lstat(path) if err != nil { logp.Err("GeoIP path could not be loaded: %s", path) continue } if fi.Mode()&os.ModeSymlink == os.ModeSymlink { // follow symlink geoipPath, err = filepath.EvalSymlinks(path) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) return nil } } else { geoipPath = path } break } if len(geoipPath) == 0 { logp.Warn("Couldn't load GeoIP database") return nil } geoLite, err := libgeo.Load(geoipPath) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) } logp.Info("Loaded GeoIP data from: %s", geoipPath) return geoLite }
func LoadGeoIPData(config Geoip) *libgeo.GeoIP { geoip_paths := []string{ "/usr/share/GeoIP/GeoIP.dat", "/usr/local/var/GeoIP/GeoIP.dat", } if config.Paths != nil { geoip_paths = *config.Paths } if len(geoip_paths) == 0 { // disabled return nil } // look for the first existing path var geoip_path string for _, path := range geoip_paths { fi, err := os.Lstat(path) if err != nil { continue } if fi.Mode()&os.ModeSymlink == os.ModeSymlink { // follow symlink geoip_path, err = filepath.EvalSymlinks(path) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) return nil } } else { geoip_path = path } break } if len(geoip_path) == 0 { logp.Warn("Couldn't load GeoIP database") return nil } geoLite, err := libgeo.Load(geoip_path) if err != nil { logp.Warn("Could not load GeoIP data: %s", err.Error()) } logp.Info("Loaded GeoIP data from: %s", geoip_path) return geoLite }
func (f *FileEvent) ToMapStr() common.MapStr { event := common.MapStr{ "@timestamp": common.Time(f.ReadTime), "source": f.Source, "offset": f.Offset, "line": f.Line, "message": f.Text, "fileinfo": f.Fileinfo, "type": f.DocumentType, "input_type": f.InputType, } if f.Fields != nil { if f.fieldsUnderRoot { for key, value := range *f.Fields { // in case of conflicts, overwrite _, found := event[key] if found { logp.Warn("Overwriting %s key", key) } event[key] = value } } else { event["fields"] = f.Fields } } return event }
// flush writes the buffered journal data to disk, retrying while the only
// failure mode is a short write, then forwards the queued events through the
// emitter and re-arms the flush timer. Any non-short-write flush error closes
// the emitter and is returned.
func (j *Journaler) flush() error {
	// keep track of how many bytes we've flushed to the current file
	// so we know when to rotate it
	j.curFileSizeBytes += j.buffer.Buffered()

	logp.Debug("Journal", "Flushing journal buffer of size %d bytes",
		j.buffer.Buffered())

	var flushErr error
	j.totalBytesWrit += int64(j.buffer.Buffered())
	// keep flushing while data remains and the last error (if any) was a
	// short write, which bufio can recover from on the next Flush
	for j.buffer.Buffered() > 0 && (flushErr == nil || flushErr == io.ErrShortWrite) {
		if flushErr != nil {
			logp.Warn(flushErr.Error())
		}
		flushErr = j.buffer.Flush()
	}
	if flushErr != nil {
		// unrecoverable write error: stop emitting and report it
		j.emitter.close()
		return flushErr
	}
	j.totalEvtsProcd += int64(len(j.emitter.queuedEvents))
	j.emitter.sendAll()
	j.resetTimer()
	j.refreshMetrics()
	return nil
}
// Run ranges over input, buffering the journal until the buffer is full, // or the maxDelay time has exceeded. Either condition will cause the // the journal to be flushed to disk and the journaled deliveries to // be published to the j.Out channel func (j *Journaler) Run(input <-chan *AmqpEvent, stop chan interface{}) { var err error defer j.Close() loop: for { // For an event, we may or may not want to flush the buffer, depending // on whether the buffer is out of space. Whereas on receiving a timer // event, we always need to flush the buffer. select { case d, more := <-input: if !more { break loop } if d == nil { // This seems to coincide with server disconnects, but its not clearly documented // in the amqp lib logp.Warn("Journaler recieved nil delivery, ignoring") continue } err = j.processEvent(d) case <-j.timer.C: err = j.flush() } if err != nil { panic(err) } } }
func (eb *Winlogbeat) Setup(b *beat.Beat) error { eb.beat = b eb.client = b.Events eb.done = make(chan struct{}) var err error eb.checkpoint, err = checkpoint.NewCheckpoint(eb.config.Winlogbeat.RegistryFile, 10, 5*time.Second) if err != nil { return err } if eb.config.Winlogbeat.Metrics.BindAddress != "" { bindAddress := eb.config.Winlogbeat.Metrics.BindAddress sock, err := net.Listen("tcp", bindAddress) if err != nil { return err } go func() { logp.Info("Metrics hosted at http://%s/debug/vars", bindAddress) err := http.Serve(sock, nil) if err != nil { logp.Warn("Unable to launch HTTP service for metrics. %v", err) return } }() } return nil }
func (ab *AmqpBeat) newAmqpEvent(delivery *amqp.Delivery, typeTag, tsField, tsFormat *string) (*AmqpEvent, error) { m := common.MapStr{} err := json.Unmarshal(delivery.Body, &m) if err != nil { return nil, fmt.Errorf("error unmarshalling delivery %v: %v", delivery.Body, err) } now := time.Now() ts := common.Time(now) if tsField != nil && tsFormat != nil { var err error ts, err = extractTS(m, *tsField, *tsFormat, ts) if err != nil { logp.Warn("Failed to extract @timestamp for event, defaulting to delivery time ('%s'): %v", now, err) } } sanitize(m, ab.RbConfig.AmqpInput) m["type"] = *typeTag m["@timestamp"] = ts ev := &AmqpEvent{ deliveryTag: delivery.DeliveryTag, acknowledger: delivery.Acknowledger, body: m, } return ev, nil }
func (t *Topbeat) exportFileSystemStats() error { fss, err := GetFileSystemList() if err != nil { logp.Warn("Getting filesystem list: %v", err) return err } for _, fs := range fss { fs_stat, err := GetFileSystemStat(fs) if err != nil { logp.Debug("topbeat", "Skip filesystem %d: %v", fs_stat, err) continue } t.addFileSystemUsedPercentage(fs_stat) event := common.MapStr{ "timestamp": common.Time(time.Now()), "type": "filesystem", "fs": fs_stat, } t.events <- event } return nil }
func (conn *Connection) execRequest( method, url string, body io.Reader, ) (int, []byte, error) { req, err := http.NewRequest(method, url, body) if err != nil { logp.Warn("Failed to create request", err) return 0, nil, err } req.Header.Add("Accept", "application/json") if conn.Username != "" || conn.Password != "" { req.SetBasicAuth(conn.Username, conn.Password) } resp, err := conn.http.Do(req) if err != nil { conn.connected = false return 0, nil, err } defer closing(resp.Body) status := resp.StatusCode if status >= 300 { conn.connected = false return status, nil, fmt.Errorf("%v", resp.Status) } obj, err := ioutil.ReadAll(resp.Body) if err != nil { conn.connected = false return status, nil, err } return status, obj, nil }
// bulkCollectPublishFails checks per item errors returning all events // to be tried again due to error code returned for that items. If indexing an // event failed due to some error in the event itself (e.g. does not respect mapping), // the event will be dropped. func bulkCollectPublishFails( res *BulkResult, events []common.MapStr, ) []common.MapStr { failed := events[:0] for i, rawItem := range res.Items { status, msg, err := itemStatus(rawItem) if err != nil { logp.Info("Failed to parse bulk reponse for item (%i): %v", i, err) // add index if response parse error as we can not determine success/fail failed = append(failed, events[i]) continue } if status < 300 { continue // ok value } if status < 500 && status != 429 { // hard failure, don't collect logp.Warn("Can not index event (status=%v): %v", status, msg) continue } debug("Failed to insert data(%v): %v", i, events[i]) logp.Info("Bulk item insert failed (i=%v, status=%v): %v", i, status, msg) failed = append(failed, events[i]) } return failed }
func (client *Client) PublishEvent(event common.MapStr) error { if !client.connected { return ErrNotConnected } ts := time.Time(event["@timestamp"].(common.Time)) index := fmt.Sprintf("%s-%d.%02d.%02d", client.index, ts.Year(), ts.Month(), ts.Day()) logp.Debug("output_elasticsearch", "Publish event: %s", event) // insert the events one by one status, _, err := client.Index(index, event["type"].(string), "", nil, event) if err != nil { logp.Warn("Fail to insert a single event: %s", err) if err == ErrJSONEncodeFailed { // don't retry unencodable values return nil } } switch { case status == 0: // event was not send yet return nil case status >= 500 || status == 429: // server error, retry return err case status >= 300 && status < 500: // won't be able to index event in Elasticsearch => don't retry return nil } return nil }
func (p *Pingbeat) AddTarget(target string, tag string) { if addr := net.ParseIP(target); addr.String() == "" { if addr.To4() != nil { logp.Debug("pingbeat", "IPv4: %s\n", addr.String()) p.ipv4targets[addr.String()] = [2]string{target, tag} } else { logp.Debug("pingbeat", "IPv6: %s\n", addr.String()) p.ipv6targets[addr.String()] = [2]string{target, tag} } } else { logp.Debug("pingbeat", "Getting IP addresses for %s:\n", target) addrs, err := net.LookupIP(target) if err != nil { logp.Warn("Failed to resolve %s to IP address, ignoring this target.\n", target) } else { for j := 0; j < len(addrs); j++ { if addrs[j].To4() != nil { logp.Debug("pingbeat", "IPv4: %s\n", addrs[j].String()) p.ipv4targets[addrs[j].String()] = [2]string{target, tag} } else { logp.Debug("pingbeat", "IPv6: %s\n", addrs[j].String()) p.ipv6targets[addrs[j].String()] = [2]string{target, tag} } } } } }
func (spooler *Spooler) Config() error { config := &spooler.Filebeat.FbConfig.Filebeat // Set default pool size if value not set if config.SpoolSize == 0 { config.SpoolSize = cfg.DefaultSpoolSize } // Set default idle timeout if not set if config.IdleTimeout == "" { logp.Debug("spooler", "Set idleTimeoutDuration to %s", cfg.DefaultIdleTimeout) // Set it to default config.IdleTimeoutDuration = cfg.DefaultIdleTimeout } else { var err error config.IdleTimeoutDuration, err = time.ParseDuration(config.IdleTimeout) if err != nil { logp.Warn("Failed to parse idle timeout duration '%s'. Error was: %v", config.IdleTimeout, err) return err } } return nil }
func ensureMemcacheConnection(private protos.ProtocolData) *tcpConnectionData { if private == nil { return &tcpConnectionData{} } priv, ok := private.(*tcpConnectionData) if !ok { logp.Warn("memcache connection data type error, create new one") return &tcpConnectionData{} } if priv == nil { logp.Warn("Unexpected: memcache TCP connection data not set, create new one") return &tcpConnectionData{} } return priv }
// formatMessage builds the message text that is associated with an event log // record. Each EventID has a template that is stored in a library. The event // contains the parameters used to populate the template. This method evaluates // the template with the parameters and returns the resulting string. func (el *eventLog) formatMessage(event *winEventLogRecord, buf []byte, lr LogRecord) (string, error) { handles := el.handles.get(lr.SourceName) if handles == nil || len(handles) == 0 { msgParams, err := getStrings(event, buf) if err != nil { return "", err } message := fmt.Sprintf(noMessageFile, lr.EventId, lr.SourceName, strings.Join(msgParams, ", ")) return message, nil } // Get addresses of the string inserts: stringInsertPtrs, err := getStringsPointers(event, buf) if err != nil { logp.Warn("Failed to get string insert pointers.", err) return "", err } var addr *uintptr if stringInsertPtrs != nil { addr = &stringInsertPtrs[0] } var message string for _, handle := range handles { numChars, err := formatMessage( windows.FORMAT_MESSAGE_FROM_SYSTEM| windows.FORMAT_MESSAGE_FROM_HMODULE| windows.FORMAT_MESSAGE_ARGUMENT_ARRAY, handle, event.eventID, 0, // Language ID &el.formatBuf[0], uint32(len(el.formatBuf)), addr) if err != nil { // Try the next handle to see if a message can be found. logp.Debug("eventlog", "Failed to find message. Trying next handle.") continue } message, _, err = utf16ToString(el.formatBuf[:numChars*2]) if err != nil { // Found a handle that provides the message. break } } if message == "" { msgParams, err := getStrings(event, buf) if err != nil { return "", err } message = fmt.Sprintf(noMessageFile, lr.EventId, lr.SourceName, strings.Join(msgParams, ", ")) } return message, nil }
// queryEventMessageFiles queries the registry to get the value of
// the EventMessageFile key that points to a DLL or EXE containing parameterized
// event log messages. If found, it loads the libraries as a datafiles and
// returns a slice of Handles to the libraries.
func queryEventMessageFiles(providerName, sourceName string) ([]Handle, error) {
	// Open key in registry:
	registryKeyName := fmt.Sprintf(
		"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s",
		providerName, sourceName)
	key, err := registry.OpenKey(registry.LOCAL_MACHINE, registryKeyName,
		registry.QUERY_VALUE)
	if err != nil {
		return nil, fmt.Errorf("Failed to open HKLM\\%s", registryKeyName)
	}
	defer func() {
		// best-effort close; a failure here is only logged
		err := key.Close()
		if err != nil {
			logp.Warn("Failed to close registry key. key=%s err=%v",
				registryKeyName, err)
		}
	}()
	logp.Debug("eventlog", "RegOpenKey opened handle to HKLM\\%s, key=%v",
		registryKeyName, key)

	// Read value from registry:
	value, _, err := key.GetStringValue("EventMessageFile")
	if err != nil {
		return nil, fmt.Errorf("Failed querying EventMessageFile from "+
			"HKLM\\%s. %v", registryKeyName, err)
	}
	// expand environment variables embedded in the registry value
	value, err = registry.ExpandString(value)
	if err != nil {
		return nil, err
	}

	// Split the value in case there is more than one file in the value.
	eventMessageFiles := strings.Split(value, ";")
	logp.Debug("eventlog", "RegQueryValueEx queried EventMessageFile from "+
		"HKLM\\%s and got [%s]", registryKeyName,
		strings.Join(eventMessageFiles, ","))

	// Load the libraries:
	var handles []Handle
	for _, eventMessageFile := range eventMessageFiles {
		sPtr, err := syscall.UTF16PtrFromString(eventMessageFile)
		if err != nil {
			logp.Debug("eventlog", "Failed to get UTF16Ptr for '%s'. "+
				"Skipping. %v", eventMessageFile, err)
			continue
		}
		// load as a data file only: the library is never executed, just
		// used as a message-template resource
		handle, err := loadLibraryEx(sPtr, 0, LOAD_LIBRARY_AS_DATAFILE)
		if err != nil {
			logp.Debug("eventlog", "Failed to load library '%s' as data file. "+
				"Skipping. %v", eventMessageFile, err)
			continue
		}
		handles = append(handles, handle)
	}

	logp.Debug("eventlog", "Returning handles %v for sourceName %s", handles,
		sourceName)
	return handles, nil
}
func ensureRedisConnection(private protos.ProtocolData) *redisConnectionData { if private == nil { return &redisConnectionData{} } priv, ok := private.(*redisConnectionData) if !ok { logp.Warn("redis connection data type error, create new one") return &redisConnectionData{} } if priv == nil { logp.Warn("Unexpected: redis connection data not set, create new one") return &redisConnectionData{} } return priv }
func ensureMongodbConnection(private protos.ProtocolData) *mongodbConnectionData { if private == nil { return &mongodbConnectionData{} } priv, ok := private.(*mongodbConnectionData) if !ok { logp.Warn("mongodb connection data type error, create new one") return &mongodbConnectionData{} } if priv == nil { logp.Warn("Unexpected: mongodb connection data not set, create new one") return &mongodbConnectionData{} } return priv }
// freeHandles free the event message file Handles so that the modules can // be unloaded. The Handles are no longer valid after being freed. func (hc *handleCache) freeHandles(handles []Handle) { for _, handle := range handles { err := hc.freer(handle) if err != nil { logp.Warn("FreeLibrary error for handle %v", handle) } } }
// Init sets up default config for prospector func (p *Prospector) Init() error { // Setup Ignore Older if p.ProspectorConfig.IgnoreOlder != "" { var err error p.ProspectorConfig.IgnoreOlderDuration, err = time.ParseDuration(p.ProspectorConfig.IgnoreOlder) if err != nil { logp.Warn("Failed to parse ignore_older value '%s'. Error was: %s\n", p.ProspectorConfig.IgnoreOlder) return err } } else { p.ProspectorConfig.IgnoreOlderDuration = cfg.DefaultIgnoreOlderDuration } logp.Debug("propsector", "Set IgnoreOlderDuration to %s", p.ProspectorConfig.IgnoreOlderDuration) // Setup Scan Frequency if p.ProspectorConfig.ScanFrequency != "" { var err error p.ProspectorConfig.ScanFrequencyDuration, err = time.ParseDuration(p.ProspectorConfig.ScanFrequency) if err != nil { logp.Warn("Failed to parse scan_frequency value '%s'. Error was: %s\n", p.ProspectorConfig.ScanFrequency, err) return err } } else { p.ProspectorConfig.ScanFrequencyDuration = cfg.DefaultScanFrequency } logp.Debug("propsector", "Set ScanFrequencyDuration to %s", p.ProspectorConfig.ScanFrequencyDuration) // Setup Buffer Size if p.ProspectorConfig.Harvester.BufferSize == 0 { p.ProspectorConfig.Harvester.BufferSize = cfg.DefaultHarvesterBufferSize } // Setup DocumentType if p.ProspectorConfig.Harvester.DocumentType == "" { p.ProspectorConfig.Harvester.DocumentType = cfg.DefaultDocumentType } // Init File Stat list p.prospectorList = make(map[string]ProspectorFileStat) return nil }
// Recursively sanitizes the map keys. see (*AmqpBeat).sanitizeKey
func sanitize(m common.MapStr, cfg *AmqpConfig) {
	// We do a DFS traversal of the nested maps, replacing the keys with sanitized versions
	// Since key replacement is a delete + add, we need to wait until all items in a given
	// map have been traversed before we update the keys. So we use a replacements map
	// to keep track of which keys need to be replaced. Then do the replacements outside
	// of the traversal iteration.
	replacements := make(map[string]string)
	for oldK, v := range m {
		// recurse into nested maps first so children are sanitized before
		// their parent key is (possibly) renamed
		if subm, ok := v.(common.MapStr); ok {
			sanitize(subm, cfg)
		}
		newK := sanitizeKey(oldK, cfg)

		// Valid keys should be left unchanged
		if newK == oldK {
			continue
		}

		if len(newK) == 0 {
			// In the unlikely event we end up with an empty key, (eg if k = '____')
			newK = uuid.New()
			logp.Warn("Sanitizing key %s resulted in empty string, using UUID: %s instead", oldK, newK)
		}

		if _, ok := m[newK]; ok {
			// In the unlikely event we end up with a collision on the key name, append a UUID
			collidingK := newK
			newK = fmt.Sprintf("%s_%s", newK, uuid.New())
			logp.Warn("Sanitizing key %s resulted in collision with %s, appended UUID: %s", oldK, collidingK, newK)
		}
		replacements[oldK] = newK
	}

	// Do the key manipulation as a separate step so that the our traversal loop above
	// doesn't process the new keys
	for oldK, newK := range replacements {
		m[newK] = m[oldK]
		delete(m, oldK)
	}
}
// Init sets up default config for prospector func (p *Prospector) Init() error { if p.ProspectorConfig.IgnoreOlder != "" { var err error p.ProspectorConfig.IgnoreOlderDuration, err = time.ParseDuration(p.ProspectorConfig.IgnoreOlder) if err != nil { logp.Warn("Failed to parse dead time duration '%s'. Error was: %s\n", p.ProspectorConfig.IgnoreOlder, err) return err } } else { logp.Debug("propsector", "Set ignoreOlderDuration to %s", cfg.DefaultIgnoreOlderDuration) // Set it to default p.ProspectorConfig.IgnoreOlderDuration = cfg.DefaultIgnoreOlderDuration } if p.ProspectorConfig.ScanFrequency != "" { var err error p.ProspectorConfig.ScanFrequencyDuration, err = time.ParseDuration(p.ProspectorConfig.ScanFrequency) if err != nil { logp.Warn("Failed to parse dead time duration '%s'. Error was: %s\n", p.ProspectorConfig.IgnoreOlder, err) return err } } else { logp.Debug("propsector", "Set ignoreOlderDuration to %s", cfg.DefaultIgnoreOlderDuration) // Set it to default p.ProspectorConfig.ScanFrequencyDuration = cfg.DefaultScanFrequency } if p.ProspectorConfig.Harvester.BufferSize == 0 { p.ProspectorConfig.Harvester.BufferSize = cfg.DefaultHarvesterBufferSize } // Init list p.prospectorList = make(map[string]ProspectorFileStat) return nil }
// Run starts the ping loop: it registers every configured IPv4/IPv6 target
// with the pinger, publishes one "pingbeat" event per received echo reply,
// and keeps pinging once per period until isAlive is cleared.
func (p *Pingbeat) Run(b *beat.Beat) error {
	p.isAlive = true

	fp := fastping.NewPinger()

	errInput, err := fp.Network(p.pingType)
	if err != nil {
		// invalid ping network type is fatal for the beat
		logp.Critical("Error: %v (input %v)", err, errInput)
		os.Exit(1)
	}

	if p.useIPv4 {
		for addr, details := range p.ipv4targets {
			logp.Debug("pingbeat", "Adding target IP: %s, Name: %s, Tag: %s\n", addr, details[0], details[1])
			fp.AddIP(addr)
		}
	}
	if p.useIPv6 {
		for addr, details := range p.ipv6targets {
			logp.Debug("pingbeat", "Adding target IP: %s, Name: %s, Tag: %s\n", addr, details[0], details[1])
			fp.AddIP(addr)
		}
	}

	// publish one event per echo reply; target name/tag are looked up from
	// the maps populated by AddTarget (details[0]=name, details[1]=tag)
	fp.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
		var name, tag string
		ip := addr.IP
		if ip.To4() != nil {
			name = p.ipv4targets[addr.String()][0]
			tag = p.ipv4targets[addr.String()][1]
		} else {
			name = p.ipv6targets[addr.String()][0]
			tag = p.ipv6targets[addr.String()][1]
		}
		event := common.MapStr{
			"timestamp":   common.Time(time.Now()),
			"type":        "pingbeat",
			"target_name": name,
			"target_addr": addr.String(),
			"tag":         tag,
			"rtt":         milliSeconds(rtt),
		}
		p.events.PublishEvent(event)
	}
	// fp.OnIdle = func() {
	// 	fmt.Println("loop done")
	// }

	// ping all targets once per period while the beat is alive; run errors
	// are logged but do not stop the loop
	for p.isAlive {
		time.Sleep(p.period)
		err := fp.Run()
		if err != nil {
			logp.Warn("Warning: %v", err)
		}
	}
	return nil
}