func (mc *Memcache) correlateTCP(conn *connection) error {
	// merge requests with responses into transactions
	for !conn.responses.empty() {
		var requ *message
		resp := conn.responses.pop()

		for !conn.requests.empty() {
			requ = conn.requests.pop()
			if requ.isBinary != resp.isBinary {
				err := ErrMixOfBinaryAndText
				logp.Warn("%v", err)
				return err
			}

			// If the request and response belong to the same transaction,
			// stop searching and merge them into one transaction below.
			sameTransaction := !requ.isBinary ||
				(requ.opaque == resp.opaque &&
					requ.opcode == resp.opcode)
			if sameTransaction {
				break
			}

			// Check whether we are missing a response to a non-quiet message.
			// Quiet messages only MAY get a response, so the message list must
			// be cleared of all quiet messages that never received one.
			if requ.isBinary && !requ.isQuiet {
				note := NoteNonQuietResponseOnly
				logp.Warn("%s", note)
				requ.AddNotes(note)
			}

			// publish the request on its own
			debug("send single request=%p", requ)
			err := mc.onTCPTrans(requ, nil)
			if err != nil {
				logp.Warn("error processing memcache transaction: %s", err)
			}
			requ = nil
		}

		// Check for a response without a request. This should only happen when
		// a TCP stream is picked up mid-connection (or after a message gap), so
		// that a response is seen without its request.
		if requ == nil {
			debug("found orphan memcached response=%p", resp)
			resp.AddNotes(NoteTransactionNoRequ)
		}

		debug("merge request=%p and response=%p", requ, resp)
		err := mc.onTCPTrans(requ, resp)
		if err != nil {
			logp.Warn("error processing memcache transaction: %s", err)
		}
		// continue processing more transactions (reporting errors only)
	}

	return nil
}

func connectionStartMethod(m *AmqpMessage, args []byte) (bool, bool) {
	major := args[0]
	minor := args[1]
	properties := make(common.MapStr)
	next, err, exists := getTable(properties, args, 2)
	if err {
		// failed to parse the peer properties; the size may be wrong, so give up
		logp.Warn("Failed to parse server properties in connection.start method")
		return false, false
	}
	mechanisms, next, err := getShortString(args, next+4, binary.BigEndian.Uint32(args[next:next+4]))
	if err {
		logp.Warn("Failed to get connection mechanisms")
		return false, false
	}
	locales, _, err := getShortString(args, next+4, binary.BigEndian.Uint32(args[next:next+4]))
	if err {
		logp.Warn("Failed to get connection locales")
		return false, false
	}
	m.Method = "connection.start"
	m.IsRequest = true
	m.Fields = common.MapStr{
		"version-major": major,
		"version-minor": minor,
		"mechanisms":    mechanisms,
		"locales":       locales,
	}
	// if there is a server properties table, add it
	if exists {
		m.Fields["server-properties"] = properties
	}
	return true, true
}

func (config *harvesterConfig) Validate() error {
	// DEPRECATED: remove in 6.0
	if config.ForceCloseFiles {
		config.CloseRemoved = true
		config.CloseRenamed = true
		logp.Warn("DEPRECATED: force_close_files was set to true. Use close_removed + close_renamed")
	}

	// DEPRECATED: remove in 6.0
	if config.CloseOlder > 0 {
		config.CloseInactive = config.CloseOlder
		logp.Warn("DEPRECATED: close_older is deprecated. Use close_inactive")
	}

	// Check input type
	if _, ok := cfg.ValidInputType[config.InputType]; !ok {
		return fmt.Errorf("Invalid input type: %v", config.InputType)
	}

	if config.JSON != nil && len(config.JSON.MessageKey) == 0 &&
		config.Multiline != nil {
		return fmt.Errorf("When using the JSON decoder and multiline together, you need to specify a message_key value")
	}

	if config.JSON != nil && len(config.JSON.MessageKey) == 0 &&
		(len(config.IncludeLines) > 0 || len(config.ExcludeLines) > 0) {
		return fmt.Errorf("When using the JSON decoder and line filtering together, you need to specify a message_key value")
	}

	return nil
}

func basicConsumeMethod(m *AmqpMessage, args []byte) (bool, bool) {
	queue, offset, err := getShortString(args, 3, uint32(args[2]))
	if err {
		logp.Warn("Error getting name of queue in basic consume")
		return false, false
	}
	consumerTag, offset, err := getShortString(args, offset+1, uint32(args[offset]))
	if err {
		logp.Warn("Error getting name of consumer tag in basic consume")
		return false, false
	}
	params := getBitParams(args[offset])
	m.Method = "basic.consume"
	m.IsRequest = true
	m.Request = queue
	m.Fields = common.MapStr{
		"queue":        queue,
		"consumer-tag": consumerTag,
		"no-local":     params[0],
		"no-ack":       params[1],
		"exclusive":    params[2],
		"no-wait":      params[3],
	}
	if args[offset+1] != frameEndOctet && m.ParseArguments {
		arguments := make(common.MapStr)
		_, err, exists := getTable(arguments, args, offset+1)
		if !err && exists {
			m.Fields["arguments"] = arguments
		} else if err {
			m.Notes = append(m.Notes, "Failed to parse additional arguments")
		}
	}
	return true, true
}

func connectionStartOkMethod(m *AmqpMessage, args []byte) (bool, bool) {
	properties := make(common.MapStr)
	next, err, exists := getTable(properties, args, 0)
	if err {
		// failed to parse the peer properties; the size may be wrong, so give up
		logp.Warn("Failed to parse client properties in connection.start-ok method")
		return false, false
	}
	mechanism, next, err := getShortString(args, next+1, uint32(args[next]))
	if err {
		logp.Warn("Failed to get connection mechanism from client")
		return false, false
	}
	_, next, err = getShortString(args, next+4, binary.BigEndian.Uint32(args[next:next+4]))
	if err {
		logp.Warn("Failed to get connection response from client")
		return false, false
	}
	locale, _, err := getShortString(args, next+1, uint32(args[next]))
	if err {
		logp.Warn("Failed to get connection locale from client")
		return false, false
	}
	m.IsRequest = false
	m.Fields = common.MapStr{
		"mechanism": mechanism,
		"locale":    locale,
	}
	// if there is a client properties table, add it
	if exists {
		m.Fields["client-properties"] = properties
	}
	return true, true
}

func basicReturnMethod(m *AmqpMessage, args []byte) (bool, bool) {
	code := binary.BigEndian.Uint16(args[0:2])
	if code < 300 {
		// not an error or exception, so not interesting
		return true, false
	}
	replyText, nextOffset, err := getShortString(args, 3, uint32(args[2]))
	if err {
		logp.Warn("Error getting name of reply text in basic return")
		return false, false
	}
	exchange, nextOffset, err := getShortString(args, nextOffset+1, uint32(args[nextOffset]))
	if err {
		logp.Warn("Error getting name of exchange in basic return")
		return false, false
	}
	routingKey, _, err := getShortString(args, nextOffset+1, uint32(args[nextOffset]))
	if err {
		logp.Warn("Error getting name of routing key in basic return")
		return false, false
	}
	m.Method = "basic.return"
	m.Fields = common.MapStr{
		"exchange":    exchange,
		"routing-key": routingKey,
		"reply-code":  code,
		"reply-text":  replyText,
	}
	return true, false
}

func (msw *metricSetWrapper) multiEventFetch(fetcher mb.EventsFetcher, host string) error {
	start := time.Now()
	events, err := fetcher.Fetch(host)
	elapsed := time.Since(start)

	if err == nil {
		msw.stats.Add(successesKey, 1)
		for _, event := range events {
			event, err = createEvent(msw, host, event, nil, start, elapsed)
			if err != nil {
				logp.Warn("createEvent error: %v", err)
			}
			if event != nil {
				msw.module.pubClient.PublishEvent(event)
				msw.stats.Add(eventsKey, 1)
			}
		}
	} else {
		msw.stats.Add(failuresKey, 1)
		event, err := createEvent(msw, host, nil, err, start, elapsed)
		if err != nil {
			logp.Warn("createEvent error: %v", err)
		}
		if event != nil {
			msw.module.pubClient.PublishEvent(event)
			msw.stats.Add(eventsKey, 1)
		}
	}
	return nil
}

func (*parser) parseBodyChunkedStart(s *stream, m *message) (cont, ok, complete bool) {
	// read the hexadecimal chunk length from the chunk start line
	i := bytes.Index(s.data[s.parseOffset:], []byte("\r\n"))
	if i == -1 {
		return false, true, false
	}
	line := string(s.data[s.parseOffset : s.parseOffset+i])
	_, err := fmt.Sscanf(line, "%x", &m.chunkedLength)
	if err != nil {
		logp.Warn("Failed to understand chunked body start line")
		return false, false, false
	}
	s.parseOffset += i + 2 // skip the length line and its \r\n

	if m.chunkedLength == 0 {
		if len(s.data[s.parseOffset:]) < 2 {
			s.parseState = stateBodyChunkedWaitFinalCRLF
			return false, true, false
		}
		if s.data[s.parseOffset] != '\r' || s.data[s.parseOffset+1] != '\n' {
			logp.Warn("Expected CRLF sequence at end of message")
			return false, false, false
		}
		s.parseOffset += 2 // skip the final CRLF
		m.end = s.parseOffset
		m.Size = uint64(m.end - m.start)
		return false, true, true
	}
	s.bodyReceived = 0
	s.parseState = stateBodyChunked
	return true, true, false
}

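// The parser above reads the HTTP/1.1 chunked transfer coding: each chunk
// starts with its length in hexadecimal, terminated by CRLF. Below is a
// minimal, standalone sketch of the same technique using only the standard
// library ("bytes" and "fmt"); parseChunkSize is a hypothetical helper added
// for illustration, not a function of this package.
func parseChunkSize(data []byte) (length int64, headerLen int, ok bool) {
	// find the CRLF that terminates the chunk-size line
	i := bytes.Index(data, []byte("\r\n"))
	if i == -1 {
		return 0, 0, false // need more data
	}
	// parse the hexadecimal chunk length, e.g. "1a" -> 26
	if _, err := fmt.Sscanf(string(data[:i]), "%x", &length); err != nil {
		return 0, 0, false
	}
	return length, i + 2, true // headerLen includes the trailing CRLF
}
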
func basicDeliverMethod(m *AmqpMessage, args []byte) (bool, bool) {
	consumerTag, offset, err := getShortString(args, 1, uint32(args[0]))
	if err {
		logp.Warn("Failed to get consumer tag in basic deliver")
		return false, false
	}
	deliveryTag := binary.BigEndian.Uint64(args[offset : offset+8])
	params := getBitParams(args[offset+8])
	offset = offset + 9
	exchange, offset, err := getShortString(args, offset+1, uint32(args[offset]))
	if err {
		logp.Warn("Failed to get exchange in basic deliver")
		return false, false
	}
	routingKey, _, err := getShortString(args, offset+1, uint32(args[offset]))
	if err {
		logp.Warn("Failed to get routing key in basic deliver")
		return false, false
	}
	m.Method = "basic.deliver"
	m.Fields = common.MapStr{
		"consumer-tag": consumerTag,
		"delivery-tag": deliveryTag,
		"redelivered":  params[0],
		"routing-key":  routingKey,
	}
	// only set the exchange if it is not the default (nameless) exchange
	if len(exchange) > 0 {
		m.Fields["exchange"] = exchange
	}
	return true, false
}

func (t *Topbeat) exportSystemStats() error {
	load_stat, err := GetSystemLoad()
	if err != nil {
		logp.Warn("Getting load statistics: %v", err)
		return err
	}
	cpu_stat, err := GetCpuTimes()
	if err != nil {
		logp.Warn("Getting cpu times: %v", err)
		return err
	}
	t.addCpuPercentage(cpu_stat)

	cpu_core_stat, err := GetCpuTimesList()
	if err != nil {
		logp.Warn("Getting cpu core times: %v", err)
		return err
	}
	t.addCpuPercentageList(cpu_core_stat)

	mem_stat, err := GetMemory()
	if err != nil {
		logp.Warn("Getting memory details: %v", err)
		return err
	}
	t.addMemPercentage(mem_stat)

	swap_stat, err := GetSwap()
	if err != nil {
		logp.Warn("Getting swap details: %v", err)
		return err
	}
	t.addSwapPercentage(swap_stat)

	event := common.MapStr{
		"@timestamp": common.Time(time.Now()),
		"type":       "system",
		"load":       load_stat,
		"cpu":        cpu_stat,
		"mem":        mem_stat,
		"swap":       swap_stat,
		"count":      1,
	}
	if t.cpuPerCore {
		cpus := common.MapStr{}
		for coreNumber, stat := range cpu_core_stat {
			cpus["cpu"+strconv.Itoa(coreNumber)] = stat
		}
		event["cpus"] = cpus
	}
	t.events.PublishEvent(event)
	return nil
}

func (http *HTTP) receivedHTTPResponse(msg *message) {
	// we need to look up the matching request first
	tuple := msg.TCPTuple

	debugf("Received response with tuple: %s", tuple)

	trans := http.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Warn("Response from unknown transaction. Ignoring: %v", tuple)
		return
	}

	if trans.HTTP == nil {
		logp.Warn("Response without a known request. Ignoring.")
		return
	}

	response := common.MapStr{
		"phrase":         msg.StatusPhrase,
		"code":           msg.StatusCode,
		"content_length": msg.ContentLength,
	}

	if http.parserConfig.SendHeaders {
		if !http.SplitCookie {
			response["response_headers"] = msg.Headers
		} else {
			hdrs := common.MapStr{}
			for name, value := range msg.Headers {
				if name == "set-cookie" {
					hdrs[name] = splitCookiesHeader(value)
				} else {
					hdrs[name] = value
				}
			}
			response["response_headers"] = hdrs
		}
	}

	trans.BytesOut = msg.Size
	trans.HTTP.Update(response)
	trans.Notes = append(trans.Notes, msg.Notes...)

	trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	// save the raw message
	if http.SendResponse {
		trans.ResponseRaw = string(http.cutMessageBody(msg))
	}

	http.publishTransaction(trans)
	http.transactions.Delete(trans.tuple.Hashable())

	debugf("HTTP transaction completed: %s\n", trans.HTTP)
}

func (cpu *CPU) GetSystemStats() (common.MapStr, error) {
	loadStat, err := GetSystemLoad()
	if err != nil {
		logp.Warn("Getting load statistics: %v", err)
		return nil, err
	}
	cpuStat, err := GetCpuTimes()
	if err != nil {
		logp.Warn("Getting cpu times: %v", err)
		return nil, err
	}
	cpu.AddCpuPercentage(cpuStat)

	memStat, err := GetMemory()
	if err != nil {
		logp.Warn("Getting memory details: %v", err)
		return nil, err
	}
	AddMemPercentage(memStat)

	swapStat, err := GetSwap()
	if err != nil {
		logp.Warn("Getting swap details: %v", err)
		return nil, err
	}
	AddSwapPercentage(swapStat)

	event := common.MapStr{
		"@timestamp": common.Time(time.Now()),
		"type":       "system",
		"load":       loadStat,
		"cpu":        GetCpuStatEvent(cpuStat),
		"mem":        GetMemoryEvent(memStat),
		"swap":       GetSwapEvent(swapStat),
	}

	if cpu.CpuPerCore {
		cpuCoreStat, err := GetCpuTimesList()
		if err != nil {
			logp.Warn("Getting cpu core times: %v", err)
			return nil, err
		}
		cpu.AddCpuPercentageList(cpuCoreStat)

		cpus := common.MapStr{}
		for coreNumber, stat := range cpuCoreStat {
			cpus["cpu"+strconv.Itoa(coreNumber)] = GetCpuStatEvent(&stat)
		}
		event["cpus"] = cpus
	}
	return event, nil
}

func LoadGeoIPData(config Geoip) *libgeo.GeoIP {
	geoipPaths := []string{}
	if config.Paths != nil {
		geoipPaths = *config.Paths
	}
	if len(geoipPaths) == 0 {
		// GeoIP lookups are disabled when no paths are configured
		logp.Info("GeoIP disabled: No paths were set under output.geoip.paths")
		return nil
	}

	// look for the first existing path
	var geoipPath string
	for _, path := range geoipPaths {
		fi, err := os.Lstat(path)
		if err != nil {
			logp.Err("GeoIP path could not be loaded: %s", path)
			continue
		}

		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			// follow the symlink to the real database file
			geoipPath, err = filepath.EvalSymlinks(path)
			if err != nil {
				logp.Warn("Could not load GeoIP data: %s", err.Error())
				return nil
			}
		} else {
			geoipPath = path
		}
		break
	}

	if len(geoipPath) == 0 {
		logp.Warn("Couldn't load GeoIP database")
		return nil
	}

	geoLite, err := libgeo.Load(geoipPath)
	if err != nil {
		logp.Warn("Could not load GeoIP data: %s", err.Error())
	}

	logp.Info("Loaded GeoIP data from: %s", geoipPath)
	return geoLite
}

func (amqp *Amqp) amqpMessageParser(s *AmqpStream) (ok bool, complete bool) {
	for s.parseOffset < len(s.data) {
		if len(s.data[s.parseOffset:]) < 8 {
			logp.Warn("AMQP message smaller than a frame, waiting for more data")
			return true, false
		}

		yes, version := isProtocolHeader(s.data[s.parseOffset:])
		if yes {
			debugf("Client header detected, version %d.%d.%d", version[0], version[1], version[2])
			s.parseOffset += 8
		}

		f, err := readFrameHeader(s.data[s.parseOffset:])
		if err {
			// incorrect header
			return false, false
		} else if f == nil {
			// header not complete yet
			return true, false
		}

		switch f.Type {
		case methodType:
			ok, complete = amqp.decodeMethodFrame(s, f.content)
		case headerType:
			ok = amqp.decodeHeaderFrame(s, f.content)
		case bodyType:
			ok, complete = s.decodeBodyFrame(f.content)
		case heartbeatType:
			detailedf("Heartbeat frame received")
		default:
			logp.Warn("Received unknown AMQP frame")
			return false, false
		}

		// cast should be safe because f.size should not be bigger than tcp.TCP_MAX_DATA_IN_STREAM
		s.parseOffset += 8 + int(f.size)
		if !ok {
			return false, false
		} else if complete {
			return true, true
		}
	}
	return ok, complete
}

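// For reference, an AMQP 0-9-1 frame is laid out as: 1 byte frame type,
// 2 bytes channel, 4 bytes payload size, the payload itself, and a trailing
// frame-end octet (0xCE) -- which is why the parser above requires at least
// 8 bytes and advances by 8 + f.size. Below is a minimal, standalone sketch of
// reading just that fixed-size prefix; readFramePrefix is a hypothetical
// helper added for illustration (it needs "encoding/binary"), not the
// readFrameHeader used above.
func readFramePrefix(data []byte) (frameType byte, channel uint16, size uint32, ok bool) {
	if len(data) < 8 {
		return 0, 0, 0, false // not even a full header plus frame-end octet yet
	}
	frameType = data[0]
	channel = binary.BigEndian.Uint16(data[1:3])
	size = binary.BigEndian.Uint32(data[3:7])
	return frameType, channel, size, true
}
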
func (f *FileEvent) ToMapStr() common.MapStr {
	event := common.MapStr{
		"@timestamp": common.Time(f.ReadTime),
		"source":     f.Source,
		"offset":     f.Offset, // Offset here is the offset before the starting char.
		"message":    f.Text,
		"type":       f.DocumentType,
		"input_type": f.InputType,
		"count":      1,
	}

	if f.Fields != nil {
		if f.fieldsUnderRoot {
			for key, value := range *f.Fields {
				// in case of conflicts, overwrite the existing key
				_, found := event[key]
				if found {
					logp.Warn("Overwriting %s key", key)
				}
				event[key] = value
			}
		} else {
			event["fields"] = f.Fields
		}
	}
	return event
}

func (c *Condition) CheckEquals(event common.MapStr) bool {
	for field, equalValue := range c.Equals {
		value, err := event.GetValue(field)
		if err != nil {
			logp.Debug("filter", "unavailable field %s: %v", field, err)
			return false
		}

		// every configured field has to match; bail out on the first mismatch
		switch value.(type) {
		case uint8, uint16, uint32, uint64, int8, int16, int32, int64, int, uint:
			// interface comparison: only equal when dynamic type and value match
			if value != equalValue.Int {
				return false
			}
		case string:
			if value != equalValue.Str {
				return false
			}
		default:
			logp.Warn("unexpected type %T in equals condition; only integers and strings are supported", value)
			return false
		}
	}
	return true
}

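// Note that the comparisons above go through interface values: in Go, two
// interface values are equal only when both the dynamic type and the value
// match, so an event field stored as uint8 will not compare equal to a uint64
// holding the same number. A minimal, standalone illustration of that language
// behaviour; equalsTypeDemo is a hypothetical example added for illustration,
// not part of this package (it only needs "fmt").
func equalsTypeDemo() {
	var fromEvent interface{} = uint8(5)
	fmt.Println(fromEvent == interface{}(uint64(5))) // false: same number, different dynamic type
	fmt.Println(fromEvent == interface{}(uint8(5)))  // true: type and value both match
}
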
func queueDeclareMethod(m *AmqpMessage, args []byte) (bool, bool) {
	name, offset, err := getShortString(args, 3, uint32(args[2]))
	if err {
		logp.Warn("Error getting name of queue in queue declaration")
		return false, false
	}
	m.IsRequest = true
	m.Method = "queue.declare"
	params := getBitParams(args[offset])
	m.Request = name
	m.Fields = common.MapStr{
		"queue":       name,
		"passive":     params[0],
		"durable":     params[1],
		"exclusive":   params[2],
		"auto-delete": params[3],
		"no-wait":     params[4],
	}
	if args[offset+1] != frameEndOctet && m.ParseArguments {
		arguments := make(common.MapStr)
		_, err, exists := getTable(arguments, args, offset+1)
		if !err && exists {
			m.Fields["arguments"] = arguments
		} else if err {
			m.Notes = append(m.Notes, "Failed to parse additional arguments")
		}
	}
	return true, true
}

func (eb *Winlogbeat) Setup(b *beat.Beat) error {
	eb.beat = b
	eb.client = b.Events
	eb.done = make(chan struct{})

	var err error
	eb.checkpoint, err = checkpoint.NewCheckpoint(
		eb.config.Winlogbeat.RegistryFile, 10, 5*time.Second)
	if err != nil {
		return err
	}

	if eb.config.Winlogbeat.Metrics.BindAddress != "" {
		bindAddress := eb.config.Winlogbeat.Metrics.BindAddress
		sock, err := net.Listen("tcp", bindAddress)
		if err != nil {
			return err
		}
		go func() {
			logp.Info("Metrics hosted at http://%s/debug/vars", bindAddress)
			err := http.Serve(sock, nil)
			if err != nil {
				logp.Warn("Unable to launch HTTP service for metrics. %v", err)
				return
			}
		}()
	}
	return nil
}

func exportContainerStats(client *docker.Client, container *docker.APIContainers) Stat {
	var wg sync.WaitGroup
	var event Stat

	statsC := make(chan *docker.Stats)
	errC := make(chan error, 1)
	statsOptions := docker.StatsOptions{
		ID:      container.ID,
		Stats:   statsC,
		Stream:  false,
		Timeout: -1,
	}

	wg.Add(2)
	// fetch the container stats; results are delivered on statsC and the
	// final error (or nil) on errC
	go func() {
		defer wg.Done()
		errC <- client.Stats(statsOptions)
		close(errC)
	}()
	// read the stats and build the event once both channels are served
	go func() {
		defer wg.Done()
		stats := <-statsC
		err := <-errC
		if stats != nil && err == nil {
			event.Stats = *stats
			event.Container = *container
		} else if err == nil && stats == nil {
			logp.Warn("Container stopped when recovering stats: %v", container.ID)
		} else {
			logp.Err("An error occurred while getting docker stats: %v", err)
		}
	}()
	wg.Wait()
	return event
}

// Setup uses the loaded config and creates necessary markers and environment
// settings to allow the beat to be used.
func (eb *Winlogbeat) setup(b *beat.Beat) error {
	config := &eb.config.Winlogbeat

	eb.client = b.Publisher.Connect()

	var err error
	eb.checkpoint, err = checkpoint.NewCheckpoint(config.RegistryFile, 10, 5*time.Second)
	if err != nil {
		return err
	}

	if config.Metrics.BindAddress != "" {
		bindAddress := config.Metrics.BindAddress
		sock, err := net.Listen("tcp", bindAddress)
		if err != nil {
			return err
		}
		go func() {
			logp.Info("Metrics hosted at http://%s/debug/vars", bindAddress)
			err := http.Serve(sock, nil)
			if err != nil {
				logp.Warn("Unable to launch HTTP service for metrics. %v", err)
			}
		}()
	}
	return nil
}

func (d *Dockerbeat) Run(b *beat.Beat) error {
	ticker := time.NewTicker(d.period)
	defer ticker.Stop()

	for {
		select {
		case <-d.done:
			return nil
		case <-ticker.C:
		}

		// check prerequisites
		err := d.checkPrerequisites()
		if err != nil {
			logp.Err("Unable to collect metrics: %v", err)
			continue
		}

		timerStart := time.Now()
		d.RunOneTime(b)
		timerEnd := time.Now()

		duration := timerEnd.Sub(timerStart)
		if duration.Nanoseconds() > d.period.Nanoseconds() {
			logp.Warn("Ignoring tick(s) due to processing taking longer than one period")
		}
	}
}

// bulkCollectPublishFails checks the per-item errors in a bulk response and
// returns all events that need to be retried, based on the status code
// returned for each item. If indexing an event failed due to a problem in the
// event itself (e.g. it does not respect the mapping), the event is dropped.
func bulkCollectPublishFails(
	reader *jsonReader,
	events []common.MapStr,
) []common.MapStr {
	if err := reader.expectDict(); err != nil {
		logp.Err("Failed to parse bulk response: expected JSON object")
		return nil
	}

	// find the 'items' field in the response
	for {
		kind, name, err := reader.nextFieldName()
		if err != nil {
			logp.Err("Failed to parse bulk response")
			return nil
		}

		if kind == dictEnd {
			logp.Err("Failed to parse bulk response: no 'items' field in response")
			return nil
		}

		// found the items array -> continue
		if bytes.Equal(name, nameItems) {
			break
		}

		reader.ignoreNext()
	}

	// check that the items field is an array
	if err := reader.expectArray(); err != nil {
		logp.Err("Failed to parse bulk response: expected items array")
		return nil
	}

	count := len(events)
	failed := events[:0]
	for i := 0; i < count; i++ {
		status, msg, err := itemStatus(reader)
		if err != nil {
			return nil
		}

		if status < 300 {
			continue // ok value
		}

		if status < 500 && status != 429 {
			// hard failure, don't collect
			logp.Warn("Can not index event (status=%v): %s", status, msg)
			continue
		}

		logp.Info("Bulk item insert failed (i=%v, status=%v): %s", i, status, msg)
		failed = append(failed, events[i])
	}
	return failed
}

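// The retry policy applied above boils down to: 2xx/3xx items are treated as
// indexed, 429 (Too Many Requests) and 5xx items are collected for retry, and
// any other 4xx is a hard failure that gets dropped. A minimal, standalone
// restatement of that policy; shouldRetryItem is a hypothetical helper added
// for illustration, not an existing function in this package.
func shouldRetryItem(status int) bool {
	if status < 300 {
		return false // indexed successfully
	}
	if status == 429 || status >= 500 {
		return true // transient server-side condition, worth retrying
	}
	return false // hard failure (e.g. mapping conflict), drop the event
}
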
func (client *Client) PublishEvent(event common.MapStr) error {
	if !client.connected {
		return ErrNotConnected
	}

	index := getIndex(event, client.index)
	logp.Debug("output_elasticsearch", "Publish event: %s", event)

	// insert the events one by one
	status, _, err := client.Index(
		index, event["type"].(string), "", client.params, event)
	if err != nil {
		logp.Warn("Failed to insert a single event: %s", err)
		if err == ErrJSONEncodeFailed {
			// don't retry unencodable values
			return nil
		}
	}
	switch {
	case status == 0: // event was not sent yet
		return nil
	case status >= 500 || status == 429: // server error, retry
		return err
	case status >= 300 && status < 500:
		// won't be able to index the event in Elasticsearch => don't retry
		return nil
	}
	return nil
}

func eventMapping(response io.Reader) common.MapStr {
	fullEvent := map[string]interface{}{}
	scanner := bufio.NewScanner(response)

	// Iterate through all output lines to gather data
	for scanner.Scan() {
		if match := paramMatcher.FindStringSubmatch(scanner.Text()); len(match) == 3 {
			fullEvent[match[1]] = match[2]
		} else {
			logp.Warn("Unexpected line in mntr output: %s", scanner.Text())
		}
	}

	event := schema_.Apply(fullEvent)

	// only exposed by the Leader
	if _, ok := fullEvent["zk_followers"]; ok {
		schemaLeader.ApplyTo(event, fullEvent)
	}

	// only available on Unix platforms
	if _, ok := fullEvent["open_file_descriptor_count"]; ok {
		schemaUnix.ApplyTo(event, fullEvent)
	}

	return event
}

func (amqp *Amqp) handleAmqpResponse(msg *AmqpMessage) {
	tuple := msg.TcpTuple
	trans := amqp.getTransaction(tuple.Hashable())
	if trans == nil || trans.Amqp == nil {
		logp.Warn("Response from unknown transaction. Ignoring.")
		return
	}

	// length = message + 4 bytes class/method + frame-end octet + header
	trans.BytesOut = msg.Body_size + 12
	// merge the fields from request and response
	trans.Amqp.Update(msg.Fields)
	trans.Response = common.OK_STATUS

	if msg.Method == "basic.get-empty" {
		trans.Method = "basic.get-empty"
	}

	trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6)
	trans.Notes = msg.Notes

	amqp.publishTransaction(trans)

	debugf("Amqp transaction completed")

	// remove from map
	amqp.transactions.Delete(trans.tuple.Hashable())
	if trans.timer != nil {
		trans.timer.Stop()
	}
}

func (conn *Connection) execRequest(
	method, url string,
	body io.Reader,
) (int, []byte, error) {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		logp.Warn("Failed to create request: %v", err)
		return 0, nil, err
	}

	req.Header.Add("Accept", "application/json")
	if conn.Username != "" || conn.Password != "" {
		req.SetBasicAuth(conn.Username, conn.Password)
	}

	resp, err := conn.http.Do(req)
	if err != nil {
		conn.connected = false
		return 0, nil, err
	}
	defer closing(resp.Body)

	status := resp.StatusCode
	if status >= 300 {
		conn.connected = false
		return status, nil, fmt.Errorf("%v", resp.Status)
	}

	obj, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		conn.connected = false
		return status, nil, err
	}
	return status, obj, nil
}

func getConnection(private protos.ProtocolData) *connection {
	if private == nil {
		return nil
	}

	priv, ok := private.(*connection)
	if !ok {
		logp.Warn("cassandra connection type error")
		return nil
	}
	if priv == nil {
		logp.Warn("Unexpected: cassandra connection data not set")
		return nil
	}
	return priv
}

func (mc *Memcache) pushAllTCPTrans(conn *connection) {
	if conn == nil {
		return
	}

	// first let's try to send finished transactions
	// (unlikely we have some, though)
	mc.correlateTCP(conn)

	// only requests left in the map:
	debug("publish incomplete transactions")
	for !conn.requests.empty() {
		msg := conn.requests.pop()
		if !msg.isQuiet && !msg.noreply {
			msg.AddNotes(NoteTransUnfinished)
			unfinishedTransactions.Add(1)
		}
		debug("push incomplete request=%p", msg)
		err := mc.onTCPTrans(msg, nil)
		if err != nil {
			logp.Warn("failed to publish unfinished transaction with %v", err)
		}
		// continue processing more transactions (reporting errors only)
	}
}

func (h *Harvester) close() {
	// Mark harvester as finished
	h.state.Finished = true

	logp.Debug("harvester", "Stopping harvester for file: %s", h.state.Source)

	// Make sure the file is closed as soon as the harvester exits.
	// If the file was never opened, it can't be closed.
	if h.file != nil {
		h.file.Close()
		logp.Debug("harvester", "Closing file: %s", h.state.Source)
		harvesterOpenFiles.Add(-1)

		// On completion, push the offset so we can continue where we left off
		// if we relaunch on the same file. Only send the offset if the file
		// object was created successfully.
		h.sendStateUpdate()
	} else {
		logp.Warn("Stopping harvester, NOT closing file as file info not available: %s", h.state.Source)
	}

	harvesterClosed.Add(1)
}

func ensureMemcacheConnection(private protos.ProtocolData) *tcpConnectionData {
	if private == nil {
		return &tcpConnectionData{}
	}

	priv, ok := private.(*tcpConnectionData)
	if !ok {
		logp.Warn("memcache connection data type error, create new one")
		return &tcpConnectionData{}
	}
	if priv == nil {
		logp.Warn("Unexpected: memcache TCP connection data not set, create new one")
		return &tcpConnectionData{}
	}
	return priv
}