Example #1
// writeRegistry writes the new json registry file to disk.
func (r *Registrar) writeRegistry() error {
	logp.Debug("registrar", "Write registry file: %s", r.registryFile)

	tempfile := r.registryFile + ".new"
	f, err := os.Create(tempfile)
	if err != nil {
		logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err)
		return err
	}

	// First clean up states
	states := r.states.GetStates()

	encoder := json.NewEncoder(f)
	err = encoder.Encode(states)
	if err != nil {
		f.Close()
		logp.Err("Error when encoding the states: %s", err)
		return err
	}

	// Close the file before rotating: Windows cannot rename an open file
	f.Close()

	logp.Debug("registrar", "Registry file updated. %d states written.", len(states))
	registryWrites.Add(1)
	statesCurrent.Set(int64(len(states)))

	return file.SafeFileRotate(r.registryFile, tempfile)
}
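The pattern above — write to a temp file, then rotate it over the target — is what makes the registry update atomic. Below is a minimal standard-library sketch of the same idea; the helper name writeJSONAtomic is ours, and file.SafeFileRotate is assumed to behave roughly like os.Rename plus extra Windows handling.

package main

import (
	"encoding/json"
	"os"
)

// writeJSONAtomic writes v as JSON to path by first writing a temp file and
// then renaming it over the target, so readers never observe a partial file.
// Illustrative only; beats uses file.SafeFileRotate instead of os.Rename.
func writeJSONAtomic(path string, v interface{}) error {
	tmp := path + ".new"
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if err := json.NewEncoder(f).Encode(v); err != nil {
		f.Close()
		return err
	}
	// Close before renaming: Windows cannot rename an open file.
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	states := map[string]int64{"/var/log/syslog": 1024}
	if err := writeJSONAtomic("registry.json", states); err != nil {
		panic(err)
	}
}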
Example #2
// harvestExistingFile continues harvesting a file with a known state if needed
func (p *ProspectorLog) harvestExistingFile(newState file.State, oldState file.State) {

	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// TODO: check for ignore_older reached? or should that happen in scan already?

	// No harvester is running for the file, start a new harvester
	// It is important here that only the size is checked and not modification time, as modification time could be incorrect on windows
	// https://blogs.technet.microsoft.com/asiasupp/2010/12/14/file-date-modified-property-are-not-updating-while-modifying-a-file-without-closing-it/
	if oldState.Finished && newState.Fileinfo.Size() > oldState.Offset {
		// Resume harvesting of an old file we've stopped harvesting from
		// This could also be an issue with force_close_older: a new harvester might be started after each scan even though it is not needed?
		// One problem with comparing modTime is that it is in seconds, and scans can happen more than once a second
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		p.Prospector.startHarvester(newState, oldState.Offset)

	} else if oldState.Source != "" && oldState.Source != newState.Source {
		// This does not start a new harvester, as it is assumed that the old harvester is still running
		// or no new lines were detected. It only sends an event status update to make sure the new name is persisted.
		logp.Debug("prospector", "File rename was detected, updating state: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		h, err := p.Prospector.createHarvester(newState)
		if err != nil {
			logp.Err("Error creating harvester for renamed file: %s", err)
			return
		}
		h.SetOffset(oldState.Offset)

		// Update state because of file rotation
		h.SendStateUpdate()
	} else {
		// TODO: improve logging dependent on what the exact reason is that harvesting does not continue
		// Nothing to do. Harvester is still running and file was not renamed
		logp.Debug("prospector", "No updates needed, file %s is already harvested.", newState.Source)
	}
}
Example #3
// Check if harvester for new file has to be started
// For a new file the following options exist:
func (p ProspectorLog) checkNewFile(h *harvester.Harvester) {

	logp.Debug("prospector", "Start harvesting unknown file: %s", h.Path)

	// Fetch the last known state, if one exists for the given file
	offset, resuming := p.Prospector.registrar.fetchState(h.Path, h.Stat.Fileinfo)

	if p.checkOldFile(h) {

		logp.Debug("prospector", "Fetching old state of file to resume: %s", h.Path)

		// Are we resuming a dead file? We have to resume even if dead so we catch any old updates to the file
		// This is safe as the harvester, once it hits the EOF and a timeout, will stop harvesting
		// Once we detect changes again we can start another harvester - this keeps the number of goroutines to a minimum
		if resuming {
			logp.Debug("prospector", "Resuming harvester on a previously harvested file: %s", h.Path)
			p.resumeHarvesting(h, offset)
		} else {
			// Old file, skip it, but push offset of file size so we start from the end if this file changes and needs picking up
			logp.Debug("prospector", "Skipping file (older than ignore older of %v, %v): %s",
				p.config.IgnoreOlderDuration,
				time.Since(h.Stat.Fileinfo.ModTime()),
				h.Path)
			h.Stat.Skip(h.Stat.Fileinfo.Size())
		}
	} else if previousFile, err := p.getPreviousFile(h.Path, h.Stat.Fileinfo); err == nil {
		p.continueExistingFile(h, previousFile)
	} else {
		p.resumeHarvesting(h, offset)
	}
}
Example #4
// checkExistingFile checks if a harvester has to be started for an already known file
// For existing files the following options exist:
// * Last reading position is 0, no harvester has to be started as the old harvester is probably still busy
// * The old known modification time is older than the current one. Start at the last known position
// * The new file is not the same as the old file, which means the file was renamed
// ** The new file is actually really a new file, start a new harvester
// ** The renamed file has a state, continue there
func (p ProspectorLog) checkExistingFile(h *harvester.Harvester, newFile *input.File, oldFile *input.File) {

	logp.Debug("prospector", "Update existing file for harvesting: %s", h.Path)

	// We assumed it was the same file, but it wasn't
	if !oldFile.IsSameFile(newFile) {

		logp.Debug("prospector", "File previously found: %s", h.Path)

		if previousFile, err := p.getPreviousFile(h.Path, h.Stat.Fileinfo); err == nil {
			p.continueExistingFile(h, previousFile)
		} else {
			// File is not the same file we saw previously, it must have rotated and is a new file
			logp.Debug("prospector", "Launching harvester on rotated file: %s", h.Path)

			// Forget about the previous harvester and let it continue on the old file - so start a new channel to use with the new harvester
			h.Stat.Ignore()

			// Start a new harvester on the path
			h.Start()
		}

		// Keep the old file in missingFiles so we don't rescan it if it was renamed and we've not yet reached the new filename
		// We only need to keep it for the remainder of this iteration then we can assume it was deleted and forget about it
		p.missingFiles[h.Path] = oldFile.FileInfo

	} else if h.Stat.Finished() && oldFile.FileInfo.ModTime() != h.Stat.Fileinfo.ModTime() {
		// Resume harvesting of an old file we've stopped harvesting from
		// Start a harvester on the path; a file was just modified and it doesn't have a harvester
		// The offset to continue from will be stored in the harvester channel - so take that to use and also clear the channel
		p.resumeHarvesting(h, <-h.Stat.Return)
	} else {
		logp.Debug("prospector", "Not harvesting, file didn't change: %s", h.Path)
	}
}
Example #5
func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IPPortTuple) (procTuple *common.CmdlineTuple) {
	procTuple = &common.CmdlineTuple{}

	if !proc.readFromProc {
		return
	}

	if proc.isLocalIP(tuple.SrcIP) {
		logp.Debug("procs", "Looking for port %d", tuple.SrcPort)
		procTuple.Src = []byte(proc.findProc(tuple.SrcPort))
		if len(procTuple.Src) > 0 {
			logp.Debug("procs", "Found device %s for port %d", procTuple.Src, tuple.SrcPort)
		}
	}

	if proc.isLocalIP(tuple.DstIP) {
		logp.Debug("procs", "Looking for port %d", tuple.DstPort)
		procTuple.Dst = []byte(proc.findProc(tuple.DstPort))
		if len(procTuple.Dst) > 0 {
			logp.Debug("procs", "Found device %s for port %d", procTuple.Dst, tuple.DstPort)
		}
	}

	return
}
Example #6
// Run runs the spooler
// It heartbeats periodically. If the last flush was longer than
// 'IdleTimeoutDuration' time ago, then we'll force a flush to prevent us from
// holding on to spooled events for too long.
func (s *Spooler) Run() {

	config := &s.Filebeat.FbConfig.Filebeat

	// Sets up the ticker channel; make sure it is stopped when Run returns
	ticker := time.NewTicker(config.IdleTimeoutDuration / 2)
	defer ticker.Stop()

	s.spool = make([]*input.FileEvent, 0, config.SpoolSize)

	logp.Info("Starting spooler: spool_size: %v; idle_timeout: %s", config.SpoolSize, config.IdleTimeoutDuration)

	// Loops until the exit signal arrives. The label is needed because a
	// bare break inside select would only exit the select, not the loop
loop:
	for {
		select {

		case <-s.exit:
			break loop
		case event := <-s.Channel:
			s.spool = append(s.spool, event)

			// Spooler is full -> flush
			if len(s.spool) == cap(s.spool) {
				logp.Debug("spooler", "Flushing spooler because spooler full. Events flushed: %v", len(s.spool))
				s.flush()
			}
		case <-ticker.C:
			// Flush periodically
			if time.Now().After(s.nextFlushTime) {
				logp.Debug("spooler", "Flushing spooler because of timeout. Events flushed: %v", len(s.spool))
				s.flush()
			}
		}
	}
}
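A self-contained sketch of the same batch-or-timeout pattern, stripped of the beats types (all names here are illustrative, not beats APIs); it also shows why the labeled break matters.

package main

import (
	"fmt"
	"time"
)

// batcher collects items and flushes either when the batch is full or when
// the ticker fires. Illustrative sketch only.
func batcher(in <-chan string, done <-chan struct{}, size int, idle time.Duration) {
	batch := make([]string, 0, size)
	ticker := time.NewTicker(idle)
	defer ticker.Stop()

	flush := func() {
		if len(batch) > 0 {
			fmt.Printf("flushing %d items\n", len(batch))
			batch = batch[:0]
		}
	}

loop:
	for {
		select {
		case <-done:
			break loop // a bare break would only leave the select
		case item := <-in:
			batch = append(batch, item)
			if len(batch) == cap(batch) {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
	flush() // drain whatever is left on shutdown
}

func main() {
	in := make(chan string)
	done := make(chan struct{})
	go batcher(in, done, 3, 100*time.Millisecond)
	for _, s := range []string{"a", "b", "c", "d"} {
		in <- s
	}
	close(done)
	time.Sleep(50 * time.Millisecond) // give the goroutine time to flush
}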
Example #7
func (fs *FileSizeBeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&fs.config, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	if fs.config.Input.Period != nil {
		fs.period = time.Duration(*fs.config.Input.Period) * time.Second
	} else {
		fs.period = 10 * time.Second
	}
	logp.Debug("filesizebeat", "Period %v\n", fs.period)

	if fs.config.Input.Paths != nil {
		//fs.paths = make([]Path, len(*fs.config.Input.Paths))
		for _, path := range *fs.config.Input.Paths {
			err := fs.AddPath(path)
			if err != nil {
				logp.Critical("Error: %v", err)
				os.Exit(1)
			}
		}
		logp.Debug("filesizebeat", "Paths : %v\n", fs.paths)
	} else {
		logp.Critical("Error: no paths specified, cannot continue!")
		os.Exit(1)
	}
	return nil
}
Example #8
// harvestExistingFile continues harvesting a file with a known state if needed
func (p *ProspectorLog) harvestExistingFile(newState input.FileState, oldState input.FileState) {

	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// No harvester is running for the file, start a new harvester
	if oldState.Finished {
		// TODO: should we check if modtime old / new is the same -> no harvester has to be started -> prevent duplicates?
		// Resume harvesting of an old file we've stopped harvesting from
		// This could also be an issue with force_close_older: a new harvester might be started after each scan even though it is not needed?
		// One problem with comparing modTime is that it is in seconds, and scans can happen more than once a second
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		p.Prospector.startHarvester(newState, oldState.Offset)

	} else if oldState.Source != "" && oldState.Source != newState.Source {
		logp.Debug("prospector", "File rename was detected, updating state: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		h, err := p.Prospector.createHarvester(newState)
		if err != nil {
			logp.Err("Error creating harvester for renamed file: %s", err)
			return
		}
		h.SetOffset(oldState.Offset)
		// Update state because of file rotation
		h.SendStateUpdate()
	} else {
		// TODO: It could be that a harvester is still harvesting the file?
		logp.Debug("prospector", "Not harvesting, file didn't change: %s", newState.Source)
	}
}
Example #9
func (p *ProspectorLog) Run() {
	logp.Debug("prospector", "Start next scan")

	p.scan()

	// It is important that a first scan is run before cleanup to make sure all new states are read first
	if p.config.CleanInactive > 0 || p.config.CleanRemoved {
		beforeCount := p.Prospector.states.Count()
		p.Prospector.states.Cleanup()
		logp.Debug("prospector", "Prospector states cleaned up. Before: %d, After: %d", beforeCount, p.Prospector.states.Count())
	}

	// Cleanup of removed files only happens after the next scan. Otherwise it can happen that not all states
	// were updated before cleanup is called
	if p.config.CleanRemoved {
		for _, state := range p.Prospector.states.GetStates() {
			// os.Stat will return an error in case the file does not exist
			_, err := os.Stat(state.Source)
			if err != nil {
				state.TTL = 0
				event := input.NewEvent(state)
				p.Prospector.harvesterChan <- event
				logp.Debug("prospector", "Remove state for file as file removed: %s", state.Source)
			}
		}
	}
}
Example #10
func (d *Dockerbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&d.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// Init the period
	if d.TbConfig.Input.Period != nil {
		d.period = time.Duration(*d.TbConfig.Input.Period) * time.Second
	} else {
		d.period = 1 * time.Second
	}
	// Init the socket
	if d.TbConfig.Input.Socket != nil {
		d.socket = *d.TbConfig.Input.Socket
	} else {
		d.socket = "unix:///var/run/docker.sock" // default docker socket location
	}

	logp.Debug("dockerbeat", "Init dockerbeat")
	logp.Debug("dockerbeat", "Follow docker socket %q\n", d.socket)
	logp.Debug("dockerbeat", "Period %v\n", d.period)

	return nil
}
Example #11
func (dns *Dns) GapInStream(tcpTuple *common.TcpTuple, dir uint8, nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) {
	if private == nil {
		return private, true
	}
	conn, ok := private.(*dnsConnectionData)
	if !ok {
		return private, false
	}
	stream := conn.Data[dir]

	if stream == nil || stream.message == nil {
		return private, false
	}

	decodedData, err := stream.handleTcpRawData()

	if err == nil {
		dns.messageComplete(conn, tcpTuple, dir, decodedData)
		return private, true
	}

	if dir == tcp.TcpDirectionReverse {
		dns.publishResponseError(conn, err)
	}

	logp.Debug("dns", "%s addresses %s, length %d", err.Error(),
		tcpTuple.String(), len(stream.rawData))
	logp.Debug("dns", "Dropping the stream %s", tcpTuple.String())

	// drop the stream because it is binary data and it would be unexpected to have a decodable message later
	return private, true
}
Example #12
func (c *Crawler) Start(states file.States) error {

	logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs))

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, prospectorConfig := range c.prospectorConfigs {

		prospector, err := prospector.NewProspector(prospectorConfig, states, c.spooler.Channel)
		if err != nil {
			return fmt.Errorf("Error in initing prospector: %s", err)
		}
		c.prospectors = append(c.prospectors, prospector)
	}

	logp.Info("Loading Prospectors completed. Number of prospectors: %v", len(c.prospectors))

	for i, p := range c.prospectors {
		c.wg.Add(1)

		go func(id int, prospector *prospector.Prospector) {
			defer func() {
				c.wg.Done()
				logp.Debug("crawler", "Prospector %v stopped", id)
			}()
			logp.Debug("crawler", "Starting prospector %v", id)
			prospector.Run()
		}(i, p)
	}

	logp.Info("All prospectors are initialised and running with %d states to persist", states.Count())

	return nil
}
Example #13
File: log.go Project: yhyang1/beats
func (h *Harvester) initFileOffset(file *os.File) error {
	offset, err := file.Seek(0, os.SEEK_CUR)

	if h.getOffset() > 0 {
		// continue from last known offset

		logp.Debug("harvester",
			"harvest: %q position:%d (offset snapshot:%d)", h.Path, h.getOffset(), offset)
		_, err = file.Seek(h.getOffset(), os.SEEK_SET)
	} else if h.Config.TailFiles {
		// tail file if file is new and tail_files config is set

		logp.Debug("harvester",
			"harvest: (tailing) %q (offset snapshot:%d)", h.Path, offset)
		offset, err = file.Seek(0, os.SEEK_END)
		h.SetOffset(offset)

	} else {
		// get the current offset from the file, since the encoding
		// factory may already have read some data
		logp.Debug("harvester", "harvest: %q (offset snapshot:%d)", h.Path, offset)
		h.SetOffset(offset)
	}

	return err
}
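For reference, the whence values used above: os.SEEK_CUR reads the current position, os.SEEK_SET jumps to an absolute offset, and os.SEEK_END jumps to the end (modern code uses the io.SeekCurrent/io.SeekStart/io.SeekEnd constants instead). A quick standalone demonstration:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seekdemo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	f.WriteString("0123456789")

	cur, _ := f.Seek(0, io.SeekCurrent) // where we are now: 10, after the write
	abs, _ := f.Seek(4, io.SeekStart)   // resume from a saved offset: 4
	end, _ := f.Seek(0, io.SeekEnd)     // jump to the end, like tail_files: 10
	fmt.Println(cur, abs, end)          // 10 4 10
}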
Example #14
func FindSocketsOfPid(prefix string, pid int) (inodes []int64, err error) {

	dirname := filepath.Join(prefix, "/proc", strconv.Itoa(pid), "fd")
	procfs, err := os.Open(dirname)
	if err != nil {
		return []int64{}, fmt.Errorf("Open: %s", err)
	}
	defer procfs.Close()
	names, err := procfs.Readdirnames(0)
	if err != nil {
		return []int64{}, fmt.Errorf("Readdirnames: %s", err)
	}

	for _, name := range names {
		link, err := os.Readlink(filepath.Join(dirname, name))
		if err != nil {
			logp.Debug("procs", "Readlink %s: %s", name, err)
			continue
		}

		if strings.HasPrefix(link, "socket:[") {
			inode, err := strconv.ParseInt(link[8:len(link)-1], 10, 64)
			if err != nil {
				logp.Debug("procs", "ParseInt: %s:", err)
				continue
			}

			inodes = append(inodes, inode)
		}
	}

	return inodes, nil
}
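A quick usage sketch for the function above (Linux only, since it walks /proc; it assumes FindSocketsOfPid from above is in the same package):

package main

import (
	"fmt"
	"os"
)

func main() {
	// List the socket inodes held open by the current process.
	inodes, err := FindSocketsOfPid("", os.Getpid())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, inode := range inodes {
		fmt.Println("socket inode:", inode)
	}
}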
Example #15
func isSpecialPgsqlCommand(data []byte) (bool, int) {

	if len(data) < 8 {
		// 8 bytes required
		return false, 0
	}

	// read length
	length := int(common.Bytes_Ntohl(data[0:4]))

	// read command identifier
	code := int(common.Bytes_Ntohl(data[4:8]))

	if length == 16 && code == 80877102 {
		// Cancel Request
		logp.Debug("pgsqldetailed", "Cancel Request, length=%d", length)
		return true, CancelRequest
	} else if length == 8 && code == 80877103 {
		// SSL Request
		logp.Debug("pgsqldetailed", "SSL Request, length=%d", length)
		return true, SSLRequest
	} else if code == 196608 {
		// Startup Message
		logp.Debug("pgsqldetailed", "Startup Message, length=%d", length)
		return true, StartupMessage
	}
	return false, 0
}
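common.Bytes_Ntohl reads a 32-bit big-endian (network byte order) integer; binary.BigEndian.Uint32 from the standard library does the same. A check against the SSLRequest packet recognized above (length 8, code 80877103):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A PostgreSQL SSLRequest frame: int32 length (8), int32 code (80877103).
	data := []byte{0x00, 0x00, 0x00, 0x08, 0x04, 0xd2, 0x16, 0x2f}

	length := int(binary.BigEndian.Uint32(data[0:4]))
	code := int(binary.BigEndian.Uint32(data[4:8]))
	fmt.Println(length, code) // 8 80877103
}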
Example #16
// convertReplyToMap converts a bulk string reply from Redis to a map
func convertReplyToMap(s string) (map[string]string, error) {
	info := make(map[string]string)

	// Regex for INFO type property
	infoRegex := `^\s*#\s*\w+\s*$`
	r, err := regexp.Compile(infoRegex)
	if err != nil {
		return nil, errors.New("Regex failed to compile")
	}

	// http://redis.io/topics/protocol#bulk-string-reply
	a := strings.Split(s, "\r\n")

	for _, v := range a {
		if r.MatchString(v) || v == "" {
			logp.Debug("redisbeat", "Skipping reply string - \"%v\"", v)
			continue
		}
		entry := strings.Split(v, ":")
		logp.Debug("redisbeat", "Entry: %#v\n", entry)
		info[entry[0]] = entry[1]
	}
	return info, nil
}
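A usage sketch, feeding the parser a fragment shaped like a real Redis INFO reply (assumes convertReplyToMap from above is in scope):

package main

import "fmt"

func main() {
	reply := "# Server\r\nredis_version:3.2.0\r\nuptime_in_seconds:12345\r\n"
	info, err := convertReplyToMap(reply)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(info["redis_version"])     // 3.2.0
	fmt.Println(info["uptime_in_seconds"]) // 12345
}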
Example #17
func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IpPortTuple) (proc_tuple *common.CmdlineTuple) {
	proc_tuple = &common.CmdlineTuple{}

	if !proc.ReadFromProc {
		return
	}

	if proc.IsLocalIp(tuple.Src_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Src_port)
		proc_tuple.Src = []byte(proc.FindProc(tuple.Src_port))
		if len(proc_tuple.Src) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Src, tuple.Src_port)
		}
	}

	if proc.IsLocalIp(tuple.Dst_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Dst_port)
		proc_tuple.Dst = []byte(proc.FindProc(tuple.Dst_port))
		if len(proc_tuple.Dst) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Dst, tuple.Dst_port)
		}
	}

	return
}
Example #18
// Scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			err := p.handleIgnoreOlder(lastState, newState)
			if err != nil {
				logp.Err("Updating ignore_older state error: %s", err)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			err := p.Prospector.startHarvester(newState, 0)
			if err != nil {
				logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
			}
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Example #19
func (p *ProspectorLog) Run() {
	logp.Debug("prospector", "Start next scan")

	p.scan()

	// It is important that a first scan is run before cleanup to make sure all new states are read first
	if p.config.CleanInactive > 0 || p.config.CleanRemoved {
		beforeCount := p.Prospector.states.Count()
		cleanedStates := p.Prospector.states.Cleanup()
		logp.Debug("prospector", "Prospector states cleaned up. Before: %d, After: %d", beforeCount, beforeCount-cleanedStates)
	}

	// Marking removed files to be cleaned up. Cleanup happens after next scan to make sure all states are updated first
	if p.config.CleanRemoved {
		for _, state := range p.Prospector.states.GetStates() {
			// os.Stat will return an error in case the file does not exist
			_, err := os.Stat(state.Source)
			if err != nil {
				// Only clean up files where state is Finished
				if state.Finished {
					state.TTL = 0
					err := p.Prospector.updateState(input.NewEvent(state))
					if err != nil {
						logp.Err("File cleanup state update error: %s", err)
					}
					logp.Debug("prospector", "Remove state for file as file removed: %s", state.Source)
				} else {
					logp.Debug("prospector", "State for file not removed because not finished: %s", state.Source)
				}
			}
		}
	}
}
Example #20
func (thrift *thriftPlugin) receivedReply(msg *thriftMessage) {

	// we need to look up the matching request first.
	tuple := msg.tcpTuple

	trans := thrift.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Debug("thrift", "Response from unknown transaction. Ignoring: %v", tuple)
		unmatchedResponses.Add(1)
		return
	}

	if trans.request.method != msg.method {
		logp.Debug("thrift", "Response from another request received '%s' '%s'"+
			". Ignoring.", trans.request.method, msg.method)
		unmatchedResponses.Add(1)
		return
	}

	trans.reply = msg
	trans.bytesOut = uint64(msg.frameSize)

	trans.responseTime = int32(msg.ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	thrift.publishQueue <- trans
	thrift.transactions.Delete(tuple.Hashable())

	logp.Debug("thrift", "Transaction queued")
}
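The response-time line above converts a time.Duration to milliseconds by hand; since Go 1.13 the same can be written with Duration.Milliseconds. A small sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	reqTs := time.Now()
	respTs := reqTs.Add(37 * time.Millisecond)

	// Equivalent to int32(respTs.Sub(reqTs).Nanoseconds() / 1e6)
	respTime := respTs.Sub(reqTs).Milliseconds()
	fmt.Println(respTime, "ms") // 37 ms
}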
Example #21
// handleIgnoreOlder handles states which fall under ignore_older
// Based on the state information it decides whether the state has to be updated
func (p *ProspectorLog) handleIgnoreOlder(lastState, newState file.State) error {
	logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)

	if !lastState.IsEmpty() {
		if !lastState.Finished {
			logp.Info("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
		}
		// An old state exists, no need to update it
		return nil
	}

	// Make sure file is not falling under clean_inactive yet
	if p.isCleanInactive(newState) {
		logp.Debug("prospector", "Do not write state for ignore_older because clean_inactive reached")
		return nil
	}

	// Set offset to end of file to be consistent with files which were harvested before
	// See https://github.com/elastic/beats/pull/2907
	newState.Offset = newState.Fileinfo.Size()

	// Write state for ignore_older file as none exists yet
	newState.Finished = true
	err := p.Prospector.updateState(input.NewEvent(newState))
	if err != nil {
		return err
	}

	return nil
}
Example #22
File: bulkapi.go Project: tsg/beats
// BulkWith creates an HTTP request containing a batch of operations and sends
// them to Elasticsearch. The request is retransmitted up to max_retries times
// before returning an error.
func (conn *Connection) BulkWith(
	index string,
	docType string,
	params map[string]string,
	metaBuilder MetaBuilder,
	body []interface{},
) (*QueryResult, error) {
	if len(body) == 0 {
		logp.Debug("elasticsearch", "Empty channel. Wait for more data.")
		return nil, nil
	}

	path, err := makePath(index, docType, "_bulk")
	if err != nil {
		return nil, err
	}

	buf := bulkEncode(metaBuilder, body)
	if buf.Len() == 0 {
		logp.Debug("elasticsearch", "Empty channel. Wait for more data.")
		return nil, nil
	}

	_, resp, err := conn.sendBulkRequest("POST", path, params, &buf)
	if err != nil {
		return nil, err
	}
	return readQueryResult(resp)
}
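The _bulk endpoint expects newline-delimited JSON: one action/metadata line followed by one source line per document. A minimal sketch of building such a body with the standard library (this is not the bulkEncode/MetaBuilder implementation from beats, just the wire format):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf) // Encode appends a newline after each value

	docs := []map[string]interface{}{
		{"message": "hello"},
		{"message": "world"},
	}
	for _, doc := range docs {
		meta := map[string]interface{}{
			"index": map[string]string{"_index": "logs", "_type": "doc"},
		}
		enc.Encode(meta) // action/metadata line; errors elided for brevity
		enc.Encode(doc)  // source line
	}
	fmt.Print(buf.String())
}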
Example #23
func (dns *Dns) ParseUdp(pkt *protos.Packet) {
	defer logp.Recover("Dns ParseUdp")

	logp.Debug("dns", "Parsing packet addressed with %s of length %d.",
		pkt.Tuple.String(), len(pkt.Payload))

	dnsPkt, err := decodeDnsData(TransportUdp, pkt.Payload)
	if err != nil {
		// This means that malformed requests or responses are being sent or
		// that someone is attempting to use the DNS port for non-DNS traffic. Both
		// are issues that a monitoring system should report.
		logp.Debug("dns", "%s", err.Error())
		return
	}

	dnsTuple := DnsTupleFromIpPort(&pkt.Tuple, TransportUdp, dnsPkt.ID)
	dnsMsg := &DnsMessage{
		Ts:           pkt.Ts,
		Tuple:        pkt.Tuple,
		CmdlineTuple: procs.ProcWatcher.FindProcessesTuple(&pkt.Tuple),
		Data:         dnsPkt,
		Length:       len(pkt.Payload),
	}

	if dnsMsg.Data.QR == Query {
		dns.receivedDnsRequest(&dnsTuple, dnsMsg)
	} else /* Response */ {
		dns.receivedDnsResponse(&dnsTuple, dnsMsg)
	}
}
Example #24
// init packetbeat components
func (pb *packetbeat) init(b *beat.Beat) error {

	cfg := &pb.config
	err := procs.ProcWatcher.Init(cfg.Procs)
	if err != nil {
		logp.Critical(err.Error())
		return err
	}

	// This is required because the Beat's init is called before the beat publisher is initialised
	b.Config.Shipper.InitShipperConfig()

	pb.pub, err = publish.NewPublisher(b.Publisher, *b.Config.Shipper.QueueSize, *b.Config.Shipper.BulkQueueSize, pb.config.IgnoreOutgoing)
	if err != nil {
		return fmt.Errorf("Initializing publisher failed: %v", err)
	}

	logp.Debug("main", "Initializing protocol plugins")
	err = protos.Protos.Init(false, pb.pub, cfg.Protocols)
	if err != nil {
		return fmt.Errorf("Initializing protocol analyzers failed: %v", err)
	}

	logp.Debug("main", "Initializing sniffer")
	err = pb.setupSniffer()
	if err != nil {
		return fmt.Errorf("Initializing sniffer failed: %v", err)
	}

	return nil
}
Example #25
// Scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)
			if lastState.IsEmpty() && lastState.Finished == false {
				logp.Err("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			p.Prospector.startHarvester(newState, 0)
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Example #26
// handleIgnoreOlder handles states which fall under ignore_older
// Based on the state information it decides whether the state has to be updated
func (p *ProspectorLog) handleIgnoreOlder(lastState, newState file.State) error {
	logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)

	if !lastState.IsEmpty() {
		if !lastState.Finished {
			logp.Info("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
		}
		// An old state exists, no need to update it
		return nil
	}

	// Make sure file is not falling under clean_inactive yet
	if p.isCleanInactive(newState) {
		logp.Debug("prospector", "Do not write state for ignore_older because clean_inactive reached")
		return nil
	}

	// Write state for ignore_older file as none exists yet
	newState.Finished = true
	err := p.Prospector.updateState(input.NewEvent(newState))
	if err != nil {
		return err
	}

	return nil
}
Example #27
func (h *Harvester) close() {

	// Mark harvester as finished
	h.state.Finished = true

	logp.Debug("harvester", "Stopping harvester for file: %s", h.state.Source)

	// Make sure file is closed as soon as harvester exits
	// If file was never opened, it can't be closed
	if h.file != nil {

		h.file.Close()

		logp.Debug("harvester", "Closing file: %s", h.state.Source)
		harvesterOpenFiles.Add(-1)

		// On completion, push offset so we can continue where we left off if we relaunch on the same file
		// Only send offset if file object was created successfully
		h.sendStateUpdate()
	} else {
		logp.Warn("Stopping harvester, NOT closing file as file info not available: %s", h.state.Source)
	}

	harvesterClosed.Add(1)
}
Example #28
// Scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)

			// If last state is empty, it means state was removed or never created -> can be ignored
			if !lastState.IsEmpty() && !lastState.Finished {
				logp.Err("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			err := p.Prospector.startHarvester(newState, 0)
			if err != nil {
				logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
			}
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Example #29
func FiltersRun(config common.MapStr, plugins map[Filter]FilterPlugin,
	next chan common.MapStr, stopCb func()) (input chan common.MapStr, err error) {

	logp.Debug("filters", "Initializing filters plugins")

	for filter, plugin := range plugins {
		Filters.Register(filter, plugin)
	}
	filters_plugins, err := LoadConfiguredFilters(config)
	if err != nil {
		return nil, fmt.Errorf("Error loading filters plugins: %v", err)
	}
	logp.Debug("filters", "Filters plugins order: %v", filters_plugins)

	if len(filters_plugins) > 0 {
		runner := NewFilterRunner(next, filters_plugins)
		go func() {
			err := runner.Run()
			if err != nil {
				logp.Critical("Filters runner failed: %v", err)
				// shutting down
				stopCb()
			}
		}()
		input = runner.FiltersQueue
	} else {
		input = next
	}

	return input, nil
}
Example #30
// Scans the specific path which can be a glob (/**/**/*.log)
// For all found files it is checked if a harvester should be started
func (p *Prospector) scan(path string, output chan *input.FileEvent) {

	logp.Debug("prospector", "scan path %s", path)
	// Evaluate the path as a wildcards/shell glob
	matches, err := filepath.Glob(path)
	if err != nil {
		logp.Debug("prospector", "glob(%s) failed: %v", path, err)
		return
	}

	p.missingFiles = map[string]os.FileInfo{}

	// Check any matched files to see if we need to start a harvester
	for _, file := range matches {
		logp.Debug("prospector", "Check file for harvesting: %s", file)

		// Stat the file, following any symlinks.
		fileinfo, err := os.Stat(file)

		if err != nil {
			logp.Debug("prospector", "stat(%s) failed: %s", file, err)
			continue
		}

		newFile := input.File{
			FileInfo: fileinfo,
		}

		if newFile.FileInfo.IsDir() {
			logp.Debug("prospector", "Skipping directory: %s", file)
			continue
		}

		// Check the current info against p.prospectorList[file]
		lastinfo, isKnown := p.prospectorList[file]

		oldFile := input.File{
			FileInfo: lastinfo.Fileinfo,
		}

		// Create a new prospector info with the stat info for comparison
		newInfo := harvester.NewFileStat(newFile.FileInfo, p.iteration)

		// Conditions for starting a new harvester:
		// - file path hasn't been seen before
		// - the file's inode or device changed
		if !isKnown {
			p.checkNewFile(newInfo, file, output)
		} else {
			newInfo.Continue(&lastinfo)
			p.checkExistingFile(newInfo, &newFile, &oldFile, file, output)
		}

		// Track the stat data for this file for later comparison to check for
		// rotation/etc
		p.prospectorList[file] = *newInfo
	} // for each file matched by the glob
}
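A standalone check of what filepath.Glob actually matches. Note that Go's Glob has no recursive ** operator — each * spans a single path segment — so a pattern like /var/log/**/*.log only descends one level:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Each * matches within one path segment only.
	matches, err := filepath.Glob("/var/log/*/*.log")
	if err != nil { // Glob only fails with ErrBadPattern
		panic(err)
	}
	for _, m := range matches {
		fmt.Println(m)
	}
}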