Example #1
func (eb *Winlogbeat) Cleanup(b *beat.Beat) error {
	logp.Debug("winlogbeat", "Dumping runtime metrics...")
	expvar.Do(func(kv expvar.KeyValue) {
		logp.Debug("winlogbeat", "%s=%s", kv.Key, kv.Value.String())
	})
	return nil
}
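For the dump in Cleanup to show anything, the beat has to register metrics with expvar beforehand. A minimal sketch using only the standard library (the counter name is illustrative, not an actual winlogbeat metric):

import "expvar"

// Hypothetical counter registered at package init; expvar.Do in Cleanup
// will then report it as publishedEvents=<n>.
var publishedEvents = expvar.NewInt("publishedEvents")

func countPublished(n int) {
	publishedEvents.Add(int64(n))
}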
Example #2
func (beat *Beat) ConfigSetup(beater Beater, inputConfig interface{}) {

	config := &ConfigSettings{
		//Input: inputConfig,
	}

	err := cfgfile.Read(config)
	if err != nil {
		logp.Debug("config", "Error reading configuration file: %v", err)
	}

	beat.Config = *config

	logp.Init(beat.Name, &beat.Config.Logging)

	logp.Debug("main", "Initializing output plugins")

	if err := publisher.Publisher.Init(beat.Config.Output, beat.Config.Shipper); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	beat.events = publisher.Publisher.Queue

	logp.Debug(beat.Name, "Init %s", beat.Name)

	if err := beater.Init(beat); err != nil {

		logp.Critical(err.Error())
		os.Exit(1)
	}
}
Example #3
func (d *Dockerbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&d.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// Init the period.
	if d.TbConfig.Input.Period != nil {
		d.period = time.Duration(*d.TbConfig.Input.Period) * time.Second
	} else {
		d.period = 1 * time.Second
	}
	// Init the socket.
	if d.TbConfig.Input.Socket != nil {
		d.socket = *d.TbConfig.Input.Socket
	} else {
		d.socket = "unix:///var/run/docker.sock" // default docker socket location
	}

	logp.Debug("dockerbeat", "Init dockerbeat")
	logp.Debug("dockerbeat", "Follow docker socket %q\n", d.socket)
	logp.Debug("dockerbeat", "Period %v\n", d.period)

	return nil
}
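The nil-checks above imply that the input config uses pointer fields, so an absent YAML key can be told apart from an explicit zero. A sketch of the presumed struct shape (field names inferred from the accesses above, tags assumed):

// Presumed shape of d.TbConfig; pointers let Config distinguish
// "key absent" (nil) from "key set to zero".
type DockerbeatConfig struct {
	Input struct {
		Period *int64  `yaml:"period"` // polling period in seconds
		Socket *string `yaml:"socket"` // docker socket location
	}
}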
Example #4
// Config Uwsgibeat according to uwsgibeat.yml.
func (ub *Uwsgibeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&ub.UbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	var u string
	if ub.UbConfig.Input.URL != "" {
		u = ub.UbConfig.Input.URL
	} else {
		u = "127.0.0.1:1717"
	}
	ub.url, err = url.Parse(u)
	if err != nil {
		logp.Err("Invalid uWSGI stats server address: %v", err)
		return err
	}

	if ub.UbConfig.Input.Period != nil {
		ub.period = time.Duration(*ub.UbConfig.Input.Period) * time.Second
	} else {
		ub.period = 1 * time.Second
	}

	logp.Debug(selector, "Init uwsgibeat")
	logp.Debug(selector, "Watch %v", ub.url)
	logp.Debug(selector, "Period %v", ub.period)

	return nil
}
Example #5
func FindSocketsOfPid(prefix string, pid int) (inodes []int64, err error) {

	dirname := filepath.Join(prefix, "/proc", strconv.Itoa(pid), "fd")
	procfs, err := os.Open(dirname)
	if err != nil {
		return []int64{}, fmt.Errorf("Open: %s", err)
	}
	defer procfs.Close()
	names, err := procfs.Readdirnames(0)
	if err != nil {
		return []int64{}, fmt.Errorf("Readdirnames: %s", err)
	}

	for _, name := range names {
		link, err := os.Readlink(filepath.Join(dirname, name))
		if err != nil {
			logp.Debug("procs", "Readlink %s: %s", name, err)
			continue
		}

		if strings.HasPrefix(link, "socket:[") {
			inode, err := strconv.ParseInt(link[8:len(link)-1], 10, 64)
			if err != nil {
				logp.Debug("procs", "ParseInt: %s:", err)
				continue
			}

			inodes = append(inodes, inode)
		}
	}

	return inodes, nil
}
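A short usage sketch: collect the socket inodes of the current process against the real /proc (empty prefix):

inodes, err := FindSocketsOfPid("", os.Getpid())
if err != nil {
	logp.Err("FindSocketsOfPid: %v", err)
} else {
	logp.Debug("procs", "%d socket inodes: %v", len(inodes), inodes)
}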
Example #6
// handleReadlineError handles errors that are raised while reading a file.
//
// If the error is EOF, it checks for:
// * File truncated
// * Older than ignore_older
// * General file error
//
// If none of the above cases match, no error is returned and the file is kept open.
//
// In case of a general error, the error itself is returned.
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {

	if err == io.EOF {
		// Refetch fileinfo to check if the file was truncated or disappeared
		info, statErr := h.file.Stat()

		// This could happen if the file was removed / rotated after reading and before calling the stat function
		if statErr != nil {
			logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
			return statErr
		}

		// Check if file was truncated
		if info.Size() < h.Offset {
			logp.Debug("harvester", "File was truncated as offset > size. Begin reading file from offset 0: %s", h.Path)
			h.Offset = 0
			h.file.Seek(h.Offset, os.SEEK_SET)
		} else if age := time.Since(lastTimeRead); age > h.ProspectorConfig.IgnoreOlderDuration {
			// If the file hasn't changed for longer than ignore_older, the harvester stops and the file handle is closed.
			logp.Debug("harvester", "Stopping harvesting of file as older than ignore_older: %s; Last change was: %v", h.Path, age)
			return err
		}
		// Do nothing in case it is just EOF, keep reading the file
		return nil
	} else {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}
}
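The contract implied above: a nil return means keep the file open and keep reading, while a non-nil return tells the caller to stop harvesting. A hypothetical caller loop (readLine and emit are placeholders, not the actual harvester API):

for {
	text, err := readLine(h.file) // hypothetical line reader
	if err != nil {
		if err := h.handleReadlineError(lastTimeRead, err); err != nil {
			return // stop: file too old, unrecoverable stat error, ...
		}
		continue // plain EOF or truncation handled; keep reading
	}
	lastTimeRead = time.Now()
	emit(text) // hypothetical downstream hand-off
}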
Example #7
func (restart *ProspectorResume) Scan(files []cfg.FileConfig, persist map[string]*FileState, eventChan chan *FileEvent) {
	pendingProspectorCnt := 0

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, fileconfig := range files {

		prospector := &Prospector{FileConfig: fileconfig}
		go prospector.Prospect(restart, eventChan)
		pendingProspectorCnt++
	}

	// Now determine which states we need to persist by pulling the events from the prospectors
	// When we hit a nil source, a prospector has finished, so we decrease the expected events
	logp.Debug("prospector", "Waiting for %d prospectors to initialise", pendingProspectorCnt)

	for event := range restart.Persist {
		if event.Source == nil {
			pendingProspectorCnt--
			if pendingProspectorCnt == 0 {
				break
			}
			continue
		}
		persist[*event.Source] = event
		logp.Debug("prospector", "Registrar will re-save state for %s", *event.Source)
	}

	logp.Info("All prospectors initialised with %d states to persist", len(persist))

}
Example #8
File: beat.go Project: urso/topbeat
// LoadConfig inits the config file and reads the default config information
// into Beat.Config. It exits the process in case of errors.
func (b *Beat) LoadConfig() {

	err := cfgfile.Read(&b.Config, "")
	if err != nil {
		// logging not yet initialized, so using fmt.Printf
		fmt.Printf("Loading config file error: %v\n", err)
		os.Exit(1)
	}

	err = logp.Init(b.Name, &b.Config.Logging)
	if err != nil {
		fmt.Printf("Error initializing logging: %v\n", err)
		os.Exit(1)
	}

	// Disable stderr logging if requested by cmdline flag
	logp.SetStderr()

	logp.Debug("beat", "Initializing output plugins")

	if err := publisher.Publisher.Init(b.Name, b.Config.Output, b.Config.Shipper); err != nil {
		fmt.Printf("Error Initialising publisher: %v\n", err)
		logp.Critical(err.Error())
		os.Exit(1)
	}

	b.Events = publisher.Publisher.Client()

	logp.Debug("beat", "Init %s", b.Name)
}
Example #9
func (p *Prospector) calculateResume(file string, fileinfo os.FileInfo, resume *ProspectorResume) (int64, bool) {
	last_state, is_found := resume.Files[file]

	if is_found && IsSameFile(file, fileinfo, last_state) {
		// We're resuming - throw the last state back downstream so we resave it
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		resume.Persist <- last_state
		return last_state.Offset, true
	}

	if previous := p.isFileRenamedResumelist(file, fileinfo, resume.Files); previous != "" {
		// File has rotated between shutdown and startup
		// We return last state downstream, with a modified event source with the new file name
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		logp.Debug("prospector", "Detected rename of a previously harvested file: %s -> %s", previous, file)
		last_state := resume.Files[previous]
		last_state.Source = &file
		resume.Persist <- last_state
		return last_state.Offset, true
	}

	if is_found {
		logp.Debug("prospector", "Not resuming rotated file: %s", file)
	}

	// New file so just start from an automatic position
	return 0, false
}
Example #10
func (tb *Topbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&tb.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	if tb.TbConfig.Input.Period != nil {
		tb.period = time.Duration(*tb.TbConfig.Input.Period) * time.Second
	} else {
		tb.period = 1 * time.Second
	}
	if tb.TbConfig.Input.Procs != nil {
		tb.procs = *tb.TbConfig.Input.Procs
	} else {
		tb.procs = []string{".*"} //all processes
	}

	logp.Debug("topbeat", "Init toppbeat")
	logp.Debug("topbeat", "Follow processes %q\n", tb.procs)
	logp.Debug("topbeat", "Period %v\n", tb.period)

	return nil
}
Example #11
func Publish(beat *beat.Beat, fb *Filebeat) {
	// Receives events from spool during flush
	for events := range fb.publisherChan {
		logp.Debug("filebeat", "Send events to output")

		pubEvents := make([]common.MapStr, 0, len(events))
		for _, event := range events {
			bEvent := common.MapStr{
				"timestamp": common.Time(time.Now()),
				"source":    event.Source,
				"offset":    event.Offset,
				"line":      event.Line,
				"message":   event.Text,
				"fields":    event.Fields,
				"fileinfo":  event.Fileinfo,
				"type":      "log",
			}

			pubEvents = append(pubEvents, bEvent)
		}

		beat.Events.PublishEvents(pubEvents, publisher.Sync)

		logp.Debug("filebeat", "Events sent: %d", len(events))

		// Tell the registrar that we've successfully sent these events
		fb.registrar.Channel <- events
	}
}
Example #12
func (dns *Dns) ParseUdp(pkt *protos.Packet) {
	defer logp.Recover("Dns ParseUdp")

	logp.Debug("dns", "Parsing packet addressed with %s of length %d.",
		pkt.Tuple.String(), len(pkt.Payload))

	dnsPkt, err := decodeDnsPacket(pkt.Payload)
	if err != nil {
		// This means that malformed requests or responses are being sent or
		// that someone is attempting to use the DNS port for non-DNS traffic.
		// Both are issues that a monitoring system should report.
		logp.Debug("dns", NonDnsPacketMsg+" addresses %s, length %d",
			pkt.Tuple.String(), len(pkt.Payload))
		return
	}

	dnsTuple := DnsTupleFromIpPort(&pkt.Tuple, TransportUdp, dnsPkt.ID)
	dnsMsg := &DnsMessage{
		Ts:           pkt.Ts,
		Tuple:        pkt.Tuple,
		CmdlineTuple: procs.ProcWatcher.FindProcessesTuple(&pkt.Tuple),
		Data:         dnsPkt,
		Length:       len(pkt.Payload),
	}

	if dnsMsg.Data.QR == Query {
		dns.receivedDnsRequest(&dnsTuple, dnsMsg)
	} else /* Response */ {
		dns.receivedDnsResponse(&dnsTuple, dnsMsg)
	}
}
Example #13
// LoadConfig inits the config file and reads the default config information
// into Beat.Config. It exits the process in case of errors.
func (b *Beat) LoadConfig() {

	err := cfgfile.Read(&b.Config, "")
	if err != nil {
		// logging not yet initialized, so using fmt.Printf
		fmt.Printf("%v\n", err)
		os.Exit(1)
	}

	err = logp.Init(b.Name, &b.Config.Logging)
	if err != nil {
		fmt.Printf("Error initializing logging: %v\n", err)
		os.Exit(1)
	}

	logp.Debug("beat", "Initializing output plugins")

	if err := publisher.Publisher.Init(b.Name, b.Version, b.Config.Output, b.Config.Shipper); err != nil {
		logp.Critical(err.Error())
		os.Exit(1)
	}

	b.Events = publisher.Publisher.Client()

	logp.Debug("beat", "Init %s", b.Name)
}
Example #14
// handleReadlineError handles errors raised while reading a file. If the error
// is EOF and nothing special happened, it returns without an error.
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {
	if err == io.EOF {
		// Timed out waiting for data and got EOF.
		// Check to see if the file was truncated.
		info, statErr := h.file.Stat()
		if statErr != nil {
			logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
			return statErr
		}

		if h.ProspectorConfig.IgnoreOlder != "" {
			logp.Debug("harvester", "Ignore Unmodified After: %s", h.ProspectorConfig.IgnoreOlder)
		}

		if info.Size() < h.Offset {
			logp.Debug("harvester", "File truncated, seeking to beginning: %s", h.Path)
			h.file.Seek(0, os.SEEK_SET)
			h.Offset = 0
		} else if age := time.Since(lastTimeRead); age > h.ProspectorConfig.IgnoreOlderDuration {
			// If lastTimeRead was longer ago than ignore_older, this file is probably dead. Stop watching it.
			logp.Debug("harvester", "Stopping harvest of %s; last change was: %v", h.Path, age)
			return err
		}
	} else {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}
	return nil
}
Example #15
func (h *Harvester) initFileOffset(file *os.File) error {
	offset, err := file.Seek(0, os.SEEK_CUR)

	if h.Offset > 0 {
		// continue from last known offset

		logp.Debug("harvester",
			"harvest: %q position:%d (offset snapshot:%d)", h.Path, h.Offset, offset)
		_, err = file.Seek(h.Offset, os.SEEK_SET)
	} else if h.Config.TailFiles {
		// tail file if file is new and tail_files config is set

		logp.Debug("harvester",
			"harvest: (tailing) %q (offset snapshot:%d)", h.Path, offset)
		h.Offset, err = file.Seek(0, os.SEEK_END)

	} else {
		// get the offset from the file in case the encoding factory was
		// required to read some data.

		logp.Debug("harvester", "harvest: %q (offset snapshot:%d)", h.Path, offset)
		h.Offset = offset
	}

	return err
}
Example #16
// Each shipper publishes a list of IPs together with its name to Elasticsearch
func (out *elasticsearchOutput) PublishIPs(name string, localAddrs []string) error {
	if !out.ttlEnabled {
		logp.Debug("output_elasticsearch", "Not publishing IPs because TTL was not yet confirmed to be enabled")
		return nil
	}

	logp.Debug("output_elasticsearch", "Publish IPs %s with expiration time %d", localAddrs, out.TopologyExpire)
	params := map[string]string{
		"ttl":     fmt.Sprintf("%dms", out.TopologyExpire),
		"refresh": "true",
	}
	_, err := out.Conn.Index(
		".packetbeat-topology", /*index*/
		"server-ip",            /*type*/
		name,                   /* id */
		params,                 /* parameters */
		publishedTopology{name, strings.Join(localAddrs, ",")} /* body */)

	if err != nil {
		logp.Err("Fail to publish IP addresses: %s", err)
		return err
	}

	out.UpdateLocalTopologyMap()

	return nil
}
Example #17
func (thrift *Thrift) receivedReply(msg *ThriftMessage) {

	// we need to search the request first.
	tuple := msg.TcpTuple

	trans := thrift.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Debug("thrift", "Response from unknown transaction. Ignoring: %v", tuple)
		return
	}

	if trans.Request.Method != msg.Method {
		logp.Debug("thrift", "Response from another request received '%s' '%s'"+
			". Ignoring.", trans.Request.Method, msg.Method)
		return
	}

	trans.Reply = msg
	trans.BytesOut = uint64(msg.FrameSize)

	trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	thrift.PublishQueue <- trans
	thrift.transactions.Delete(tuple.Hashable())

	logp.Debug("thrift", "Transaction queued")
}
Example #18
func Publish(beat *beat.Beat, fb *Filebeat) {

	// Receives events from spool during flush
	for events := range fb.publisherChan {

		logp.Debug("filebeat", "Send events to output")
		for _, event := range events {

			bEvent := common.MapStr{
				"timestamp": common.Time(time.Now()),
				"source":    event.Source,
				"offset":    event.Offset,
				"line":      event.Line,
				"text":      event.Text,
				"fields":    event.Fields,
				"fileinfo":  event.Fileinfo,
				"type":      "log",
			}

			// Sends event to beat (outputs)
			beat.Events <- bEvent
		}

		logp.Debug("filebeat", "Events sent:", len(events))

		// Tell the registrar that we've successfully sent these events
		fb.RegistrarChan <- events
	}
}
Example #19
func (r *Registrar) fetchState(filePath string, fileInfo os.FileInfo) (int64, bool) {

	// Check if there is a state for this file
	lastState, isFound := r.GetFileState(filePath)

	if isFound && input.IsSameFile(filePath, fileInfo) {
		// We're resuming - throw the last state back downstream so we resave it
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		r.Persist <- lastState
		return lastState.Offset, true
	}

	if previous := r.getPreviousFile(filePath, fileInfo); previous != "" {
		// File has rotated between shutdown and startup
		// We return last state downstream, with a modified event source with the new file name
		// And return the offset - also force harvest in case the file is old and we're about to skip it
		logp.Debug("prospector", "Detected rename of a previously harvested file: %s -> %s", previous, filePath)

		lastState, _ := r.GetFileState(previous)
		lastState.Source = &filePath
		r.Persist <- lastState
		return lastState.Offset, true
	}

	if isFound {
		logp.Debug("prospector", "Not resuming rotated file: %s", filePath)
	}

	// New file so just start from an automatic position
	return 0, false
}
Example #20
// BulkWith creates an HTTP request containing a bunch of operations and sends
// them to Elasticsearch. The request is retransmitted up to max_retries times
// before returning an error.
func (conn *Connection) BulkWith(
	index string,
	docType string,
	params map[string]string,
	metaBuilder MetaBuilder,
	body []interface{},
) (*QueryResult, error) {
	if len(body) == 0 {
		logp.Debug("elasticsearch", "Empty channel. Wait for more data.")
		return nil, nil
	}

	path, err := makePath(index, docType, "_bulk")
	if err != nil {
		return nil, err
	}

	buf := bulkEncode(metaBuilder, body)
	if buf.Len() == 0 {
		logp.Debug("elasticsearch", "Empty channel. Wait for more data.")
		return nil, nil
	}

	_, resp, err := conn.sendBulkRequest("POST", path, params, &buf)
	if err != nil {
		return nil, err
	}
	return readQueryResult(resp)
}
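The role of metaBuilder is easiest to see with a sketch. Assuming MetaBuilder is a func(interface{}) interface{} that returns the bulk action line for each document (an assumption based on the call site, not the verified signature):

// Hypothetical: index every document as-is, letting Elasticsearch assign IDs.
meta := func(doc interface{}) interface{} {
	return map[string]map[string]string{"index": {}}
}

docs := []interface{}{
	map[string]interface{}{"message": "hello"},
	map[string]interface{}{"message": "world"},
}

result, err := conn.BulkWith("packetbeat-2015.11.18", "log", nil, meta, docs)
if err != nil {
	logp.Err("Bulk request failed: %v", err)
}
_ = result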
Example #21
// AddTarget takes a target name and tag, fetches the IP addresses associated
// with it and adds them to the Pingbeat struct
func (p *Pingbeat) AddTarget(target string, tag string) {
	if addr := net.ParseIP(target); addr.String() == target {
		if addr.To4() != nil && p.useIPv4 {
			p.ipv4targets[addr.String()] = [2]string{target, tag}
		} else if p.useIPv6 {
			p.ipv6targets[addr.String()] = [2]string{target, tag}
		}
	} else {
		logp.Debug("pingbeat", "Getting IP addresses for %s:\n", target)
		ip4addr := make(chan string)
		ip6addr := make(chan string)
		go FetchIPs(ip4addr, ip6addr, target)
	lookup:
		for {
			select {
			case ip := <-ip4addr:
				if ip == "done" {
					break lookup
				} else if p.useIPv4 {
					logp.Debug("pingbeat", "IPv4: %s\n", ip)
					p.ipv4targets[ip] = [2]string{target, tag}
				}
			case ip := <-ip6addr:
				if ip == "done" {
					break lookup
				} else if p.useIPv6 {
					logp.Debug("pingbeat", "IPv6: %s\n", ip)
					p.ipv6targets[ip] = [2]string{target, tag}
				}
			}
		}
	}
}
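AddTarget depends on FetchIPs terminating the exchange with a "done" sentinel. A sketch of what FetchIPs presumably looks like (the resolution details are assumptions consistent with the protocol above):

// Hypothetical resolver: send each address on the channel for its family,
// then close the exchange with the "done" sentinel the lookup loop expects.
func FetchIPs(ip4addr, ip6addr chan string, target string) {
	addrs, err := net.LookupIP(target)
	if err != nil {
		logp.Warn("Failed to resolve %s: %v", target, err)
	}
	for _, addr := range addrs {
		if addr.To4() != nil {
			ip4addr <- addr.String()
		} else {
			ip6addr <- addr.String()
		}
	}
	ip4addr <- "done"
}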
Example #22
func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IpPortTuple) (proc_tuple *common.CmdlineTuple) {
	proc_tuple = &common.CmdlineTuple{}

	if !proc.ReadFromProc {
		return
	}

	if proc.IsLocalIp(tuple.Src_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Src_port)
		proc_tuple.Src = []byte(proc.FindProc(tuple.Src_port))
		if len(proc_tuple.Src) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Src, tuple.Src_port)
		}
	}

	if proc.IsLocalIp(tuple.Dst_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Dst_port)
		proc_tuple.Dst = []byte(proc.FindProc(tuple.Dst_port))
		if len(proc_tuple.Dst) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Dst, tuple.Dst_port)
		}
	}

	return
}
Example #23
func (tcp *Tcp) Process(tcphdr *layers.TCP, pkt *protos.Packet) {

	// This Recover should catch all exceptions in
	// protocol modules.
	defer logp.Recover("FollowTcp exception")

	stream, exists := tcp.streamsMap[pkt.Tuple.Hashable()]
	var original_dir uint8 = TcpDirectionOriginal
	created := false
	if !exists {
		stream, exists = tcp.streamsMap[pkt.Tuple.RevHashable()]
		if !exists {
			protocol := tcp.decideProtocol(&pkt.Tuple)
			if protocol == protos.UnknownProtocol {
				// don't follow
				return
			}
			logp.Debug("tcp", "Stream doesn't exists, creating new")

			// create
			stream = &TcpStream{id: tcp.getId(), tuple: &pkt.Tuple, protocol: protocol, tcp: tcp}
			stream.tcptuple = common.TcpTupleFromIpPort(stream.tuple, stream.id)
			tcp.streamsMap[pkt.Tuple.Hashable()] = stream
			created = true
		} else {
			original_dir = TcpDirectionReverse
		}
	}
	tcp_start_seq := tcphdr.Seq
	tcp_seq := tcp_start_seq + uint32(len(pkt.Payload))

	logp.Debug("tcp", "pkt.start_seq=%v pkt.last_seq=%v stream.last_seq=%v (len=%d)",
		tcp_start_seq, tcp_seq, stream.lastSeq[original_dir], len(pkt.Payload))

	if len(pkt.Payload) > 0 &&
		stream.lastSeq[original_dir] != 0 {

		if TcpSeqBeforeEq(tcp_seq, stream.lastSeq[original_dir]) {

			logp.Debug("tcp", "Ignoring what looks like a retrasmitted segment. pkt.seq=%v len=%v stream.seq=%v",
				tcphdr.Seq, len(pkt.Payload), stream.lastSeq[original_dir])
			return
		}

		if TcpSeqBefore(stream.lastSeq[original_dir], tcp_start_seq) {
			if !created {
				logp.Debug("tcp", "Gap in tcp stream. last_seq: %d, seq: %d", stream.lastSeq[original_dir], tcp_start_seq)
				drop := stream.GapInStream(original_dir,
					int(tcp_start_seq-stream.lastSeq[original_dir]))
				if drop {
					logp.Debug("tcp", "Dropping stream because of gap")
					stream.Expire()
				}
			}
		}
	}
	stream.lastSeq[original_dir] = tcp_seq

	stream.AddPacket(pkt, tcphdr, original_dir)
}
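TcpSeqBefore and TcpSeqBeforeEq must tolerate 32-bit sequence wraparound, which rules out a plain less-than. A sketch of the usual serial-arithmetic comparison (assumed, not lifted from the source):

// Wraparound-safe: the signed difference is negative exactly when seq1
// precedes seq2 within half of the 32-bit sequence space.
func TcpSeqBefore(seq1, seq2 uint32) bool   { return int32(seq1-seq2) < 0 }
func TcpSeqBeforeEq(seq1, seq2 uint32) bool { return int32(seq1-seq2) <= 0 }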
Example #24
func (p *Pingbeat) AddTarget(target string, tag string) {
	if addr := net.ParseIP(target); addr != nil {
		if addr.To4() != nil {
			logp.Debug("pingbeat", "IPv4: %s\n", addr.String())
			p.ipv4targets[addr.String()] = [2]string{target, tag}
		} else {
			logp.Debug("pingbeat", "IPv6: %s\n", addr.String())
			p.ipv6targets[addr.String()] = [2]string{target, tag}
		}
	} else {
		logp.Debug("pingbeat", "Getting IP addresses for %s:\n", target)
		addrs, err := net.LookupIP(target)
		if err != nil {
			logp.Warn("Failed to resolve %s to IP address, ignoring this target.\n", target)
		} else {
			for j := 0; j < len(addrs); j++ {
				if addrs[j].To4() != nil {
					logp.Debug("pingbeat", "IPv4: %s\n", addrs[j].String())
					p.ipv4targets[addrs[j].String()] = [2]string{target, tag}
				} else {
					logp.Debug("pingbeat", "IPv6: %s\n", addrs[j].String())
					p.ipv6targets[addrs[j].String()] = [2]string{target, tag}
				}
			}
		}
	}
}
Example #25
func isSpecialPgsqlCommand(data []byte) (bool, int) {

	if len(data) < 8 {
		// 8 bytes required
		return false, 0
	}

	// read length
	length := int(common.Bytes_Ntohl(data[0:4]))

	// read command identifier
	code := int(common.Bytes_Ntohl(data[4:8]))

	if length == 16 && code == 80877102 {
		// Cancel Request
		logp.Debug("pgsqldetailed", "Cancel Request, length=%d", length)
		return true, CancelRequest
	} else if length == 8 && code == 80877103 {
		// SSL Request
		logp.Debug("pgsqldetailed", "SSL Request, length=%d", length)
		return true, SSLRequest
	} else if code == 196608 {
		// Startup Message
		logp.Debug("pgsqldetailed", "Startup Message, length=%d", length)
		return true, StartupMessage
	}
	return false, 0
}
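The magic numbers come straight from the PostgreSQL frontend/backend protocol: the special request codes pack two 16-bit halves, and the startup code is protocol version 3.0. Decomposed for reference:

// PostgreSQL wire-protocol constants, decomposed:
const (
	cancelRequestCode = 1234<<16 | 5678 // 80877102
	sslRequestCode    = 1234<<16 | 5679 // 80877103
	protocolVersion3  = 3 << 16         // 196608: StartupMessage, v3.0
)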
Example #26
// handleReadlineError handles errors that are raised while reading a file.
//
// If the error is EOF, it checks for:
// * File truncated
// * Older than ignore_older
// * General file error
//
// If none of the above cases match, no error is returned and the file is kept open.
//
// In case of a general error, the error itself is returned.
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {
	if err != io.EOF || !h.file.Continuable() {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}

	// Refetch fileinfo to check if the file was truncated or disappeared.
	// Errors if the file was removed/rotated after reading and before
	// calling the stat function
	info, statErr := h.file.Stat()
	if statErr != nil {
		logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
		return statErr
	}

	// Handle fails if file was truncated
	if info.Size() < h.Offset {
		seeker, ok := h.file.(io.Seeker)
		if !ok {
			logp.Err("Can not seek source")
			return err
		}

		logp.Debug("harvester", "File was truncated as offset (%s) > size (%s). Begin reading file from offset 0: %s", h.Offset, info.Size(), h.Path)

		h.Offset = 0
		seeker.Seek(h.Offset, os.SEEK_SET)
		return nil
	}

	age := time.Since(lastTimeRead)
	if age > h.ProspectorConfig.IgnoreOlderDuration {
		// If the file hasn't changed for longer than ignore_older, the harvester
		// stops and the file handle is closed.
		return fmt.Errorf("Stop harvesting as file is older than ignore_older: %s; Last change was: %s", h.Path, age)
	}

	if h.Config.ForceCloseFiles {
		// Check if the file name exists (see #93)
		_, statErr := os.Stat(h.file.Name())

		// Error means file does not exist. If no error, check if same file. If not close as rotated.
		if statErr != nil || !input.IsSameFile(h.file.Name(), info) {
			logp.Info("Force close file: %s; error: %s", h.Path, statErr)
			// Return directly on windows -> file is closing
			return fmt.Errorf("Force closing file: %s", h.Path)
		}
	}

	logp.Debug("harvester", "End of file reached: %s; Backoff now.", h.Path)

	// Do nothing in case it is just EOF, keep reading the file after backing off
	h.backOff()
	return nil
}
Example #27
// queryEventMessageFiles queries the registry to get the value of
// the EventMessageFile key that points to a DLL or EXE containing parameterized
// event log messages. If found, it loads the libraries as data files and
// returns a slice of Handles to the libraries.
func queryEventMessageFiles(providerName, sourceName string) ([]Handle, error) {
	// Open key in registry:
	registryKeyName := fmt.Sprintf(
		"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%s\\%s",
		providerName, sourceName)
	key, err := registry.OpenKey(registry.LOCAL_MACHINE, registryKeyName,
		registry.QUERY_VALUE)
	if err != nil {
		return nil, fmt.Errorf("Failed to open HKLM\\%s", registryKeyName)
	}
	defer func() {
		err := key.Close()
		if err != nil {
			logp.Warn("Failed to close registry key. key=%s err=%v",
				registryKeyName, err)
		}
	}()
	logp.Debug("eventlog", "RegOpenKey opened handle to HKLM\\%s, key=%v",
		registryKeyName, key)

	// Read value from registry:
	value, _, err := key.GetStringValue("EventMessageFile")
	if err != nil {
		return nil, fmt.Errorf("Failed querying EventMessageFile from "+
			"HKLM\\%s. %v", registryKeyName, err)
	}
	value, err = registry.ExpandString(value)
	if err != nil {
		return nil, err
	}

	// Split the value in case there is more than one file in the value.
	eventMessageFiles := strings.Split(value, ";")
	logp.Debug("eventlog", "RegQueryValueEx queried EventMessageFile from "+
		"HKLM\\%s and got [%s]", registryKeyName,
		strings.Join(eventMessageFiles, ","))

	// Load the libraries:
	var handles []Handle
	for _, eventMessageFile := range eventMessageFiles {
		sPtr, err := syscall.UTF16PtrFromString(eventMessageFile)
		if err != nil {
			logp.Debug("eventlog", "Failed to get UTF16Ptr for '%s'. "+
				"Skipping. %v", eventMessageFile, err)
			continue
		}
		handle, err := loadLibraryEx(sPtr, 0, LOAD_LIBRARY_AS_DATAFILE)
		if err != nil {
			logp.Debug("eventlog", "Failed to load library '%s' as data file. "+
				"Skipping. %v", eventMessageFile, err)
			continue
		}
		handles = append(handles, handle)
	}

	logp.Debug("eventlog", "Returning handles %v for sourceName %s", handles,
		sourceName)
	return handles, nil
}
Example #28
// scan scans a specific path, which can be a glob (/**/**/*.log).
// For each file found, it checks whether a harvester should be started.
func (p *Prospector) scan(path string, output chan *input.FileEvent) {

	logp.Debug("prospector", "scan path %s", path)
	// Evaluate the path as a wildcards/shell glob
	matches, err := filepath.Glob(path)
	if err != nil {
		logp.Debug("prospector", "glob(%s) failed: %v", path, err)
		return
	}

	p.missingFiles = map[string]os.FileInfo{}

	// Check any matched files to see if we need to start a harvester
	for _, file := range matches {
		logp.Debug("prospector", "Check file for harvesting: %s", file)

		// Stat the file, following any symlinks.
		fileinfo, err := os.Stat(file)
		if err != nil {
			logp.Debug("prospector", "stat(%s) failed: %s", file, err)
			continue
		}

		newFile := input.File{
			FileInfo: fileinfo,
		}

		if newFile.FileInfo.IsDir() {
			logp.Debug("prospector", "Skipping directory: %s", file)
			continue
		}

		// Check the current info against p.prospectorinfo[file]
		lastinfo, isKnown := p.prospectorList[file]

		oldFile := input.File{
			FileInfo: lastinfo.Fileinfo,
		}

		// Create a new prospector info with the stat info for comparison
		newInfo := harvester.NewFileStat(newFile.FileInfo, p.iteration)

		// Conditions for starting a new harvester:
		// - file path hasn't been seen before
		// - the file's inode or device changed
		if !isKnown {
			p.checkNewFile(newInfo, file, output)
		} else {
			newInfo.Continue(&lastinfo)
			p.checkExistingFile(newInfo, &newFile, &oldFile, file, output)
		}

		// Track the stat data for this file for later comparison to check for
		// rotation/etc
		p.prospectorList[file] = *newInfo
	} // for each file matched by the glob
}
Example #29
func (http *Http) receivedHttpResponse(msg *HttpMessage) {

	// we need to search the request first.
	tuple := msg.TcpTuple

	logp.Debug("http", "Received response with tuple: %s", tuple)

	trans := http.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Warn("Response from unknown transaction. Ignoring: %v", tuple)
		return
	}

	if trans.Http == nil {
		logp.Warn("Response without a known request. Ignoring.")
		return
	}

	response := common.MapStr{
		"phrase":         msg.StatusPhrase,
		"code":           msg.StatusCode,
		"content_length": msg.ContentLength,
	}

	if http.Send_headers {
		if !http.Split_cookie {
			response["response_headers"] = msg.Headers
		} else {
			hdrs := common.MapStr{}
			for hdr_name, hdr_val := range msg.Headers {
				if hdr_name == "set-cookie" {
					hdrs[hdr_name] = splitCookiesHeader(hdr_val)
				} else {
					hdrs[hdr_name] = hdr_val
				}
			}

			response["response_headers"] = hdrs
		}
	}

	trans.BytesOut = msg.Size
	trans.Http.Update(response)
	trans.Notes = append(trans.Notes, msg.Notes...)

	trans.ResponseTime = int32(msg.Ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	// save Raw message
	if http.Send_response {
		trans.Response_raw = string(http.cutMessageBody(msg))
	}

	http.publishTransaction(trans)
	http.transactions.Delete(trans.tuple.Hashable())

	logp.Debug("http", "HTTP transaction completed: %s\n", trans.Http)
}
Example #30
func (pb *Packetbeat) Cleanup(b *beat.Beat) error {

	if service.WithMemProfile() {
		logp.Debug("main", "Waiting for streams and transactions to expire...")
		time.Sleep(time.Duration(float64(protos.DefaultTransactionExpiration) * 1.2))
		logp.Debug("main", "Streams and transactions should all be expired now.")
	}
	return nil
}