Example #1
// Config configures Uwsgibeat according to uwsgibeat.yml.
func (ub *Uwsgibeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&ub.UbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	var u string
	if ub.UbConfig.Input.URL != "" {
		u = ub.UbConfig.Input.URL
	} else {
		u = "127.0.0.1:1717"
	}
	ub.url, err = url.Parse(u)
	if err != nil {
		logp.Err("Invalid uWSGI stats server address: %v", err)
		return err
	}

	if ub.UbConfig.Input.Period != nil {
		ub.period = time.Duration(*ub.UbConfig.Input.Period) * time.Second
	} else {
		ub.period = 1 * time.Second
	}

	logp.Debug(selector, "Init uwsgibeat")
	logp.Debug(selector, "Watch %v", ub.url)
	logp.Debug(selector, "Period %v", ub.period)

	return nil
}
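The fields read above imply a configuration shape roughly like the following sketch. The struct and field types are assumptions inferred from the accesses in Config (URL as a string, Period as a numeric pointer); the real uwsgibeat definitions may differ.

// Sketch only: inferred from the accesses in Config, not the original types.
type UwsgibeatConfig struct {
	Input struct {
		URL    string // uWSGI stats server address; "127.0.0.1:1717" when empty
		Period *int64 // polling period in seconds; defaults to 1s when nil
	}
}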
Example #2
// handleReadlineError handles errors raised while reading the file. If the error is EOF and nothing special happened, it returns without an error.
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {
	if err == io.EOF {
		// timed out waiting for data, got eof.
		// Check to see if the file was truncated
		info, statErr := h.file.Stat()

		// This could happen if the file was removed / rotated after reading and before calling the stat function
		if statErr != nil {
			logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
			return statErr
		}

		if h.ProspectorConfig.IgnoreOlder != "" {
			logp.Debug("harvester", "Ignore Unmodified After: %s", h.ProspectorConfig.IgnoreOlder)
		}

		if info.Size() < h.Offset {
			logp.Debug("harvester", "File truncated, seeking to beginning: %s", h.Path)
			h.file.Seek(0, os.SEEK_SET)
			h.Offset = 0
		} else if age := time.Since(lastTimeRead); age > h.ProspectorConfig.IgnoreOlderDuration {
			// If lastTimeRead is older than ignore_older (when set), this file is probably dead. Stop watching it.
			// As an error is returned, the harvester will stop and the file is closed
			logp.Debug("harvester", "Stopping harvest of %s; last change was: %v", h.Path, age)
			return err
		}
	} else {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}
	return nil
}
Example #3
// handleReadlineError handles errors raised while reading the file.
//
// If the error is EOF, it will check for:
// * File truncated
// * Older than ignore_older
// * General file error
//
// If none of the above cases match, no error will be returned and the file is kept open
//
// In case of a general error, the error itself is returned
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {

	if err == io.EOF {
		// Refetch fileinfo to check if the file was truncated or disappeared
		info, statErr := h.file.Stat()

		// This could happen if the file was removed / rotated after reading and before calling the stat function
		if statErr != nil {
			logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
			return statErr
		}

		// Check if file was truncated
		if info.Size() < h.Offset {
			logp.Debug("harvester", "File was truncated as offset > size. Begin reading file from offset 0: %s", h.Path)
			h.Offset = 0
			h.file.Seek(h.Offset, os.SEEK_SET)
		} else if age := time.Since(lastTimeRead); age > h.ProspectorConfig.IgnoreOlderDuration {
			// If the file hasn't changed for longer than ignore_older, the harvester stops and the file handle will be closed.
			logp.Debug("harvester", "Stopping harvesting of file as it is older than ignore_older: %s; last change was: %v", h.Path, age)
			return err
		}
		// Do nothing in case it is just EOF, keep reading the file
		return nil
	} else {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}
}
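The truncation test shared by Examples #2 and #3 can be isolated into a small helper. This is a sketch built on the same assumption the examples make (a size smaller than the stored read offset means the file was truncated); it is not part of the original harvester code.

// truncated reports whether f shrank below the given read offset.
func truncated(f *os.File, offset int64) (bool, error) {
	info, err := f.Stat()
	if err != nil {
		// The file may have been removed or rotated since it was opened.
		return false, err
	}
	return info.Size() < offset, nil
}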
Example #4
func (ab *AmqpBeat) Run(b *beat.Beat) error {
	logp.Info("Running...")
	serverURI := ab.RbConfig.AmqpInput.ServerURI

	ab.exposeMetrics()

	conn, err := amqp.Dial(*serverURI)
	if err != nil {
		logp.Err("Failed to connect to RabbitMQ at '%s': %v", *serverURI, err)
		return err
	}
	defer conn.Close()

	ab.handleDisconnect(conn)

	ch, err := conn.Channel()
	if err != nil {
		logp.Err("Failed to open RabbitMQ channel: %v", err)
		return err
	}
	defer ch.Close()

	ab.runPipeline(b, ch)

	return nil
}
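handleDisconnect is called above but not shown. One plausible shape is sketched below: streadway/amqp delivers connection-close errors on a channel registered via NotifyClose, so a goroutine can watch for them. The body is an assumption, not the actual amqpbeat implementation.

// Sketch only: a plausible handleDisconnect, assuming NotifyClose-based monitoring.
func (ab *AmqpBeat) handleDisconnect(conn *amqp.Connection) {
	errCh := conn.NotifyClose(make(chan *amqp.Error))
	go func() {
		if amqpErr := <-errCh; amqpErr != nil {
			logp.Err("RabbitMQ connection closed: %v", amqpErr)
		}
	}()
}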
Example #5
func (out *RedisOutput) PublishIPs(name string, localAddrs []string) error {

	logp.Debug("output_redis", "[%s] Publish the IPs %s", name, localAddrs)

	// connect to db
	conn, err := out.RedisConnect(out.DbTopology)
	if err != nil {
		return err
	}
	defer conn.Close()

	_, err = conn.Do("HSET", name, "ipaddrs", strings.Join(localAddrs, ","))
	if err != nil {
		logp.Err("[%s] Fail to set the IP addresses: %s", name, err)
		return err
	}

	_, err = conn.Do("EXPIRE", name, int(out.TopologyExpire.Seconds()))
	if err != nil {
		logp.Err("[%s] Fail to set the expiration time: %s", name, err)
		return err
	}

	out.UpdateLocalTopologyMap(conn)

	return nil
}
Example #6
// Update local topology map
func (out *ElasticsearchOutput) UpdateLocalTopologyMap() {

	// get all shippers IPs from Elasticsearch
	TopologyMapTmp := make(map[string]string)

	res, err := out.Conn.SearchUri(".packetbeat-topology", "server-ip", nil)
	if err == nil {
		for _, obj := range res.Hits.Hits {
			var result QueryResult
			err = json.Unmarshal(obj, &result)
			if err != nil {
				return
			}

			var pub PublishedTopology
			err = json.Unmarshal(result.Source, &pub)
			if err != nil {
				logp.Err("json.Unmarshal fails with: %s", err)
			}
			// add mapping
			ipaddrs := strings.Split(pub.IPs, ",")
			for _, addr := range ipaddrs {
				TopologyMapTmp[addr] = pub.Name
			}
		}
	} else {
		logp.Err("Getting topology map fails with: %s", err)
	}

	// update topology map
	out.TopologyMap = TopologyMapTmp

	logp.Debug("output_elasticsearch", "Topology map %s", out.TopologyMap)
}
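The two types unmarshalled above are not shown. From the accesses (result.Source fed back into json.Unmarshal, pub.Name and pub.IPs as strings) they look roughly like this sketch; the `_source` tag is an assumption based on the Elasticsearch hit format.

// Sketch only: shapes inferred from the unmarshalling above.
type QueryResult struct {
	Source json.RawMessage `json:"_source"`
}

type PublishedTopology struct {
	Name string
	IPs  string // comma-separated addresses, as produced by PublishIPs
}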
Example #7
func (t *Topbeat) Run(b *beat.Beat) error {

	t.isAlive = true

	t.initProcStats()

	var err error

	for t.isAlive {
		time.Sleep(t.period)

		err = t.exportSystemStats()
		if err != nil {
			logp.Err("Error reading system stats: %v", err)
		}
		err = t.exportProcStats()
		if err != nil {
			logp.Err("Error reading proc stats: %v", err)
		}
		err = t.exportFileSystemStats()
		if err != nil {
			logp.Err("Error reading fs stats: %v", err)
		}
	}

	return err
}
Example #8
func (out *RedisOutput) UpdateLocalTopologyMap(conn redis.Conn) {

	TopologyMapTmp := make(map[string]string)

	hostnames, err := redis.Strings(conn.Do("KEYS", "*"))
	if err != nil {
		logp.Err("Fail to get the all shippers from the topology map %s", err)
		return
	}
	for _, hostname := range hostnames {
		res, err := redis.String(conn.Do("HGET", hostname, "ipaddrs"))
		if err != nil {
			logp.Err("[%s] Fail to get the IPs: %s", hostname, err)
		} else {
			ipaddrs := strings.Split(res, ",")
			for _, addr := range ipaddrs {
				TopologyMapTmp[addr] = hostname
			}
		}
	}

	out.TopologyMap = TopologyMapTmp

	logp.Debug("output_redis", "Topology %s", TopologyMapTmp)
}
Example #9
// Setup Gzipbeat.
func (gb *Gzipbeat) Setup(b *beat.Beat) error {
	gb.events = b.Events

	// read registry if file exists
	_, err := os.Stat(gb.config.Registry)
	if err == nil || os.IsExist(err) {
		// read content
		rr, err := os.Open(gb.config.Registry)
		if err != nil {
			logp.Err("Error opening registry file: %v", err)
			return err
		}
		defer rr.Close()

		scanner := bufio.NewScanner(rr)
		for scanner.Scan() {
			gb.registry = append(gb.registry, scanner.Text())
		}
		err = scanner.Err()
		if err != nil {
			logp.Err("Error scanning registry file: %v", err)
			return err
		}
	}

	// get hostname to be later used in events
	hostname, err := os.Hostname()
	if err != nil {
		logp.Err("Error getting hostname: %v", err)
		return err
	}
	gb.hostname = hostname

	return nil
}
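The registry format implied here is one filename per line. A plausible counterpart for the saveToRegistry helper used in Example #23 could look like the sketch below; it is assumed, not taken from the original gzipbeat code.

// Sketch only: append one processed filename per line, mirroring the reader above.
func saveToRegistry(gb *Gzipbeat, filename string) error {
	f, err := os.OpenFile(gb.config.Registry, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := f.WriteString(filename + "\n"); err != nil {
		return err
	}
	gb.registry = append(gb.registry, filename)
	return nil
}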
Example #10
func (c *console) PublishEvent(
	s outputs.Signaler,
	ts time.Time,
	event common.MapStr,
) error {
	var jsonEvent []byte
	var err error

	if c.pretty {
		jsonEvent, err = json.MarshalIndent(event, "", "  ")
	} else {
		jsonEvent, err = json.Marshal(event)
	}
	if err != nil {
		logp.Err("Fail to convert the event to JSON: %s", err)
		outputs.SignalCompleted(s)
		return err
	}

	if err = writeBuffer(jsonEvent); err != nil {
		goto fail
	}
	if err = writeBuffer([]byte{'\n'}); err != nil {
		goto fail
	}

	outputs.SignalCompleted(s)
	return nil
fail:
	logp.Err("Fail to write event: %s", err)
	outputs.SignalFailed(s)
	return err
}
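writeBuffer is referenced above but not shown. A minimal sketch consistent with its use here, assuming it writes to stdout and loops on short writes:

// Sketch only: write the whole buffer to stdout, retrying short writes.
func writeBuffer(buf []byte) error {
	written := 0
	for written < len(buf) {
		n, err := os.Stdout.Write(buf[written:])
		if err != nil {
			return err
		}
		written += n
	}
	return nil
}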
Example #11
// handleReadlineError handles errors raised while reading the file.
//
// If the error is EOF, it will check for:
// * File truncated
// * Older than ignore_older
// * General file error
//
// If none of the above cases match, no error will be returned and the file is kept open
//
// In case of a general error, the error itself is returned
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {
	if err != io.EOF || !h.file.Continuable() {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}

	// Refetch fileinfo to check if the file was truncated or disappeared.
	// Errors if the file was removed/rotated after reading and before
	// calling the stat function
	info, statErr := h.file.Stat()
	if statErr != nil {
		logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
		return statErr
	}

	// Handle fails if file was truncated
	if info.Size() < h.Offset {
		seeker, ok := h.file.(io.Seeker)
		if !ok {
			logp.Err("Can not seek source")
			return err
		}

		logp.Debug("harvester", "File was truncated as offset (%s) > size (%s). Begin reading file from offset 0: %s", h.Offset, info.Size(), h.Path)

		h.Offset = 0
		seeker.Seek(h.Offset, os.SEEK_SET)
		return nil
	}

	age := time.Since(lastTimeRead)
	if age > h.ProspectorConfig.IgnoreOlderDuration {
		// If the file hasn't changed for longer than ignore_older, the harvester stops
		// and the file handle will be closed.
		return fmt.Errorf("Stop harvesting as file is older than ignore_older: %s; last change was: %s", h.Path, age)
	}

	if h.Config.ForceCloseFiles {
		// Check if the file name exists (see #93)
		_, statErr := os.Stat(h.file.Name())

		// Error means file does not exist. If no error, check if same file. If not close as rotated.
		if statErr != nil || !input.IsSameFile(h.file.Name(), info) {
			logp.Info("Force close file: %s; error: %s", h.Path, statErr)
			// Return directly on windows -> file is closing
			return fmt.Errorf("Force closing file: %s", h.Path)
		}
	}

	if err != io.EOF {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
	}

	logp.Debug("harvester", "End of file reached: %s; Backoff now.", h.Path)

	// Do nothing in case it is just EOF, keep reading the file after backing off
	h.backOff()
	return nil
}
Example #12
func (publisher *PublisherType) publishEvents(events []common.MapStr) {
	var ignore []int // indices of events to be removed from events
	for i, event := range events {
		// validate some required fields
		if err := filterEvent(event); err != nil {
			logp.Err("Publishing event failed: %v", err)
			ignore = append(ignore, i)
			continue
		}

		// update address and geo-ip information. Ignore event
		// if address is invalid or event is found to be a duplicate
		ok := updateEventAddresses(publisher, event)
		if !ok {
			ignore = append(ignore, i)
			continue
		}

		// add additional meta data
		event["shipper"] = publisher.name
		if len(publisher.tags) > 0 {
			event["tags"] = publisher.tags
		}

		if logp.IsDebug("publish") {
			PrintPublishEvent(event)
		}
	}

	// return if no event is left
	if len(ignore) == len(events) {
		return
	}

	// remove invalid events.
	// TODO: is order important? Removal can be turned into O(len(ignore)) by
	//       copying last element into idx and doing
	//       events=events[:len(events)-len(ignore)] afterwards
	// Alternatively filtering could be implemented like:
	//   https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
	for i := len(ignore) - 1; i >= 0; i-- {
		idx := ignore[i]
		events = append(events[:idx], events[idx+1:]...)
	}

	// get timestamp of first event for outputer
	ts := events[0]["timestamp"].(common.Time)

	// add transaction
	if !publisher.disabled {
		for _, out := range publisher.Output {
			err := out.BulkPublish(time.Time(ts), events)
			if err != nil {
				logp.Err("Fail to publish event type on output %s: %v", out, err)
			}
		}
	}
}
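The allocation-free alternative referenced in the TODO (the SliceTricks link) can be sketched as an in-place filter that reuses the backing array; this is an illustration, not the publisher's actual code.

// Sketch only: keep a write cursor instead of repeated append-splices.
func filterEvents(events []common.MapStr, keep func(i int) bool) []common.MapStr {
	out := events[:0] // reuses the backing array; no allocation
	for i, event := range events {
		if keep(i) {
			out = append(out, event)
		}
	}
	return out
}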
Example #13
func (proc *ProcessesWatcher) Init(config ProcsConfig) error {

	proc.proc_prefix = ""
	proc.PortProcMap = make(map[uint16]PortProcMapping)
	proc.LastMapUpdate = time.Now()

	proc.ReadFromProc = config.Enabled
	if proc.ReadFromProc {
		if runtime.GOOS != "linux" {
			proc.ReadFromProc = false
			logp.Info("Disabled /proc/ reading because not on linux")
		} else {
			logp.Info("Process matching enabled")
		}
	}

	if config.Max_proc_read_freq == 0 {
		proc.MaxReadFreq = 10 * time.Millisecond
	} else {
		proc.MaxReadFreq = time.Duration(config.Max_proc_read_freq) *
			time.Millisecond
	}

	if config.Refresh_pids_freq == 0 {
		proc.RefreshPidsFreq = 1 * time.Second
	} else {
		proc.RefreshPidsFreq = time.Duration(config.Refresh_pids_freq) *
			time.Millisecond
	}

	// Read the local IP addresses
	var err error
	proc.LocalAddrs, err = common.LocalIpAddrs()
	if err != nil {
		logp.Err("Error getting local IP addresses: %s", err)
		proc.LocalAddrs = []net.IP{}
	}

	if proc.ReadFromProc {
		for _, procConfig := range config.Monitored {

			grepper := procConfig.Cmdline_grep
			if len(grepper) == 0 {
				grepper = procConfig.Process
			}

			p, err := NewProcess(proc, procConfig.Process, grepper, time.Tick(proc.RefreshPidsFreq))
			if err != nil {
				logp.Err("NewProcess: %s", err)
			} else {
				proc.Processes = append(proc.Processes, p)
			}
		}
	}

	return nil
}
Example #14
func packetFromType(version uint8, packetType ofp_type) OpenFlowContent {
	logp.Err("openflow", "Found packet type ", packetType)
	if packet, ok := OpenFlowPacketTypes[packetType]; ok {
		return packet
	} else {
		logp.Err("openflow", "Unknown message ", packetType)
		return nil
	}
}
Example #15
func (fb *Filebeat) Run(b *beat.Beat) error {

	defer func() {
		p := recover()
		if p == nil {
			return
		}

		fmt.Printf("recovered panic: %v", p)
		os.Exit(1)
	}()

	var err error

	// Init channels
	fb.publisherChan = make(chan []*FileEvent, 1)

	// Setup registrar to persist state
	fb.registrar, err = NewRegistrar(fb.FbConfig.Filebeat.RegistryFile)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	crawl := &Crawler{
		Registrar: fb.registrar,
	}

	// Load the previous log file locations now, for use in prospector
	fb.registrar.LoadState()

	// Init and Start spooler: Harvesters dump events into the spooler.
	fb.Spooler = NewSpooler(fb)
	err = fb.Spooler.Config()

	if err != nil {
		logp.Err("Could not init spooler: %v", err)
		return err
	}

	// Start up spooler
	go fb.Spooler.Run()

	crawl.Start(fb.FbConfig.Filebeat.Prospectors, fb.Spooler.Channel)

	// Publishes event to output
	go Publish(b, fb)

	// registrar records last acknowledged positions in all files.
	fb.registrar.Run()

	return nil
}
Example #16
func (out *redisOutput) BulkPublish(
	signal outputs.Signaler,
	ts time.Time,
	events []common.MapStr,
) error {
	if !out.connected {
		logp.Debug("output_redis", "Droping pkt ...")
		return errors.New("Not connected")
	}

	command := "RPUSH"
	if out.DataType == RedisChannelType {
		command = "PUBLISH"
	}

	if len(events) == 1 { // single event
		event := events[0]
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Fail to convert the event to JSON: %s", err)
			outputs.SignalCompleted(signal)
			return err
		}

		_, err = out.Conn.Do(command, out.Index, string(jsonEvent))
		outputs.Signal(signal, err)
		out.onFail(err)
		return err
	}

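	// Batched case: redigo pipelining. Send buffers each command client-side,
	// Flush writes the buffer to the server, and Receive reads back a reply.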
	for _, event := range events {
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Fail to convert the event to JSON: %s", err)
			continue
		}
		err = out.Conn.Send(command, out.Index, string(jsonEvent))
		if err != nil {
			outputs.SignalFailed(signal, err)
			out.onFail(err)
			return err
		}
	}
	if err := out.Conn.Flush(); err != nil {
		outputs.Signal(signal, err)
		out.onFail(err)
		return err
	}
	_, err := out.Conn.Receive()
	outputs.Signal(signal, err)
	out.onFail(err)
	return err
}
Example #17
func (out *RedisOutput) SendMessagesGoroutine() {

	var err error
	var pending int
	flushChannel := make(<-chan time.Time)
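	// A receive-only channel with no sender never fires, so the flush case
	// below stays idle unless a ticker is attached to flushChannel.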

	if !out.flush_immediatelly {
		flushTicker := time.NewTicker(out.FlushInterval)
		flushChannel = flushTicker.C
	}

	for {
		select {
		case queueMsg := <-out.sendingQueue:

			if !out.connected {
				logp.Debug("output_redis", "Droping pkt ...")
				continue
			}
			logp.Debug("output_redis", "Send event to redis")
			command := "RPUSH"
			if out.DataType == RedisChannelType {
				command = "PUBLISH"
			}

			if !out.flush_immediatelly {
				err = out.Conn.Send(command, queueMsg.index, queueMsg.msg)
				pending += 1
			} else {
				_, err = out.Conn.Do(command, queueMsg.index, queueMsg.msg)
			}
			if err != nil {
				logp.Err("Fail to publish event to REDIS: %s", err)
				out.connected = false
				go out.Reconnect()
			}
		case <-flushChannel:
			if pending > 0 {
				out.Conn.Flush()
				_, err = out.Conn.Receive()
				if err != nil {
					logp.Err("Fail to publish event to REDIS: %s", err)
					out.connected = false
					go out.Reconnect()
				}
				logp.Debug("output_redis", "Flushed %d pending commands", pending)
				pending = 0
			}
		}
	}
}
Example #18
func pgsqlFieldsParser(s *PgsqlStream) {
	m := s.message

	// read field count (int16)
	field_count := int(common.Bytes_Ntohs(s.data[s.parseOffset : s.parseOffset+2]))
	s.parseOffset += 2
	logp.Debug("pgsqldetailed", "Row Description field count=%d", field_count)

	fields := []string{}
	fields_format := []byte{}

	for i := 0; i < field_count; i++ {
		// read field name (null terminated string)
		field_name, err := common.ReadString(s.data[s.parseOffset:])
		if err != nil {
			logp.Err("Fail to read the column field")
		}
		fields = append(fields, field_name)
		m.NumberOfFields += 1
		s.parseOffset += len(field_name) + 1

		// read Table OID (int32)
		s.parseOffset += 4

		// read Column Index (int16)
		s.parseOffset += 2

		// read Type OID (int32)
		s.parseOffset += 4

		// read column length (int16)
		s.parseOffset += 2

		// read type modifier (int32)
		s.parseOffset += 4

		// read format (int16)
		format := common.Bytes_Ntohs(s.data[s.parseOffset : s.parseOffset+2])
		fields_format = append(fields_format, byte(format))
		s.parseOffset += 2

		logp.Debug("pgsqldetailed", "Field name=%s, format=%d", field_name, format)
	}
	m.Fields = fields
	m.FieldsFormat = fields_format
	if m.NumberOfFields != field_count {
		logp.Err("Missing fields from RowDescription. Expected %d. Received %d", field_count, m.NumberOfFields)
	}
}
Example #19
func (t *Topbeat) Run(b *beat.Beat) error {
	var err error

	t.initProcStats()

	ticker := time.NewTicker(t.period)
	defer ticker.Stop()

	for {
		select {
		case <-t.done:
			return nil
		case <-ticker.C:
		}

		timerStart := time.Now()

		if t.sysStats {
			err = t.exportSystemStats()
			if err != nil {
				logp.Err("Error reading system stats: %v", err)
				break
			}
		}
		if t.procStats {
			err = t.exportProcStats()
			if err != nil {
				logp.Err("Error reading proc stats: %v", err)
				break
			}
		}
		if t.fsStats {
			err = t.exportFileSystemStats()
			if err != nil {
				logp.Err("Error reading fs stats: %v", err)
				break
			}
		}

		timerEnd := time.Now()
		duration := timerEnd.Sub(timerStart)
		if duration.Nanoseconds() > t.period.Nanoseconds() {
			logp.Warn("Ignoring tick(s) due to processing taking longer than one period")
		}
	}

	return err
}
Example #20
func (d *Dockerbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&d.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	// init the period
	if d.TbConfig.Input.Period != nil {
		d.period = time.Duration(*d.TbConfig.Input.Period) * time.Second
	} else {
		d.period = 1 * time.Second
	}
	// init the socket
	if d.TbConfig.Input.Socket != nil {
		d.socket = *d.TbConfig.Input.Socket
	} else {
		d.socket = "unix:///var/run/docker.sock" // default docker socket location
	}

	logp.Debug("dockerbeat", "Init dockerbeat")
	logp.Debug("dockerbeat", "Follow docker socket %q\n", d.socket)
	logp.Debug("dockerbeat", "Period %v\n", d.period)

	return nil
}
Example #21
// open opens the file at h.Path and assigns the file handle to h.file
func (h *Harvester) open() error {
	// Special handling that "-" means to read from standard input
	if h.Path == "-" {
		h.file = os.Stdin
		return nil
	}

	for {
		var err error
		h.file, err = input.ReadOpen(h.Path)

		if err != nil {
			// TODO: This is currently an endless retry; should it be capped at a max?
			// retry on failure.
			logp.Err("Failed opening %s: %s", h.Path, err)
			time.Sleep(5 * time.Second)
		} else {
			break
		}
	}

	file := &input.File{
		File: h.file,
	}

	// Check we are not following a rabbit hole (symlinks, etc.)
	if !file.IsRegularFile() {
		return errors.New("Given file is not a regular file.")
	}

	h.setFileOffset()

	return nil
}
Example #22
// see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-reply
func opReplyParse(d *decoder, m *MongodbMessage) (bool, bool) {
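	// OP_REPLY body layout, per the linked wire-protocol doc:
	//   int32     responseFlags
	//   int64     cursorID
	//   int32     startingFrom
	//   int32     numberReturned
	//   document* documents (BSON, numberReturned times)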
	_, err := d.readInt32() // ignore flags for now
	m.event["cursorId"], err = d.readInt64()
	m.event["startingFrom"], err = d.readInt32()

	numberReturned, err := d.readInt32()
	m.event["numberReturned"] = numberReturned

	logp.Debug("mongodb", "Prepare to read %i document from reply", m.event["numberReturned"])

	documents := make([]interface{}, numberReturned)
	for i := 0; i < numberReturned; i++ {
		var document bson.M
		document, err = d.readDocument()

		// Check if the result is actually an error
		if i == 0 {
			if mongoError, present := document["$err"]; present {
				m.error, err = doc2str(mongoError)
			}
		}

		documents[i] = document
	}
	m.documents = documents

	if err != nil {
		logp.Err("An error occured while parsing OP_REPLY message: %s", err)
		return false, false
	}
	return true, true
}
Example #23
// Run Gzipbeat.
func (gb *Gzipbeat) Run(b *beat.Beat) error {

	// iterate through each config section
	for _, input := range gb.config.Input {

		// list all gzip files in directory
		gzips, _ := filepath.Glob(input.Path)
		if input.Exclude != "" {
			exclude, _ := filepath.Glob(input.Exclude)
			gzips = diff(gzips, exclude)
		}
		gzips = diff(gzips, gb.registry)

		// process one file at a time
		for _, filename := range gzips {
			send(gb, filename, &input.Fields)
			err := saveToRegistry(gb, filename)
			if err != nil {
				logp.Err("Error saving to registry file %s: %v", gb.config.Registry, err)
				return err
			}
		}
	}

	return nil
}
Example #24
// Each shipper publishes a list of IPs together with its name to Elasticsearch
func (out *elasticsearchOutput) PublishIPs(name string, localAddrs []string) error {
	if !out.ttlEnabled {
		logp.Debug("output_elasticsearch", "Not publishing IPs because TTL was not yet confirmed to be enabled")
		return nil
	}

	logp.Debug("output_elasticsearch", "Publish IPs %s with expiration time %d", localAddrs, out.TopologyExpire)
	params := map[string]string{
		"ttl":     fmt.Sprintf("%dms", out.TopologyExpire),
		"refresh": "true",
	}
	_, err := out.Conn.Index(
		".packetbeat-topology", /*index*/
		"server-ip",            /*type*/
		name,                   /* id */
		params,                 /* parameters */
		publishedTopology{name, strings.Join(localAddrs, ",")} /* body */)

	if err != nil {
		logp.Err("Fail to publish IP addresses: %s", err)
		return err
	}

	out.UpdateLocalTopologyMap()

	return nil
}
Example #25
func (h *Harvester) open() *os.File {
	// Special handling that "-" means to read from standard input
	if h.Path == "-" {
		h.file = os.Stdin
		return h.file
	}

	for {
		var err error
		h.file, err = os.Open(h.Path)

		if err != nil {
			// TODO: This is currently an endless retry; should it be capped at a max?
			// retry on failure.
			logp.Err("Failed opening %s: %s", h.Path, err)
			time.Sleep(5 * time.Second)
		} else {
			break
		}
	}

	file := &input.File{
		File: h.file,
	}

	// Check we are not following a rabbit hole (symlinks, etc.)
	if !file.IsRegularFile() {
		// TODO: This should be replaced by a normal error
		panic(fmt.Errorf("Harvester file error"))
	}

	h.setFileOffset()

	return h.file
}
Example #26
func (r *Registrar) Run() {
	logp.Info("Starting Registrar")

	r.running = true

	// Writes registry on shutdown
	defer r.writeRegistry()

	for {
		select {
		case <-r.done:
			logp.Info("Ending Registrar")
			return
		// Treats new log files to persist with higher priority than new events
		case state := <-r.Persist:
			r.State[*state.Source] = state
			logp.Debug("prospector", "Registrar will re-save state for %s", *state.Source)
		case events := <-r.Channel:
			r.processEvents(events)
		}

		if e := r.writeRegistry(); e != nil {
			// REVU: but we should panic, or something, right?
			logp.Err("Writing of registry returned error: %v. Continuing..", e)
		}
	}
}
Example #27
func (tb *Topbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&tb.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	if tb.TbConfig.Input.Period != nil {
		tb.period = time.Duration(*tb.TbConfig.Input.Period) * time.Second
	} else {
		tb.period = 1 * time.Second
	}
	if tb.TbConfig.Input.Procs != nil {
		tb.procs = *tb.TbConfig.Input.Procs
	} else {
		tb.procs = []string{".*"} // all processes
	}

	logp.Debug("topbeat", "Init toppbeat")
	logp.Debug("topbeat", "Follow processes %q\n", tb.procs)
	logp.Debug("topbeat", "Period %v\n", tb.period)

	return nil
}
Example #28
func pgsqlErrorParser(s *PgsqlStream) {

	m := s.message

	for len(s.data[s.parseOffset:]) > 0 {
		// read field type(byte1)
		field_type := s.data[s.parseOffset]
		s.parseOffset += 1

		if field_type == 0 {
			break
		}

		// read field value(string)
		field_value, err := common.ReadString(s.data[s.parseOffset:])
		if err != nil {
			logp.Err("Fail to read the column field")
		}
		s.parseOffset += len(field_value) + 1

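		// ErrorResponse field codes handled below, per the PostgreSQL protocol:
		//   'S' = severity, 'C' = SQLSTATE code, 'M' = human-readable message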
		if field_type == 'M' {
			m.ErrorInfo = field_value
		} else if field_type == 'C' {
			m.ErrorCode = field_value
		} else if field_type == 'S' {
			m.ErrorSeverity = field_value
		}

	}
	logp.Debug("pgsqldetailed", "%s %s %s", m.ErrorSeverity, m.ErrorCode, m.ErrorInfo)
}
Example #29
func createElasticsearchConnection(flushInterval int, bulkSize int) elasticsearchOutput {

	index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid())

	esPort, err := strconv.Atoi(GetEsPort())

	if err != nil {
		logp.Err("Invalid port. Cannot be converted to in: %s", GetEsPort())
	}

	var output elasticsearchOutput
	output.Init("packetbeat", outputs.MothershipConfig{
		Enabled:        true,
		Save_topology:  true,
		Host:           GetEsHost(),
		Port:           esPort,
		Username:       "",
		Password:       "",
		Path:           "",
		Index:          index,
		Protocol:       "",
		Flush_interval: &flushInterval,
		Bulk_size:      &bulkSize,
	}, 10)

	return output
}
Example #30
func (out *redisOutput) onFail(err error) {
	if err != nil {
		logp.Err("Fail to publish event to REDIS: %s", err)
		out.connected = false
		go out.Reconnect()
	}
}