Example #1
// Start starts the given module.
func (m *Module) Start(b *beat.Beat) {

	defer func() {
		if r := recover(); r != nil {
			logp.Err("Module %s paniced and stopped running. Reason: %+v", m.Name, r)
		}
	}()

	if !m.Enabled {
		logp.Debug("helper", "Not starting module %s as not enabled.", m.Name)
		return
	}

	logp.Info("Start Module: %v", m.Name)

	err := m.Moduler.Setup()
	if err != nil {
		logp.Err("Error setting up module: %s. Not starting metricsets for this module.", err)
		return
	}

	for _, metricSet := range m.MetricSets {
		// Count the metricset in the WaitGroup before launching the
		// goroutine so a concurrent Wait cannot return early.
		m.wg.Add(1)
		go metricSet.Start(b, m.wg)
	}
}
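The Add-before-go ordering used in the fix above is the standard sync.WaitGroup pattern: the counter must be incremented before the goroutine starts, otherwise a concurrent Wait can return before the worker is counted. A minimal, self-contained sketch of the pattern (the metricset names are illustrative, not Beats APIs):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	metricsets := []string{"cpu", "memory", "network"}

	for _, name := range metricsets {
		wg.Add(1) // register the worker before it starts
		go func(n string) {
			defer wg.Done()
			fmt.Println("fetching", n)
		}(name)
	}

	wg.Wait() // safe: every worker was counted before being launched
}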
Example #2
// startFetching performs an immediate fetch for the MetricSet, then begins a
// continuous, timer-scheduled loop to fetch data. To stop the loop the
// done channel should be closed.
func (msw *metricSetWrapper) startFetching(
	done <-chan struct{},
	out chan<- common.MapStr,
) {
	debugf("Starting %s", msw)
	defer debugf("Stopped %s", msw)

	// Fetch immediately.
	err := msw.fetch(done, out)
	if err != nil {
		logp.Err("%v", err)
	}

	// Start timer for future fetches.
	t := time.NewTicker(msw.Module().Config().Period)
	defer t.Stop()
	for {
		select {
		case <-done:
			return
		case <-t.C:
			err := msw.fetch(done, out)
			if err != nil {
				logp.Err("%v", err)
			}
		}
	}
}
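The done-channel-plus-ticker shape above is reusable on its own; here is a minimal sketch using only the standard library (the period, the fetch callback, and the logging are placeholders, not Beats APIs):

package main

import (
	"fmt"
	"time"
)

// runPeriodically fetches once immediately, then on every tick until done is closed.
func runPeriodically(done <-chan struct{}, period time.Duration, fetch func() error) {
	if err := fetch(); err != nil {
		fmt.Println("fetch error:", err)
	}

	t := time.NewTicker(period)
	defer t.Stop() // release the ticker on exit

	for {
		select {
		case <-done:
			return
		case <-t.C:
			if err := fetch(); err != nil {
				fmt.Println("fetch error:", err)
			}
		}
	}
}

func main() {
	done := make(chan struct{})
	go runPeriodically(done, 200*time.Millisecond, func() error { fmt.Println("tick"); return nil })
	time.Sleep(700 * time.Millisecond)
	close(done)
}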
Example #3
func (out *fileOutput) PublishEvent(
	sig op.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	jsonEvent, err := json.Marshal(event)
	if err != nil {
		// mark as success so event is not sent again.
		op.SigCompleted(sig)

		logp.Err("Fail to json encode event(%v): %#v", err, event)
		return err
	}

	err = out.rotator.WriteLine(jsonEvent)
	if err != nil {
		if opts.Guaranteed {
			logp.Critical("Unable to write events to file: %s", err)
		} else {
			logp.Err("Error when writing line to file: %s", err)
		}
	}
	op.Sig(sig, err)
	return err
}
Example #4
func createWatchUpdater(monitor *Monitor) func(content []byte) {
	return func(content []byte) {
		defer logp.Recover("Failed applying monitor watch")

		// read multiple json objects from content
		dec := json.NewDecoder(bytes.NewBuffer(content))
		var configs []*common.Config
		for dec.More() {
			var obj map[string]interface{}
			err := dec.Decode(&obj)
			if err != nil {
				logp.Err("Failed parsing json object: %v", err)
				return
			}

			logp.Info("load watch object: %v", obj)

			cfg, err := common.NewConfigFrom(obj)
			if err != nil {
				logp.Err("Failed normalizing json input: %v", err)
				return
			}

			configs = append(configs, cfg)
		}

		// apply read configurations
		if err := monitor.Update(configs); err != nil {
			logp.Err("Failed applying configuration: %v", err)
		}
	}
}
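The loop above works because json.Decoder consumes a stream of concatenated JSON values and More reports whether another value follows; a standalone sketch using only the standard library:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Two JSON objects back to back, as a watch file might deliver them.
	content := []byte(`{"id": 1, "name": "first"} {"id": 2, "name": "second"}`)

	dec := json.NewDecoder(bytes.NewBuffer(content))
	var objs []map[string]interface{}

	for dec.More() {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			log.Fatalf("failed parsing json object: %v", err)
		}
		objs = append(objs, obj)
	}

	fmt.Printf("decoded %d objects: %v\n", len(objs), objs)
}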
Example #5
// scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			err := p.handleIgnoreOlder(lastState, newState)
			if err != nil {
				logp.Err("Updating ignore_older state error: %s", err)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			err := p.Prospector.startHarvester(newState, 0)
			if err != nil {
				logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
			}
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Example #6
File: file.go Project: jarpy/beats
func (out *fileOutput) PublishEvent(
	trans outputs.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	jsonEvent, err := json.Marshal(event)
	if err != nil {
		// mark as success so event is not sent again.
		outputs.SignalCompleted(trans)

		logp.Err("Fail to convert the event to JSON: %s", err)
		return err
	}

	err = out.rotator.WriteLine(jsonEvent)
	if err != nil {
		if opts.Guaranteed {
			logp.Critical("Unable to write events to file: %s", err)
		} else {
			logp.Err("Error when writing line to file: %s", err)
		}
	}
	outputs.Signal(trans, err)
	return err
}
Example #7
// Setup prepares Journalbeat for the main loop (starts journalreader, etc.)
func (jb *Journalbeat) Setup(b *beat.Beat) error {
	logp.Info("Journalbeat Setup")
	jb.output = b.Publisher.Connect()
	// Buffer the channel, otherwise writing to it blocks when Stop is called
	// while FollowJournal is waiting to write the next event
	jb.done = make(chan int, 1)
	jb.recv = make(chan sdjournal.JournalEntry)
	jb.cursorChan = make(chan string)
	jb.cursorChanFlush = make(chan int)

	jr, err := sdjournal.NewJournalReader(sdjournal.JournalReaderConfig{
		Since: time.Duration(1),
		//          NumFromTail: 0,
	})
	if err != nil {
		logp.Err("Could not create JournalReader")
		return err
	}

	jb.jr = jr

	// seek to position
	err = jb.seekToPosition()
	if err != nil {
		errMsg := fmt.Sprintf("seeking to a good position in journal failed: %v", err)
		logp.Err(errMsg)
		return fmt.Errorf("%s", errMsg)
	}

	// done with setup
	return nil
}
Example #8
// scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)

			// If last state is empty, it means state was removed or never created -> can be ignored
			if !lastState.IsEmpty() && !lastState.Finished {
				logp.Err("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			err := p.Prospector.startHarvester(newState, 0)
			if err != nil {
				logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
			}
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Example #9
func (out *redisOutput) PublishIPs(name string, localAddrs []string) error {
	logp.Debug("output_redis", "[%s] Publish the IPs %s", name, localAddrs)

	// connect to db
	conn, err := out.RedisConnect(out.DbTopology)
	if err != nil {
		return err
	}
	defer func() { _ = conn.Close() }()

	_, err = conn.Do("HSET", name, "ipaddrs", strings.Join(localAddrs, ","))
	if err != nil {
		logp.Err("[%s] Fail to set the IP addresses: %s", name, err)
		return err
	}

	_, err = conn.Do("EXPIRE", name, int(out.TopologyExpire.Seconds()))
	if err != nil {
		logp.Err("[%s] Fail to set the expiration time: %s", name, err)
		return err
	}

	out.UpdateLocalTopologyMap(conn)

	return nil
}
Example #10
func itemStatus(reader *jsonReader) (int, []byte, error) {
	// skip outer dictionary
	if err := reader.expectDict(); err != nil {
		return 0, nil, errExpectedItemObject
	}

	// find first field in outer dictionary (e.g. 'create')
	kind, _, err := reader.nextFieldName()
	if err != nil {
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}
	if kind == dictEnd {
		err = errUnexpectedEmptyObject
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	// parse actual item response code and error message
	status, msg, err := itemStatusInner(reader)
	if err != nil {
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	// close dictionary. Expect outer dictionary to have only one element
	kind, _, err = reader.step()
	if err != nil {
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}
	if kind != dictEnd {
		err = errExcpectedObjectEnd
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	return status, msg, nil
}
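itemStatus above walks the tokens by hand through the custom jsonReader; for reference, the same single-action wrapper (e.g. {"create": {"status": 429, ...}}) can also be unpacked with encoding/json alone. A hedged sketch, with an invented struct and helper that are not the Beats implementation:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type bulkItemBody struct {
	Status int             `json:"status"`
	Error  json.RawMessage `json:"error"`
}

// itemStatus extracts the status and raw error from one bulk response item,
// which is an object with a single action key such as "create" or "index".
func itemStatus(item []byte) (int, []byte, error) {
	var outer map[string]bulkItemBody
	if err := json.Unmarshal(item, &outer); err != nil {
		return 0, nil, err
	}
	if len(outer) != 1 {
		return 0, nil, errors.New("expected exactly one action key in bulk item")
	}
	for _, body := range outer {
		return body.Status, body.Error, nil
	}
	return 0, nil, errors.New("unreachable")
}

func main() {
	item := []byte(`{"create": {"status": 429, "error": {"type": "es_rejected_execution_exception"}}}`)
	status, msg, err := itemStatus(item)
	fmt.Println(status, string(msg), err)
}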
Example #11
// bulkCollectPublishFails checks the per-item errors and returns all events
// that should be retried based on the error code returned for each item. If indexing an
// event failed due to some error in the event itself (e.g. it does not respect the mapping),
// the event will be dropped.
func bulkCollectPublishFails(
	reader *jsonReader,
	events []common.MapStr,
) []common.MapStr {
	if err := reader.expectDict(); err != nil {
		logp.Err("Failed to parse bulk respose: expected JSON object")
		return nil
	}

	// find 'items' field in response
	for {
		kind, name, err := reader.nextFieldName()
		if err != nil {
			logp.Err("Failed to parse bulk response")
			return nil
		}

		if kind == dictEnd {
			logp.Err("Failed to parse bulk response: no 'items' field in response")
			return nil
		}

		// found items array -> continue
		if bytes.Equal(name, nameItems) {
			break
		}

		reader.ignoreNext()
	}

	// check items field is an array
	if err := reader.expectArray(); err != nil {
		logp.Err("Failed to parse bulk respose: expected items array")
		return nil
	}

	count := len(events)
	failed := events[:0]
	for i := 0; i < count; i++ {
		status, msg, err := itemStatus(reader)
		if err != nil {
			return nil
		}

		if status < 300 {
			continue // ok value
		}

		if status < 500 && status != 429 {
			// hard failure, don't collect
			logp.Warn("Can not index event (status=%v): %s", status, msg)
			continue
		}

		logp.Info("Bulk item insert failed (i=%v, status=%v): %s", i, status, msg)
		failed = append(failed, events[i])
	}

	return failed
}
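The failed := events[:0] line uses the standard in-place filter idiom: reslicing to zero length keeps the backing array, so retryable events are packed to the front without a new allocation. A tiny sketch of the idiom on plain status codes:

package main

import "fmt"

func main() {
	statuses := []int{200, 503, 201, 429, 400}

	// Reslice to zero length but full capacity: the appends below overwrite
	// already-read elements in place instead of allocating a new slice.
	failed := statuses[:0]
	for _, s := range statuses {
		if s == 429 || s >= 500 { // retryable statuses
			failed = append(failed, s)
		}
	}

	fmt.Println(failed) // [503 429]
}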
Example #12
// startFetching performs an immediate fetch for the specified host, then
// begins a continuous, timer-scheduled loop to fetch data. To stop the loop the
// done channel should be closed. On exit the method will decrement the
// WaitGroup counter.
//
// startFetching manages fetching for a single host so it should be called once
// per host.
func (msw *metricSetWrapper) startFetching(
	done <-chan struct{},
	wg *sync.WaitGroup,
	host string,
) {
	defer wg.Done()

	// Fetch immediately.
	err := msw.fetch(host)
	if err != nil {
		logp.Err("fetch error: %v", err)
	}

	// Start timer for future fetches.
	t := time.NewTicker(msw.Module().Config().Period)
	defer t.Stop()
	for {
		select {
		case <-done:
			return
		case <-t.C:
			err := msw.fetch(host)
			if err != nil {
				logp.Err("%v", err)
			}
		}
	}
}
Example #13
// writeRegistry writes the new json registry file to disk.
func (r *Registrar) writeRegistry() error {
	logp.Debug("registrar", "Write registry file: %s", r.registryFile)

	tempfile := r.registryFile + ".new"
	f, err := os.Create(tempfile)
	if err != nil {
		logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err)
		return err
	}

	// First clean up states
	states := r.states.GetStates()

	encoder := json.NewEncoder(f)
	err = encoder.Encode(states)
	if err != nil {
		logp.Err("Error when encoding the states: %s", err)
		return err
	}

	// Close the file explicitly (not deferred): on Windows it must be closed before it can be rotated
	f.Close()

	logp.Debug("registrar", "Registry file updated. %d states written.", len(states))
	registryWrites.Add(1)
	statesCurrent.Set(int64(len(states)))

	return file.SafeFileRotate(r.registryFile, tempfile)
}
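The temp-file-then-rotate sequence above is what makes the registry update crash-safe: the old file stays intact until the new one is fully written and closed. A minimal standard-library sketch of the same idea, using os.Rename where Beats uses file.SafeFileRotate (the state type is invented for illustration):

package main

import (
	"encoding/json"
	"log"
	"os"
)

type state struct {
	Source string `json:"source"`
	Offset int64  `json:"offset"`
}

// writeRegistry writes states to path atomically: encode into a temp file,
// close it, then rename it over the old file in one step.
func writeRegistry(path string, states []state) error {
	tmp := path + ".new"

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}

	if err := json.NewEncoder(f).Encode(states); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil { // close before rename (required on Windows)
		return err
	}

	return os.Rename(tmp, path)
}

func main() {
	err := writeRegistry("registry.json", []state{{Source: "/var/log/syslog", Offset: 1024}})
	if err != nil {
		log.Fatal(err)
	}
}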
Example #14
// harvestExistingFile continues harvesting a file with a known state if needed
func (p *ProspectorLog) harvestExistingFile(newState file.State, oldState file.State) {

	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// No harvester is running for the file, start a new harvester
	// It is important that only the size is checked here and not the modification time,
	// as the modification time can be incorrect on Windows:
	// https://blogs.technet.microsoft.com/asiasupp/2010/12/14/file-date-modified-property-are-not-updating-while-modifying-a-file-without-closing-it/
	if oldState.Finished && newState.Fileinfo.Size() > oldState.Offset {
		// Resume harvesting of an old file we've stopped harvesting from
		// This can also happen with force_close_older: a new harvester is started after each scan even when it is not needed.
		// One problem with comparing modTime is that it has second resolution, and scans can happen more than once per second.
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		err := p.Prospector.startHarvester(newState, oldState.Offset)
		if err != nil {
			logp.Err("Harvester could not be started on existing file: %s, Err: %s", newState.Source, err)
		}
		return
	}

	// File size was reduced -> truncated file
	if oldState.Finished && newState.Fileinfo.Size() < oldState.Offset {
		logp.Debug("prospector", "Old file was truncated. Starting from the beginning: %s", newState.Source)
		err := p.Prospector.startHarvester(newState, 0)
		if err != nil {
			logp.Err("Harvester could not be started on truncated file: %s, Err: %s", newState.Source, err)
		}

		filesTrucated.Add(1)
		return
	}

	// Check if file was renamed
	if oldState.Source != "" && oldState.Source != newState.Source {
		// This does not start a new harvester as it is assumed that the older harvester is still running
		// or no new lines were detected. It only sends a state update event to make sure the new name is persisted.
		logp.Debug("prospector", "File rename was detected: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		if oldState.Finished {
			logp.Debug("prospector", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)
			// Update state because of file rotation
			oldState.Source = newState.Source
			event := input.NewEvent(oldState)
			err := p.Prospector.updateState(event)
			if err != nil {
				logp.Err("File rotation state update error: %s", err)
			}

			filesRenamed.Add(1)
		} else {
			logp.Debug("prospector", "File rename detected but harvester not finished yet.")
		}
	}

	if !oldState.Finished {
		// Nothing to do. Harvester is still running and file was not renamed
		logp.Debug("prospector", "Harvester for file is still running: %s", newState.Source)
	} else {
		logp.Debug("prospector", "File didn't change: %s", newState.Source)
	}
}
Example #15
func serializeEvents(
	to []interface{},
	i int,
	data []outputs.Data,
) ([]outputs.Data, []interface{}) {
	succeeded := data
	for _, d := range data {
		jsonEvent, err := json.Marshal(d.Event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, d.Event)
			goto failLoop
		}
		to = append(to, jsonEvent)
		i++
	}
	return succeeded, to

failLoop:
	succeeded = data[:i]
	rest := data[i+1:]
	for _, d := range rest {
		jsonEvent, err := json.Marshal(d.Event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, d.Event)
			i++
			continue
		}
		to = append(to, jsonEvent)
		i++
	}

	return succeeded, to
}
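The goto/failLoop structure above exists so that one unencodable event does not abort the whole batch. A simpler single-loop sketch of the same drop-on-encode-failure policy, using a plain map as the event type (the function name and types here are illustrative, not the Beats signature):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// serializeEvents returns the JSON encodings of the events that could be
// marshaled, logging and dropping any event that cannot be encoded.
func serializeEvents(events []map[string]interface{}) [][]byte {
	out := make([][]byte, 0, len(events))
	for _, ev := range events {
		b, err := json.Marshal(ev)
		if err != nil {
			log.Printf("failed to convert event to JSON (%v): %#v", err, ev)
			continue // drop only this event, keep the rest of the batch
		}
		out = append(out, b)
	}
	return out
}

func main() {
	events := []map[string]interface{}{
		{"message": "ok"},
		{"bad": func() {}}, // functions cannot be marshaled to JSON
		{"message": "also ok"},
	}
	fmt.Println(len(serializeEvents(events))) // 2
}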
Example #16
func getConsumerOffsets(group string, topic string, pids map[int32]int64) (map[int32]int64, error) {
	broker, err := client.Coordinator(group)
	offsets := make(map[int32]int64)
	if err != nil {
		logp.Err("Unable to identify group coordinator for group %v", group)
	} else {
		request := sarama.OffsetFetchRequest{ConsumerGroup: group, Version: 0}
		for pid, size := range pids {
			if size > 0 {
				request.AddPartition(topic, pid)
			}
		}
		res, err := broker.FetchOffset(&request)
		if err != nil {
			logp.Err("Issue fetching offsets coordinator for topic %v", topic)
			logp.Err("%v", err)
		}
		if res != nil {
			for pid := range pids {
				offset := res.GetBlock(topic, pid)
				if offset != nil && offset.Offset > -1 {
					offsets[pid] = offset.Offset
				}
			}
		}
	}
	return offsets, err
}
Example #17
func (mb *Mesosbeat) GetAgentStatistics(u string) (map[string]float64, error) {
	statistics := make(map[string]float64)

	resp, err := http.Get(u)
	if err != nil {
		logp.Err("An error occurred while executing HTTP request: %v", err)
		return statistics, err
	}
	defer resp.Body.Close()

	// read json http response
	jsonDataFromHttp, err := ioutil.ReadAll(resp.Body)

	if err != nil {
		logp.Err("An error occured while reading HTTP response: %v", err)
		return statistics, err
	}

	err = json.Unmarshal([]byte(jsonDataFromHttp), &statistics)

	if err != nil {
		logp.Err("An error occured while unmarshaling agent statistics: %v", err)
		return statistics, err
	}
	return statistics, nil
}
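The corrected ordering above (check the http.Get error before deferring Body.Close, since resp can be nil on error) generalizes into a small helper. A hedged sketch with only the standard library; the helper name and URL are placeholders:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// getJSON fetches url and decodes the JSON body into target.
func getJSON(url string, target interface{}) error {
	resp, err := http.Get(url)
	if err != nil {
		return err // resp may be nil here, so do not defer Close before this check
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(body, target)
}

func main() {
	var stats map[string]float64
	if err := getJSON("http://localhost:5051/metrics/snapshot", &stats); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(stats))
}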
Example #18
func serializeEvents(
	to []interface{},
	i int,
	events []common.MapStr,
) ([]common.MapStr, []interface{}) {
	okEvents := events
	for _, event := range events {
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, event)
			goto failLoop
		}
		to = append(to, jsonEvent)
		i++
	}
	return okEvents, to

failLoop:
	okEvents = events[:i]
	restEvents := events[i+1:]
	for _, event := range restEvents {
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, event)
			i++
			continue
		}
		to = append(to, jsonEvent)
		i++
	}

	return okEvents, to
}
Example #19
// handleReadlineError handles error which are raised during reading file.
//
// If the error is EOF, it will check for:
// * File truncated
// * Older than ignore_older
// * General file error
//
// If none of the above cases match, no error is returned and the file is kept open
//
// In case of a general error, the error itself is returned
func (h *Harvester) handleReadlineError(lastTimeRead time.Time, err error) error {
	if err != io.EOF || !h.file.Continuable() {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
		return err
	}

	// Refetch fileinfo to check if the file was truncated or disappeared.
	// Errors if the file was removed/rotated after reading and before
	// calling the stat function
	info, statErr := h.file.Stat()
	if statErr != nil {
		logp.Err("Unexpected error reading from %s; error: %s", h.Path, statErr)
		return statErr
	}

	// Handle fails if file was truncated
	if info.Size() < h.Offset {
		seeker, ok := h.file.(io.Seeker)
		if !ok {
			logp.Err("Can not seek source")
			return err
		}

		logp.Debug("harvester", "File was truncated as offset (%s) > size (%s). Begin reading file from offset 0: %s", h.Offset, info.Size(), h.Path)

		h.Offset = 0
		seeker.Seek(h.Offset, os.SEEK_SET)
		return nil
	}

	age := time.Since(lastTimeRead)
	if age > h.ProspectorConfig.IgnoreOlderDuration {
		// If the file hasn't changed for longer than ignore_older, the harvester stops
		// and the file handle will be closed.
		return fmt.Errorf("Stop harvesting as file is older than ignore_older: %s; Last change was: %s ", h.Path, age)
	}

	if h.Config.ForceCloseFiles {
		// Check if the file name exists (see #93)
		_, statErr := os.Stat(h.file.Name())

		// Error means file does not exist. If no error, check if same file. If not close as rotated.
		if statErr != nil || !input.IsSameFile(h.file.Name(), info) {
			logp.Info("Force close file: %s; error: %s", h.Path, statErr)
			// Return directly on windows -> file is closing
			return fmt.Errorf("Force closing file: %s", h.Path)
		}
	}

	if err != io.EOF {
		logp.Err("Unexpected state reading from %s; error: %s", h.Path, err)
	}

	logp.Debug("harvester", "End of file reached: %s; Backoff now.", h.Path)

	// Do nothing in case it is just EOF, keep reading the file after backing off
	h.backOff()
	return nil
}
Example #20
func (p *parser) parserBody() (bool, error) {

	headLen := p.framer.Header.HeadLength
	bdyLen := p.framer.Header.BodyLength
	if bdyLen <= 0 {
		return true, nil
	}

	// wait until enough data is buffered for the body
	debugf("bodyLength: %d", bdyLen)
	if !p.buf.Avail(bdyLen) {
		if isDebug {
			debugf("buf not enough for body, waiting for more, return")
		}
		return false, nil
	}

	// check if the op was already marked to be ignored
	if p.message.ignored {
		if isDebug {
			debugf("message marked to be ignored, let's do this")
		}
		p.buf.Collect(bdyLen)
	} else {
		// start to parse body
		data, err := p.framer.ReadFrame()
		if err != nil {
			// if the frame parsed failed, should ignore the whole message
			p.framer = nil
			return false, err
		}

		// dealing with un-parsed content
		frameParsedLength := p.buf.BufferConsumed()

		// collect leftover
		unParsedSize := bdyLen + headLen - frameParsedLength
		if unParsedSize > 0 {
			if !p.buf.Avail(unParsedSize) {
				err := errors.New("should be enough bytes for cleanup,but not enough")
				logp.Err("Finishing frame failed with: %v", err)
				return false, err
			}

			p.buf.Collect(unParsedSize)
		}

		p.message.data = data
	}

	finalCollectedFrameLength := p.buf.BufferConsumed()
	if finalCollectedFrameLength-headLen != bdyLen {
		logp.Err("body_length:%d, head_length:%d, all_consumed:%d",
			bdyLen, headLen, finalCollectedFrameLength)
		return false, errors.New("data messed while parse frame body")
	}

	return true, nil
}
Example #21
File: kafka.go Project: jarpy/beats
func (k *kafka) init(cfg *ucfg.Config) error {
	debugf("initialize kafka output")

	config := defaultConfig
	if err := cfg.Unpack(&config); err != nil {
		return err
	}

	libCfg, retries, err := newKafkaConfig(&config)
	if err != nil {
		return err
	}

	hosts := config.Hosts
	if len(hosts) < 1 {
		logp.Err("Kafka configuration failed with: %v", errNoHosts)
		return errNoHosts
	}
	debugf("hosts: %v", hosts)

	useType := config.UseType

	topic := config.Topic
	if topic == "" && !useType {
		logp.Err("Kafka configuration failed with: %v", errNoTopicSet)
		return errNoTopicSet
	}

	var clients []mode.AsyncProtocolClient
	worker := 1
	if config.Worker > 1 {
		worker = config.Worker
	}
	for i := 0; i < worker; i++ {
		client, err := newKafkaClient(hosts, topic, useType, libCfg)
		if err != nil {
			logp.Err("Failed to create kafka client: %v", err)
			return err
		}

		clients = append(clients, client)
	}

	mode, err := mode.NewAsyncConnectionMode(
		clients,
		false,
		retries, // retry implemented by kafka client
		libCfg.Producer.Retry.Backoff,
		libCfg.Net.WriteTimeout,
		10*time.Second)
	if err != nil {
		logp.Err("Failed to configure kafka connection: %v", err)
		return err
	}

	k.mode = mode
	return nil
}
Example #22
// errorChecks checks how the given error should be handled based on the config options
func (r *LogFile) errorChecks(err error) error {

	if err != io.EOF {
		logp.Err("Unexpected state reading from %s; error: %s", r.fs.Name(), err)
		return err
	}

	// Stdin is not continuable
	if !r.fs.Continuable() {
		logp.Debug("harvester", "Source is not continuable: %s", r.fs.Name())
		return err
	}

	if err == io.EOF && r.config.CloseEOF {
		return err
	}

	// Refetch fileinfo to check if the file was truncated or disappeared.
	// Errors if the file was removed/rotated after reading and before
	// calling the stat function
	info, statErr := r.fs.Stat()
	if statErr != nil {
		logp.Err("Unexpected error reading from %s; error: %s", r.fs.Name(), statErr)
		return statErr
	}

	// check if file was truncated
	if info.Size() < r.offset {
		logp.Debug("harvester",
			"File was truncated as offset (%d) > size (%d): %s", r.offset, info.Size(), r.fs.Name())
		return ErrFileTruncate
	}

	// Check the file wasn't read for longer than CloseInactive
	age := time.Since(r.lastTimeRead)
	if age > r.config.CloseInactive {
		return ErrInactive
	}

	if r.config.CloseRenamed {
		// Check if the file can still be found under the same path
		if !file.IsSameFile(r.fs.Name(), info) {
			return ErrRenamed
		}
	}

	if r.config.CloseRemoved {
		// Check if the file name exists. See https://github.com/elastic/filebeat/issues/93
		_, statErr := os.Stat(r.fs.Name())

		// Error means file does not exist.
		if statErr != nil {
			return ErrRemoved
		}
	}

	return nil
}
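errorChecks maps low-level read conditions onto sentinel errors (ErrFileTruncate, ErrInactive, ErrRenamed, ErrRemoved) that the caller can compare against. A compact standalone sketch of that pattern, with invented sentinel names and a simplified check order:

package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

var (
	errTruncated = errors.New("file was truncated")
	errInactive  = errors.New("file is inactive")
	errRemoved   = errors.New("file was removed")
)

// classify decides how a reader at offset into path should proceed, given
// when it last read data and how long inactivity is tolerated.
func classify(path string, offset int64, lastRead time.Time, closeInactive time.Duration) error {
	info, err := os.Stat(path)
	if err != nil {
		return errRemoved // stat failure: treat the file as gone
	}
	if info.Size() < offset {
		return errTruncated // file shrank below our read offset
	}
	if time.Since(lastRead) > closeInactive {
		return errInactive
	}
	return nil
}

func main() {
	err := classify("/var/log/syslog", 0, time.Now(), 5*time.Minute)
	fmt.Println(err, err == errTruncated)
}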
Example #23
func (proc *ProcessesWatcher) Init(config ProcsConfig) error {

	proc.proc_prefix = ""
	proc.PortProcMap = make(map[uint16]PortProcMapping)
	proc.LastMapUpdate = time.Now()

	proc.ReadFromProc = config.Enabled
	if proc.ReadFromProc {
		if runtime.GOOS != "linux" {
			proc.ReadFromProc = false
			logp.Info("Disabled /proc/ reading because not on linux")
		} else {
			logp.Info("Process matching enabled")
		}
	} else {
		logp.Info("Process matching disabled")
	}

	if config.Max_proc_read_freq == 0 {
		proc.MaxReadFreq = 10 * time.Millisecond
	} else {
		proc.MaxReadFreq = config.Max_proc_read_freq
	}

	if config.Refresh_pids_freq == 0 {
		proc.RefreshPidsFreq = 1 * time.Second
	} else {
		proc.RefreshPidsFreq = config.Refresh_pids_freq
	}

	// Read the local IP addresses
	var err error
	proc.LocalAddrs, err = common.LocalIpAddrs()
	if err != nil {
		logp.Err("Error getting local IP addresses: %s", err)
		proc.LocalAddrs = []net.IP{}
	}

	if proc.ReadFromProc {
		for _, procConfig := range config.Monitored {

			grepper := procConfig.Cmdline_grep
			if len(grepper) == 0 {
				grepper = procConfig.Process
			}

			p, err := NewProcess(proc, procConfig.Process, grepper, time.Tick(proc.RefreshPidsFreq))
			if err != nil {
				logp.Err("NewProcess: %s", err)
			} else {
				proc.Processes = append(proc.Processes, p)
			}
		}
	}

	return nil
}
Example #24
func (bt *Kafkabeat) Config(b *beat.Beat) error {

	// Load beater beatConfig
	logp.Info("Configuring Kafkabeat...")
	var err error
	err = cfgfile.Read(&bt.beatConfig, "")
	if err != nil {
		return fmt.Errorf("Error reading config file: %v", err)
	}

	bt.zookeepers = bt.beatConfig.Kafkabeat.Zookeepers
	if bt.zookeepers == nil || len(bt.zookeepers) == 0 {
		return KafkabeatError{"Atleast one zookeeper must be defined"}
	}
	zClient, err = kazoo.NewKazoo(bt.zookeepers, nil)
	if err != nil {
		logp.Err("Unable to connect to Zookeeper")
		return err
	}
	bt.brokers, err = zClient.BrokerList()
	if err != nil {
		logp.Err("Error identifying brokers from zookeeper")
		return err
	}
	if bt.brokers == nil || len(bt.brokers) == 0 {
		return KafkabeatError{"Unable to identify active brokers"}
	}
	logp.Info("Brokers: %v", bt.brokers)
	client, err = sarama.NewClient(bt.brokers, sarama.NewConfig())
	if err != nil {
		logp.Err("Unable to create Kafka client: %v", err)
		return err
	}

	// Setting default period if not set
	if bt.beatConfig.Kafkabeat.Period == "" {
		bt.beatConfig.Kafkabeat.Period = "1s"
	}

	bt.period, err = time.ParseDuration(bt.beatConfig.Kafkabeat.Period)
	if err != nil {
		return err
	}
	bt.topics = bt.beatConfig.Kafkabeat.Topics
	bt.create_topic_docs = true
	if bt.topics == nil || len(bt.topics) == 0 {
		bt.create_topic_docs = bt.topics == nil
		bt.topics, err = client.Topics()
	}
	if err != nil {
		return err
	}
	logp.Info("Monitoring topics: %v", bt.topics)
	bt.groups = bt.beatConfig.Kafkabeat.Groups
	if bt.groups == nil {
		bt.groups, err = getGroups()
	}
	logp.Info("Monitoring groups %v", bt.groups)
	return err
}
Example #25
func (fb *Filebeat) Run(b *beat.Beat) error {

	defer func() {
		p := recover()
		if p == nil {
			return
		}

		fmt.Printf("recovered panic: %v", p)
		os.Exit(1)
	}()

	var err error

	// Init channels
	fb.publisherChan = make(chan []*FileEvent, 1)

	// Setup registrar to persist state
	fb.registrar, err = NewRegistrar(fb.FbConfig.Filebeat.RegistryFile)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	crawl := &Crawler{
		Registrar: fb.registrar,
	}

	// Load the previous log file locations now, for use in prospector
	fb.registrar.LoadState()

	// Init and Start spooler: Harvesters dump events into the spooler.
	fb.Spooler = NewSpooler(fb)
	err = fb.Spooler.Config()

	if err != nil {
		logp.Err("Could not init spooler: %v", err)
		return err
	}

	// Start up spooler
	go fb.Spooler.Run()

	crawl.Start(fb.FbConfig.Filebeat.Prospectors, fb.Spooler.Channel)

	// Publishes event to output
	pub := newPublisher(fb.FbConfig.Filebeat.PublishAsync,
		fb.publisherChan, fb.registrar.Channel, b.Events)
	pub.Start()

	// registrar records last acknowledged positions in all files.
	fb.registrar.Run()

	return nil
}
Example #26
func (t *Topbeat) Run(b *beat.Beat) error {
	var err error

	err = t.procStats.InitProcStats()
	if err != nil {
		return err
	}

	ticker := time.NewTicker(t.period)
	defer ticker.Stop()

	for {
		select {
		case <-t.done:
			return nil
		case <-ticker.C:
		}

		timerStart := time.Now()

		if t.sysStats {
			event, err := t.cpu.GetSystemStats()
			if err != nil {
				logp.Err("Error reading system stats: %v", err)
				break
			}
			t.events.PublishEvent(event)
		}
		if t.procStats.ProcStats {
			events, err := t.procStats.GetProcStatsEvents()
			if err != nil {
				logp.Err("Error reading proc stats: %v", err)
				break
			}
			t.events.PublishEvents(events)
		}
		if t.fsStats {
			events, err := system.GetFileSystemStats()
			if err != nil {
				logp.Err("Error reading fs stats: %v", err)
				break
			}
			t.events.PublishEvents(events)

		}

		timerEnd := time.Now()
		duration := timerEnd.Sub(timerStart)
		if duration.Nanoseconds() > t.period.Nanoseconds() {
			logp.Warn("Ignoring tick(s) due to processing taking longer than one period")
		}
	}

	return err
}
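The final duration check above is how Topbeat notices that a collection cycle overran its period (time.Ticker drops ticks for slow receivers, so an overrun means lost samples). A minimal sketch of that check, with an artificial sleep standing in for the collection work:

package main

import (
	"fmt"
	"time"
)

func main() {
	period := 100 * time.Millisecond
	ticker := time.NewTicker(period)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		<-ticker.C
		start := time.Now()

		time.Sleep(150 * time.Millisecond) // simulate work that overruns the period

		if elapsed := time.Since(start); elapsed > period {
			// time.Ticker drops ticks for slow receivers, so an overrun means lost samples.
			fmt.Printf("iteration took %v, longer than the %v period\n", elapsed, period)
		}
	}
}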
Example #27
func (fb *Filebeat) Run(b *beat.Beat) error {

	var err error

	// Init channels
	fb.publisherChan = make(chan []*FileEvent, 1)

	// Setup registrar to persist state
	fb.registrar, err = NewRegistrar(fb.FbConfig.Filebeat.RegistryFile)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	fb.cralwer = &Crawler{
		Registrar: fb.registrar,
	}

	// Load the previous log file locations now, for use in prospector
	fb.registrar.LoadState()

	// Init and Start spooler: Harvesters dump events into the spooler.
	fb.spooler = NewSpooler(fb)
	err = fb.spooler.Config()

	if err != nil {
		logp.Err("Could not init spooler: %v", err)
		return err
	}

	// Start up spooler
	go fb.spooler.Run()

	// registrar records last acknowledged positions in all files.
	go fb.registrar.Run()

	err = fb.cralwer.Start(fb.FbConfig.Filebeat.Prospectors, fb.spooler.Channel)
	if err != nil {
		return err
	}

	// Publishes event to output
	pub := newPublisher(fb.FbConfig.Filebeat.PublishAsync,
		fb.publisherChan, fb.registrar.Channel, b.Events)
	pub.Start()

	// Blocks progressing
	select {
	case <-fb.done:
	}

	return nil
}
Example #28
func (out *redisOutput) BulkPublish(
	signal outputs.Signaler,
	ts time.Time,
	events []common.MapStr,
) error {
	if !out.connected {
		logp.Debug("output_redis", "Droping pkt ...")
		return errors.New("Not connected")
	}

	command := "RPUSH"
	if out.DataType == RedisChannelType {
		command = "PUBLISH"
	}

	if len(events) == 1 { // single event
		event := events[0]
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Fail to convert the event to JSON: %s", err)
			outputs.SignalCompleted(signal)
			return err
		}

		_, err = out.Conn.Do(command, out.Index, string(jsonEvent))
		outputs.Signal(signal, err)
		out.onFail(err)
		return err
	}

	for _, event := range events {
		jsonEvent, err := json.Marshal(event)
		if err != nil {
			logp.Err("Fail to convert the event to JSON: %s", err)
			continue
		}
		err = out.Conn.Send(command, out.Index, string(jsonEvent))
		if err != nil {
			outputs.SignalFailed(signal, err)
			out.onFail(err)
			return err
		}
	}
	if err := out.Conn.Flush(); err != nil {
		outputs.Signal(signal, err)
		out.onFail(err)
		return err
	}
	_, err := out.Conn.Receive()
	outputs.Signal(signal, err)
	out.onFail(err)
	return err
}
Example #29
// Run allows the beater to be run as a beat.
func (fb *Filebeat) Run(b *beat.Beat) error {

	var err error

	// Init channels
	fb.publisherChan = make(chan []*input.FileEvent, 1)

	// Setup registrar to persist state
	fb.registrar, err = crawler.NewRegistrar(fb.FbConfig.Filebeat.RegistryFile)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	fb.crawler = &crawler.Crawler{
		Registrar: fb.registrar,
	}

	// Load the previous log file locations now, for use in prospector
	err = fb.registrar.LoadState()
	if err != nil {
		logp.Err("Error loading state: %v", err)
		return err
	}

	// Init and Start spooler: Harvesters dump events into the spooler.
	fb.spooler = NewSpooler(fb.FbConfig.Filebeat, fb.publisherChan)

	if err != nil {
		logp.Err("Could not init spooler: %v", err)
		return err
	}

	fb.registrar.Start()
	fb.spooler.Start()

	err = fb.crawler.Start(fb.FbConfig.Filebeat.Prospectors, fb.spooler.Channel)
	if err != nil {
		return err
	}

	// Publishes event to output
	fb.pub = newPublisher(fb.FbConfig.Filebeat.PublishAsync,
		fb.publisherChan, fb.registrar.Channel, b.Publisher.Connect())
	fb.pub.Start()

	// Blocks progressing
	<-fb.done

	return nil
}
Example #30
// mergeJSONFields writes the JSON fields in the event map,
// respecting the KeysUnderRoot and OverwriteKeys configuration options.
// If MessageKey is defined, the Text value from the event always
// takes precedence.
func mergeJSONFields(e *Event, event common.MapStr, jsonFields common.MapStr) {

	// The message key might have been modified by multiline
	if len(e.JSONConfig.MessageKey) > 0 && e.Text != nil {
		jsonFields[e.JSONConfig.MessageKey] = *e.Text
	}

	if e.JSONConfig.KeysUnderRoot {
		// Delete existing json key
		delete(event, "json")

		for k, v := range jsonFields {
			if e.JSONConfig.OverwriteKeys {
				if k == "@timestamp" {
					vstr, ok := v.(string)
					if !ok {
						logp.Err("JSON: Won't overwrite @timestamp because value is not string")
						event[reader.JsonErrorKey] = "@timestamp not overwritten (not string)"
						continue
					}

					// @timestamp must be of format RFC3339
					ts, err := time.Parse(time.RFC3339, vstr)
					if err != nil {
						logp.Err("JSON: Won't overwrite @timestamp because of parsing error: %v", err)
						event[reader.JsonErrorKey] = fmt.Sprintf("@timestamp not overwritten (parse error on %s)", vstr)
						continue
					}
					event[k] = common.Time(ts)
				} else if k == "type" {
					vstr, ok := v.(string)
					if !ok {
						logp.Err("JSON: Won't overwrite type because value is not string")
						event[reader.JsonErrorKey] = "type not overwritten (not string)"
						continue
					}
					if len(vstr) == 0 || vstr[0] == '_' {
						logp.Err("JSON: Won't overwrite type because value is empty or starts with an underscore")
						event[reader.JsonErrorKey] = fmt.Sprintf("type not overwritten (invalid value [%s])", vstr)
						continue
					}
					event[k] = vstr
				} else {
					event[k] = v
				}
			} else if _, exists := event[k]; !exists {
				event[k] = v
			}
		}
	}
}
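The @timestamp handling above hinges on two checks: the value must be a string and it must parse as RFC 3339. A small sketch of just that validation, with a hypothetical helper name:

package main

import (
	"fmt"
	"time"
)

// parseTimestamp validates a decoded JSON value as an RFC 3339 timestamp.
func parseTimestamp(v interface{}) (time.Time, error) {
	s, ok := v.(string)
	if !ok {
		return time.Time{}, fmt.Errorf("@timestamp is not a string: %#v", v)
	}
	ts, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return time.Time{}, fmt.Errorf("@timestamp parse error: %v", err)
	}
	return ts, nil
}

func main() {
	fmt.Println(parseTimestamp("2016-04-05T18:47:18.444Z")) // valid RFC 3339 value
	fmt.Println(parseTimestamp(12345))                      // not a string -> error
}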