Example no. 1
0
// Run starts the HTTP server and blocks until it is shut down.
// Closing the die channel triggers a graceful stop via manners.
func (wh *WorkerHttp) Run(wg *sync.WaitGroup, die chan bool) {
	defer wg.Done()

	server := manners.NewWithServer(&http.Server{
		Addr:    wh.addr,
		Handler: wh.getRouter(),
	})

	// Block until die is closed, then gracefully close the server.
	// A blocking receive replaces the original select+default+sleep
	// poll loop, which woke up once per second for no reason.
	go func(server *manners.GracefulServer) {
		<-die
		logger.Instance().
			Info("Stopping HTTP server")
		server.Close()
	}(server)

	logger.Instance().
		WithField("addr", server.Addr).
		Info("HTTP server started")

	// Error deliberately ignored: Close() makes ListenAndServe
	// return a non-nil error during a normal shutdown.
	_ = server.ListenAndServe()
}
Example no. 2
0
// PeriodicFlush periodically indexes buffered messages into the
// bleve index in batches. It runs until die is closed. A flush
// happens when the buffer reaches batchSize or intervalFlush has
// elapsed since the last successful flush.
func (b *Bleve) PeriodicFlush(die chan bool) {
	const sleepDuration = 3 * time.Second

	// Run periodic cleanup task alongside the flusher.
	go b.periodicCleanup(die)

	for {
		select {
		case <-die:
			return
		default:
		}

		b.mutexFlushMessages.Lock()

		// Buffer length is read under the lock: the original read it
		// unlocked, racing with concurrent writers of b.messages.
		nbMessages := len(b.messages)

		if nbMessages > 0 && (nbMessages >= b.batchSize || time.Since(b.lastFlush) > b.intervalFlush) {
			bvBatchIndex := b.index.NewBatch()

			for _, message := range b.messages {
				if err := bvBatchIndex.Index(uuid.NewV4().String(), message); err != nil {
					logger.Instance().
						WithError(err).
						Warning("Unable to add message to batch")
				}
			}

			if bvBatchIndex.Size() > 0 {
				if err := b.index.Batch(bvBatchIndex); err != nil {
					logger.Instance().
						WithError(err).
						Warning("Unable to batch index messages")
				} else {
					logger.Instance().
						WithField("nb_messages", bvBatchIndex.Size()).
						Info("Messages successfully indexed")

					// Reset the buffer only after a successful flush
					// so a failed batch is retried next iteration.
					b.lastFlush = time.Now()
					b.messages = []*storage.Message{}
				}
			}
		}

		b.mutexFlushMessages.Unlock()

		time.Sleep(sleepDuration)
	}
}
Example no. 3
0
// Run reads GELF messages from the UDP reader until die is closed.
// Each successfully read message is handed to storage in its own
// goroutine.
func (wr *WorkerReceiver) Run(wg *sync.WaitGroup, die chan bool) {
	defer wg.Done()

	logger.Instance().
		WithField("addr", wr.reader.Addr()).
		Info("Packet receiver started")

	for {
		select {
		case <-die:
			return
		default:
		}

		// Set read timeout to prevent routine lock
		if err := wr.reader.GetConnection().SetDeadline(time.Now().Add(time.Second)); err != nil {
			logger.Instance().
				WithError(err).
				Warning("Unable to set timeout")
		}

		message, err := wr.reader.ReadMessage()
		if err == nil {
			go wr.storage.HandleMessage(message)
			continue
		}

		// A timeout is expected: the deadline only exists so the
		// read unblocks and the die channel gets checked again.
		if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
			logger.Instance().
				WithError(err).
				Debug("Reached timeout, everything is ok")
		} else {
			logger.Instance().
				WithError(err).
				Warning("Unable to read message")
		}
	}
}
Example no. 4
0
// statusOk writes a JSON "ok" envelope containing data to w.
// On a marshalling failure the client now receives a 500 instead
// of the original silent empty 200 body.
func statusOk(w http.ResponseWriter, data interface{}) {
	rs := &responseOk{
		Status: "ok",
		Data:   data,
	}

	// Marshal before touching the ResponseWriter so an error path
	// can still send a proper status code.
	b, err := json.Marshal(rs)
	if err != nil {
		logger.Instance().
			WithError(err).
			WithField("status", "ok").
			Warning("Unable to marshal response")

		http.Error(w, "Internal server error", http.StatusInternalServerError)

		return
	}

	addHeaders(w)

	if _, err = w.Write(b); err != nil {
		logger.Instance().
			WithError(err).
			Warning("Unable to write response")
	}
}
Example no. 5
0
// statusError writes a JSON "error" envelope with message to w
// using the given HTTP status code.
func statusError(w http.ResponseWriter, message string, code int) {
	rs := &responseError{
		Status:  "error",
		Message: message,
	}

	// Marshal before touching the ResponseWriter so an error path
	// can still send the intended status code.
	b, err := json.Marshal(rs)
	if err != nil {
		// Consistent with statusOk: include the error and fix the
		// original message typo ("Unable marshal response").
		logger.Instance().
			WithError(err).
			WithField("status", "error").
			Warning("Unable to marshal response")

		// Fall back to plain text so the client still receives the
		// intended status code instead of an empty 200.
		http.Error(w, message, code)

		return
	}

	addHeaders(w)
	http.Error(w, string(b), code)
}
Example no. 6
0
// handleApiDump returns a single stored message by its id, taken
// from the msgId route variable.
func (wh *WorkerHttp) handleApiDump(w http.ResponseWriter, req *http.Request) {
	msgId := mux.Vars(req)["msgId"]
	if len(msgId) == 0 {
		statusError(w, "Message not found", http.StatusNotFound)

		return
	}

	doc, err := wh.storage.GetMessage(msgId)
	if err != nil {
		logger.Instance().
			WithError(err).
			Warning("Unable to find message")

		// Typo fix in the user-facing message: "occured" -> "occurred".
		statusError(w, "An error occurred while getting message", http.StatusInternalServerError)

		return
	}

	statusOk(w, doc)
}
Example no. 7
0
// actionRun wires up storage and workers and blocks until all
// workers have shut down after an interrupt signal.
func actionRun(c *cc.Context) error {
	_ = config.Instance(c.String("config"))

	var (
		wg          *sync.WaitGroup = &sync.WaitGroup{}
		die         chan bool       = make(chan bool)
		st          storage.Storage
		workersList []workers.Worker
	)

	// Listen for SIGINT. die must be closed exactly once: the
	// original `for range ch { close(die) }` panicked on a second
	// interrupt (close of an already-closed channel).
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	go func() {
		<-ch
		logger.Instance().Info("Caught interrupt signal")

		// Stop relaying further signals, then close all workers.
		signal.Stop(ch)
		close(die)
	}()

	st = elastic.NewElasticStorage()
	go st.PeriodicFlush(die)

	workersList = append(workersList, workers.NewWorkerHttp(st))
	workersList = append(workersList, workers.NewWorkerReceiver(st))

	wg.Add(len(workersList))

	for _, w := range workersList {
		go w.Run(wg, die)
	}

	wg.Wait()

	return nil
}
Example no. 8
0
// NewBleveStorage returns a storage object backed by a bleve index,
// creating or opening the index at the configured path. Missing
// mandatory configuration terminates the process.
func NewBleveStorage() *Bleve {
	datapath, err := config.Instance().String("bleve", "datapath")
	if err != nil {
		logger.Instance().
			WithError(err).
			Error("Path to bleve index is not provided")

		os.Exit(1)
	}

	dirname := path.Dir(datapath)
	if !cli.FileExists(dirname) {
		// No WithError here: this branch is a plain existence check,
		// and the original logged a guaranteed-nil error value.
		logger.Instance().
			WithField("directory", dirname).
			Error("Directory with bleve index doesn't exist")

		os.Exit(1)
	}

	batchSize, err := config.Instance().Int("bleve", "batch_size")
	if err != nil {
		// Default batch size when not configured.
		batchSize = 10
	}

	// parseDuration reads a duration from the bleve config section,
	// falling back to def (which must itself parse) on any error.
	parseDuration := func(key, def string) time.Duration {
		str, cfgErr := config.Instance().String("bleve", key)
		if cfgErr != nil {
			str = def
		}

		d, parseErr := time.ParseDuration(str)
		if parseErr != nil {
			d, _ = time.ParseDuration(def)
		}

		return d
	}

	intervalCleanup := parseDuration("interval_cleanup", "720h")
	intervalFlush := parseDuration("interval_flush", "1s")

	var index bv.Index

	if !cli.FileExists(datapath) {
		index, err = bv.New(datapath, getIndexMapping())
		if err != nil {
			logger.Instance().
				WithError(err).
				Error("Unable to create bleve index")

			os.Exit(1)
		}

		logger.Instance().
			Debug("New bleve index created")
	} else {
		index, err = bv.Open(datapath)
		if err != nil {
			logger.Instance().
				WithError(err).
				Error("Unable to open bleve index")

			os.Exit(1)
		}

		logger.Instance().
			Debug("Bleve index successfully opened")
	}

	return &Bleve{
		batchSize:          batchSize,
		index:              index,
		messages:           []*storage.Message{},
		mutexHandleMessage: &sync.RWMutex{},
		mutexFlushMessages: &sync.RWMutex{},
		// Stored negated so cleanup can compute "now + interval" as
		// a timestamp in the past (see periodicCleanup).
		intervalCleanup: -intervalCleanup,
		intervalFlush:   intervalFlush,
		lastFlush:       time.Now(),
	}
}
Example no. 9
0
// periodicCleanup periodically deletes messages older than the
// configured retention interval from the index. It runs until die
// is closed, and closes the index on exit.
func (b *Bleve) periodicCleanup(die chan bool) {
	var (
		bvBatchDelete *bv.Batch
		bvRequest     *bv.SearchRequest
		bvResults     *bv.SearchResult
		bvCleaningNow bool
		bvNbCleaned   int
		err           error
		till          string
		limit         int           = 20
		offset        int           = 0
		sleepDuration time.Duration = 3 * time.Second
	)

	// This goroutine owns index shutdown: closing here guarantees it
	// happens after the final cleanup pass.
	defer func() {
		err = b.index.Close()
		if err != nil {
			logger.Instance().
				WithError(err).
				Warning("Unable to close Bleve index")
		}
	}()

	for {
		select {
		case <-die:
			return
		default:
		}

		offset = 0
		bvNbCleaned = 0
		bvCleaningNow = true
		// Anything timestamped before now+intervalCleanup is obsolete
		// (intervalCleanup is stored negated, so this is in the past).
		till = time.Now().Add(b.intervalCleanup).Format(time.RFC3339)
		bvQuery := bv.NewDateRangeQuery(nil, &till)
		bvQuery.FieldVal = "timestamp"

		for bvCleaningNow {
			bvRequest = bv.NewSearchRequestOptions(bvQuery, limit, offset, false)
			bvResults, err = b.index.Search(bvRequest)

			if err != nil {
				logger.Instance().
					WithError(err).
					Warning("Unable to get obsolete messages from index")

				bvCleaningNow = false
				continue
			}

			if bvResults.Hits.Len() == 0 {
				bvCleaningNow = false
				continue
			}

			// List of documents to be deleted
			bvBatchDelete = b.index.NewBatch()
			for _, hit := range bvResults.Hits {
				bvBatchDelete.Delete(hit.ID)
			}

			// Batch delete them
			err = b.index.Batch(bvBatchDelete)
			if err != nil {
				logger.Instance().
					WithError(err).
					Warning("Unable to delete obsolete messages from index")

				bvCleaningNow = false
				continue
			}

			bvNbCleaned += bvBatchDelete.Size()
			// NOTE(review): advancing the offset after deleting the
			// current page likely skips every other page (remaining
			// matches shift down once a page is deleted). Kept as-is;
			// confirm against bleve search semantics before changing.
			offset += limit
		}

		if bvNbCleaned > 0 {
			logger.Instance().
				WithField("nb_messages", bvNbCleaned).
				// Info, not Infof: the message has no format verbs.
				Info("Obsolete messages were deleted from index")
		}

		time.Sleep(sleepDuration)
	}
}
Example no. 10
0
// NewElasticStorage returns a storage object backed by an elastic
// cluster (the original comment wrongly said "bleve"). Missing
// mandatory configuration terminates the process.
func NewElasticStorage() *Elastic {
	// mustString reads a mandatory value from the elastic config
	// section, terminating the process when it is absent.
	mustString := func(key, errMsg string) string {
		value, err := config.Instance().String("elastic", key)
		if err != nil {
			logger.Instance().
				WithError(err).
				Error(errMsg)

			os.Exit(1)
		}

		return value
	}

	url := mustString("url", "Elastic url is not provided")

	client, err := es.NewClient(
		es.SetURL(url),
		es.SetSniff(false),
		es.SetHealthcheck(false),
		es.SetMaxRetries(0),
	)
	if err != nil {
		logger.Instance().
			WithError(err).
			Error("Unable to create client to elastic")

		os.Exit(1)
	}

	indexName := mustString("index", "Index name is not provided")
	typeName := mustString("type", "Type name is not provided")

	batchSize, err := config.Instance().Int("elastic", "batch_size")
	if err != nil {
		// Default batch size when not configured.
		batchSize = 10
	}

	// parseDuration reads a duration from the elastic config section,
	// falling back to def (which must itself parse) on any error.
	parseDuration := func(key, def string) time.Duration {
		str, cfgErr := config.Instance().String("elastic", key)
		if cfgErr != nil {
			str = def
		}

		d, parseErr := time.ParseDuration(str)
		if parseErr != nil {
			d, _ = time.ParseDuration(def)
		}

		return d
	}

	intervalCleanup := parseDuration("interval_cleanup", "720h")
	intervalFlush := parseDuration("interval_flush", "1s")

	return &Elastic{
		batchSize:          batchSize,
		indexName:          indexName,
		typeName:           typeName,
		client:             client,
		messages:           []*storage.Message{},
		mutexHandleMessage: &sync.RWMutex{},
		mutexFlushMessages: &sync.RWMutex{},
		ttl:                int64(intervalCleanup.Seconds() * 1000), // TTL is in milliseconds
		intervalFlush:      intervalFlush,
		lastFlush:          time.Now(),
	}
}
Example no. 11
0
// PeriodicFlush periodically indexes buffered messages into elastic
// in bulk. It runs until die is closed. A flush happens when the
// buffer reaches batchSize or intervalFlush has elapsed since the
// last successful flush.
func (e *Elastic) PeriodicFlush(die chan bool) {
	const sleepDuration = 3 * time.Second

	for {
		select {
		case <-die:
			return
		default:
		}

		e.mutexFlushMessages.Lock()

		// Buffer length is read under the lock: the original read it
		// unlocked, racing with concurrent writers of e.messages.
		nbMessages := len(e.messages)

		if nbMessages > 0 && (nbMessages >= e.batchSize || time.Since(e.lastFlush) > e.intervalFlush) {
			esBulk := e.client.Bulk()

			for _, message := range e.messages {
				esBulk.Add(es.NewBulkIndexRequest().
					Index(e.indexName).
					Type(e.typeName).
					Id(uuid.NewV4().String()).
					Ttl(e.ttl).
					Doc(message))
			}

			if esBulk.NumberOfActions() > 0 {
				esResponse, err := esBulk.Do()

				if err != nil {
					logger.Instance().
						WithError(err).
						Warning("Unable to batch index messages")
				} else {
					nbCreated := len(esResponse.Indexed())
					if nbCreated != nbMessages {
						logger.Instance().
							WithField("nb_messages", nbMessages).
							WithField("nb_created", nbCreated).
							Warning("Not all messages were indexed")
					} else {
						logger.Instance().
							WithField("nb_messages", nbMessages).
							Info("Messages successfully indexed")
					}

					// Reset the buffer only after a successful bulk
					// call so failures are retried next iteration.
					e.lastFlush = time.Now()
					e.messages = []*storage.Message{}
				}
			}
		}

		e.mutexFlushMessages.Unlock()

		time.Sleep(sleepDuration)
	}
}
Example no. 12
0
// handleApiSearch validates a JSON search request from the request
// body, clamps limit/offset to the worker's bounds, and returns the
// matching messages.
func (wh *WorkerHttp) handleApiSearch(w http.ResponseWriter, req *http.Request) {
	requestBody, err := ioutil.ReadAll(req.Body)
	if err != nil {
		logger.Instance().
			WithError(err).
			Warning("Unable to read request body")

		// Message fix: a read failure is not the same thing as an
		// empty body (the original said "Request body is empty").
		statusError(w, "Unable to read request body", http.StatusBadRequest)

		return
	}

	var (
		q             storage.SearchQuery
		requestString string = string(requestBody)
	)

	err = json.Unmarshal(requestBody, &q)
	if err != nil {
		logger.Instance().
			WithError(err).
			WithField("body", requestString).
			Warning("Unable to parse JSON")

		statusError(w, "Provided JSON is invalid", http.StatusBadRequest)

		return
	}

	q.Query = strings.TrimSpace(q.Query)

	if len(q.Query) > 0 {
		err = wh.storage.ValidateQuery(q.Query)
		if err != nil {
			logger.Instance().
				WithError(err).
				WithField("query", q.Query).
				Warning("Unable to validate JSON query")

			statusError(w, "Provided query is invalid", http.StatusBadRequest)

			return
		}
	}

	// Clamp limit to (0, maxPerPage].
	if q.Limit <= 0 || q.Limit > wh.maxPerPage {
		q.Limit = wh.maxPerPage
	}

	// Clamp offset so offset+limit never exceeds maxResults.
	if q.Offset < 0 {
		q.Offset = 0
	} else if q.Offset+q.Limit > wh.maxResults {
		q.Offset = wh.maxResults - q.Limit
	}

	// Search for messages
	searchResponse, err := wh.storage.GetMessages(&q)
	if err != nil {
		logger.Instance().
			WithError(err).
			WithField("body", requestString).
			Error("Unable to search messages")

		// Typo fix in the user-facing message: "occured" -> "occurred".
		statusError(w, "An error occurred while searching messages", http.StatusInternalServerError)

		return
	}

	statusOk(w, searchResponse)
}