Example #1
// fetchByFetchlist fetches the messages in the supplied fetch list and sends them to the message channel
func (p *messagePartition) fetchByFetchlist(fetchList *indexList, req *store.FetchRequest) error {
	return fetchList.mapWithPredicate(func(index *index, _ int) error {
		if req.IsDone() {
			return store.ErrRequestDone
		}

		filename := p.composeMsgFilenameForPosition(uint64(index.fileID))
		file, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer file.Close()

		msg := make([]byte, index.size)
		_, err = file.ReadAt(msg, int64(index.offset))
		if err != nil {
			logger.WithFields(log.Fields{
				"err":    err,
				"offset": index.offset,
			}).Error("Error ReadAt")
			return err
		}

		req.Push(index.id, msg)
		return nil
	})
}
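The essential pattern in this example is the positional read: each index entry carries a file ID, an offset, and a size, which is all ReadAt needs to slice one message out of the message file. Below is a minimal, self-contained sketch of that pattern; the indexEntry type, its fields, and the file name are illustrative stand-ins rather than the real guble types.

package main

import (
	"fmt"
	"os"
)

// indexEntry is a hypothetical stand-in for the index records used above:
// just an offset and a size inside one message file.
type indexEntry struct {
	offset uint64
	size   uint32
}

// readMessageAt opens the message file and reads exactly one message with
// ReadAt, the same way fetchByFetchlist does for every entry in the fetch list.
func readMessageAt(filename string, e indexEntry) ([]byte, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	msg := make([]byte, e.size)
	if _, err := file.ReadAt(msg, int64(e.offset)); err != nil {
		return nil, err
	}
	return msg, nil
}

func main() {
	// Hypothetical file name and layout: a 5-byte message stored at offset 0.
	msg, err := readMessageAt("messages-000.msg", indexEntry{offset: 0, size: 5})
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("%s\n", msg)
}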
Example #2
// Contains returns true if req.StartID falls between the entry's min and max,
// meaning the requested messages may be found in this range
func (entry *cacheEntry) Contains(req *store.FetchRequest) bool {
	if req.StartID == 0 {
		req.Direction = 1
		return true
	}
	if req.Direction >= 0 {
		return req.StartID >= entry.min && req.StartID <= entry.max
	}
	return req.StartID >= entry.min
}
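The direction handling is worth spelling out: a forward request (Direction >= 0) must start inside the entry's [min, max] range, while a backward request only needs to start at or above min, since it may walk back into this file from a later one. Here is a small stand-alone sketch of that check, using a hypothetical rangeEntry type instead of the real cacheEntry.

package main

import "fmt"

// rangeEntry is a hypothetical stand-in for a cacheEntry's min/max message IDs.
type rangeEntry struct {
	min, max uint64
}

// contains mirrors the logic above: forward requests must start inside
// [min, max]; backward requests only need to start at or above min.
func (e rangeEntry) contains(startID uint64, direction int) bool {
	if startID == 0 {
		return true // a zero start ID means "fetch from the beginning"
	}
	if direction >= 0 {
		return startID >= e.min && startID <= e.max
	}
	return startID >= e.min
}

func main() {
	entry := rangeEntry{min: 100, max: 200}
	fmt.Println(entry.contains(150, 1))  // true: inside the range
	fmt.Println(entry.contains(250, 1))  // false: past max on a forward fetch
	fmt.Println(entry.contains(250, -1)) // true: a backward fetch may reach into this file
}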
Example #3
// Fetch fetches a set of messages
func (p *messagePartition) Fetch(req *store.FetchRequest) {
	log.WithField("fetchRequest", *req).Debug("Fetching")

	go func() {
		fetchList, err := p.calculateFetchList(req)

		if err != nil {
			log.WithField("err", err).Error("Error calculating list")
			req.ErrorC <- err
			return
		}
		req.StartC <- fetchList.len()

		err = p.fetchByFetchlist(fetchList, req)

		if err != nil {
			log.WithField("err", err).Error("Error calculating list")
			req.Error(err)
			return
		}
		req.Done()
	}()
}
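Fetch runs asynchronously: the goroutine first announces how many messages will follow on StartC, then streams the messages, and reports failures on ErrorC. The sketch below imitates that channel protocol with a simplified, hypothetical fetchRequest struct; the real store.FetchRequest has more fields, and the MessageC name here is an assumption for illustration only.

package main

import "fmt"

// fetchRequest is a hypothetical, stripped-down version of a fetch request:
// StartC reports how many messages will follow, MessageC delivers them, and
// ErrorC would carry a failure.
type fetchRequest struct {
	StartC   chan int
	MessageC chan []byte
	ErrorC   chan error
}

// fetchAsync mimics the pattern above: do the work in a goroutine and report
// progress over the request's channels.
func fetchAsync(req *fetchRequest, messages [][]byte) {
	go func() {
		req.StartC <- len(messages)
		for _, m := range messages {
			req.MessageC <- m
		}
		close(req.MessageC)
	}()
}

func main() {
	req := &fetchRequest{
		StartC:   make(chan int, 1),
		MessageC: make(chan []byte, 4),
		ErrorC:   make(chan error, 1),
	}
	fetchAsync(req, [][]byte{[]byte("hello"), []byte("world")})

	fmt.Println("expecting", <-req.StartC, "messages")
	for m := range req.MessageC {
		fmt.Printf("%s\n", m)
	}
}

The buffered channels keep the sketch from blocking; in the real store the consumer side of the FetchRequest is responsible for draining these channels.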
Example #4
// calculateFetchList returns a list of fetchEntry records for all messages in the fetch request.
func (p *messagePartition) calculateFetchList(req *store.FetchRequest) (*indexList, error) {
	if req.Direction == 0 {
		req.Direction = 1
	}

	potentialEntries := newIndexList(0)

	// reading from IndexFiles
	// TODO: fix prev when the EndID logic is done
	// prev specifies whether we found anything in the previous list, in which
	// case the items may continue in the next list
	prev := false

	p.fileCache.RLock()
	defer p.fileCache.RUnlock()

	for i, fce := range p.fileCache.entries {
		if fce.Contains(req) || (prev && potentialEntries.len() < req.Count) {
			prev = true

			l, err := p.loadIndexList(i)
			if err != nil {
				logger.WithError(err).Info("Error loading idx file in memory")
				return nil, err
			}

			potentialEntries.insert(l.extract(req).toSliceArray()...)
		} else {
			prev = false
		}
	}

	// Read from the current in-memory list (the idx file whose size is still smaller than MESSAGE_PER_FILE)
	if p.list.contains(req.StartID) || (prev && potentialEntries.len() < req.Count) {
		potentialEntries.insert(p.list.extract(req).toSliceArray()...)
	}

	// At this point potentialEntries contains potential IDs from the files and
	// from memory; from these, select only Count entries.
	fetchList := potentialEntries.extract(req)

	return fetchList, nil
}
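The selection happens in two passes: every index file (and the in-memory list) that could overlap the request contributes candidate entries, and only then is the final list cut down to the requested Count. Below is a self-contained sketch of that two-pass idea, using plain uint64 IDs and simple slices instead of the real index types.

package main

import (
	"fmt"
	"sort"
)

// collectCandidates mirrors the approach above: first gather every ID that
// could satisfy the request from all chunks that overlap it, then sort the
// candidates and cut the result down to at most count entries. The chunk
// layout is illustrative, not the real guble on-disk format.
func collectCandidates(chunks [][]uint64, startID uint64, count int) []uint64 {
	potential := []uint64{}
	for _, chunk := range chunks {
		for _, id := range chunk {
			if id >= startID {
				potential = append(potential, id)
			}
		}
	}
	sort.Slice(potential, func(i, j int) bool { return potential[i] < potential[j] })
	if len(potential) > count {
		potential = potential[:count]
	}
	return potential
}

func main() {
	// Two "index files" and one in-memory list, as in calculateFetchList.
	chunks := [][]uint64{{1, 2, 3}, {4, 5, 6}, {7, 8}}
	fmt.Println(collectCandidates(chunks, 3, 4)) // [3 4 5 6]
}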