Example 1
// newRangeDescriptorCache returns a new RangeDescriptorCache which
// uses the given RangeDescriptorDB as the underlying source of range
// descriptors.
func newRangeDescriptorCache(db RangeDescriptorDB, size int) *rangeDescriptorCache {
	return &rangeDescriptorCache{
		db: db,
		rangeCache: cache.NewOrderedCache(cache.Config{
			Policy: cache.CacheLRU,
			ShouldEvict: func(n int, k, v interface{}) bool {
				return n > size
			},
		}),
	}
}
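The constructor above bounds the cache by entry count: after each insertion, ShouldEvict is consulted and the least-recently-used entry is dropped while it keeps returning true, so the cache never holds more than size descriptors. Below is a minimal, self-contained sketch of that callback-driven eviction pattern; lruCache and its methods are hypothetical names, not the CockroachDB cache package.

package main

import (
	"container/list"
	"fmt"
)

// lruCache is a hypothetical, minimal LRU cache illustrating the
// ShouldEvict callback pattern: the policy is supplied by the caller
// and consulted against the least-recently-used entry after each Add.
type lruCache struct {
	ll          *list.List               // front = most recently used
	entries     map[string]*list.Element // key -> list element
	shouldEvict func(n int, k string, v interface{}) bool
}

type lruEntry struct {
	key   string
	value interface{}
}

func newLRUCache(shouldEvict func(n int, k string, v interface{}) bool) *lruCache {
	return &lruCache{
		ll:          list.New(),
		entries:     map[string]*list.Element{},
		shouldEvict: shouldEvict,
	}
}

func (c *lruCache) Add(key string, value interface{}) {
	if el, ok := c.entries[key]; ok {
		c.ll.MoveToFront(el)
		el.Value.(*lruEntry).value = value
	} else {
		c.entries[key] = c.ll.PushFront(&lruEntry{key, value})
	}
	// Evict from the back (least recently used) while the policy asks for it.
	for {
		back := c.ll.Back()
		if back == nil {
			return
		}
		ent := back.Value.(*lruEntry)
		if !c.shouldEvict(c.ll.Len(), ent.key, ent.value) {
			return
		}
		c.ll.Remove(back)
		delete(c.entries, ent.key)
	}
}

func main() {
	const size = 2
	c := newLRUCache(func(n int, _ string, _ interface{}) bool {
		return n > size // same count-based policy as newRangeDescriptorCache
	})
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // exceeds size: "a" is evicted
	_, ok := c.entries["a"]
	fmt.Println(ok, c.ll.Len()) // false 2
}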
Example 2
// newRangeDescriptorCache returns a new RangeDescriptorCache which
// uses the given RangeDescriptorDB as the underlying source of range
// descriptors.
func newRangeDescriptorCache(db RangeDescriptorDB, size int) *rangeDescriptorCache {
	rdc := &rangeDescriptorCache{db: db}
	rdc.rangeCache.cache = cache.NewOrderedCache(cache.Config{
		Policy: cache.CacheLRU,
		ShouldEvict: func(n int, _, _ interface{}) bool {
			return n > size
		},
	})
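	// Coalesce concurrent lookups for the same range into a single
	// in-flight request.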
	rdc.lookupRequests.inflight = make(map[lookupRequestKey]lookupRequest)
	return rdc
}
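Unlike the first variant, this constructor also initializes lookupRequests.inflight, a map that lets concurrent lookups for the same range share one database round trip instead of each issuing its own. A rough sketch of that coalescing idea, with hypothetical names (inflightGroup, Lookup) and plain channels in place of CockroachDB's internal types:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// call records one in-flight fetch; done is closed once v is valid.
type call struct {
	done chan struct{}
	v    interface{}
}

// inflightGroup coalesces concurrent lookups for the same key: the first
// caller performs the fetch, later callers wait for its result.
type inflightGroup struct {
	mu       sync.Mutex
	inflight map[string]*call
}

func (g *inflightGroup) Lookup(key string, fetch func() interface{}) interface{} {
	g.mu.Lock()
	if c, ok := g.inflight[key]; ok {
		// Another goroutine is already fetching this key: wait for it.
		g.mu.Unlock()
		<-c.done
		return c.v
	}
	c := &call{done: make(chan struct{})}
	g.inflight[key] = c
	g.mu.Unlock()

	c.v = fetch() // only the first caller hits the underlying source

	g.mu.Lock()
	delete(g.inflight, key)
	g.mu.Unlock()
	close(c.done) // wake all waiters
	return c.v
}

func main() {
	g := &inflightGroup{inflight: make(map[string]*call)}
	var fetches int32
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.Lookup("range-17", func() interface{} {
				atomic.AddInt32(&fetches, 1)
				time.Sleep(10 * time.Millisecond) // simulate a slow DB lookup
				return "descriptor"
			})
		}()
	}
	wg.Wait()
	fmt.Println("fetches:", atomic.LoadInt32(&fetches)) // typically 1
}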
Example 3
// newRaftEntryCache returns a new RaftEntryCache with the given
// maximum size in bytes.
func newRaftEntryCache(maxBytes uint64) *raftEntryCache {
	rec := &raftEntryCache{
		cache: cache.NewOrderedCache(cache.Config{Policy: cache.CacheLRU}),
	}
	// The raft entry cache mutex will be held when the ShouldEvict
	// and OnEvicted callbacks are invoked.
	//
	// On ShouldEvict, compare the total size of the cache in bytes to the
	// configured maxBytes. We also insist that at least one entry remains
	// in the cache to prevent the case where a very large entry isn't able
	// to be cached at all.
	rec.cache.Config.ShouldEvict = func(n int, k, v interface{}) bool {
		// Stop evicting once a single entry remains, even if it alone
		// exceeds maxBytes.
		return rec.bytes > maxBytes && n > 1
	}
	rec.cache.Config.OnEvicted = func(k, v interface{}) {
		ent := v.(raftpb.Entry)
		rec.bytes -= uint64(ent.Size())
	}

	return rec
}
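This constructor bounds the cache by bytes rather than entry count: rec.bytes grows as entries are added, the OnEvicted callback subtracts the size of each evicted entry, and ShouldEvict compares the running total against maxBytes while always sparing the last entry. A compact, hypothetical sketch of that accounting (byteBudgetCache and sizedEntry are invented names; plain sizes stand in for raftpb.Entry.Size()):

package main

import (
	"container/list"
	"fmt"
)

// byteBudgetCache sketches byte-based eviction accounting: a running
// byte total is kept in sync by an OnEvicted-style callback, mirroring
// how newRaftEntryCache tracks rec.bytes.
type byteBudgetCache struct {
	maxBytes uint64
	bytes    uint64     // running total, like rec.bytes above
	ll       *list.List // insertion order, oldest at the back
}

type sizedEntry struct {
	id   uint64
	size uint64
}

// onEvicted mirrors the OnEvicted callback: keep the byte count in sync.
func (c *byteBudgetCache) onEvicted(e sizedEntry) {
	c.bytes -= e.size
}

// shouldEvict mirrors ShouldEvict: over budget, but always leave at
// least one entry so a single very large entry can still be cached.
func (c *byteBudgetCache) shouldEvict() bool {
	return c.bytes > c.maxBytes && c.ll.Len() > 1
}

func (c *byteBudgetCache) Add(e sizedEntry) {
	c.ll.PushFront(e)
	c.bytes += e.size
	for c.shouldEvict() {
		back := c.ll.Back()
		c.onEvicted(back.Value.(sizedEntry))
		c.ll.Remove(back)
	}
}

func main() {
	c := &byteBudgetCache{maxBytes: 100, ll: list.New()}
	c.Add(sizedEntry{id: 1, size: 60})
	c.Add(sizedEntry{id: 2, size: 60}) // total 120 > 100: entry 1 evicted
	fmt.Println(c.ll.Len(), c.bytes)   // 1 60
	c.Add(sizedEntry{id: 3, size: 200}) // oversized entry
	fmt.Println(c.ll.Len(), c.bytes)    // 1 200: the last entry is never evicted
}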