Example #1
func NewDvExistCache(mb, ttl int) *SimpleCache {
	lc := SimpleCache{
		cache: freecache.NewCache(mb * 1024 * 1024),
		ttl:   ttl,
	}
	return &lc
}
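The SimpleCache type itself is not shown in this example. Below is a minimal sketch of what it presumably looks like, inferred only from the fields used above; the Set helper is an assumption, not code from the source project.

// Assumed definition of the SimpleCache wrapper used above.
type SimpleCache struct {
	cache *freecache.Cache // underlying freecache instance
	ttl   int              // default expiry in seconds
}

// A plausible Set helper that applies the default TTL (hypothetical).
func (c *SimpleCache) Set(key, val []byte) error {
	return c.cache.Set(key, val, c.ttl)
}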
Example #2
func (_ Test) FreeCache() {

	freeCache := freecache.NewCache(512 * 1024 * 1024)
	val := make([]byte, 10)
	freeCache.Set([]byte("Key"), val, 0)
	_, _ = freeCache.Get([]byte("Key"))
}
Example #3
func StartDispatcher(nworkers int) {
	// First, initialize the channel we are going to put the workers' work channels into.
	WorkerQueue = make(chan chan WorkRequest, nworkers)

	// Initialize the cache
	cache := freecache.NewCache(512 * 1024)

	// Now, create all of our workers.
	for i := 0; i < nworkers; i++ {
		fmt.Println("Starting worker", i+1)
		worker := NewWorker(i+1, WorkerQueue, cache)
		worker.Start()
	}

	go func() {
		for {
			select {
			case work := <-WorkQueue:
				fmt.Println("Received work requeust")
				go func() {
					worker := <-WorkerQueue

					fmt.Println("Dispatching work request")
					worker <- work
				}()
			}
		}
	}()
}
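The dispatcher above relies on types and globals defined elsewhere in its project (WorkRequest, WorkQueue, WorkerQueue, NewWorker). The following is a rough sketch of compatible declarations, assuming the github.com/coocood/freecache import; the field names and buffer sizes are assumptions.

// Hypothetical supporting declarations for the dispatcher example.
type WorkRequest struct {
	Key []byte // payload fields are assumed
}

var (
	WorkQueue   = make(chan WorkRequest, 100) // incoming work requests
	WorkerQueue chan chan WorkRequest         // pool of idle workers' channels
)

type Worker struct {
	ID    int
	Work  chan WorkRequest
	Queue chan chan WorkRequest
	Cache *freecache.Cache
}

func NewWorker(id int, queue chan chan WorkRequest, cache *freecache.Cache) Worker {
	return Worker{ID: id, Work: make(chan WorkRequest), Queue: queue, Cache: cache}
}

// Start announces the worker as idle, then processes one request at a time.
func (w Worker) Start() {
	go func() {
		for {
			w.Queue <- w.Work
			work := <-w.Work
			_, _ = w.Cache.Get(work.Key) // hypothetical use of the shared cache
		}
	}()
}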
Example #4
func BenchmarkParserSet(b *testing.B) {
	cache := freecache.NewCache(0) // freecache enforces a 512 KB minimum, so 0 yields the smallest cache
	logger := log.New(ioutil.Discard, "logger: ", log.Lshortfile)
	parser := Parser{logger: logger, writer: ioutil.Discard, cache: cache}
	line := []byte("SET key 0 value")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		parser.Parse(line)
	}
}
Example #5
func main() {
	debug.SetGCPercent(10)
	fmt.Println("Number of entries: ", entries)

	config := bigcache.Config{
		Shards:             256,
		LifeWindow:         100 * time.Minute,
		MaxEntriesInWindow: entries,
		MaxEntrySize:       200,
		Verbose:            true,
	}

	bigcache, _ := bigcache.NewBigCache(config)
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		bigcache.Set(key, val)
	}

	firstKey, _ := generateKeyValue(1, valueSize)
	checkFirstElement(bigcache.Get(firstKey))

	fmt.Println("GC pause for bigcache: ", gcPause())
	bigcache = nil
	gcPause()

	//------------------------------------------

	freeCache := freecache.NewCache(entries * 200) // allocate entries * 200 bytes
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		if err := freeCache.Set([]byte(key), val, 0); err != nil {
			fmt.Println("Error in set: ", err.Error())
		}
	}

	firstKey, _ = generateKeyValue(1, valueSize)
	checkFirstElement(freeCache.Get([]byte(firstKey)))

	if freeCache.OverwriteCount() != 0 {
		fmt.Println("Overwritten: ", freeCache.OverwriteCount())
	}
	fmt.Println("GC pause for freecache: ", gcPause())
	freeCache = nil
	gcPause()

	//------------------------------------------

	mapCache := make(map[string][]byte)
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		mapCache[key] = val
	}
	fmt.Println("GC pause for map: ", gcPause())

}
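This comparison depends on several helpers and constants that are not shown (entries, valueSize, generateKeyValue, checkFirstElement, gcPause). Below is a sketch of plausible implementations; the concrete values and function bodies are assumptions.

const (
	entries   = 20000000 // assumed entry count
	valueSize = 100      // assumed value size in bytes
)

// generateKeyValue returns a deterministic key and a valueSize-byte value (sketch).
func generateKeyValue(index int, valSize int) (string, []byte) {
	return fmt.Sprintf("key-%010d", index), make([]byte, valSize)
}

// checkFirstElement logs whether the lookup of the first key succeeded (sketch).
func checkFirstElement(value []byte, err error) {
	if err != nil {
		fmt.Println("Error in get: ", err.Error())
	} else if len(value) == 0 {
		fmt.Println("Value is empty")
	}
}

// gcPause forces a collection and reports the most recent GC pause duration.
func gcPause() time.Duration {
	runtime.GC()
	var stats debug.GCStats
	debug.ReadGCStats(&stats)
	return stats.Pause[0]
}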
Example #6
func BenchmarkFreeCacheGet(b *testing.B) {
	b.StopTimer()
	cache := freecache.NewCache(b.N * maxEntrySize)
	for i := 0; i < b.N; i++ {
		cache.Set([]byte(key(i)), value(), 0)
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		cache.Get([]byte(key(i)))
	}
}
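The freecache benchmarks in this listing share a few helpers (key, value, parallelKey) and a maxEntrySize constant that are defined elsewhere. Here is a plausible sketch; the sizes and key formats are assumptions.

const maxEntrySize = 256 // assumed per-entry budget used to size the caches

// key builds a deterministic key for entry i.
func key(i int) string {
	return fmt.Sprintf("key-%010d", i)
}

// value returns a fixed-size payload.
func value() []byte {
	return make([]byte, 100)
}

// parallelKey namespaces keys per goroutine so parallel writers do not collide.
func parallelKey(threadID int, counter int) string {
	return fmt.Sprintf("key-%04d-%06d", threadID, counter)
}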
Example #7
func NewCache(size int, expireTime time.Duration) *Cache {
	if size == 0 {
		return &Cache{}
	}
	return &Cache{
		cache:      freecache.NewCache(size),
		mutex:      &sync.Mutex{},
		data:       map[string]bool{},
		expireTime: expireTime,
	}
}
Example #8
func main() {
	runtime.GOMAXPROCS(8)
	// defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()

	logger := log.New(os.Stdout, "logger: ", log.Lshortfile)
	cache := freecache.NewCache(512 * 1024 * 1024)
	for index := 0; index < 128; index++ {
		cache.Set([]byte(fmt.Sprintf("key%d", index)), []byte("value"), 0)
	}
	server := mulu.NewServer(cache, logger)
	server.Start(":9022")
}
Example #9
func BenchmarkFreeCacheSetParallel(b *testing.B) {
	cache := freecache.NewCache(b.N * maxEntrySize)
	rand.Seed(time.Now().Unix())

	b.RunParallel(func(pb *testing.PB) {
		id := rand.Intn(1000)
		counter := 0
		for pb.Next() {
			cache.Set([]byte(parallelKey(id, counter)), value(), 0)
			counter = counter + 1
		}
	})
}
Example #10
func BenchmarkFreeCacheGetParallel(b *testing.B) {
	b.StopTimer()
	cache := freecache.NewCache(b.N * maxEntrySize)
	for i := 0; i < b.N; i++ {
		cache.Set([]byte(key(i)), value(), 0)
	}

	b.StartTimer()
	b.RunParallel(func(pb *testing.PB) {
		counter := 0
		for pb.Next() {
			cache.Get([]byte(key(counter)))
			counter = counter + 1
		}
	})
}
Example #11
func (_ Test) FreeCache2() {
	cacheSize := 100 * 1024 * 1024
	cache := freecache.NewCache(cacheSize)

	key := []byte("abc")
	val := []byte("def")
	expire := 60 // expire in 60 seconds
	cache.Set(key, val, expire)
	got, err := cache.Get(key)
	if err != nil {
		e.InfoLog.Println(err)
	} else {
		e.InfoLog.Println(string(got))
	}
	affected := cache.Del(key)
	e.InfoLog.Println("deleted key ", affected)
	e.InfoLog.Println("entry count ", cache.EntryCount())
}
Example #12
func NewServerSize(cachesize int, logger *log.Logger) *Server {
	return &Server{
		cache:  freecache.NewCache(cachesize),
		logger: logger,
	}
}
Example #13
func BenchmarkFreeCacheSet(b *testing.B) {
	cache := freecache.NewCache(b.N * maxEntrySize)
	for i := 0; i < b.N; i++ {
		cache.Set([]byte(key(i)), value(), 0)
	}
}
Example #14
	"github.com/coocood/freecache"

	"$GITHUB_URI/common/fs"
)

const (
	generalTimeout = 30 // seconds
	statsTimeout   = 10 // seconds
)

var (
	hitMetricsKey  = []string{"process", "cache", "hit"}
	missMetricsKey = []string{"process", "cache", "miss"}
)

var fileCache = freecache.NewCache(1024 * 16) // 16 KB requested; freecache enforces a 512 KB minimum

type entry struct {
	buf []byte
	err error
	ts  time.Time
}

func cachedReadFile(path string) ([]byte, error) {
	key := []byte(path)
	if v, err := fileCache.Get(key); err == nil {
		metrics.IncrCounter(hitMetricsKey, 1.0)
		return v, nil
	}

	buf, err := fs.ReadFile(path)
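The example is truncated at the cache miss. A rough sketch of how the miss path could continue, recording the miss metric and caching the file contents with a bounded TTL; the lines below are an assumption, not the original code.

	metrics.IncrCounter(missMetricsKey, 1.0)
	if err != nil {
		return nil, err
	}
	// Cache the contents so repeated reads within generalTimeout hit the cache.
	_ = fileCache.Set(key, buf, generalTimeout)
	return buf, nil
}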
Example #15
func NewServer(cacheSize int) (server *Server) {
	server = new(Server)
	server.cache = freecache.NewCache(cacheSize)
	return
}
Example #16
func main() {
	c := freecache.NewCache(512 * 1024)
	c.Set([]byte("a"), []byte("b"), 1024)
	v, _ := c.Get([]byte("a"))
	log.Println(string(v))
}
Example #17
// Initialize the Free in-process cache
func NewFree() {
	cacheSize := 100 * 1024 * 1024
	Free = freecache.NewCache(cacheSize)
}
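NewFree assigns to a package-level variable that is not shown here; it is presumably declared along these lines (declaration assumed):

// Free is the process-wide cache populated by NewFree.
var Free *freecache.Cache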
Example #18
func JoinConsumerGroupRealIp(realIp string, name string, topics []string, zookeeper []string,
	config *Config) (cg *ConsumerGroup, err error) {
	if name == "" {
		return nil, sarama.ConfigurationError("Empty consumergroup name")
	}
	if len(topics) == 0 {
		return nil, sarama.ConfigurationError("No topics provided")
	}
	if len(zookeeper) == 0 {
		return nil, EmptyZkAddrs
	}

	if config == nil {
		config = NewConfig()
	}
	config.ClientID = name
	if err = config.Validate(); err != nil {
		return
	}

	var kz *kazoo.Kazoo
	if kz, err = kazoo.NewKazoo(zookeeper, config.Zookeeper); err != nil {
		return
	}

	group := kz.Consumergroup(name)
	if config.Offsets.ResetOffsets {
		err = group.ResetOffsets()
		if err != nil {
			kz.Close()
			return
		}
	}

	instance := group.NewInstanceRealIp(realIp)

	cg = &ConsumerGroup{
		config: config,

		kazoo:    kz,
		group:    group,
		instance: instance,

		messages: make(chan *sarama.ConsumerMessage, config.ChannelBufferSize),
		errors:   make(chan *sarama.ConsumerError, config.ChannelBufferSize),
		stopper:  make(chan struct{}),
	}
	if config.NoDup {
		cg.cacher = freecache.NewCache(1 << 20) // TODO
	}

	// Register consumer group in zookeeper
	if exists, err := cg.group.Exists(); err != nil {
		_ = kz.Close()
		return nil, err
	} else if !exists {
		log.Debug("[%s/%s] consumer group in zk creating...", cg.group.Name, cg.shortID())

		if err := cg.group.Create(); err != nil {
			_ = kz.Close()
			return nil, err
		}
	}

	// Register itself with zookeeper: consumers/{group}/ids/{instanceId}
	// This will lead to consumer group rebalance
	if err := cg.instance.Register(topics); err != nil {
		return nil, err
	} else {
		log.Debug("[%s/%s] cg instance registered in zk for %+v", cg.group.Name, cg.shortID(), topics)
	}

	// Connect to the Kafka brokers
	brokers, err := cg.kazoo.BrokerList()
	if err != nil {
		return nil, err
	}

	if consumer, err := sarama.NewConsumer(brokers, cg.config.Config); err != nil {
		return nil, err
	} else {
		cg.consumer = consumer
	}

	offsetConfig := OffsetManagerConfig{CommitInterval: config.Offsets.CommitInterval}
	cg.offsetManager = NewZookeeperOffsetManager(cg, &offsetConfig)

	cg.wg.Add(1)
	go cg.consumeTopics(topics)

	return
}
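When config.NoDup is enabled, the 1 MiB freecache instance serves as a short de-duplication window for consumed messages. A rough sketch of how such a check might look on the consume path; the method name, key layout, and TTL below are assumptions rather than code from this project.

// isDup reports whether a message was already seen within the cache window (hypothetical helper).
func (cg *ConsumerGroup) isDup(msg *sarama.ConsumerMessage) bool {
	if cg.cacher == nil {
		return false
	}
	key := []byte(fmt.Sprintf("%s/%d/%d", msg.Topic, msg.Partition, msg.Offset))
	if _, err := cg.cacher.Get(key); err == nil {
		return true // duplicate delivery, e.g. after a rebalance
	}
	_ = cg.cacher.Set(key, nil, 60) // remember the message for 60 seconds
	return false
}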