// New creates and returns a memory based store.
func New(capacity int64) chainstore.Store {
	memStore := &memStore{
		data: make(map[string][]byte, 1000),
	}
	store := lrumgr.New(capacity, memStore)
	return store
}
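// memStore, referenced by New above, is not defined in this excerpt. A minimal
// sketch of a map-backed implementation of the chainstore.Store interface
// (Open/Close/Put/Get/Del, as used throughout these examples) might look like
// the following; the mutex and the nil-on-miss Get are assumptions, not the
// library's actual code:
type memStore struct {
	mu   sync.Mutex
	data map[string][]byte
}

func (s *memStore) Open() error  { return nil }
func (s *memStore) Close() error { return nil }

func (s *memStore) Put(key string, val []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[key] = val
	return nil
}

func (s *memStore) Get(key string) ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.data[key], nil // a missing key yields an empty value, not an error
}

func (s *memStore) Del(key string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.data, key)
	return nil
}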
func TestLRUManager(t *testing.T) {
	var err error
	var store chainstore.Store
	var lru *lrumgr.LruManager
	var capacity int64 = 20

	Convey("LRUManager", t, func() {
		storeDir := chainstore.TempDir()
		store = filestore.New(storeDir, 0755)
		lru = lrumgr.New(capacity, store) // based on 10% cushion

		lru.Put("peter", []byte{1, 2, 3})
		lru.Put("jeff", []byte{4})
		lru.Put("julia", []byte{5, 6, 7, 8, 9, 10})
		lru.Put("janet", []byte{11, 12, 13})
		lru.Put("ted", []byte{14, 15, 16, 17, 18})

		remaining := capacity - 18
		So(lru.Capacity(), ShouldEqual, remaining)

		// evictions free more than agnes consumes: net capacity change is +4
		remaining = remaining + 4
		err = lru.Put("agnes", []byte{20, 21, 22, 23, 24, 25})
		So(lru.Capacity(), ShouldEqual, remaining)
		So(err, ShouldEqual, nil)

		var b []byte

		// has been evicted..
		b, err = lru.Get("peter")
		if err != nil {
			t.Error(err)
			t.Fail()
		}
		if len(b) != 0 {
			t.Error("byte arrays do not match")
			t.Fail()
		}

		// exists
		b, err = lru.Get("janet")
		if err != nil {
			t.Error(err)
			t.Fail()
		}
		if !reflect.DeepEqual(b, []byte{11, 12, 13}) {
			t.Error("byte arrays do not match")
			t.Fail()
		}
	})
}
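// The capacity arithmetic above (remaining goes from 2 to 6 after putting the
// 6-byte "agnes" value) is consistent with cushioned eviction: evict
// least-recently-used entries until the incoming value fits below capacity
// minus a 10% cushion. The helper below is an illustrative sketch of that
// accounting, not lrumgr's actual implementation:
func evictFor(used, capacity, incoming int64, evictLRU func() int64) int64 {
	cushion := capacity / 10
	for used+incoming > capacity-cushion {
		used -= evictLRU() // evictLRU removes the oldest entry and returns its size
	}
	return used + incoming
}

// With capacity=20 and used=18, putting the 6-byte "agnes" evicts "peter" (3),
// "jeff" (1), and "julia" (6), leaving used=14 and 6 bytes of free capacity,
// which is what the test asserts.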
func TestAsyncChain(t *testing.T) {
	var ms, fs, bs, chain chainstore.Store
	var err error

	logger := log.New(os.Stdout, "", log.LstdFlags)

	Convey("Async chain", t, func() {
		storeDir := chainstore.TempDir()
		err = nil

		ms = memstore.New(100)
		fs = filestore.New(storeDir+"/filestore", 0755)
		bs = boltstore.New(storeDir+"/boltstore/bolt.db", "test")

		chain = chainstore.New(
			logmgr.New(logger, ""),
			ms,
			chainstore.Async(
				logmgr.New(logger, "async"),
				metricsmgr.New("chaintest", nil,
					fs,
					lrumgr.New(100, bs),
				),
			),
		)

		err = chain.Open()
		So(err, ShouldEqual, nil)

		Convey("Put/Get/Del", func() {
			v := []byte("value")
			err = chain.Put("k", v)
			So(err, ShouldEqual, nil)

			val, err := chain.Get("k")
			So(err, ShouldEqual, nil)
			So(val, ShouldResemble, v)

			val, err = ms.Get("k")
			So(err, ShouldEqual, nil)
			So(val, ShouldResemble, v)

			time.Sleep(10 * time.Millisecond) // wait for async operation..

			val, err = fs.Get("k")
			So(err, ShouldEqual, nil)
			So(val, ShouldResemble, v)

			val, err = bs.Get("k")
			So(err, ShouldEqual, nil)
			So(val, ShouldResemble, v)
		})
	})
}
func (cf *Config) GetChainstore() (chainstore.Store, error) {
	// chainstore.DefaultTimeout = 60 * time.Second // TODO: ....

	// First, reset cache storage path
	err := filepath.Walk(
		cf.Chainstore.Path,
		func(path string, info os.FileInfo, err error) error {
			if cf.Chainstore.Path == path {
				return nil // skip the root
			}
			if err = os.RemoveAll(path); err != nil {
				return fmt.Errorf("Failed to remove or clean the directory: %s, because: %s", path, err)
			}
			return nil
		},
	)
	if err != nil {
		return nil, err
	}

	// Build the stores and setup the chain
	memStore := metricsmgr.New("fn.store.mem",
		memstore.New(cf.Chainstore.MemCacheSize*1024*1024),
	)

	diskStore := lrumgr.New(cf.Chainstore.DiskCacheSize*1024*1024,
		metricsmgr.New("fn.store.bolt",
			boltstore.New(cf.Chainstore.Path+"store.db", "imgry"),
		),
	)

	var store chainstore.Store

	if cf.Chainstore.S3AccessKey != "" && cf.Chainstore.S3SecretKey != "" {
		s3Store := metricsmgr.New("fn.store.s3",
			s3store.New(cf.Chainstore.S3Bucket, cf.Chainstore.S3AccessKey, cf.Chainstore.S3SecretKey),
		)
		// store = chainstore.New(memStore, chainstore.Async(diskStore, s3Store))
		store = chainstore.New(memStore, chainstore.Async(nil, s3Store))
	} else {
		store = chainstore.New(memStore, chainstore.Async(nil, diskStore))
	}

	if err := store.Open(); err != nil {
		return nil, err
	}

	return store, nil
}
func (cf *Config) GetChainstore() (chainstore.Store, error) {
	// First, reset cache storage path
	err := filepath.Walk(
		cf.Chainstore.Path,
		func(path string, info os.FileInfo, err error) error {
			if cf.Chainstore.Path == path {
				return nil // skip the root
			}
			if err = os.RemoveAll(path); err != nil {
				return fmt.Errorf("Failed to remove or clean the directory: %s, because: %s", path, err)
			}
			return nil
		},
	)
	if err != nil {
		return nil, err
	}

	// TODO: impl another kind of lrumgr (or option) to be based on number of keys, not filesize
	// at which point, we can add a method called .Keys() that will return the keys
	// matching some query from a Store, and we can seed the LRU this way, and keep
	// the bolt data..

	// Build the stores and setup the chain
	memStore := memstore.New(cf.Chainstore.MemCacheSize * 1024 * 1024)

	diskStore := lrumgr.New(cf.Chainstore.DiskCacheSize*1024*1024,
		metricsmgr.New("fn.store.bolt", nil,
			levelstore.New(cf.Chainstore.Path),
		),
	)

	var store chainstore.Store

	if cf.Chainstore.S3AccessKey != "" && cf.Chainstore.S3SecretKey != "" {
		s3Store := metricsmgr.New("fn.store.s3", nil,
			s3store.New(cf.Chainstore.S3Bucket, cf.Chainstore.S3AccessKey, cf.Chainstore.S3SecretKey),
		)
		store = chainstore.New(memStore, chainstore.Async(diskStore, s3Store))
	} else {
		store = chainstore.New(memStore, chainstore.Async(diskStore))
	}

	if err := store.Open(); err != nil {
		return nil, err
	}

	return store, nil
}
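// The Config type used by both GetChainstore variants above is not shown in
// this excerpt. A plausible shape for the fields they reference (hypothetical,
// for illustration only):
type Config struct {
	Chainstore struct {
		Path          string // cache directory, also the parent of the bolt/leveldb files
		MemCacheSize  int64  // in MB; scaled by 1024*1024 above
		DiskCacheSize int64  // in MB; scaled by 1024*1024 above
		S3Bucket      string
		S3AccessKey   string
		S3SecretKey   string
	}
}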
func New(capacity int64) *lrumgr.LruManager {
	memStore := &memStore{data: make(map[string][]byte, 1000)}
	store := lrumgr.New(capacity, memStore)
	return store
}
func main() {
	diskStore := lrumgr.New(500*1024*1024, // 500MB of working data
		metricsmgr.New("chainstore.ex.bolt", nil,
			boltstore.New("/tmp/store.db", "myBucket"),
		),
	)

	remoteStore := metricsmgr.New("chainstore.ex.s3", nil,
		// NOTE: you'll have to supply your own keys in order for this example to work properly
		s3store.New("myBucket", "access-key", "secret-key"),
	)

	dataStore := chainstore.New(diskStore, chainstore.Async(remoteStore))

	// OR.. define inline. Except, I wanted to show store independence & state.
	/*
		dataStore := chainstore.New(
			lrumgr.New(500*1024*1024, // 500MB of working data
				metricsmgr.New("chainstore.ex.bolt", nil,
					boltstore.New("/tmp/store.db", "myBucket"),
				),
			),
			chainstore.Async( // calls stores in the async chain in a goroutine
				metricsmgr.New("chainstore.ex.s3", nil,
					// NOTE: you'll have to supply your own keys in order for this example to work properly
					s3store.New("myBucket", "access-key", "secret-key"),
				),
			),
		)
	*/

	err := dataStore.Open()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Since we've used the metricsManager above (metricsmgr), any calls to the boltstore
	// and s3store will be measured. Next is to send metrics to librato, graphite, influxdb,
	// whatever.. via github.com/rcrowley/go-metrics
	// go librato.Librato(metrics.DefaultRegistry, 10e9, ...)

	//--

	// Save the object in the chain. It will be Put() synchronously into diskStore,
	// the boltdb engine, and then immediately dispatch background Put()'s to the
	// other stores down the chain, in this case S3.
	fmt.Println("Example 1...")
	obj := []byte{1, 2, 3}
	dataStore.Put("k", obj)
	fmt.Println("Put 'k':", obj, "in the chain")

	v, _ := dataStore.Get("k")
	fmt.Println("Grabbing 'k' from the chain:", v) // => [1 2 3]

	// For demonstration, let's grab the key directly from the store instead of
	// through the chain. This is pretty much the same as above, as the chain's Get()
	// stops once it finds the object.
	v, _ = diskStore.Get("k")
	fmt.Println("Grabbing 'k' directly from boltdb:", v) // => [1 2 3]

	// Let's pause for a moment and then try to retrieve the value from the s3 store
	time.Sleep(1 * time.Second)

	// Grab the object from s3
	v, _ = remoteStore.Get("k")
	fmt.Println("Grabbing 'k' directly from s3:", v) // => [1 2 3]

	// Delete the object from everywhere
	dataStore.Del("k")
	time.Sleep(1 * time.Second) // pause for s3 demo
	v, _ = dataStore.Get("k")
	fmt.Println("Deleted 'k' from the chain (all stores). Get(k) returns:", v)

	//--

	// Another interesting behavior of the chain: when doing a Get(), it goes down
	// the entire chain looking for the value, and when found, it will Put() that
	// object back up the chain for subsequent retrievals. Let's see..
	fmt.Println("Example 2...")
	obj = []byte("hope you enjoy")
	dataStore.Put("hi", obj)
	fmt.Println("Put 'hi':", obj, "in the chain")

	time.Sleep(1 * time.Second) // let's wait for s3 again, with more than enough time

	diskStore.Del("hi")
	v, _ = diskStore.Get("hi")
	fmt.Println("Delete 'hi' from boltdb. diskStore.Get(k) returns:", v)

	v, _ = dataStore.Get("hi")
	fmt.Println("Let's ask the chain for 'hi':", v)

	time.Sleep(1 * time.Second) // pause for bg routine to fill our local cache

	// The diskStore now has the value again from remoteStore lower down the chain.
	v, _ = diskStore.Get("hi")
	fmt.Println("Now, let's ask our diskStore again! diskStore.Get(k) returns:", v)

	// Also.. even though it hasn't been demonstrated here, the diskStore will only
	// store a max of 500MB (as configured via lrumgr above) worth of objects. Give it a shot.
}
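// The back-fill behavior demonstrated in Example 2 (a Get() that finds the
// value lower in the chain writes it back to the stores above it) could be
// sketched roughly as below. This illustrates the technique only; it is not
// chainstore's actual implementation:
func chainGet(stores []chainstore.Store, key string) ([]byte, error) {
	for i, s := range stores {
		val, err := s.Get(key)
		if err != nil {
			return nil, err
		}
		if len(val) > 0 {
			// Found it lower in the chain: refill the stores above in the
			// background for faster subsequent retrievals.
			for _, upper := range stores[:i] {
				go upper.Put(key, val)
			}
			return val, nil
		}
	}
	return nil, nil // not found anywhere in the chain
}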
func TestAsyncChain(t *testing.T) {
	var ms, fs, bs, chain chainstore.Store
	var err error

	logger := log.New(os.Stdout, "", log.LstdFlags)

	storeDir := tempDir()

	var errored atomic.Value

	ms = memstore.New(100)
	fs = filestore.New(storeDir+"/filestore", 0755)
	bs = boltstore.New(storeDir+"/boltstore/bolt.db", "test")

	chain = chainstore.New(
		logmgr.New(logger, ""),
		ms,
		chainstore.Async(
			func(err error) {
				log.Println("async error:", err)
				errored.Store(true)
			},
			logmgr.New(logger, "async"),
			&testStore{},
			metricsmgr.New("chaintest",
				fs,
				lrumgr.New(100, bs),
			),
		),
	)

	ctx := context.Background()
	assert := assert.New(t)

	err = chain.Open()
	assert.Nil(err)

	v := []byte("value")
	err = chain.Put(ctx, "k", v)
	assert.Nil(err)

	val, err := chain.Get(ctx, "k")
	assert.Nil(err)
	assert.Equal(val, v)

	val, err = ms.Get(ctx, "k")
	assert.Nil(err)
	assert.Equal(val, v)

	time.Sleep(time.Second * 1) // wait for async operation..

	val, err = fs.Get(ctx, "k")
	assert.Nil(err)
	assert.Equal(val, v)

	val, err = bs.Get(ctx, "k")
	assert.Nil(err)
	assert.Equal(val, v)

	//--

	// Let's make an error in the async store
	assert.Nil(errored.Load())

	err = chain.Put(ctx, "bad", []byte("v"))
	assert.Nil(err) // no error, because the sync store accepted the put fine

	time.Sleep(time.Second * 1) // wait for async operation..
	assert.NotEmpty(errored.Load())
}
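// testStore is referenced above but not defined in this excerpt. Given that
// Put(ctx, "bad", ...) is expected to trip the async error callback, a
// plausible sketch is a store that fails on that key (assumed, not the test
// suite's actual helper):
type testStore struct{}

func (s *testStore) Open() error  { return nil }
func (s *testStore) Close() error { return nil }

func (s *testStore) Put(ctx context.Context, key string, val []byte) error {
	if key == "bad" {
		return fmt.Errorf("testStore: refusing to store key %q", key)
	}
	return nil
}

func (s *testStore) Get(ctx context.Context, key string) ([]byte, error) {
	return nil, nil
}

func (s *testStore) Del(ctx context.Context, key string) error {
	return nil
}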