func newBucketStore(name, path string,
	settings BucketSettings,
	keyCompareForCollection func(collName string) gkvlite.KeyCompare) (
	res *bucketstore, err error) {
	var file FileLike
	if settings.MemoryOnly < MemoryOnly_LEVEL_PERSIST_NOTHING {
		file, err = fileService.OpenFile(path, os.O_RDWR|os.O_CREATE)
		if err != nil {
			return nil, err
		}
	}
	sc := mkBucketStoreCallbacks(keyCompareForCollection)
	bsf := NewBucketStoreFile(path, file, &BucketStoreStats{})
	bsfForGKVLite := bsf
	if settings.MemoryOnly >= MemoryOnly_LEVEL_PERSIST_NOTHING {
		bsfForGKVLite = nil // Fully memory-only: gkvlite gets no backing file.
	}
	bsf.store, err = gkvlite.NewStoreEx(bsfForGKVLite, sc)
	if err != nil {
		return nil, err
	}
	var bsfMemoryOnly *bucketstorefile
	if settings.MemoryOnly > MemoryOnly_LEVEL_PERSIST_EVERYTHING {
		bsfMemoryOnly = NewBucketStoreFile(path, file, bsf.stats)
		bsfMemoryOnly.store, err = gkvlite.NewStoreEx(nil, sc)
		if err != nil {
			return nil, err
		}
	}
	return &bucketstore{
		name:                    name,
		bsf:                     unsafe.Pointer(bsf),
		bsfMemoryOnly:           bsfMemoryOnly,
		endch:                   make(chan bool),
		partitions:              make(map[uint16]*partitionstore),
		stats:                   bsf.stats,
		keyCompareForCollection: keyCompareForCollection,
	}, nil
}
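A minimal usage sketch, not from the source: it assumes BucketSettings needs no fields beyond MemoryOnly for this call, and that MemoryOnly_LEVEL_PERSIST_EVERYTHING is the fully-persistent level, as the comparisons above imply. The bucket name and path are illustrative values.

func exampleNewBucketStore() (*bucketstore, error) {
	// Hypothetical values; every collection falls back to bytes.Compare.
	return newBucketStore("default", "/tmp/default-0.store",
		BucketSettings{MemoryOnly: MemoryOnly_LEVEL_PERSIST_EVERYTHING},
		func(collName string) gkvlite.KeyCompare { return bytes.Compare })
}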
func (s *bucketstore) compactSwapFile(bsf *bucketstorefile, compactPath string) error {
	fname := filepath.Base(bsf.path)
	suffix, err := parseFileNameSuffix(fname)
	if err != nil {
		return err
	}
	prefix, ver, err := parseStoreFileName(fname, suffix)
	if err != nil {
		return err
	}
	nextName := makeStoreFileName(prefix, ver+1, suffix)
	nextPath := filepath.Join(filepath.Dir(bsf.path), nextName)
	if err = os.Rename(compactPath, nextPath); err != nil {
		return err
	}
	nextFile, err := fileService.OpenFile(nextPath, os.O_RDWR|os.O_CREATE)
	if err != nil {
		return err
	}
	sc := mkBucketStoreCallbacks(s.keyCompareForCollection)
	nextBSF := NewBucketStoreFile(nextPath, nextFile, bsf.stats)
	nextStore, err := gkvlite.NewStoreEx(nextBSF, sc)
	if err != nil {
		// TODO: Rollback the previous *.orig rename.
		return err
	}
	nextBSF.store = nextStore
	atomic.StorePointer(&s.bsf, unsafe.Pointer(nextBSF))
	bsf.apply(func() {
		bsf.purge = true // Mark the old file as purgable.
	})
	return nil
}
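The swap avoids blocking readers because s.bsf is published with atomic.StorePointer; readers are expected to dereference it with a matching atomic load. A sketch of such an accessor (hypothetical name; the codebase's actual accessor is not shown here):

func (s *bucketstore) currentBSF() *bucketstorefile {
	// Readers observe either the old or the new file, never a torn pointer.
	return (*bucketstorefile)(atomic.LoadPointer(&s.bsf))
}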
func TestSlabStoreRandom(t *testing.T) {
	fname := "tmp.test"
	os.Remove(fname)
	f, err := os.Create(fname)
	if err != nil {
		t.Errorf("expected to create file: " + fname)
	}
	defer os.Remove(fname)

	arena, scb := setupStoreArena(t, 256)
	start := func(f *os.File) (*gkvlite.Store, *gkvlite.Collection) {
		s, err := gkvlite.NewStoreEx(f, scb)
		if err != nil || s == nil {
			t.Errorf("expected NewStoreEx to work")
		}
		s.SetCollection("x", bytes.Compare)
		x := s.GetCollection("x")
		if x == nil {
			t.Errorf("expected SetColl/GetColl to work")
		}
		return s, x
	}
	s, x := start(f)
	stop := func() {
		s.Flush()
		s.Close()
		f.Close()
		f = nil
	}

	numSets := 0
	numKeys := 10
	for i := 0; i < 100; i++ {
		for j := 0; j < 1000; j++ {
			kr := rand.Int() % numKeys
			ks := fmt.Sprintf("%03d", kr)
			k := []byte(ks)
			r := rand.Int() % 100
			if r < 20 {
				i, err := x.GetItem(k, true)
				if err != nil {
					t.Errorf("expected nil error, got: %v", err)
				}
				if i != nil {
					kr4 := kr * kr * kr * kr
					if scb.ItemValLength(x, i) != kr4 {
						t.Errorf("expected len: %d, got %d",
							kr4, scb.ItemValLength(x, i))
					}
					s.ItemDecRef(x, i)
				}
			} else if r < 60 {
				numSets++
				b := arena.Alloc(kr * kr * kr * kr)
				pri := rand.Int31()
				it := scb.ItemAlloc(x, uint16(len(k)))
				copy(it.Key, k)
				it.Val = b
				it.Priority = pri
				err := x.SetItem(it)
				if err != nil {
					t.Errorf("expected nil error, got: %v", err)
				}
				scb.ItemDecRef(x, it)
			} else if r < 80 {
				_, err := x.Delete(k)
				if err != nil {
					t.Errorf("expected nil error, got: %v", err)
				}
			} else if r < 90 {
				x.EvictSomeItems()
			} else {
				// Close and reopen the store.
				stop()
				f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
				s, x = start(f)
			}
		}
		x.EvictSomeItems()
		for k := 0; k < numKeys; k++ {
			_, err := x.Delete([]byte(fmt.Sprintf("%03d", k)))
			if err != nil {
				t.Fatalf("expected nil error, got: %v", err)
			}
		}
	}
}
func TestSlabStore(t *testing.T) {
	fname := "tmp.test"
	os.Remove(fname)
	f, err := os.Create(fname)
	if err != nil {
		t.Errorf("expected to create file: " + fname)
	}
	defer os.Remove(fname)

	arena, scb := setupStoreArena(t, 256)
	s, err := gkvlite.NewStoreEx(f, scb)
	if err != nil || s == nil {
		t.Errorf("expected NewStoreEx to work")
	}
	s.SetCollection("x", bytes.Compare)
	x := s.GetCollection("x")
	if x == nil {
		t.Errorf("expected SetColl/GetColl to work")
	}

	b := arena.Alloc(5)
	if b == nil {
		t.Errorf("expected buf")
	}
	copy(b, []byte("hello"))

	i := scb.ItemAlloc(x, 1)
	copy(i.Key, []byte("a"))
	i.Val = b
	i.Priority = 100
	x.SetItem(i)
	scb.ItemDecRef(x, i)

	i = scb.ItemAlloc(x, 3)
	copy(i.Key, []byte("big"))
	i.Val = arena.Alloc(1234)
	i.Priority = 100
	x.SetItem(i)
	scb.ItemDecRef(x, i)

	err = s.Flush()
	if err != nil {
		t.Errorf("expected Flush() to work, got: %v", err)
	}
	s.Close()
	f.Close()

	f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
	arena, scb = setupStoreArena(t, 64) // Read with a different buf-size.
	s, err = gkvlite.NewStoreEx(f, scb)
	if err != nil || s == nil {
		t.Errorf("expected NewStoreEx to work")
	}
	x = s.SetCollection("x", bytes.Compare)
	if x == nil {
		t.Errorf("expected SetColl/GetColl to work")
	}

	i, err = x.GetItem([]byte("a"), true)
	if err != nil || i == nil {
		t.Errorf("expected no GetItem() err, got: %v", err)
	}
	if string(i.Val) != "hello" {
		t.Errorf("expected hello, got: %#v", i)
	}
	s.ItemDecRef(x, i)

	i, err = x.GetItem([]byte("big"), true)
	if err != nil || i == nil {
		t.Errorf("expected no GetItem() err, got: %v", err)
	}
	if len(i.Val) != 64 {
		t.Errorf("expected 64, got: %d", len(i.Val))
	}
	if scb.ItemValLength(x, i) != 1234 {
		t.Errorf("expected 1234, got: %d", scb.ItemValLength(x, i))
	}
	s.ItemDecRef(x, i)
}
func (s *bucketstore) compactGo(bsf *bucketstorefile, compactPath string) error {
	bsf.removeOldFiles() // Clean up previous, successful compactions.
	os.Remove(compactPath) // Clean up previous, aborted compaction attempts.

	compactFile, err := fileService.OpenFile(compactPath,
		os.O_RDWR|os.O_CREATE|os.O_EXCL)
	if err != nil {
		return err
	}
	defer func() {
		if compactFile != nil {
			compactFile.Close()
			os.Remove(compactPath)
		}
	}()
	sc := mkBucketStoreCallbacks(s.keyCompareForCollection)
	compactStore, err := gkvlite.NewStoreEx(compactFile, sc)
	if err != nil {
		return err
	}

	// TODO: Parametrize writeEvery.
	writeEvery := 1000

	lastChanges := make(map[uint16]*gkvlite.Item) // Last items in changes colls.
	collNames := bsf.store.GetCollectionNames()   // Names of collections to process.
	collRest := make([]string, 0, len(collNames)) // Names of unprocessed collections.
	vbids := make([]uint16, 0, len(collNames))    // VBucket id's that we processed.

	// Process compaction in a few steps:
	// 1) First, unlocked, snapshot-based collection copying meant to
	//    handle most of each vbucket's data.
	// 2) Next, locked copying of any vbucket mutations (deltas)
	//    that happened in the meantime.
	// 3) Next, while still holding all vbucket collection locks, we
	//    copy any remaining non-vbucket collections.
	// 4) Finally, atomically swap the files and unwind the locks.
	for _, collName := range collNames {
		if !strings.HasSuffix(collName, COLL_SUFFIX_CHANGES) {
			if !strings.HasSuffix(collName, COLL_SUFFIX_KEYS) {
				// It's neither a changes nor a keys collection,
				// so handle it later in step 3.
				collRest = append(collRest, collName)
			}
			// Skip the keys collection as it's handled while copying
			// over the changes collection.
			continue
		}
		vbid, lastChange, err := s.copyVBucketColls(bsf, collName,
			compactStore, writeEvery)
		if err != nil {
			return err
		}
		lastChanges[uint16(vbid)] = lastChange
		vbids = append(vbids, uint16(vbid))
	}

	return s.copyBucketStoreDeltas(bsf, compactStore, vbids, 0, lastChanges,
		writeEvery, func() (err error) {
			// Copy any remaining (simple) collections (like COLL_VBMETA).
			err = s.copyRemainingColls(bsf, collRest, compactStore, writeEvery)
			if err != nil {
				return err
			}
			err = compactStore.Flush()
			if err != nil {
				return err
			}
			compactStore.Close()
			compactFile.Close()
			compactFile = nil // Disarm the deferred cleanup; the file is
			// about to be renamed into place by compactSwapFile.
			return s.compactSwapFile(bsf, compactPath) // The last step.
		})
}
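The writeEvery knob bounds how much of the compacted tree accumulates in memory before being flushed to the new file. A sketch of the flush-every-N-items copy pattern, using gkvlite's MinItem/VisitItemsAscend traversal; copyCollSketch is a hypothetical helper, not the codebase's copyVBucketColls, which additionally tracks the last change per vbucket.

func copyCollSketch(src, dst *gkvlite.Collection, dstStore *gkvlite.Store,
	writeEvery int) error {
	minItem, err := src.MinItem(true)
	if err != nil || minItem == nil {
		return err // Empty source collection; nothing to copy.
	}
	numItems := 0
	var visitErr error
	err = src.VisitItemsAscend(minItem.Key, true, func(i *gkvlite.Item) bool {
		if visitErr = dst.SetItem(i); visitErr != nil {
			return false
		}
		numItems++
		// Flush periodically so the destination tree doesn't
		// accumulate entirely in memory before one big write.
		if writeEvery > 0 && numItems%writeEvery == 0 {
			if visitErr = dstStore.Flush(); visitErr != nil {
				return false
			}
		}
		return true
	})
	if err == nil {
		err = visitErr
	}
	return err
}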