// newTableCache creates a tableCache backed by an in-memory file system that
// holds tableCacheTestNumTables single-entry tables, then resets the file
// system's open/close counters so that tests can track table cache activity.
func newTableCache() (*tableCache, *tableCacheTestFS, error) {
	xxx := bytes.Repeat([]byte("x"), tableCacheTestNumTables)

	fs := &tableCacheTestFS{
		FileSystem: memfs.New(),
	}
	for i := 0; i < tableCacheTestNumTables; i++ {
		f, err := fs.Create(dbFilename("", fileTypeTable, uint64(i)))
		if err != nil {
			return nil, nil, fmt.Errorf("fs.Create: %v", err)
		}
		tw := table.NewWriter(f, &db.Options{
			Comparer: internalKeyComparer{userCmp: db.DefaultComparer},
		})
		if err := tw.Set(makeIkey(fmt.Sprintf("k.SET.%d", i)), xxx[:i], nil); err != nil {
			return nil, nil, fmt.Errorf("tw.Set: %v", err)
		}
		if err := tw.Close(); err != nil {
			return nil, nil, fmt.Errorf("tw.Close: %v", err)
		}
	}

	fs.mu.Lock()
	fs.openCounts = map[string]int{}
	fs.closeCounts = map[string]int{}
	fs.mu.Unlock()

	c := &tableCache{}
	c.init("", fs, nil, tableCacheTestCacheSize)
	return c, fs, nil
}
// TestFinalBlockIsWritten checks that the table writer flushes its final,
// possibly partial, data block on Close: for every combination of key count
// and value length, reading the table back yields exactly the keys written.
func TestFinalBlockIsWritten(t *testing.T) {
	const blockSize = 100
	keys := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J"}
	valueLengths := []int{0, 1, 22, 28, 33, 40, 50, 61, 87, 100, 143, 200}
	xxx := bytes.Repeat([]byte("x"), valueLengths[len(valueLengths)-1])

	for nk := 0; nk <= len(keys); nk++ {
	loop:
		for _, vLen := range valueLengths {
			got, memFS := 0, memfs.New()

			wf, err := memFS.Create("foo")
			if err != nil {
				t.Errorf("nk=%d, vLen=%d: memFS create: %v", nk, vLen, err)
				continue
			}
			w := NewWriter(wf, &db.Options{
				BlockSize: blockSize,
			})
			for _, k := range keys[:nk] {
				if err := w.Set([]byte(k), xxx[:vLen], nil); err != nil {
					t.Errorf("nk=%d, vLen=%d: set: %v", nk, vLen, err)
					continue loop
				}
			}
			if err := w.Close(); err != nil {
				t.Errorf("nk=%d, vLen=%d: writer close: %v", nk, vLen, err)
				continue
			}

			rf, err := memFS.Open("foo")
			if err != nil {
				t.Errorf("nk=%d, vLen=%d: memFS open: %v", nk, vLen, err)
				continue
			}
			r := NewReader(rf, nil)
			i := r.Find(nil, nil)
			for i.Next() {
				got++
			}
			if err := i.Close(); err != nil {
				t.Errorf("nk=%d, vLen=%d: Iterator close: %v", nk, vLen, err)
				continue
			}
			if err := r.Close(); err != nil {
				t.Errorf("nk=%d, vLen=%d: reader close: %v", nk, vLen, err)
				continue
			}
			if got != nk {
				t.Errorf("nk=%2d, vLen=%3d: got %2d keys, want %2d", nk, vLen, got, nk)
				continue
			}
		}
	}
}
// TestCompaction writes values large enough to force memtable flushes and
// level-0 compactions, and after each write compares the keys held in the
// memtable and in the on-disk tables against the expected contents.
func TestCompaction(t *testing.T) {
	const writeBufferSize = 1000

	fs := memfs.New()
	d, err := Open("", &db.Options{
		FileSystem:      fs,
		WriteBufferSize: writeBufferSize,
	})
	if err != nil {
		t.Fatalf("Open: %v", err)
	}

	get1 := func(x db.DB) (ret string) {
		b := &bytes.Buffer{}
		iter := x.Find(nil, nil)
		for iter.Next() {
			b.Write(internalKey(iter.Key()).ukey())
		}
		if err := iter.Close(); err != nil {
			t.Fatalf("iterator Close: %v", err)
		}
		return b.String()
	}
	getAll := func() (gotMem, gotDisk string, err error) {
		d.mu.Lock()
		defer d.mu.Unlock()

		if d.mem != nil {
			gotMem = get1(d.mem)
		}
		ss := []string(nil)
		v := d.versions.currentVersion()
		for _, files := range v.files {
			for _, meta := range files {
				f, err := fs.Open(dbFilename("", fileTypeTable, meta.fileNum))
				if err != nil {
					return "", "", fmt.Errorf("Open: %v", err)
				}
				defer f.Close()
				r := table.NewReader(f, &db.Options{
					Comparer: internalKeyComparer{db.DefaultComparer},
				})
				defer r.Close()
				ss = append(ss, get1(r)+".")
			}
		}
		sort.Strings(ss)
		return gotMem, strings.Join(ss, ""), nil
	}

	value := bytes.Repeat([]byte("x"), writeBufferSize*6/10)
	testCases := []struct {
		key, wantMem, wantDisk string
	}{
		{"+A", "A", ""},
		{"+a", "Aa", ""},
		{"+B", "B", "Aa."},
		{"+b", "Bb", "Aa."},
		// The next level-0 table overwrites the B key.
		{"+C", "C", "Aa.Bb."},
		{"+B", "BC", "Aa.Bb."},
		// The next level-0 table deletes the a key.
		{"+D", "D", "Aa.BC.Bb."},
		{"-a", "Da", "Aa.BC.Bb."},
		{"+d", "Dad", "Aa.BC.Bb."},
		// The next addition creates the fourth level-0 table, and l0CompactionTrigger == 4,
		// so this triggers a non-trivial compaction into one level-1 table. Note that the
		// keys in this one larger table are interleaved from the four smaller ones.
		{"+E", "E", "ABCDbd."},
		{"+e", "Ee", "ABCDbd."},
		{"+F", "F", "ABCDbd.Ee."},
	}
	for _, tc := range testCases {
		if key := tc.key[1:]; tc.key[0] == '+' {
			if err := d.Set([]byte(key), value, nil); err != nil {
				t.Errorf("%q: Set: %v", key, err)
				break
			}
		} else {
			if err := d.Delete([]byte(key), nil); err != nil {
				t.Errorf("%q: Delete: %v", key, err)
				break
			}
		}

		// Allow any writes to the memfs to complete.
		time.Sleep(1 * time.Millisecond)

		gotMem, gotDisk, err := getAll()
		if err != nil {
			t.Errorf("%q: %v", tc.key, err)
			break
		}
		if gotMem != tc.wantMem {
			t.Errorf("%q: mem: got %q, want %q", tc.key, gotMem, tc.wantMem)
		}
		if gotDisk != tc.wantDisk {
			t.Errorf("%q: ldb: got %q, want %q", tc.key, gotDisk, tc.wantDisk)
		}
	}

	if err := d.Close(); err != nil {
		t.Fatalf("db Close: %v", err)
	}
}
		for i.Next() {
			n++
		}
		if err := i.Close(); err != nil {
			return err
		}
		if n != ct.count {
			return fmt.Errorf("count %q: got %d, want %d", ct.start, n, ct.count)
		}
	}

	return r.Close()
}

var (
	memFileSystem = memfs.New()
	tmpFileCount  int
)

func build(compression db.Compression) (db.File, error) {
	// Create a sorted list of wordCount's keys.
	keys := make([]string, len(wordCount))
	i := 0
	for k := range wordCount {
		keys[i] = k
		i++
	}
	sort.Strings(keys)

	// Write the key/value pairs to a new table, in increasing key order.
	filename := fmt.Sprintf("/tmp%d", tmpFileCount)