func NewStorage() *Storage {
	var stor storage.Storage
	var closeFn func() error
	if storageUseFS {
		for {
			storageMu.Lock()
			num := storageNum
			storageNum++
			storageMu.Unlock()
			path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
			if _, err := os.Stat(path); os.IsNotExist(err) {
				stor, err = storage.OpenFile(path)
				ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
				closeFn = func() error {
					if storageKeepFS {
						return nil
					}
					return os.RemoveAll(path)
				}
				break
			}
		}
	} else {
		stor = storage.NewMemStorage()
	}
	s := &Storage{
		Storage: stor,
		closeFn: closeFn,
		opens:   make(map[uint64]bool),
	}
	s.stallCond.L = &s.mu
	return s
}
func openDBBench(b *testing.B, noCompress bool) *dbBench {
	_, err := os.Stat(benchDB)
	if err == nil {
		err = os.RemoveAll(benchDB)
		if err != nil {
			b.Fatal("cannot remove old db: ", err)
		}
	}

	p := &dbBench{
		b:  b,
		o:  &opt.Options{},
		ro: &opt.ReadOptions{},
		wo: &opt.WriteOptions{},
	}
	p.stor, err = storage.OpenFile(benchDB)
	if err != nil {
		b.Fatal("cannot open stor: ", err)
	}
	if noCompress {
		p.o.Compression = opt.NoCompression
	}

	p.db, err = Open(p.stor, p.o)
	if err != nil {
		b.Fatal("cannot open db: ", err)
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	return p
}
// RecoverFile recovers and opens a DB with missing or corrupted manifest files
// for the given path. It will ignore any manifest files, valid or not.
// The DB must already exist or it will return an error.
// Also, RecoverFile will ignore the ErrorIfMissing and ErrorIfExist options.
//
// RecoverFile uses the standard file-system backed storage implementation as
// described in the leveldb/storage package.
//
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling the Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
	stor, err := storage.OpenFile(path)
	if err != nil {
		return
	}
	db, err = Recover(stor, o)
	if err != nil {
		stor.Close()
	} else {
		db.closer = stor
	}
	return
}
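// The sketch below is illustrative and not part of the original source: it shows
// how an external caller might use RecoverFile after a crash has corrupted the
// MANIFEST. It assumes the caller imports "github.com/syndtr/goleveldb/leveldb";
// the directory "/tmp/exampledb" and the key "example-key" are placeholders
// chosen for demonstration only.
func exampleRecoverFile() {
	// RecoverFile ignores any existing manifest and rebuilds it from the table
	// files; the DB directory must already exist.
	db, err := leveldb.RecoverFile("/tmp/exampledb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Data written before the corruption should still be readable.
	if value, err := db.Get([]byte("example-key"), nil); err == nil {
		fmt.Printf("recovered value: %s\n", value)
	}
}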
func main() {
	flag.Parse()

	fmt.Printf("Using path: %s\n", filename)
	if child {
		fmt.Println("Child flag set.")
	}

	stor, err := storage.OpenFile(filename)
	if err != nil {
		fmt.Printf("Could not open storage: %s", err)
		os.Exit(10)
	}

	if !child {
		fmt.Println("Executing child -- first test (expecting error)")
		err := runChild()
		if err == nil {
			fmt.Println("Expecting error from child")
		} else if err.Error() != "exit status 10" {
			fmt.Println("Got unexpected error from child:", err)
		} else {
			fmt.Printf("Got error from child: %s (expected)\n", err)
		}
	}

	err = stor.Close()
	if err != nil {
		fmt.Printf("Error when closing storage: %s", err)
		os.Exit(11)
	}

	if !child {
		fmt.Println("Executing child -- second test")
		err := runChild()
		if err != nil {
			fmt.Println("Got unexpected error from child:", err)
		}
	}

	os.RemoveAll(filename)
}
func main() {
	flag.Parse()

	if httpProf != "" {
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath)
	if err != nil {
		log.Fatal(err)
	}
	stor = &testingStorage{stor}
	defer stor.Close()

	// fatalf marks the stress test as failed and, for corruption errors on table
	// files, tries to locate the corrupted key/value pair before aborting the
	// calling goroutine.
	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if cerr.File != nil && cerr.File.Type == storage.TypeTable {
				if !scanTable(stor.GetFile(cerr.File.Num, cerr.File.Type), false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.File)
				}
			}
		}
		runtime.Goexit()
	}

	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
	}

	db, err := leveldb.Open(stor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		mu          = &sync.Mutex{}
		gGetStat    = &latencyStats{}
		gIterStat   = &latencyStats{}
		gWriteStat  = &latencyStats{}
		startTime   = time.Now()
		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	// Single writer goroutine: all batch writes to the DB are serialized here.
	go func() {
		for b := range writeReq {
			gWriteStat.start()
			err := db.Write(b, nil)
			if err == nil {
				gWriteStat.record(b.Len())
			}
			writeAck <- err
			<-writeAckAck
		}
	}()

	// Periodically report latency statistics and DB properties.
	go func() {
		for {
			time.Sleep(3 * time.Second)

			log.Print("------------------------")
			log.Printf("> Elapsed=%v", time.Now().Sub(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d", gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d", gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d", gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			mu.Unlock()

			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q", cachedblock, openedtables, alivesnaps, aliveiters, blockpool)
			log.Print("------------------------")
		}
	}()

	// Start a writer and a scanner goroutine for each namespace.
	for ns, numKey := range numKeys {
		func(ns, numKey int) {
			log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)

			keys := make([][]byte, numKey)
			for i := range keys {
				keys[i] = randomData(nil, byte(ns), 1, uint32(i))
			}

			wg.Add(1)
			go func() {
				var wi uint32
				defer func() {
					log.Printf("[%02d] WRITER DONE #%d", ns, wi)
					wg.Done()
				}()

				var (
					b       = new(leveldb.Batch)
					k2, v2  []byte
					nReader int32
				)
				for atomic.LoadUint32(&done) == 0 {
					log.Printf("[%02d] WRITER #%d", ns, wi)

					b.Reset()
					for _, k1 := range keys {
						k2 = randomData(k2, byte(ns), 2, wi)
						v2 = randomData(v2, byte(ns), 3, wi)
						b.Put(k2, v2)
						b.Put(k1, k2)
					}
					writeReq <- b
					if err := <-writeAck; err != nil {
						fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
					}

					snap, err := db.GetSnapshot()
					if err != nil {
						fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
					}
					writeAckAck <- struct{}{}

					// Each write iteration spawns a reader that verifies the
					// snapshot it just created.
					wg.Add(1)
					atomic.AddInt32(&nReader, 1)
					go func(snapwi uint32, snap *leveldb.Snapshot) {
						var (
							ri       int
							iterStat = &latencyStats{}
							getStat  = &latencyStats{}
						)
						defer func() {
							mu.Lock()
							gGetStat.add(getStat)
							gIterStat.add(iterStat)
							mu.Unlock()

							atomic.AddInt32(&nReader, -1)
							log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
							snap.Release()
							wg.Done()
						}()

						stopi := snapwi + 3
						for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
							var n int
							iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
							iterStat.start()
							for iter.Next() {
								k1 := iter.Key()
								k2 := iter.Value()
								iterStat.record(1)

								if dataNS(k2) != byte(ns) {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
								}

								kwritei := dataI(k2)
								if kwritei != snapwi {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
								}

								getStat.start()
								_, err := snap.Get(k2, nil)
								if err != nil {
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}
								getStat.record(1)

								n++
								iterStat.start()
							}
							iter.Release()
							if err := iter.Error(); err != nil {
								fatalf(nil, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
							}
							if n != numKey {
								fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
							}

							ri++
						}
					}(wi, snap)

					atomic.AddUint32(&wi, 1)
				}
			}()

			// Scanner goroutine: verifies key/value checksums over the whole
			// namespace and deletes derived keys.
			delB := new(leveldb.Batch)
			wg.Add(1)
			go func() {
				var (
					i        int
					iterStat = &latencyStats{}
				)
				defer func() {
					log.Printf("[%02d] SCANNER DONE #%d", ns, i)
					wg.Done()
				}()

				time.Sleep(2 * time.Second)

				for atomic.LoadUint32(&done) == 0 {
					var n int
					delB.Reset()
					iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
					iterStat.start()
					for iter.Next() && atomic.LoadUint32(&done) == 0 {
						k := iter.Key()
						v := iter.Value()
						iterStat.record(1)

						for ci, x := range [...][]byte{k, v} {
							checksum0, checksum1 := dataChecksum(x)
							if checksum0 != checksum1 {
								if ci == 0 {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								} else {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								}
							}
						}

						if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
							delB.Delete(k)
						}

						n++
						iterStat.start()
					}
					iter.Release()
					if err := iter.Error(); err != nil {
						fatalf(nil, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
					}

					if n > 0 {
						log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
					}

					if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
						t := time.Now()
						writeReq <- delB
						if err := <-writeAck; err != nil {
							fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
						}
						writeAckAck <- struct{}{}
						log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t))
					}

					i++
				}
			}()
		}(ns, numKey)
	}

	// Stop all goroutines on interrupt.
	go func() {
		sig := make(chan os.Signal)
		signal.Notify(sig, os.Interrupt, os.Kill)
		log.Printf("Got signal: %v, exiting...", <-sig)
		atomic.StoreUint32(&done, 1)
	}()

	wg.Wait()
}
func newTestStorage(t *testing.T) *testStorage {
	var stor storage.Storage
	var closeFn func() error
	if tsFS {
		for {
			tsMU.Lock()
			num := tsNum
			tsNum++
			tsMU.Unlock()
			tempdir := tsTempdir
			if tempdir == "" {
				tempdir = os.TempDir()
			}
			path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
			if _, err := os.Stat(path); err != nil {
				stor, err = storage.OpenFile(path)
				if err != nil {
					t.Fatalf("F: cannot create storage: %v", err)
				}
				t.Logf("I: storage created: %s", path)
				closeFn = func() error {
					for _, name := range []string{"LOG.old", "LOG"} {
						f, err := os.Open(filepath.Join(path, name))
						if err != nil {
							continue
						}
						if log, err := ioutil.ReadAll(f); err != nil {
							t.Logf("---------------------- %s ----------------------", name)
							t.Logf("cannot read log: %v", err)
							t.Logf("---------------------- %s ----------------------", name)
						} else if len(log) > 0 {
							t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
							t.Logf("---------------------- %s ----------------------", name)
						}
						f.Close()
					}
					if t.Failed() {
						t.Logf("testing failed, test DB preserved at %s", path)
						return nil
					}
					if tsKeepFS {
						return nil
					}
					return os.RemoveAll(path)
				}
				break
			}
		}
	} else {
		stor = storage.NewMemStorage()
	}
	ts := &testStorage{
		t:              t,
		Storage:        stor,
		closeFn:        closeFn,
		opens:          make(map[uint64]bool),
		emuErrOnceMap:  make(map[uint64]uint),
		emuRandErrProb: 0x999,
		emuRandRand:    rand.New(rand.NewSource(0xfacedead)),
	}
	ts.cond.L = &ts.mu
	return ts
}