// NewMDServerLocal constructs a new MDServerLocal object that stores // data in the directories specified as parameters to this function. func NewMDServerLocal(config Config, handleDbfile string, mdDbfile string, branchDbfile string) (*MDServerLocal, error) { handleStorage, err := storage.OpenFile(handleDbfile) if err != nil { return nil, err } mdStorage, err := storage.OpenFile(mdDbfile) if err != nil { return nil, err } branchStorage, err := storage.OpenFile(branchDbfile) if err != nil { return nil, err } // Always use memory for the lock storage, so it gets wiped after // a restart. lockStorage := storage.NewMemStorage() return newMDServerLocalWithStorage(config, handleStorage, mdStorage, branchStorage, lockStorage) }
func openDBBench(b *testing.B) *dbBench { _, err := os.Stat(benchDB) if err == nil { err = os.RemoveAll(benchDB) if err != nil { b.Fatal("cannot remove old db: ", err) } } p := &dbBench{b: b} p.stor, err = storage.OpenFile(benchDB) if err != nil { b.Fatal("cannot open stor: ", err) } p.o = &opt.Options{ Flag: opt.OFCreateIfMissing, } p.ro = &opt.ReadOptions{} p.wo = &opt.WriteOptions{} p.db, err = Open(p.stor, p.o) if err != nil { b.Fatal("cannot open db: ", err) } runtime.GOMAXPROCS(runtime.NumCPU()) return p }
func openDBBench(b *testing.B, noCompress bool) *dbBench { _, err := os.Stat(benchDB) if err == nil { err = os.RemoveAll(benchDB) if err != nil { b.Fatal("cannot remove old db: ", err) } } p := &dbBench{ b: b, o: &opt.Options{}, ro: &opt.ReadOptions{}, wo: &opt.WriteOptions{}, } p.stor, err = storage.OpenFile(benchDB, false) if err != nil { b.Fatal("cannot open stor: ", err) } if noCompress { p.o.Compression = opt.NoCompression } p.db, err = Open(p.stor, p.o) if err != nil { b.Fatal("cannot open db: ", err) } return p }
// Open will open and possibly create a datastore at the given directory. func OpenLeveldb(path string, create bool, kvOpts Options) (db Engine, err error) { goOpts := kvOpts.(*goKeyValueOptions) if goOpts == nil { err = fmt.Errorf("Nil pointer passed in as key-value options to Openleveldb()!") return } leveldb_stor, err := storage.OpenFile(path) if err != nil { return } // Set the CreateIfMissing flag. if create { goOpts.Options.Flag |= opt.OFCreateIfMissing goOpts.Options.Flag |= opt.OFErrorIfExist } // Open the leveldb leveldb_db, err := leveldb.Open(leveldb_stor, goOpts.Options) if err != nil { return } db = &goLDB{ directory: path, opts: *goOpts, // We want a copy at time of Open() stor: leveldb_stor, ldb: leveldb_db, } return }
func TestDb_CreateReopenDbOnFile(t *testing.T) { dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) if err := os.RemoveAll(dbpath); err != nil { t.Fatal("cannot remove old db: ", err) } defer os.RemoveAll(dbpath) for i := 0; i < 3; i++ { stor, err := storage.OpenFile(dbpath) if err != nil { t.Fatalf("(%d) cannot open storage: %s", i, err) } db, err := Open(stor, &opt.Options{Flag: opt.OFCreateIfMissing}) if err != nil { t.Fatalf("(%d) cannot open db: %s", i, err) } if err := db.Put([]byte("foo"), []byte("bar"), &opt.WriteOptions{}); err != nil { t.Fatalf("(%d) cannot write to db: %s", i, err) } if err := db.Close(); err != nil { t.Fatalf("(%d) cannot close db: %s", i, err) } if err := stor.Close(); err != nil { t.Fatalf("(%d) cannot close storage: %s", i, err) } } }
func NewStorage() *Storage { var stor storage.Storage var closeFn func() error if storageUseFS { for { storageMu.Lock() num := storageNum storageNum++ storageMu.Unlock() path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) if _, err := os.Stat(path); os.IsNotExist(err) { stor, err = storage.OpenFile(path) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) closeFn = func() error { if storageKeepFS { return nil } return os.RemoveAll(path) } break } } } else { stor = storage.NewMemStorage() } s := &Storage{ Storage: stor, closeFn: closeFn, opens: make(map[uint64]bool), } s.stallCond.L = &s.mu return s }
// Main method. Will panic if things are so bad that the application // will not start. func main() { flag.Parse() log.Println("Event store to use:", *eventStorePath) log.Println("Command socket path:", *commandSocketZPath) log.Println("Event publishing socket path:", *eventPublishZPath) log.Println() var stor storage.Storage if *inMemoryStore { log.Println("!!! WARNING: Using in-memory store.") log.Println("!!! Events will not be persisted.") log.Println() stor = &storage.MemStorage{} } else { stor, err := storage.OpenFile(*eventStorePath) if err != nil { log.Panicln("could not create DB storage") } defer stor.Close() } estore, err := eventstore.New(stor) if err != nil { log.Panicln(os.Stderr, "could not create event store") } context, err := zmq.NewContext() if err != nil { log.Panicln(err) } initParams := server.InitParams{ Store: estore, CommandSocketZPath: commandSocketZPath, EvPubSocketZPath: eventPublishZPath, ZMQContext: context, } serv, err := server.New(&initParams) if err != nil { panic(err.Error()) } sigchan := make(chan os.Signal, 5) serverStopper := func() { sig := <-sigchan if sig == os.Interrupt { serv.Stop() } } go serverStopper() signal.Notify(sigchan) serv.Start() serv.Wait() }
func (ulevel) OpenDatabase(name string, o level.UnderlyingOptions) (dtb level.UnderlyingDatabase, err error) { stor, err := storage.OpenFile(name) if err != nil { return } var dtbe *leveldb.DB dtbe, err = leveldb.Open(stor, o.(opts).Options) dtb = db{dtbe, stor} return }
// newTestStorage returns a *testStorage for use in tests. When tsFS is
// set it creates a uniquely named file-backed store under the temp
// directory; otherwise it falls back to an in-memory store.
//
// For the file-backed case, the installed closeFn dumps the contents of
// any LOG/LOG.old files via t.Logf and then removes the directory
// (unless tsKeepFS is set, in which case the files are kept).
func newTestStorage(t *testing.T) *testStorage {
	var stor storage.Storage
	var closeFn func() error
	if tsFS {
		for {
			// Reserve the next storage number under the mutex.
			tsMU.Lock()
			num := tsNum
			tsNum++
			tsMU.Unlock()
			path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
			// NOTE(review): this treats ANY Stat error (not just
			// os.IsNotExist) as "path is free" — confirm that is intended;
			// sibling helpers use os.IsNotExist here.
			if _, err := os.Stat(path); err != nil {
				stor, err = storage.OpenFile(path)
				if err != nil {
					t.Fatalf("F: cannot create storage: %v", err)
				}
				t.Logf("I: storage created: %s", path)
				closeFn = func() error {
					// Dump leveldb's log files to the test output before
					// (possibly) removing the directory.
					for _, name := range []string{"LOG.old", "LOG"} {
						f, err := os.Open(filepath.Join(path, name))
						if err != nil {
							continue
						}
						if log, err := ioutil.ReadAll(f); err != nil {
							t.Logf("---------------------- %s ----------------------", name)
							t.Logf("cannot read log: %v", err)
							t.Logf("---------------------- %s ----------------------", name)
						} else if len(log) > 0 {
							t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
							t.Logf("---------------------- %s ----------------------", name)
						}
						f.Close()
					}
					if tsKeepFS {
						return nil
					}
					return os.RemoveAll(path)
				}
				break
			}
		}
	} else {
		stor = storage.NewMemStorage()
	}
	ts := &testStorage{
		t:       t,
		Storage: stor,
		closeFn: closeFn,
		opens:   make(map[uint64]bool),
	}
	ts.cond.L = &ts.mu
	return ts
}
// NewMDServerLocal constructs a new MDServerLocal object that stores // data in the directories specified as parameters to this function. func NewMDServerLocal(config Config, handleDbfile string, mdDbfile string, branchDbfile string) (*MDServerLocal, error) { handleStorage, err := storage.OpenFile(handleDbfile) if err != nil { return nil, err } mdStorage, err := storage.OpenFile(mdDbfile) if err != nil { return nil, err } branchStorage, err := storage.OpenFile(branchDbfile) if err != nil { return nil, err } return newMDServerLocalWithStorage(config, handleStorage, mdStorage, branchStorage) }
// RecoverFile recovers and opens a DB with missing or corrupted manifest files // for the given path. It will ignore any manifest files, valid or not. // The DB must already exist or it will returns an error. // Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. // // RecoverFile uses standard file-system backed storage implementation as desribed // in the leveldb/storage package. // // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func RecoverFile(path string, o *opt.Options) (db *DB, err error) { stor, err := storage.OpenFile(path) if err != nil { return } db, err = Recover(stor, o) if err != nil { stor.Close() } else { db.closer = stor } return }
// OpenFile opens or creates a DB for the given path. // The DB will be created if not exist, unless ErrorIfMissing is true. // Also, if ErrorIfExist is true and the DB exist OpenFile will returns // os.ErrExist error. // // OpenFile uses standard file-system backed storage implementation as // desribed in the leveldb/storage package. // // OpenFile will return an error with type of ErrCorrupted if corruption // detected in the DB. Corrupted DB can be recovered with Recover // function. // // The returned DB instance is goroutine-safe. // The DB must be closed after use, by calling Close method. func OpenFile(path string, o *opt.Options) (db *DB, err error) { stor, err := storage.OpenFile(path, o.GetReadOnly()) if err != nil { return } db, err = Open(stor, o) if err != nil { stor.Close() } else { db.closer = stor } return }
// OpenFile open or create database from given file. // // This is alias of: // stor, err := storage.OpenFile("path/to/db") // ... // db, err := Open(stor, &opt.Options{}) // ... func OpenFile(path string, o *opt.Options) (db *DB, err error) { stor, err := storage.OpenFile(path) if err != nil { return } db, err = Open(stor, o) if err == nil { db.closeCb = func() error { return stor.Close() } } return }
// RecoverDB recovers LevelDB database from corruption func RecoverDB(path string) error { stor, err := storage.OpenFile(path, false) if err != nil { return err } db, err := leveldb.Recover(stor, nil) if err != nil { return err } db.Close() stor.Close() return nil }
func (ec *recoverCmd) Main() { ec.configuredCmd.Main() InitLog() path := openpgp.Config().Settings.TomlTree.Get("symflux.recon.leveldb.path").(string) stor, err := storage.OpenFile(path) if err != nil { die(err) } log.Println("database storage opened, recovering...") db, err := leveldb.Recover(stor, nil) if err != nil { die(err) } log.Println("recovery complete") db.Close() }
func NewMirageStorage(cfg *Config) *MirageStorage { fileStorage, err := storage.OpenFile(cfg.Storage.DataDir) if err != nil { fmt.Println("cannot open leveldb fileStorage") log.Fatal(err) } storage, err := leveldb.Open(fileStorage, &opt.Options{}) if err != nil { fmt.Println("cannot open leveldb") log.Fatal(err) } ms := &MirageStorage{storage: storage} return ms }
func main() { flag.Parse() fmt.Printf("Using path: %s\n", filename) if child { fmt.Println("Child flag set.") } stor, err := storage.OpenFile(filename) if err != nil { fmt.Printf("Could not open storage: %s", err) os.Exit(10) } if !child { fmt.Println("Executing child -- first test (expecting error)") err := runChild() if err == nil { fmt.Println("Expecting error from child") } else if err.Error() != "exit status 10" { fmt.Println("Got unexpected error from child:", err) } else { fmt.Printf("Got error from child: %s (expected)\n", err) } } err = stor.Close() if err != nil { fmt.Printf("Error when closing storage: %s", err) os.Exit(11) } if !child { fmt.Println("Executing child -- second test") err := runChild() if err != nil { fmt.Println("Got unexpected error from child:", err) } } os.RemoveAll(filename) }
func NewStorage() *Storage { var ( stor storage.Storage path string ) if storageUseFS { for { storageMu.Lock() num := storageNum storageNum++ storageMu.Unlock() path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) if _, err := os.Stat(path); os.IsNotExist(err) { stor, err = storage.OpenFile(path, false) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) break } } } else { stor = storage.NewMemStorage() } s := &Storage{ Storage: stor, path: path, rand: NewRand(), opens: make(map[uint64]bool), } s.stallCond.L = &s.mu if s.path != "" { s.logI("using FS storage") s.logI("storage path: %s", s.path) } else { s.logI("using MEM storage") } return s }
// main runs a long-lived randomized stress/consistency test against a
// leveldb database at dbPath. For every namespace in numKeys it starts:
//   - a writer that repeatedly builds a batch of random key/value pairs
//     plus "link" keys pointing at them, funneled through one
//     serialized write goroutine (sometimes via a transaction),
//   - per write iteration, a reader that walks a snapshot and verifies
//     that every link resolves to the value written in that iteration,
//   - a scanner that sweeps the namespace, verifying key/value
//     checksums and deleting link keys (plus ~0.1% of other keys).
// Any detected inconsistency aborts the run via fatalf, which also
// scans the offending table file when the error reports corruption.
func main() {
	flag.Parse()

	if enableBufferPool {
		bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
	}

	log.Printf("Test DB stored at %q", dbPath)
	if httpProf != "" {
		log.Printf("HTTP pprof listening at %q", httpProf)
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	// Always start from a clean database directory.
	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath, false)
	if err != nil {
		log.Fatal(err)
	}
	tstor := &testingStorage{stor}
	defer tstor.Close()

	// fatalf flags the run as failed, logs the problem and, when the
	// error is a table corruption, scans that table to locate the broken
	// key/value pair. It never returns (runtime.Goexit).
	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
				log.Print("FATAL: corruption detected, scanning...")
				if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
				}
			}
		}
		runtime.Goexit()
	}

	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBufferPool:      !enableBufferPool,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
		Compression:            opt.NoCompression,
	}
	if enableCompression {
		o.Compression = opt.DefaultCompression
	}

	db, err := leveldb.Open(tstor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		mu              = &sync.Mutex{}
		gGetStat        = &latencyStats{}
		gIterStat       = &latencyStats{}
		gWriteStat      = &latencyStats{}
		gTrasactionStat = &latencyStats{}
		startTime       = time.Now()

		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	// Single writer goroutine: serializes all batches (sometimes via a
	// transaction) and acknowledges each request over writeAck; the
	// requester replies on writeAckAck before the next batch is taken.
	go func() {
		for b := range writeReq {
			var err error
			if mrand.Float64() < transactionProb {
				log.Print("> Write using transaction")
				gTrasactionStat.start()
				var tr *leveldb.Transaction
				if tr, err = db.OpenTransaction(); err == nil {
					if err = tr.Write(b, nil); err == nil {
						if err = tr.Commit(); err == nil {
							gTrasactionStat.record(b.Len())
						}
					} else {
						tr.Discard()
					}
				}
			} else {
				gWriteStat.start()
				if err = db.Write(b, nil); err == nil {
					gWriteStat.record(b.Len())
				}
			}
			writeAck <- err
			<-writeAckAck
		}
	}()

	// Periodic stats reporter: latency aggregates plus leveldb internal
	// properties every three seconds.
	go func() {
		for {
			time.Sleep(3 * time.Second)

			log.Print("------------------------")

			log.Printf("> Elapsed=%v", time.Now().Sub(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
				gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
				gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
				gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
				gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec())
			mu.Unlock()

			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
				cachedblock, openedtables, alivesnaps, aliveiters, blockpool)

			log.Print("------------------------")
		}
	}()

	for ns, numKey := range numKeys {
		func(ns, numKey int) {
			log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)

			keys := make([][]byte, numKey)
			for i := range keys {
				keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
			}

			wg.Add(1)
			// Writer for this namespace: each iteration writes one batch
			// and spawns a reader validating the resulting snapshot.
			go func() {
				var wi uint32
				defer func() {
					log.Printf("[%02d] WRITER DONE #%d", ns, wi)
					wg.Done()
				}()

				var (
					b       = new(leveldb.Batch)
					k2, v2  []byte
					nReader int32
				)
				for atomic.LoadUint32(&done) == 0 {
					log.Printf("[%02d] WRITER #%d", ns, wi)

					b.Reset()
					for _, k1 := range keys {
						k2 = randomData(k2, byte(ns), 2, wi, keyLen)
						v2 = randomData(v2, byte(ns), 3, wi, valueLen)
						b.Put(k2, v2)
						b.Put(k1, k2)
					}
					writeReq <- b
					if err := <-writeAck; err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
					}

					// Take the snapshot before releasing the writer
					// goroutine (writeAckAck), so it reflects this batch.
					snap, err := db.GetSnapshot()
					if err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
					}
					writeAckAck <- struct{}{}

					wg.Add(1)
					atomic.AddInt32(&nReader, 1)
					// Reader: iterates the snapshot and checks every link
					// key resolves to a value written in iteration snapwi.
					go func(snapwi uint32, snap *leveldb.Snapshot) {
						var (
							ri       int
							iterStat = &latencyStats{}
							getStat  = &latencyStats{}
						)
						defer func() {
							mu.Lock()
							gGetStat.add(getStat)
							gIterStat.add(iterStat)
							mu.Unlock()

							atomic.AddInt32(&nReader, -1)
							log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v",
								ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
							snap.Release()
							wg.Done()
						}()

						stopi := snapwi + 3
						for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
							var n int
							iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
							iterStat.start()
							for iter.Next() {
								k1 := iter.Key()
								k2 := iter.Value()
								iterStat.record(1)

								if dataNS(k2) != byte(ns) {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
								}

								kwritei := dataI(k2)
								if kwritei != snapwi {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
								}

								getStat.start()
								v2, err := snap.Get(k2, nil)
								if err != nil {
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}
								getStat.record(1)

								if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 {
									err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)}
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}

								n++
								iterStat.start()
							}
							iter.Release()
							if err := iter.Error(); err != nil {
								fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
							}
							if n != numKey {
								fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
							}

							ri++
						}
					}(wi, snap)

					atomic.AddUint32(&wi, 1)
				}
			}()

			delB := new(leveldb.Batch)
			wg.Add(1)
			// Scanner: periodically sweeps the whole namespace, verifying
			// key and value checksums and batching deletions of link keys
			// (and a random ~0.1% of remaining keys).
			go func() {
				var (
					i        int
					iterStat = &latencyStats{}
				)
				defer func() {
					log.Printf("[%02d] SCANNER DONE #%d", ns, i)
					wg.Done()
				}()

				time.Sleep(2 * time.Second)

				for atomic.LoadUint32(&done) == 0 {
					var n int
					delB.Reset()
					iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
					iterStat.start()
					for iter.Next() && atomic.LoadUint32(&done) == 0 {
						k := iter.Key()
						v := iter.Value()
						iterStat.record(1)

						for ci, x := range [...][]byte{k, v} {
							checksum0, checksum1 := dataChecksum(x)
							if checksum0 != checksum1 {
								if ci == 0 {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								} else {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								}
							}
						}

						if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
							delB.Delete(k)
						}

						n++
						iterStat.start()
					}
					iter.Release()
					if err := iter.Error(); err != nil {
						fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
					}

					if n > 0 {
						log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
					}

					if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
						t := time.Now()
						writeReq <- delB
						if err := <-writeAck; err != nil {
							writeAckAck <- struct{}{}
							fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
						} else {
							writeAckAck <- struct{}{}
						}
						log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t))
					}

					i++
				}
			}()
		}(ns, numKey)
	}

	// Flip done on Ctrl-C / kill so all loops wind down, then wait for
	// every writer/reader/scanner to finish.
	go func() {
		sig := make(chan os.Signal)
		signal.Notify(sig, os.Interrupt, os.Kill)
		log.Printf("Got signal: %v, exiting...", <-sig)
		atomic.StoreUint32(&done, 1)
	}()

	wg.Wait()
}