// makeDB copies the database snapshot to the snapshot directory.
func makeDB(snapdir, dbfile string) {
	f, ferr := os.OpenFile(dbfile, os.O_RDONLY, 0600)
	if ferr != nil {
		ExitWithError(ExitInvalidInput, ferr)
	}
	defer f.Close()

	if err := os.MkdirAll(snapdir, 0755); err != nil {
		ExitWithError(ExitIO, err)
	}

	dbpath := path.Join(snapdir, "db")
	db, dberr := os.OpenFile(dbpath, os.O_WRONLY|os.O_CREATE, 0600)
	if dberr != nil {
		ExitWithError(ExitIO, dberr)
	}
	if _, err := io.Copy(db, f); err != nil {
		ExitWithError(ExitIO, err)
	}
	db.Close()

	// update consistentIndex so applies go through on etcdserver despite
	// having a new raft instance
	be := backend.NewDefaultBackend(dbpath)
	s := mvcc.NewStore(be, nil, &initIndex{})
	id := s.TxnBegin()
	btx := be.BatchTx()
	del := func(k, v []byte) error {
		_, _, err := s.TxnDeleteRange(id, k, nil)
		return err
	}

	// delete stored members from old cluster since using new members
	btx.UnsafeForEach([]byte("members"), del)
	btx.UnsafeForEach([]byte("members_removed"), del)

	// trigger write-out of new consistent index
	s.TxnEnd(id)
	s.Commit()
	s.Close()
}
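Both versions of makeDB hand mvcc.NewStore an &initIndex{} as its consistent-index getter. A minimal sketch of that helper, assuming etcd's ConsistentIndexGetter interface (a single ConsistentIndex() uint64 method); the real definition lives alongside makeDB in etcdctl:

// initIndex implements ConsistentIndexGetter so the restored store won't
// block applies by waiting for a future raft index from a raft instance
// that doesn't exist yet. Sketch only, under the assumptions above.
type initIndex struct{}

func (*initIndex) ConsistentIndex() uint64 { return 1 }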
// makeDB copies the database snapshot to the snapshot directory.
func makeDB(snapdir, dbfile string) {
	f, ferr := os.OpenFile(dbfile, os.O_RDONLY, 0600)
	if ferr != nil {
		ExitWithError(ExitInvalidInput, ferr)
	}
	defer f.Close()

	// get snapshot integrity hash
	if _, err := f.Seek(-sha256.Size, os.SEEK_END); err != nil {
		ExitWithError(ExitIO, err)
	}
	sha := make([]byte, sha256.Size)
	if _, err := f.Read(sha); err != nil {
		ExitWithError(ExitIO, err)
	}
	if _, err := f.Seek(0, os.SEEK_SET); err != nil {
		ExitWithError(ExitIO, err)
	}

	if err := os.MkdirAll(snapdir, 0755); err != nil {
		ExitWithError(ExitIO, err)
	}

	dbpath := path.Join(snapdir, "db")
	db, dberr := os.OpenFile(dbpath, os.O_RDWR|os.O_CREATE, 0600)
	if dberr != nil {
		ExitWithError(ExitIO, dberr)
	}
	if _, err := io.Copy(db, f); err != nil {
		ExitWithError(ExitIO, err)
	}

	// truncate away integrity hash, if any.
	off, serr := db.Seek(0, os.SEEK_END)
	if serr != nil {
		ExitWithError(ExitIO, serr)
	}
	hasHash := (off % 512) == sha256.Size
	if hasHash {
		if err := db.Truncate(off - sha256.Size); err != nil {
			ExitWithError(ExitIO, err)
		}
	}

	if !hasHash && !skipHashCheck {
		err := fmt.Errorf("snapshot missing hash but --skip-hash-check=false")
		ExitWithError(ExitBadArgs, err)
	}

	if hasHash && !skipHashCheck {
		// check for match
		if _, err := db.Seek(0, os.SEEK_SET); err != nil {
			ExitWithError(ExitIO, err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, db); err != nil {
			ExitWithError(ExitIO, err)
		}
		dbsha := h.Sum(nil)
		if !reflect.DeepEqual(sha, dbsha) {
			err := fmt.Errorf("expected sha256 %v, got %v", sha, dbsha)
			ExitWithError(ExitInvalidInput, err)
		}
	}

	// db hash is OK, can now modify DB so it can be part of a new cluster
	db.Close()

	// update consistentIndex so applies go through on etcdserver despite
	// having a new raft instance
	be := backend.NewDefaultBackend(dbpath)
	s := mvcc.NewStore(be, nil, &initIndex{})
	id := s.TxnBegin()
	btx := be.BatchTx()
	del := func(k, v []byte) error {
		_, _, err := s.TxnDeleteRange(id, k, nil)
		return err
	}

	// delete stored members from old cluster since using new members
	btx.UnsafeForEach([]byte("members"), del)
	btx.UnsafeForEach([]byte("members_removed"), del)

	// trigger write-out of new consistent index
	s.TxnEnd(id)
	s.Commit()
	s.Close()
}
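The integrity check in this version relies on the snapshot layout: the raw boltdb file followed by a 32-byte sha256 of everything before it. Since a boltdb file is always a whole number of 512-byte sectors, a trailing 32-byte remainder (off % 512 == sha256.Size) signals an appended hash. The following is a standalone sketch of the same check; verifySnapshot is a hypothetical helper, not part of etcdctl:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// verifySnapshot recomputes the sha256 over the snapshot body and compares
// it against the 32 bytes appended at the end of the file. Sketch only,
// assuming the layout written by `etcdctl snapshot save`.
func verifySnapshot(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return err
	}
	// boltdb files are sized in whole 512-byte sectors, so a 32-byte
	// remainder marks an appended integrity hash.
	if fi.Size()%512 != sha256.Size {
		return fmt.Errorf("no integrity hash on %q", path)
	}

	// hash everything except the trailing 32 bytes
	h := sha256.New()
	if _, err := io.Copy(h, io.LimitReader(f, fi.Size()-sha256.Size)); err != nil {
		return err
	}

	// the file offset now sits at the stored hash; read and compare
	want := make([]byte, sha256.Size)
	if _, err := io.ReadFull(f, want); err != nil {
		return err
	}
	if !bytes.Equal(h.Sum(nil), want) {
		return fmt.Errorf("hash mismatch on %q", path)
	}
	return nil
}

func main() {
	if err := verifySnapshot(os.Args[1]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("snapshot hash OK")
}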
func initMVCC() {
	be := backend.New("mvcc-bench", time.Duration(batchInterval), batchLimit)
	s = mvcc.NewStore(be, &lease.FakeLessor{}, nil)
	os.Remove("mvcc-bench") // boltDB has an opened fd, so removing the file is ok
}
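initMVCC references the package-level store s and the batchInterval and batchLimit settings of its surrounding benchmark package. A minimal sketch of how a write benchmark might drive the store it sets up; benchPut and its parameters are hypothetical, and fmt is assumed to be imported by the package:

// benchPut writes a fixed number of keys through the mvcc store; each Put
// creates a new revision in the backend. Sketch only.
func benchPut(keys, valueSize int) {
	initMVCC()
	defer s.Close()

	val := make([]byte, valueSize)
	for i := 0; i < keys; i++ {
		s.Put([]byte(fmt.Sprintf("key-%d", i)), val, lease.NoLease)
	}
}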