// flush a leveldb batch to database and reset batch to 0 func cacheFlush(db *leveldb.DB, batch *leveldb.Batch) { err := db.Write(batch, nil) if err != nil { log.Fatal(err) } batch.Reset() }
func clearItems(db *leveldb.DB) error { snap, err := db.GetSnapshot() if err != nil { return err } defer snap.Release() // Iterate over k2 it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) defer it.Release() batch := new(leveldb.Batch) for it.Next() { k1 := it.Key() k2 := it.Value() // k2 should exist _, err := snap.Get(k2, nil) if err != nil { return err } // Delete the k1 => k2 mapping first batch.Delete(k1) // Then the k2 => data mapping batch.Delete(k2) } if testing.Verbose() { log.Printf("batch write (clear) %p", batch) } return db.Write(batch, nil) }
func clearItems(db *leveldb.DB) error { snap, err := db.GetSnapshot() if err != nil { return err } defer snap.Release() // Iterate from the start of k2 space to the end it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil) defer it.Release() batch := &leveldb.Batch{} for it.Next() { k2 := it.Key() k1 := it.Value() // k1 should exist _, err := snap.Get(k1, nil) if err != nil { return err } // Delete the k2 => k1 mapping first batch.Delete(k2) // Then the k1 => key mapping batch.Delete(k1) } return db.Write(batch, nil) }
func ldbCheckGlobals(db *leveldb.DB, folder []byte) { defer runtime.GC() snap, err := db.GetSnapshot() if err != nil { panic(err) } l.Debugf("created snapshot %p", snap) defer func() { l.Debugf("close snapshot %p", snap) snap.Release() }() start := globalKey(folder, nil) limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff}) dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() batch := new(leveldb.Batch) l.Debugf("new batch %p", batch) var fk []byte for dbi.Next() { gk := dbi.Key() var vl versionList err := vl.UnmarshalXDR(dbi.Value()) if err != nil { panic(err) } // Check the global version list for consistency. An issue in previous // versions of goleveldb could result in reordered writes so that // there are global entries pointing to no longer existing files. Here // we find those and clear them out. name := globalKeyName(gk) var newVL versionList for _, version := range vl.versions { fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name) l.Debugf("snap.Get %p %x", snap, fk) _, err := snap.Get(fk, nil) if err == leveldb.ErrNotFound { continue } if err != nil { panic(err) } newVL.versions = append(newVL.versions, version) } if len(newVL.versions) != len(vl.versions) { l.Infof("db repair: rewriting global version list for %x %x", gk[1:1+64], gk[1+64:]) batch.Put(dbi.Key(), newVL.MustMarshalXDR()) } } l.Debugf("db check completed for %q", folder) db.Write(batch, nil) }
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 { defer runtime.GC() batch := new(leveldb.Batch) snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() var maxLocalVer uint64 for _, f := range fs { name := []byte(f.Name) fk := nodeKey(repo, node, name) bs, err := snap.Get(fk, nil) if err == leveldb.ErrNotFound { if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer { maxLocalVer = lv } if f.IsInvalid() { ldbRemoveFromGlobal(snap, batch, repo, node, name) } else { ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) } continue } var ef protocol.FileInfoTruncated err = ef.UnmarshalXDR(bs) if err != nil { panic(err) } // Flags might change without the version being bumped when we set the // invalid flag on an existing file. if ef.Version != f.Version || ef.Flags != f.Flags { if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer { maxLocalVer = lv } if f.IsInvalid() { ldbRemoveFromGlobal(snap, batch, repo, node, name) } else { ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) } } } err = db.Write(batch, nil) if err != nil { panic(err) } return maxLocalVer }
func setItems(db *leveldb.DB) error { batch := new(leveldb.Batch) for _, k1 := range keys { k2 := randomData(2) // k2 -> data batch.Put(k2, randomData(42)) // k1 -> k2 batch.Put(k1, k2) } if testing.Verbose() { log.Printf("batch write (set) %p", batch) } return db.Write(batch, nil) }
func setItems(db *leveldb.DB) error { snap, err := db.GetSnapshot() if err != nil { return err } defer snap.Release() batch := &leveldb.Batch{} for k2, k1 := range keys { // Create k1 => item mapping first batch.Put(makeK1(k1), items[k1]) // Then the k2 => k1 mapping batch.Put(makeK2(k2), makeK1(k1)) } return db.Write(batch, nil) }
func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f FileInfoTruncated) bool) { runtime.GC() start := deviceKey(folder, nil, nil) // before all folder/device files limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files snap, err := db.GetSnapshot() if err != nil { panic(err) } if debugDB { l.Debugf("created snapshot %p", snap) } defer func() { if debugDB { l.Debugf("close snapshot %p", snap) } snap.Release() }() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() for dbi.Next() { device := deviceKeyDevice(dbi.Key()) var f FileInfoTruncated err := f.UnmarshalXDR(dbi.Value()) if err != nil { panic(err) } switch f.Name { case "", ".", "..", "/": // A few obviously invalid filenames l.Infof("Dropping invalid filename %q from database", f.Name) batch := new(leveldb.Batch) ldbRemoveFromGlobal(db, batch, folder, device, nil) batch.Delete(dbi.Key()) db.Write(batch, nil) continue } if cont := fn(device, f); !cont { return } } }
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 { defer runtime.GC() batch := new(leveldb.Batch) snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() var maxLocalVer uint64 for _, f := range fs { name := []byte(f.Name) fk := nodeKey(repo, node, name) bs, err := snap.Get(fk, nil) if err == leveldb.ErrNotFound { if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer { maxLocalVer = lv } ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) continue } var ef protocol.FileInfoTruncated err = ef.UnmarshalXDR(bs) if err != nil { panic(err) } if ef.Version != f.Version { if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer { maxLocalVer = lv } ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) } } err = db.Write(batch, nil) if err != nil { panic(err) } return maxLocalVer }
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) bool { batch := new(leveldb.Batch) snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() for _, f := range fs { name := []byte(f.Name) fk := nodeKey(repo, node, name) bs, err := snap.Get(fk, nil) if err == leveldb.ErrNotFound { ldbInsert(batch, repo, node, name, f) ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) continue } var ef protocol.FileInfo err = ef.UnmarshalXDR(bs) if err != nil { panic(err) } if ef.Version != f.Version { ldbInsert(batch, repo, node, name, f) ldbUpdateGlobal(snap, batch, repo, node, name, f.Version) } } err = db.Write(batch, nil) if err != nil { panic(err) } return true }
// ldbUpdate applies the file list fs for a folder/device pair: files that
// are new, or whose version or flags changed, are (re)inserted and the
// global index is updated (invalid files are removed from it instead).
// The batch is flushed and reused periodically to bound memory use.
// Returns the highest local version assigned.
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
	runtime.GC()

	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	var maxLocalVer int64
	// fk is a device-key buffer reused across iterations to avoid
	// reallocating per file.
	var fk []byte
	for _, f := range fs {
		name := []byte(f.Name)
		fk = deviceKeyInto(fk[:cap(fk)], folder, device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err == leveldb.ErrNotFound {
			// Not previously known: insert and update (or clear) the
			// global index entry.
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
			continue
		}

		var ef FileInfoTruncated
		err = ef.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}
		// Flags might change without the version being bumped when we set the
		// invalid flag on an existing file.
		if !ef.Version.Equal(f.Version) || ef.Flags != f.Flags {
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
		}

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		if batch.Len() > batchFlushSize {
			if debugDB {
				l.Debugf("db.Write %p", batch)
			}
			err = db.Write(batch, nil)
			if err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}

	// Final flush of whatever remains in the batch.
	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return maxLocalVer
}
// ldbGenericReplace performs a merge walk between the sorted incoming file
// list fs and the database's existing records for folder/device: files
// missing from the database are inserted, files present on both sides are
// compared and updated if version or flags differ, and database records
// absent from fs are handed to deleteFn. The batch is flushed and reused
// periodically to bound memory use. Returns the highest local version
// assigned.
func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, deleteFn deletionHandler) int64 {
	runtime.GC()

	sort.Sort(fileList(fs)) // sort list on name, same as in the database

	start := deviceKey(folder, device, nil)                            // before all folder/device files
	limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files

	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	moreDb := dbi.Next()
	fsi := 0
	var maxLocalVer int64

	// Classic two-pointer merge over the sorted list and the sorted
	// database iterator.
	for {
		var newName, oldName []byte
		moreFs := fsi < len(fs)

		if !moreDb && !moreFs {
			break
		}

		if moreFs {
			newName = []byte(fs[fsi].Name)
		}
		if moreDb {
			oldName = deviceKeyName(dbi.Key())
		}

		cmp := bytes.Compare(newName, oldName)

		if debugDB {
			l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
		}

		switch {
		case moreFs && (!moreDb || cmp == -1):
			if debugDB {
				l.Debugln("generic replace; missing - insert")
			}
			// Database is missing this file. Insert it.
			if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if fs[fsi].IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, newName)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
			}
			fsi++

		case moreFs && moreDb && cmp == 0:
			// File exists on both sides - compare versions. We might get an
			// update with the same version and different flags if a device has
			// marked a file as invalid, so handle that too.
			if debugDB {
				l.Debugln("generic replace; exists - compare")
			}
			var ef FileInfoTruncated
			// NOTE(review): unmarshal error is deliberately(?) ignored here —
			// confirm whether a corrupt record should panic instead.
			ef.UnmarshalXDR(dbi.Value())
			if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
				if debugDB {
					l.Debugln("generic replace; differs - insert")
				}
				if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
					maxLocalVer = lv
				}
				if fs[fsi].IsInvalid() {
					ldbRemoveFromGlobal(snap, batch, folder, device, newName)
				} else {
					ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
				}
			} else if debugDB {
				l.Debugln("generic replace; equal - ignore")
			}
			// Advance both sides.
			fsi++
			moreDb = dbi.Next()

		case moreDb && (!moreFs || cmp == 1):
			if debugDB {
				l.Debugln("generic replace; exists - remove")
			}
			// Record exists only in the database; hand it to the deletion
			// handler.
			if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
				maxLocalVer = lv
			}
			moreDb = dbi.Next()
		}

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		if batch.Len() > batchFlushSize {
			if debugDB {
				l.Debugf("db.Write %p", batch)
			}
			err = db.Write(batch, nil)
			if err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}

	// Final flush of whatever remains.
	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return maxLocalVer
}
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) bool { sort.Sort(fileList(fs)) // sort list on name, same as on disk start := nodeKey(repo, node, nil) // before all repo/node files limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files batch := new(leveldb.Batch) snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() moreDb := dbi.Next() fsi := 0 changed := false for { var newName, oldName []byte moreFs := fsi < len(fs) if !moreDb && !moreFs { break } if !moreFs && deleteFn == nil { // We don't have any more updated files to process and deletion // has not been requested, so we can exit early break } if moreFs { newName = []byte(fs[fsi].Name) } if moreDb { oldName = nodeKeyName(dbi.Key()) } cmp := bytes.Compare(newName, oldName) if debug { l.Debugf("generic replace; repo=%q node=%x moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, node, moreFs, moreDb, cmp, newName, oldName) } switch { case moreFs && (!moreDb || cmp == -1): changed = true // Disk is missing this file. Insert it. ldbInsert(batch, repo, node, newName, fs[fsi]) ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version) fsi++ case cmp == 0: // File exists on both sides - compare versions. var ef protocol.FileInfo ef.UnmarshalXDR(dbi.Value()) if fs[fsi].Version > ef.Version { ldbInsert(batch, repo, node, newName, fs[fsi]) ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version) changed = true } // Iterate both sides. fsi++ moreDb = dbi.Next() case moreDb && (!moreFs || cmp == 1): if deleteFn != nil { if deleteFn(snap, batch, repo, node, oldName, dbi) { changed = true } } moreDb = dbi.Next() } } err = db.Write(batch, nil) if err != nil { panic(err) } return changed }
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 { defer runtime.GC() sort.Sort(fileList(fs)) // sort list on name, same as on disk start := nodeKey(repo, node, nil) // before all repo/node files limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files batch := new(leveldb.Batch) snap, err := db.GetSnapshot() if err != nil { panic(err) } defer snap.Release() dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil) defer dbi.Release() moreDb := dbi.Next() fsi := 0 var maxLocalVer uint64 for { var newName, oldName []byte moreFs := fsi < len(fs) if !moreDb && !moreFs { break } if !moreFs && deleteFn == nil { // We don't have any more updated files to process and deletion // has not been requested, so we can exit early break } if moreFs { newName = []byte(fs[fsi].Name) } if moreDb { oldName = nodeKeyName(dbi.Key()) } cmp := bytes.Compare(newName, oldName) if debug { l.Debugf("generic replace; repo=%q node=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, protocol.NodeIDFromBytes(node), moreFs, moreDb, cmp, newName, oldName) } switch { case moreFs && (!moreDb || cmp == -1): // Disk is missing this file. Insert it. if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer { maxLocalVer = lv } if fs[fsi].IsInvalid() { ldbRemoveFromGlobal(snap, batch, repo, node, newName) } else { ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version) } fsi++ case moreFs && moreDb && cmp == 0: // File exists on both sides - compare versions. We might get an // update with the same version and different flags if a node has // marked a file as invalid, so handle that too. 
var ef protocol.FileInfoTruncated ef.UnmarshalXDR(dbi.Value()) if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version { if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer { maxLocalVer = lv } if fs[fsi].IsInvalid() { ldbRemoveFromGlobal(snap, batch, repo, node, newName) } else { ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version) } } // Iterate both sides. fsi++ moreDb = dbi.Next() case moreDb && (!moreFs || cmp == 1): if deleteFn != nil { if lv := deleteFn(snap, batch, repo, node, oldName, dbi); lv > maxLocalVer { maxLocalVer = lv } } moreDb = dbi.Next() } } err = db.Write(batch, nil) if err != nil { panic(err) } return maxLocalVer }
// ldbUpdate applies the file list fs for a folder/device pair: files that
// are new, or whose version or flags changed, are (re)inserted and the
// global index is updated (invalid files are removed from it instead).
// Returns the highest local version assigned.
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
	runtime.GC()

	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	var maxLocalVer uint64
	for _, f := range fs {
		name := []byte(f.Name)
		fk := deviceKey(folder, device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err == leveldb.ErrNotFound {
			// Not previously known: insert and update (or clear) the
			// global index entry.
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
			}
			continue
		}

		var ef protocol.FileInfoTruncated
		err = ef.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}
		// Flags might change without the version being bumped when we set the
		// invalid flag on an existing file.
		if ef.Version != f.Version || ef.Flags != f.Flags {
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
			}
		}
	}

	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return maxLocalVer
}
func upgrade1To2(db *leveldb.DB) error { fmt.Println("Upgrading v1 to v2...") type v1IndexEntry struct { Subject string `json:"subject"` Predicate string `json:"predicate"` Object string `json:"object"` Label string `json:"label,omitempty"` History []int64 `json:"History"` } type v1ValueData struct { Name string `json:"Name"` Size int64 `json:"Size"` } var ( spoPref = []byte{spo[0].Prefix(), spo[1].Prefix()} ospPref = []byte{osp[0].Prefix(), osp[1].Prefix()} posPref = []byte{pos[0].Prefix(), pos[1].Prefix()} cpsPref = []byte{cps[0].Prefix(), cps[1].Prefix()} ) { fmt.Println("Upgrading bucket z") it := db.NewIterator(&util.Range{Start: []byte{'z'}, Limit: []byte{'z' + 1}}, nil) for it.Next() { k, v := it.Key(), it.Value() var val v1ValueData if err := json.Unmarshal(v, &val); err != nil { return err } node := proto.NodeData{ Size: val.Size, Value: proto.MakeValue(quad.Raw(val.Name)), } nv, err := node.Marshal() if err != nil { return err } if err = db.Put(k, nv, nil); err != nil { return err } } it.Release() } for _, pref := range [4][]byte{spoPref, ospPref, posPref, cpsPref} { fmt.Println("Upgrading bucket", string(pref)) end := []byte{pref[0], pref[1] + 1} it := db.NewIterator(&util.Range{Start: pref, Limit: end}, nil) for it.Next() { k, v := it.Key(), it.Value() var entry v1IndexEntry if err := json.Unmarshal(v, &entry); err != nil { return err } var h proto.HistoryEntry h.History = make([]uint64, len(entry.History)) for i, id := range entry.History { h.History[i] = uint64(id) } nv, err := h.Marshal() if err != nil { return err } if err = db.Put(k, nv, nil); err != nil { return err } } it.Release() } { fmt.Println("Upgrading bucket d") it := db.NewIterator(&util.Range{Start: []byte{'d'}, Limit: []byte{'d' + 1}}, nil) for it.Next() { k, v := it.Key(), it.Value() id, err := strconv.ParseInt(string(k[1:]), 16, 64) if err != nil { return err } nk := createDeltaKeyFor(id) var val graph.Delta if err := json.Unmarshal(v, &val); err != nil { return err } p := 
deltaToProto(val) nv, err := p.Marshal() if err != nil { return err } b := &leveldb.Batch{} b.Put(nk, nv) b.Delete(k) if err = db.Write(b, nil); err != nil { return err } } it.Release() } return nil }