// ChildFileIds returns the IDs of all Files that have parent refs to the
// given file.
func (d *DriveDB) ChildFileIds(fileId string) ([]string, error) {
	var ids []string
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKey(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		found, err := d.db.Has(fileKey(cid), nil)
		if err == nil && found {
			ids = append(ids, cid)
		} else {
			batch.Delete(iter.Key())
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
// DeleteDataWithPrefix deletes all service data such as service metadata,
// items and payloads. Deletes are flushed in chunks of 1000 so the write
// batch stays small. (The original flushed in the loop's else branch and
// skipped deleting the key it was positioned on; every key is deleted now.)
func (ds *LevelDBStorage) DeleteDataWithPrefix(prefix string) int {
	ds.FlushCache()
	ds.saveLock.Lock()
	defer ds.saveLock.Unlock()

	limitCounter := 0
	total := 0
	iter := ds.IterData(prefix)
	wb := new(leveldb.Batch)
	for iter.Valid() {
		total++
		wb.Delete(iter.GetKey())
		limitCounter++
		if limitCounter >= 1000 {
			limitCounter = 0
			ds.db.Write(wb, nil)
			wb.Reset()
		}
		iter.Next()
	}
	ds.db.Write(wb, nil)
	return total
}
// saveInDb writes all key/value pairs from mult to the database in a single batch.
func (self dbSync) saveInDb(mult map[string]string) {
	batch := new(leveldb.Batch)
	for k, v := range mult {
		batch.Put([]byte(k), []byte(v))
	}
	self.Db.Write(batch, nil)
}
func msetnx(v resp.CommandArgs, ex *CommandExtras) error {
	if len(v) == 0 || len(v)%2 != 0 {
		return resp.NewError(ErrFmtWrongNumberArgument, "msetnx").WriteTo(ex.Buffer)
	}

	batch := new(leveldb.Batch)
	for i := 0; i < len(v); i += 2 {
		batch.Put(v[i], v[i+1])
	}

	ex.DB.Lock()
	defer ex.DB.Unlock()

	// MSETNX sets nothing unless every key is absent.
	for i := 0; i < len(v); i += 2 {
		_, err := ex.DB.Get(v[i])
		if err != nil && err != leveldb.ErrNotFound {
			return err
		}
		if err != leveldb.ErrNotFound {
			return resp.ZeroInteger.WriteTo(ex.Buffer)
		}
	}

	if err := ex.DB.WriteBatch(batch); err != nil {
		return err
	}
	return resp.OneInteger.WriteTo(ex.Buffer)
}
// multiDeleteDb deletes all of the given keys from the database in a single batch.
func (self dbSync) multiDeleteDb(ks []string) {
	batch := new(leveldb.Batch)
	for _, k := range ks {
		batch.Delete([]byte(k))
	}
	self.Db.Write(batch, nil)
}
// PersistReceivedRecords saves the info about which records we've already seen.
func (p *LevelDBPersister) PersistReceivedRecords(comp StreamID, records []*Record) error {
	batch := new(leveldb.Batch)
	for _, r := range records {
		batch.Put([]byte(fmt.Sprintf(receivedKeyFormat, comp, r.ID)), nil)
	}
	return p.db.Write(batch, nil)
}
// PutReceipts stores the receipts in the current database.
func PutReceipts(db common.Database, receipts types.Receipts) error {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

	for _, receipt := range receipts {
		storageReceipt := (*types.ReceiptForStorage)(receipt)
		bytes, err := rlp.EncodeToBytes(storageReceipt)
		if err != nil {
			return err
		}
		if batchWrite {
			batch.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
		} else {
			err = db.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
			if err != nil {
				return err
			}
		}
	}

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(batch, nil); err != nil {
			return err
		}
	}
	return nil
}
// childFileIds returns a map containing IDs of all Files that have parent
// refs to the given file. The returned map keys are IDs, and the map values
// indicate if the child is a directory.
func (d *DriveDB) childFileIds(fileId string) (map[string]bool, error) {
	ids := make(map[string]bool)
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKeyPrefix(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		if gdriveFile, err := d.FileById(cid); err != nil {
			// The lookup that failed is for the child ID, so log cid here.
			log.Printf("unknown fileId %v: %v", cid, err)
			batch.Delete(iter.Key())
		} else {
			ids[cid] = gdriveFile.MimeType == driveFolderMimeType
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
func (l *LevelDb) Put(id DbKey, aliases []DbKey, value []byte) error {
	// Lazy Open
	if err := l.open(); err != nil {
		return err
	}

	batch := new(leveldb.Batch)
	idb := id.ToBytes("kv")
	batch.Put(idb, value)
	for _, alias := range aliases {
		batch.Put(alias.ToBytes("lo"), idb)
	}

	err := l.db.Write(batch, nil)

	// If the file is corrupt, just nuke and act like we didn't find anything
	if l.nukeIfCorrupt(err) {
		err = nil
	}
	return err
}
func clearItems(db *leveldb.DB) error {
	snap, err := db.GetSnapshot()
	if err != nil {
		return err
	}
	defer snap.Release()

	// Iterate over the k1 => k2 mappings (prefix 1)
	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
	defer it.Release()

	batch := new(leveldb.Batch)
	for it.Next() {
		k1 := it.Key()
		k2 := it.Value()

		// k2 should exist
		_, err := snap.Get(k2, nil)
		if err != nil {
			return err
		}

		// Delete the k1 => k2 mapping first
		batch.Delete(k1)
		// Then the k2 => data mapping
		batch.Delete(k2)
	}
	if testing.Verbose() {
		log.Printf("batch write (clear) %p", batch)
	}
	return db.Write(batch, nil)
}
// cacheFlush writes a leveldb batch to the database and resets the batch.
func cacheFlush(db *leveldb.DB, batch *leveldb.Batch) {
	err := db.Write(batch, nil)
	if err != nil {
		log.Fatal(err)
	}
	batch.Reset()
}
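// A minimal usage sketch for cacheFlush, showing the accumulate-and-flush
// pattern several snippets in this collection use. cacheFillExample, kvs and
// maxBatchLen are hypothetical names, not part of the original code; the
// 1000-op threshold mirrors the chunk size DeleteDataWithPrefix uses above.
func cacheFillExample(db *leveldb.DB, kvs map[string][]byte) {
	const maxBatchLen = 1000 // assumed threshold
	batch := new(leveldb.Batch)
	for k, v := range kvs {
		batch.Put([]byte(k), v)
		if batch.Len() >= maxBatchLen {
			cacheFlush(db, batch) // writes and resets the batch
		}
	}
	if batch.Len() > 0 {
		cacheFlush(db, batch) // flush the remainder
	}
}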
func (ds *LevelDBStorage) DeleteData(id ...string) error {
	wb := new(leveldb.Batch)
	for _, i := range id {
		wb.Delete(enc.UnsafeStringToBytes(i))
	}
	return ds.db.Write(wb, nil)
}
func (server *Server) deleteMessages(uid *[32]byte, messageList []*[32]byte) error {
	batch := new(leveldb.Batch)
	for _, messageID := range messageList {
		key := append(append([]byte{'m'}, uid[:]...), messageID[:]...)
		batch.Delete(key)
	}
	return server.database.Write(batch, wO_sync)
}
func (l *internalLevelDBStore) putBatch(b *leveldb.Batch, numBytes int) {
	l.concurrentWriteLimit <- struct{}{}
	err := l.db.Write(b, nil)
	d.Chk.NoError(err)
	l.putCount += int64(b.Len())
	l.putBytes += int64(numBytes)
	<-l.concurrentWriteLimit
}
func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
	defer runtime.GC()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	l.Debugf("created snapshot %p", snap)
	defer func() {
		l.Debugf("close snapshot %p", snap)
		snap.Release()
	}()

	start := globalKey(folder, nil)
	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	batch := new(leveldb.Batch)
	l.Debugf("new batch %p", batch)
	var fk []byte
	for dbi.Next() {
		gk := dbi.Key()
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}

		// Check the global version list for consistency. An issue in previous
		// versions of goleveldb could result in reordered writes so that
		// there are global entries pointing to no longer existing files. Here
		// we find those and clear them out.

		name := globalKeyName(gk)
		var newVL versionList
		for _, version := range vl.versions {
			fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name)
			l.Debugf("snap.Get %p %x", snap, fk)
			_, err := snap.Get(fk, nil)
			if err == leveldb.ErrNotFound {
				continue
			}
			if err != nil {
				panic(err)
			}
			newVL.versions = append(newVL.versions, version)
		}

		if len(newVL.versions) != len(vl.versions) {
			l.Infof("db repair: rewriting global version list for %x %x", gk[1:1+64], gk[1+64:])
			batch.Put(dbi.Key(), newVL.MustMarshalXDR())
		}
	}
	l.Debugf("db check completed for %q", folder)
	db.Write(batch, nil)
}
func (ldb *LevelDB) delete(keys [][]byte) {
	batch := new(leveldb.Batch)
	for _, key := range keys {
		batch.Delete(key)
	}
	if err := ldb.db.Write(batch, nil); err != nil && err != leveldb.ErrNotFound {
		panic(err)
	}
}
// Discard block map state, removing the given files.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	for _, file := range files {
		for _, block := range file.Blocks {
			batch.Delete(m.blockKey(block.Hash, file.Name))
		}
	}
	return m.db.Write(batch, nil)
}
func (server *Server) newKeys(uid *[32]byte, keyList [][]byte) error {
	batch := new(leveldb.Batch)
	for _, key := range keyList {
		keyHash := sha256.Sum256(key)
		dbKey := append(append([]byte{'k'}, uid[:]...), keyHash[:]...)
		batch.Put(dbKey, key)
	}
	return server.database.Write(batch, wO_sync)
}
// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folder, file))
	batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
	return f.db.Write(batch, nil)
}
func main() {
	olddb, err := Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	newdb, err := Open(os.Args[2])
	if err != nil {
		panic(err)
	}

	iter := olddb.NewIterator(&util.Range{Start: nil, Limit: nil}, nil)
	totalBitset := 0
	totalMsgpack := 0
	rows := 0
	batch := new(leveldb.Batch)
	for iter.Next() {
		key := iter.Key()
		value := iter.Value()
		if bytes.HasPrefix(key, []byte("doc:")) {
			batch.Put(key, value)
			continue
		}

		bs := bitset.New(8)
		bs.ReadFrom(bytes.NewBuffer(value))
		var docIDs []uint
		for i, e := bs.NextSet(0); e; i, e = bs.NextSet(i + 1) {
			docIDs = append(docIDs, i)
		}
		b, err := msgpack.Marshal(delta_encode(docIDs))
		if err != nil {
			panic(err)
		}
		//fmt.Printf("bitset size is %d\n", len(value))
		//fmt.Printf("msgpack size is %d\n", len(b))
		totalBitset += len(value)
		totalMsgpack += len(b)
		batch.Put(key, b)

		if rows%10000 == 0 {
			log.Print("rows ", rows)
			newdb.Write(batch, nil)
			batch = new(leveldb.Batch)
		}
		rows++
	}
	fmt.Printf("bitset size is %d\n", totalBitset)
	fmt.Printf("msgpack size is %d\n", totalMsgpack)
	newdb.Write(batch, nil)
	newdb.CompactRange(util.Range{Start: nil, Limit: nil})
}
// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
func makeBatchWithOps(ops []AbstractBatchOperation) *leveldb.Batch {
	batch := new(leveldb.Batch)
	for _, op := range ops {
		switch op.kind {
		case "PUT":
			batch.Put(op.key, op.value)
		case "DELETE":
			batch.Delete(op.key)
		}
	}
	return batch
}
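// A minimal usage sketch for makeBatchWithOps. It assumes, based only on the
// field accesses above, that AbstractBatchOperation is a struct with kind,
// key and value fields; applyOpsExample and the db handle are hypothetical.
func applyOpsExample(db *leveldb.DB) error {
	ops := []AbstractBatchOperation{
		{kind: "PUT", key: []byte("color"), value: []byte("blue")},
		{kind: "DELETE", key: []byte("stale")},
	}
	// The puts and deletes are applied atomically in a single write.
	return db.Write(makeBatchWithOps(ops), nil)
}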
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
func (s *Store) StoreLogs(logs []*raft.Log) error {
	batch := new(leveldb.Batch)
	for _, log := range logs {
		key := uint64ToBytes(log.Index)
		val, err := encodeMsgPack(log)
		if err != nil {
			return err
		}
		batch.Put(key, val.Bytes())
	}
	return s.db.Write(batch, nil)
}
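// uint64ToBytes is referenced by StoreLogs but not shown here; a minimal
// sketch, assuming the usual big-endian encoding so that leveldb's
// lexicographic key order matches raft log index order.
func uint64ToBytes(u uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, u)
	return buf
}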
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKey(nil, "")[:1+64]), nil)
	defer iter.Release()
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}
func (ldb *LevelDB) PutHash(key []byte, hash map[string][]byte, expireAt *time.Time) {
	metaKey := encodeMetaKey(key)
	batch := new(leveldb.Batch)
	batch.Put(metaKey, encodeMetadata(Hash, expireAt))
	for k, v := range hash {
		fieldKey := encodeHashFieldKey(key, []byte(k))
		batch.Put(fieldKey, v)
	}
	if err := ldb.db.Write(batch, nil); err != nil {
		panic(err)
	}
}
// Delete all states for a metric name.
// This operation is currently only used for cleaning.
func (db *DB) Delete(name string) error {
	// The metric name is the key prefix.
	iter := db.db.NewIterator(util.BytesPrefix([]byte(name)), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if batch.Len() > 0 {
		return db.db.Write(batch, nil)
	}
	return nil
}
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	d.Chk.True(l.internalLevelDBStore != nil, "Cannot use LevelDBStore after Close().")
	l.versionSetOnce.Do(l.setVersIfUnset)
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
// processChange applies a ChangeList to the database.
func (d *DriveDB) processChange(c *gdrive.ChangeList) error {
	if c == nil {
		return nil
	}

	// If we read zero items, there's no work to do, and we're probably synced.
	if len(c.Items) == 0 {
		if d.lastChangeId() >= c.LargestChangeId {
			d.synced.Broadcast()
		}
		return nil
	}

	log.Printf("processing %v/%v, %v changes", d.lastChangeId(), c.LargestChangeId, len(c.Items))

	batch := new(leveldb.Batch)
	for _, i := range c.Items {
		if i.File == nil {
			debug.Printf(" %s: deleted", i.FileId)
		} else {
			debug.Printf(" %s: %q size:%v version:%v labels:%#v", i.FileId, i.File.Title, i.File.FileSize, i.File.Version, i.File.Labels)
		}
		batch.Reset()

		// Update leveldb.
		inode, _ := d.InodeForFileId(i.FileId)
		d.lruCache.Remove(inode)
		// TODO: don't delete trashed/hidden files? ".trash" folder?
		if i.Deleted || i.File.Labels.Trashed || i.File.Labels.Hidden {
			d.RemoveFileById(i.FileId, batch)
		} else {
			d.UpdateFile(batch, i.File)
		}

		// Update the checkpoint, which now encompasses one additional change.
		d.setLastChangeId(i.Id)
		err := d.writeCheckpoint(batch)
		if err != nil {
			return err
		}

		// Commit
		err = d.db.Write(batch, nil)
		if err != nil {
			return err
		}
	}

	// Signal we're synced, if we are.
	if d.lastChangeId() >= c.LargestChangeId {
		d.synced.Broadcast()
	}
	return nil
}
func setItems(db *leveldb.DB) error {
	batch := new(leveldb.Batch)
	for _, k1 := range keys {
		k2 := randomData(2)
		// k2 -> data
		batch.Put(k2, randomData(42))
		// k1 -> k2
		batch.Put(k1, k2)
	}
	if testing.Verbose() {
		log.Printf("batch write (set) %p", batch)
	}
	return db.Write(batch, nil)
}
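// randomData is used by setItems and clearItems but not shown here; a minimal
// sketch, assuming it simply returns n bytes of random test data.
func randomData(n int) []byte {
	buf := make([]byte, n)
	rand.Read(buf) // math/rand suffices for test data
	return buf
}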