// PersistReceivedRecords saves the info about which records we've already seen.
func (p *LevelDBPersister) PersistReceivedRecords(comp StreamID, records []*Record) error {
	batch := new(leveldb.Batch)
	for _, r := range records {
		batch.Put([]byte(fmt.Sprintf(receivedKeyFormat, comp, r.ID)), nil)
	}
	return p.db.Write(batch, nil)
}
func msetnx(v resp.CommandArgs, ex *CommandExtras) error {
	if len(v) == 0 || len(v)%2 != 0 {
		return resp.NewError(ErrFmtWrongNumberArgument, "msetnx").WriteTo(ex.Buffer)
	}
	batch := new(leveldb.Batch)
	for i := 0; i < len(v); i += 2 {
		batch.Put(v[i], v[i+1])
	}
	ex.DB.Lock()
	defer ex.DB.Unlock()
	for i := 0; i < len(v); i += 2 {
		_, err := ex.DB.Get(v[i])
		if err != nil && err != leveldb.ErrNotFound {
			return err
		}
		if err != leveldb.ErrNotFound {
			// At least one key already exists, so nothing is written.
			return resp.ZeroInteger.WriteTo(ex.Buffer)
		}
	}
	if err := ex.DB.WriteBatch(batch); err != nil {
		return err
	}
	return resp.OneInteger.WriteTo(ex.Buffer)
}
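// The msetnx example above stages all puts first, then verifies under the
// store's lock that none of the keys exist before committing. A minimal
// standalone sketch of the same check-then-write shape against a bare
// goleveldb handle (db, pairs, and putAllIfAbsent are illustrative names;
// without external locking the existence check is racy):
func putAllIfAbsent(db *leveldb.DB, pairs [][]byte) (bool, error) {
	batch := new(leveldb.Batch)
	for i := 0; i+1 < len(pairs); i += 2 {
		batch.Put(pairs[i], pairs[i+1])
	}
	for i := 0; i < len(pairs); i += 2 {
		if _, err := db.Get(pairs[i], nil); err == nil {
			return false, nil // a key already exists; write nothing
		} else if err != leveldb.ErrNotFound {
			return false, err
		}
	}
	return true, db.Write(batch, nil)
}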
func (db *DB) SsPut(key, member []byte, score uint64) *skv.Reply {
	batch := new(leveldb.Batch)

	if prev := db.SsGet(key, member); prev.Status == skv.ReplyOK && prev.Uint64() != score {
		batch.Delete(skv.SortSetsNsScoreKey(key, member, prev.Uint64()))
	} else if prev.Status == skv.ReplyNotFound {
		db.RawIncrby(skv.SortSetsNsLengthKey(key), 1)
	}

	batch.Put(skv.SortSetsNsScoreKey(key, member, score), []byte{})
	batch.Put(skv.SortSetsNsEntryKey(key, member), []byte(strconv.FormatUint(score, 10)))

	rpl := skv.NewReply("")
	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}
	return rpl
}
// PutReceipts stores the receipts in the current database.
func PutReceipts(db common.Database, receipts types.Receipts) error {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)
	for _, receipt := range receipts {
		storageReceipt := (*types.ReceiptForStorage)(receipt)
		bytes, err := rlp.EncodeToBytes(storageReceipt)
		if err != nil {
			return err
		}
		if batchWrite {
			batch.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
		} else {
			err = db.Put(append(receiptsPre, receipt.TxHash[:]...), bytes)
			if err != nil {
				return err
			}
		}
	}
	if db, ok := db.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(batch, nil); err != nil {
			return err
		}
	}
	return nil
}
func (l *LevelDb) Put(id DbKey, aliases []DbKey, value []byte) error {
	// Lazy open
	if err := l.open(); err != nil {
		return err
	}
	batch := new(leveldb.Batch)
	idb := id.ToBytes("kv")
	batch.Put(idb, value)
	for _, alias := range aliases {
		batch.Put(alias.ToBytes("lo"), idb)
	}
	err := l.db.Write(batch, nil)
	// If the file is corrupt, just nuke it and act like we didn't find anything.
	if l.nukeIfCorrupt(err) {
		err = nil
	}
	return err
}
func (self dbSync) saveInDb(mult map[string]string) {
	batch := new(leveldb.Batch)
	for k, v := range mult {
		batch.Put([]byte(k), []byte(v))
	}
	// Note: the write error is discarded; callers cannot observe failure.
	self.Db.Write(batch, nil)
}
// Place notes the presence of a blob at a particular location.
func (d *DB) Place(ref, location, ct string, dependencies []string) (err error) {
	b := new(leveldb.Batch)
	// TODO(dichro): duplicates are interesting, but pretty rare,
	// so probably not worth tracking?
	b.Put(pack(found, ref), pack(location))
	b.Put(pack(last), pack(location))
	if ct != "" {
		b.Put(pack(camliType, ct, ref), nil)
	}
	for _, dep := range dependencies {
		b.Put(pack(parent, dep, ref), nil)
		// TODO(dichro): should these always be looked up
		// inline? Maybe a post-scan would be faster for bulk
		// insert?
		if ok, _ := d.db.Has(pack(found, dep), nil); !ok {
			b.Put(pack(missing, dep, ref), nil)
		}
	}
	it := d.db.NewIterator(&util.Range{
		Start: pack(missing, ref, start),
		Limit: pack(missing, ref, limit),
	}, nil)
	defer it.Release()
	for it.Next() {
		b.Delete(it.Key())
	}
	if err := it.Error(); err != nil {
		fmt.Println(err)
	}
	return d.db.Write(b, nil)
}
func (db *DB) _raw_ssttlat_put(ns byte, key []byte, ttlat uint64) bool {
	if ttlat == 0 {
		return true
	}
	key = skv.RawNsKeyConcat(ns, key)
	batch := new(leveldb.Batch)

	if prev := db.RawGet(skv.RawTtlEntry(key)); prev.Status == skv.ReplyOK {
		if prev_ttlat := dbutil.BytesToUint64(prev.Bytes()); prev_ttlat != ttlat {
			batch.Delete(skv.RawTtlQueue(key, prev_ttlat))
		}
	}

	batch.Put(skv.RawTtlQueue(key, ttlat), []byte{})
	batch.Put(skv.RawTtlEntry(key), dbutil.Uint64ToBytes(ttlat))

	if err := db.ldb.Write(batch, nil); err != nil {
		return false
	}
	return true
}
// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		if file.IsDirectory() {
			continue
		}
		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}
		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
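// Update above caps batch size by flushing and resetting once Batch.Len()
// crosses maxBatchSize, which bounds memory during large updates. A minimal
// sketch of that flush-and-reset pattern in isolation (db, kvs, writeChunked,
// and the threshold value are assumptions for illustration):
func writeChunked(db *leveldb.DB, kvs map[string][]byte) error {
	const maxBatchSize = 1024 // illustrative threshold
	batch := new(leveldb.Batch)
	for k, v := range kvs {
		batch.Put([]byte(k), v)
		if batch.Len() > maxBatchSize {
			if err := db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset() // reuse the batch for the next chunk
		}
	}
	return db.Write(batch, nil) // flush the remainder
}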
func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
	defer runtime.GC()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	l.Debugf("created snapshot %p", snap)
	defer func() {
		l.Debugf("close snapshot %p", snap)
		snap.Release()
	}()

	start := globalKey(folder, nil)
	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	batch := new(leveldb.Batch)
	l.Debugf("new batch %p", batch)
	var fk []byte
	for dbi.Next() {
		gk := dbi.Key()
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}

		// Check the global version list for consistency. An issue in previous
		// versions of goleveldb could result in reordered writes so that
		// there are global entries pointing to no longer existing files. Here
		// we find those and clear them out.
		name := globalKeyName(gk)
		var newVL versionList
		for _, version := range vl.versions {
			fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name)
			l.Debugf("snap.Get %p %x", snap, fk)
			_, err := snap.Get(fk, nil)
			if err == leveldb.ErrNotFound {
				continue
			}
			if err != nil {
				panic(err)
			}
			newVL.versions = append(newVL.versions, version)
		}

		if len(newVL.versions) != len(vl.versions) {
			l.Infof("db repair: rewriting global version list for %x %x", gk[1:1+64], gk[1+64:])
			batch.Put(dbi.Key(), newVL.MustMarshalXDR())
		}
	}

	l.Debugf("db check completed for %q", folder)
	db.Write(batch, nil)
}
func (server *Server) newKeys(uid *[32]byte, keyList [][]byte) error {
	batch := new(leveldb.Batch)
	for _, key := range keyList {
		keyHash := sha256.Sum256(key)
		dbKey := append(append([]byte{'k'}, uid[:]...), keyHash[:]...)
		batch.Put(dbKey, key)
	}
	return server.database.Write(batch, wO_sync)
}
// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folder, file))
	batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
	return f.db.Write(batch, nil)
}
func main() {
	olddb, err := Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	newdb, err := Open(os.Args[2])
	if err != nil {
		panic(err)
	}

	iter := olddb.NewIterator(&util.Range{Start: nil, Limit: nil}, nil)
	totalBitset := 0
	totalMsgpack := 0
	rows := 0
	batch := new(leveldb.Batch)
	for iter.Next() {
		key := iter.Key()
		value := iter.Value()
		if bytes.HasPrefix(key, []byte("doc:")) {
			batch.Put(key, value)
			continue
		}
		bs := bitset.New(8)
		bs.ReadFrom(bytes.NewBuffer(value))
		var docIDs []uint
		for i, e := bs.NextSet(0); e; i, e = bs.NextSet(i + 1) {
			docIDs = append(docIDs, i)
		}
		b, err := msgpack.Marshal(delta_encode(docIDs))
		if err != nil {
			panic(err)
		}
		//fmt.Printf("bitset size is %d\n", len(value))
		//fmt.Printf("msgpack size is %d\n", len(b))
		totalBitset += len(value)
		totalMsgpack += len(b)
		batch.Put(key, b)
		if rows%10000 == 0 {
			log.Print("rows ", rows)
			newdb.Write(batch, nil)
			batch = new(leveldb.Batch)
		}
		rows++
	}
	fmt.Printf("bitset size is %d\n", totalBitset)
	fmt.Printf("msgpack size is %d\n", totalMsgpack)
	newdb.Write(batch, nil)
	newdb.CompactRange(util.Range{Start: nil, Limit: nil})
}
func makeBatchWithOps(ops []AbstractBatchOperation) *leveldb.Batch {
	batch := new(leveldb.Batch)
	for _, op := range ops {
		switch op.kind {
		case "PUT":
			batch.Put(op.key, op.value)
		case "DELETE":
			batch.Delete(op.key)
		}
	}
	return batch
}
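// A sketch of how the batch built by makeBatchWithOps might be applied,
// assuming AbstractBatchOperation is a plain struct with kind/key/value
// fields and db is a *leveldb.DB (both assumptions for illustration):
func applyOps(db *leveldb.DB) error {
	ops := []AbstractBatchOperation{
		{kind: "PUT", key: []byte("a"), value: []byte("1")},
		{kind: "DELETE", key: []byte("b")},
	}
	// Both operations commit atomically in a single write.
	return db.Write(makeBatchWithOps(ops), nil)
}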
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
func (s *Store) StoreLogs(logs []*raft.Log) error {
	batch := new(leveldb.Batch)
	for _, log := range logs {
		key := uint64ToBytes(log.Index)
		val, err := encodeMsgPack(log)
		if err != nil {
			return err
		}
		batch.Put(key, val.Bytes())
	}
	return s.db.Write(batch, nil)
}
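// StoreLogs keys each entry by uint64ToBytes(log.Index). Assuming that
// helper encodes big-endian (a sketch of such a helper below), the keys
// sort by index under LevelDB's default bytewise comparator, so range
// scans return log entries in order:
func uint64ToBytes(u uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, u) // big-endian preserves numeric order
	return buf
}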
func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
	d.Chk.True(l.internalLevelDBStore != nil, "Cannot use LevelDBStore after Close().")
	l.versionSetOnce.Do(l.setVersIfUnset)
	numBytes := 0
	b := new(leveldb.Batch)
	for _, c := range chunks {
		data := snappy.Encode(nil, c.Data())
		numBytes += len(data)
		b.Put(l.toChunkKey(c.Hash()), data)
	}
	l.putBatch(b, numBytes)
	return
}
func (ldb *LevelDB) PutHash(key []byte, hash map[string][]byte, expireAt *time.Time) {
	metaKey := encodeMetaKey(key)
	batch := new(leveldb.Batch)
	batch.Put(metaKey, encodeMetadata(Hash, expireAt))
	for k, v := range hash {
		fieldKey := encodeHashFieldKey(key, []byte(k))
		batch.Put(fieldKey, v)
	}
	if err := ldb.db.Write(batch, nil); err != nil {
		panic(err)
	}
}
func setItems(db *leveldb.DB) error {
	batch := new(leveldb.Batch)
	for _, k1 := range keys {
		k2 := randomData(2)
		// k2 -> data
		batch.Put(k2, randomData(42))
		// k1 -> k2
		batch.Put(k1, k2)
	}
	if testing.Verbose() {
		log.Printf("batch write (set) %p", batch)
	}
	return db.Write(batch, nil)
}
// writeCheckpoint writes the checkpoint to the db, optionally using a batch.
func (d *DriveDB) writeCheckpoint(batch *leveldb.Batch) error {
	d.Lock()
	cpt := d.cpt
	d.Unlock()
	bytes, err := encode(cpt)
	if err != nil {
		log.Printf("error encoding checkpoint: %v", err)
		return err
	}
	if batch != nil {
		batch.Put(internalKey("checkpoint"), bytes)
		return nil
	}
	return d.db.Put(internalKey("checkpoint"), bytes, nil)
}
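// writeCheckpoint shows the "optional batch" pattern: callers that are
// already building a batch get the checkpoint staged into it, while others
// get an immediate write. Factored into a generic helper, the pattern looks
// roughly like this (putMaybeBatched is an illustrative name):
func putMaybeBatched(db *leveldb.DB, batch *leveldb.Batch, key, value []byte) error {
	if batch != nil {
		batch.Put(key, value) // staged; the caller commits later
		return nil
	}
	return db.Put(key, value, nil) // immediate write
}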
func (self *LDBDatabase) Flush() error {
	self.mu.Lock()
	defer self.mu.Unlock()

	batch := new(leveldb.Batch)
	for key, value := range self.queue {
		batch.Put([]byte(key), rle.Compress(value))
	}
	self.makeQueue() // reset the queue

	glog.V(logger.Detail).Infoln("Flush database: ", self.fn)
	return self.db.Write(batch, nil)
}
// fixDocId fixes an issue where the max doc id was stored under max_id
// instead of doc:max_id, so a search for 109.97.120.95 would find it.
func (ls *LevelDBStore) fixDocId() {
	v, err := ls.db.Get([]byte("max_id"), nil)
	if err != nil {
		// Either the key does not exist or it could not be read;
		// in both cases there is nothing to rewrite.
		return
	}
	// The key max_id exists; rewrite it to doc:max_id.
	log.Println("FIX: Renaming max_id to doc:max_id")
	batch := new(leveldb.Batch)
	batch.Put([]byte("doc:max_id"), v)
	batch.Delete([]byte("max_id"))
	ls.db.Write(batch, nil)
}
// Add files to the block map, ignoring any deleted or invalid files.
func (m *BlockMap) Add(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	for _, file := range files {
		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
			continue
		}
		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			batch.Put(m.blockKey(block.Hash, file.Name), buf)
		}
	}
	return m.db.Write(batch, nil)
}
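// Note that Add reuses buf across iterations. This is safe because, to the
// best of my reading of goleveldb, Batch.Put copies its key and value into
// the batch's internal buffer rather than retaining the slices; a quick
// standalone illustration of that assumption:
func reuseBufExample() {
	buf := make([]byte, 4)
	batch := new(leveldb.Batch)
	binary.BigEndian.PutUint32(buf, 1)
	batch.Put([]byte("k1"), buf)
	binary.BigEndian.PutUint32(buf, 2) // does not affect the staged "k1" record
	batch.Put([]byte("k2"), buf)
}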
func (d *DriveDB) inodeForFileIdImpl(fileId string) (uint64, error) {
	var inode uint64
	batch := new(leveldb.Batch)
	// Check if an inode has been allocated for this fileId.
	if fileId == d.rootId {
		inode = 1
	} else {
		err := d.get(fileIdToInodeKey(fileId), &inode)
		if err != nil {
			// If not, allocate an inode number.
			inode, err = d.nextInode(batch)
			if err != nil {
				return 0, err
			}
		}
	}
	// Check that the opposite mapping is present and correct.
	var currentId string
	err := d.get(inodeToFileIdKey(inode), &currentId)
	if err == nil {
		if currentId == fileId {
			return inode, nil
		}
		debug.Printf("inodeToFileId mapping wrong for %v, expected %v got %v", inode, fileId, currentId)
	}
	encodedInode, err := encode(inode)
	if err != nil {
		return 0, err
	}
	encodedFileId, err := encode(fileId)
	if err != nil {
		return 0, err
	}
	// Create forward and reverse mappings.
	batch.Put(fileIdToInodeKey(fileId), encodedInode)
	batch.Put(inodeToFileIdKey(inode), encodedFileId)
	if err := d.db.Write(batch, nil); err != nil {
		return 0, err
	}
	return inode, nil
}
func (ldb *LevelDB) PutString(key []byte, value []byte, expireAt *time.Time) {
	metaKey := encodeMetaKey(key)
	valueKey := encodeStringKey(key)
	exists, tipe, _ := ldb.has(metaKey)
	if exists && tipe != String {
		// If the existing data is not a string, delete it first.
		ldb.delete([][]byte{metaKey, valueKey})
	}
	batch := new(leveldb.Batch)
	batch.Put(metaKey, encodeMetadata(String, expireAt))
	batch.Put(valueKey, value)
	if err := ldb.db.Write(batch, nil); err != nil {
		panic(err)
	}
}
func (ts *TripleStore) UpdateValueKeyBy(name string, amount int, batch *leveldb.Batch) {
	value := &ValueData{name, int64(amount)}
	key := ts.createValueKeyFor(name)
	b, err := ts.db.Get(key, ts.readopts)

	// Error getting the node from the database.
	if err != nil && err != leveldb.ErrNotFound {
		glog.Errorf("Error reading Value %s from the DB\n", name)
		return
	}

	// Node exists in the database -- unmarshal and update.
	if b != nil && err != leveldb.ErrNotFound {
		err = json.Unmarshal(b, value)
		if err != nil {
			glog.Errorln("Error: couldn't reconstruct value ", err)
			return
		}
		value.Size += int64(amount)
	}

	// Are we deleting something?
	if amount < 0 {
		if value.Size <= 0 {
			if batch == nil {
				ts.db.Delete(key, ts.writeopts)
			} else {
				batch.Delete(key)
			}
			return
		}
	}

	// Repackage and rewrite.
	bytes, err := json.Marshal(&value)
	if err != nil {
		glog.Errorf("Couldn't write to buffer for value %s\n %s", name, err)
		return
	}
	if batch == nil {
		ts.db.Put(key, bytes, ts.writeopts)
	} else {
		batch.Put(key, bytes)
	}
}
func (db *GoLevelDB) Commit(bt *engine.Batch) error {
	if bt.OpList.Len() == 0 {
		return nil
	}
	wb := new(leveldb.Batch)
	for e := bt.OpList.Front(); e != nil; e = e.Next() {
		switch op := e.Value.(type) {
		case *engine.BatchOpSet:
			wb.Put(op.Key, op.Value)
		case *engine.BatchOpDel:
			wb.Delete(op.Key)
		default:
			panic(fmt.Sprintf("unsupported batch operation: %+v", op))
		}
	}
	return errors.Trace(db.lvdb.Write(wb, db.wopt))
}
func (ls *LeveldbStore) SetBatch(keylist []interface{}, valuelist []interface{}) (bool, error) {
	if len(keylist) != len(valuelist) {
		return false, errors.New("key and value counts do not match")
	}
	if len(keylist) == 0 && len(valuelist) == 0 {
		return true, nil
	}
	batchwrite := new(leveldb.Batch)
	for index, keyvalue := range keylist {
		batchwrite.Put(keyvalue.([]byte), valuelist[index].([]byte))
	}
	if err := ls.DB.Write(batchwrite, nil); err != nil {
		return false, err
	}
	return true, nil
}
// PutTransactions stores the transactions in the given database.
func PutTransactions(db common.Database, block *types.Block, txs types.Transactions) {
	batch := new(leveldb.Batch)
	_, batchWrite := db.(*ethdb.LDBDatabase)

	for i, tx := range block.Transactions() {
		rlpEnc, err := rlp.EncodeToBytes(tx)
		if err != nil {
			glog.V(logger.Debug).Infoln("Failed encoding tx", err)
			return
		}
		if batchWrite {
			batch.Put(tx.Hash().Bytes(), rlpEnc)
		} else {
			db.Put(tx.Hash().Bytes(), rlpEnc)
		}

		var txExtra struct {
			BlockHash  common.Hash
			BlockIndex uint64
			Index      uint64
		}
		txExtra.BlockHash = block.Hash()
		txExtra.BlockIndex = block.NumberU64()
		txExtra.Index = uint64(i)
		rlpMeta, err := rlp.EncodeToBytes(txExtra)
		if err != nil {
			glog.V(logger.Debug).Infoln("Failed encoding tx meta data", err)
			return
		}
		if batchWrite {
			batch.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
		} else {
			db.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta)
		}
	}

	if db, ok := db.(*ethdb.LDBDatabase); ok {
		if err := db.LDB().Write(batch, nil); err != nil {
			glog.V(logger.Error).Infoln("db write err:", err)
		}
	}
}
func (qs *QuadStore) UpdateValueKeyBy(name quad.Value, amount int64, batch *leveldb.Batch) error {
	value := proto.NodeData{
		Value: proto.MakeValue(name),
		Size:  amount,
	}
	key := createValueKeyFor(name)
	b, err := qs.db.Get(key, qs.readopts)

	// Error getting the node from the database.
	if err != nil && err != leveldb.ErrNotFound {
		clog.Errorf("Error reading Value %s from the DB.", name)
		return err
	}

	// Node exists in the database -- unmarshal and update.
	if b != nil && err != leveldb.ErrNotFound {
		var oldvalue proto.NodeData
		err = oldvalue.Unmarshal(b)
		if err != nil {
			clog.Errorf("Error: could not reconstruct value: %v", err)
			return err
		}
		oldvalue.Size += amount
		value = oldvalue
	}

	// Are we deleting something?
	if value.Size <= 0 {
		value.Size = 0
	}

	// Repackage and rewrite.
	bytes, err := value.Marshal()
	if err != nil {
		clog.Errorf("could not write to buffer for value %s: %s", name, err)
		return err
	}
	if batch == nil {
		qs.db.Put(key, bytes, qs.writeopts)
	} else {
		batch.Put(key, bytes)
	}
	return nil
}