Example #1
File: t_ss.go Project: lessos/lessdb
func (db *DB) SsPut(key, member []byte, score uint64) *skv.Reply {

	batch := new(leveldb.Batch)

	// If the member already exists with a different score, drop the old score
	// index entry; if it is a new member, bump the set's length counter.
	if prev := db.SsGet(key, member); prev.Status == skv.ReplyOK && prev.Uint64() != score {

		batch.Delete(skv.SortSetsNsScoreKey(key, member, prev.Uint64()))

	} else if prev.Status == skv.ReplyNotFound {
		db.RawIncrby(skv.SortSetsNsLengthKey(key), 1)
	}

	// Write the new score index entry.
	batch.Put(skv.SortSetsNsScoreKey(key, member, score), []byte{})

	// Write the member entry holding the score.
	batch.Put(skv.SortSetsNsEntryKey(key, member), []byte(strconv.FormatUint(score, 10)))

	rpl := skv.NewReply("")

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
Example #2
File: ldb.go Project: vburenin/firempq
// DeleteDataWithPrefix deletes all service data such as service metadata, items and payloads.
func (ds *LevelDBStorage) DeleteDataWithPrefix(prefix string) int {
	ds.FlushCache()
	ds.saveLock.Lock()
	defer ds.saveLock.Unlock()

	limitCounter := 0
	total := 0
	iter := ds.IterData(prefix)
	wb := new(leveldb.Batch)

	for iter.Valid() {
		total++
		wb.Delete(iter.GetKey())
		limitCounter++
		// Flush the batch every 1000 deletes to bound its size.
		if limitCounter >= 1000 {
			limitCounter = 0
			ds.db.Write(wb, nil)
			wb.Reset()
		}
		iter.Next()
	}

	ds.db.Write(wb, nil)
	return total
}
Example #3
func (self dbSync) multiDeleteDb(ks []string) {
	batch := new(leveldb.Batch)
	for _, k := range ks {
		batch.Delete([]byte(k))
	}
	self.Db.Write(batch, nil)
}
Example #4
File: db.go Project: dichro/cameloff
// Place notes the presence of a blob at a particular location.
func (d *DB) Place(ref, location, ct string, dependencies []string) (err error) {
	b := new(leveldb.Batch)
	// TODO(dichro): duplicates are interesting, but pretty rare,
	// so probably not worth tracking?
	b.Put(pack(found, ref), pack(location))
	b.Put(pack(last), pack(location))
	if ct != "" {
		b.Put(pack(camliType, ct, ref), nil)
	}
	for _, dep := range dependencies {
		b.Put(pack(parent, dep, ref), nil)
		// TODO(dichro): should these always be looked up
		// inline? Maybe a post-scan would be faster for bulk
		// insert?
		if ok, _ := d.db.Has(pack(found, dep), nil); !ok {
			b.Put(pack(missing, dep, ref), nil)
		}
	}
	it := d.db.NewIterator(&util.Range{
		Start: pack(missing, ref, start),
		Limit: pack(missing, ref, limit),
	}, nil)
	defer it.Release()
	for it.Next() {
		b.Delete(it.Key())
	}
	if err := it.Error(); err != nil {
		fmt.Println(err)
	}
	err = d.db.Write(b, nil)
	return
}
Example #5
// childFileIds returns a map containing IDs of all Files that have parent
// refs to the given file.  The returned map keys are IDs, and the map values
// indicate if the child is a directory.
func (d *DriveDB) childFileIds(fileId string) (map[string]bool, error) {
	ids := make(map[string]bool)
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKeyPrefix(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		if gdriveFile, err := d.FileById(cid); err != nil {
			log.Printf("unknown fileId %v: %v", fileId, err)
			batch.Delete(iter.Key())
		} else {
			ids[cid] = gdriveFile.MimeType == driveFolderMimeType
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example #6
// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	buf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		if file.IsDirectory() {
			continue
		}

		if file.IsDeleted() || file.IsInvalid() {
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
			continue
		}

		for i, block := range file.Blocks {
			binary.BigEndian.PutUint32(buf, uint32(i))
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Put(key, buf)
		}
	}
	return m.db.Write(batch, nil)
}
Example #7
File: t_raw.go Project: lessos/lessdb
func (db *DB) _raw_ssttlat_put(ns byte, key []byte, ttlat uint64) bool {

	if ttlat == 0 {
		return true
	}

	key = skv.RawNsKeyConcat(ns, key)

	batch := new(leveldb.Batch)

	// If a TTL entry already exists with a different expire time, drop the old queue entry.
	if prev := db.RawGet(skv.RawTtlEntry(key)); prev.Status == skv.ReplyOK {
		if prev_ttlat := dbutil.BytesToUint64(prev.Bytes()); prev_ttlat != ttlat {
			batch.Delete(skv.RawTtlQueue(key, prev_ttlat))
		}
	}

	// Queue entry keyed by the new expire time.
	batch.Put(skv.RawTtlQueue(key, ttlat), []byte{})

	// Entry recording this key's expire time.
	batch.Put(skv.RawTtlEntry(key), dbutil.Uint64ToBytes(ttlat))

	if err := db.ldb.Write(batch, nil); err != nil {
		return false
	}

	return true
}
Example #8
// ChildFileIds returns the IDs of all Files that have parent refs to the given file.
func (d *DriveDB) ChildFileIds(fileId string) ([]string, error) {
	var ids []string
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKey(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		found, err := d.db.Has(fileKey(cid), nil)
		if err == nil && found {
			ids = append(ids, cid)
		} else {
			batch.Delete(iter.Key())
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example #9
File: ldb.go Project: vburenin/firempq
func (ds *LevelDBStorage) DeleteData(id ...string) error {
	wb := new(leveldb.Batch)
	for _, i := range id {
		wb.Delete(enc.UnsafeStringToBytes(i))
	}
	return ds.db.Write(wb, nil)
}
Example #10
func clearItems(db *leveldb.DB) error {
	snap, err := db.GetSnapshot()
	if err != nil {
		return err
	}
	defer snap.Release()

	// Iterate over k2

	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
	defer it.Release()

	batch := new(leveldb.Batch)
	for it.Next() {
		k1 := it.Key()
		k2 := it.Value()

		// k2 should exist
		_, err := snap.Get(k2, nil)
		if err != nil {
			return err
		}

		// Delete the k1 => k2 mapping first
		batch.Delete(k1)
		// Then the k2 => data mapping
		batch.Delete(k2)
	}
	if testing.Verbose() {
		log.Printf("batch write (clear) %p", batch)
	}
	return db.Write(batch, nil)
}
Example #11
func (server *Server) deleteMessages(uid *[32]byte, messageList []*[32]byte) error {
	batch := new(leveldb.Batch)
	for _, messageID := range messageList {
		key := append(append([]byte{'m'}, uid[:]...), messageID[:]...)
		batch.Delete(key)
	}
	return server.database.Write(batch, wO_sync)
}
Example #12
// Discard block map state, removing the given files
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	for _, file := range files {
		for _, block := range file.Blocks {
			batch.Delete(m.blockKey(block.Hash, file.Name))
		}
	}
	return m.db.Write(batch, nil)
}
Example #13
// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(index))

	batch := new(leveldb.Batch)
	batch.Delete(blockKeyInto(nil, oldHash, folder, file))
	batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
	return f.db.Write(batch, nil)
}
Example #14
File: leveldb.go Project: wshn13/rodis
func (ldb *LevelDB) delete(keys [][]byte) {
	batch := new(leveldb.Batch)
	for _, key := range keys {
		batch.Delete(key)
	}
	if err := ldb.db.Write(batch, nil); err != nil && err != leveldb.ErrNotFound {
		panic(err)
	}
}
Example #15
func makeBatchWithOps(ops []AbstractBatchOperation) *leveldb.Batch {
	batch := new(leveldb.Batch)
	for _, op := range ops {
		if op.kind == "PUT" {
			batch.Put(op.key, op.value)
		} else if op.kind == "DELETE" {
			batch.Delete(op.key)
		}
	}
	return batch
}
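
A hedged usage sketch for the helper above: the AbstractBatchOperation definition is not shown in the example, so the struct below is an assumed shape inferred from the field accesses (kind, key, value), and applyOps is a hypothetical caller.

// Assumed shape of AbstractBatchOperation, inferred from op.kind, op.key and op.value above.
type AbstractBatchOperation struct {
	kind  string
	key   []byte
	value []byte
}

// applyOps is a hypothetical caller: build the batch, then commit it atomically in one Write.
func applyOps(db *leveldb.DB, ops []AbstractBatchOperation) error {
	return db.Write(makeBatchWithOps(ops), nil)
}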
Example #16
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKey(nil, "")[:1+64]), nil)
	defer iter.Release()
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}
Example #17
File: db.go Project: nrzipher/banshee
// Delete all states for a metric name.
// This operation is currently only used for cleaning.
func (db *DB) Delete(name string) error {
	// Name must be the key prefix
	iter := db.db.NewIterator(util.BytesPrefix([]byte(name)), nil)
	batch := new(leveldb.Batch)
	for iter.Next() {
		key := iter.Key()
		batch.Delete(key)
	}
	if batch.Len() > 0 {
		return db.db.Write(batch, nil)
	}
	return nil
}
Example #18
func (db *DB) ObjectDocDel(fold, key string) *skv.Reply {

	var (
		rpl     = skv.NewReply("")
		opath   = skv.NewObjectPathKey(fold, key)
		bkey    = opath.EntryIndex()
		prevobj *skv.Object
		previdx = map[uint8]skv.ObjectDocSchemaIndexEntryBytes{}
	)

	if rs := db.RawGet(bkey); rs.Status == skv.ReplyNotFound {

		return rpl

	} else if rs.Status != skv.ReplyOK {

		return rs

	} else {

		prevobj = rs.Object()

		var prev map[string]interface{}

		if err := prevobj.Data.JsonDecode(&prev); err == nil {
			previdx = skv.ObjectDocIndexDataExport(_obj_doc_indexes, opath.Fold, prev)
		}
	}

	batch := new(leveldb.Batch)

	for piKey, piEntry := range previdx {
		batch.Delete(append(append(skv.ObjectDocIndexFieldPrefix(opath.Fold, piKey), piEntry.Data...), opath.Field...))
	}

	batch.Delete(bkey)

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	} else {
		db._obj_meta_sync(prevobj.Meta.Type, &prevobj.Meta, opath, -1, 0, _obj_options_def)

		// if _obj_event_handler != nil {
		//     _obj_event_handler(opath, skv.ObjectEventDeleted, 0)
		// }
	}

	return rpl
}
Example #19
// clearDataCache removes the leveldb block cache records, but leaves the actual
// blocks on disk. The blocks will be recycled, so this is OK.
func (d *DriveDB) clearDataCache(fileId string) {
	var ids []string
	d.iters.Add(1)
	iter := d.db.NewIterator(util.BytesPrefix(cacheMapKeyPrefix(fileId)), nil)
	for iter.Next() {
		ids = append(ids, string(iter.Key()))
	}
	iter.Release()
	d.iters.Done()
	batch := new(leveldb.Batch)
	for _, id := range ids {
		batch.Delete([]byte(id))
	}
	d.db.Write(batch, nil)
}
Example #20
// fixDocId fixes an issue where the max docid was stored under max_id
// instead of doc:max_id, so a search for 109.97.120.95 would find it.
func (ls *LevelDBStore) fixDocId() {
	v, err := ls.db.Get([]byte("max_id"), nil)
	if err == leveldb.ErrNotFound {
		return
	}
	if err != nil {
		return
	}
	//key max_id exists, rewrite it to doc:max_id
	log.Println("FIX: Renaming max_id to doc:max_id")
	batch := new(leveldb.Batch)
	batch.Put([]byte("doc:max_id"), v)
	batch.Delete([]byte("max_id"))
	ls.db.Write(batch, nil)
}
Example #21
// DeleteRange implements raft.LogStore.
func (s *LevelDBStore) DeleteRange(min, max uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	var batch leveldb.Batch
	key := make([]byte, binary.Size(uint64(0)))
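	// Reusing the same key buffer across iterations is safe: goleveldb's
	// Batch.Delete copies the key bytes into the batch's internal buffer.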
	for n := min; n <= max; n++ {
		binary.BigEndian.PutUint64(key, n)
		batch.Delete(key)
	}
	if err := s.db.Write(&batch, nil); err != nil {
		return err
	}
	return nil
}
Example #22
// Delete a job with job id.
func (l Driver) Delete(jobID int64) (err error) {
	l.RWLocker.Lock()
	defer l.RWLocker.Unlock()
	var job driver.Job
	batch := new(leveldb.Batch)
	job, err = l.get(jobID)
	if err != nil {
		return
	}
	var strID = strconv.FormatInt(job.ID, 10)
	batch.Delete([]byte(PREFUNC + job.Func + ":" + job.Name))
	batch.Delete([]byte(PREJOB + strID))
	err = l.db.Write(batch, nil)
	l.cache.Remove(PREJOB + strID)
	return
}
Example #23
File: t_raw.go Project: lessos/lessdb
func (db *DB) RawDel(keys ...[]byte) *skv.Reply {

	rpl := skv.NewReply("")

	batch := new(leveldb.Batch)

	for _, key := range keys {
		batch.Delete(key)
	}

	if err := db.ldb.Write(batch, nil); err != nil {
		rpl.Status = err.Error()
	}

	return rpl
}
Example #24
func (s *Store) DeleteRange(min, max uint64) error {
	Range := util.Range{
		Start: uint64ToBytes(min),
		Limit: uint64ToBytes(max),
	}
	batch := new(leveldb.Batch)

	iter := s.db.NewIterator(&Range, nil)
	defer iter.Release()

	for iter.Next() {
		batch.Delete(iter.Key())
	}
	// leveldb's util.Range treats Limit as exclusive, so delete the max key explicitly.
	batch.Delete(uint64ToBytes(max))
	return s.db.Write(batch, nil)
}
Example #25
func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
	runtime.GC()

	start := deviceKey(folder, nil, nil)                                                  // before all folder/device files
	limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	for dbi.Next() {
		device := deviceKeyDevice(dbi.Key())
		var f FileInfoTruncated
		err := f.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}

		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			batch := new(leveldb.Batch)
			ldbRemoveFromGlobal(db, batch, folder, device, nil)
			batch.Delete(dbi.Key())
			db.Write(batch, nil)
			continue
		}

		if cont := fn(device, f); !cont {
			return
		}
	}
}
Example #26
func (ts *TripleStore) UpdateValueKeyBy(name string, amount int, batch *leveldb.Batch) {
	value := &ValueData{name, int64(amount)}
	key := ts.createValueKeyFor(name)
	b, err := ts.db.Get(key, ts.readopts)

	// Error getting the node from the database.
	if err != nil && err != leveldb.ErrNotFound {
		glog.Errorf("Error reading Value %s from the DB\n", name)
		return
	}

	// Node exists in the database -- unmarshal and update.
	if b != nil && err != leveldb.ErrNotFound {
		err = json.Unmarshal(b, value)
		if err != nil {
			glog.Errorln("Error: couldn't reconstruct value ", err)
			return
		}
		value.Size += int64(amount)
	}

	// Are we deleting something?
	if amount < 0 {
		if value.Size <= 0 {
			if batch == nil {
				ts.db.Delete(key, ts.writeopts)
			} else {
				batch.Delete(key)
			}
			return
		}
	}

	// Repackage and rewrite.
	bytes, err := json.Marshal(&value)
	if err != nil {
		glog.Errorf("Couldn't write to buffer for value %s\n %s", name, err)
		return
	}
	if batch == nil {
		ts.db.Put(key, bytes, ts.writeopts)
	} else {
		batch.Put(key, bytes)
	}
}
Example #27
// Discard block map state, removing the given files
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, file := range files {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		for _, block := range file.Blocks {
			key = m.blockKeyInto(key, block.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
Example #28
File: database.go Project: vebin/reborn
func (db *GoLevelDB) Commit(bt *engine.Batch) error {
	if bt.OpList.Len() == 0 {
		return nil
	}
	wb := new(leveldb.Batch)

	for e := bt.OpList.Front(); e != nil; e = e.Next() {
		switch op := e.Value.(type) {
		case *engine.BatchOpSet:
			wb.Put(op.Key, op.Value)
		case *engine.BatchOpDel:
			wb.Delete(op.Key)
		default:
			panic(fmt.Sprintf("unsupported batch operation: %+v", op))
		}
	}
	return errors.Trace(db.lvdb.Write(wb, db.wopt))
}
Example #29
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	batch := new(leveldb.Batch)
	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}

		batch.Delete(iter.Key())
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	return m.db.Write(batch, nil)
}
Example #30
// Reset removes all entries in this namespace.
func (n *NamespacedKV) Reset() {
	it := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
	defer it.Release()
	batch := new(leveldb.Batch)
	for it.Next() {
		batch.Delete(it.Key())
		if batch.Len() > batchFlushSize {
			if err := n.db.Write(batch, nil); err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	if batch.Len() > 0 {
		if err := n.db.Write(batch, nil); err != nil {
			panic(err)
		}
	}
}
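
The examples above all follow the same pattern: queue Put/Delete operations in a leveldb.Batch, commit them with a single db.Write, and for large deletions flush the batch periodically to bound its size. Below is a minimal self-contained sketch of that pattern, assuming the github.com/syndtr/goleveldb driver used throughout; the function name, database path, key prefix, and batch size are illustrative, not taken from any of the projects above.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// deleteByPrefix removes every key starting with prefix, committing the
// deletes in batches of at most maxBatch operations.
func deleteByPrefix(db *leveldb.DB, prefix []byte, maxBatch int) error {
	iter := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()

	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key()) // Batch.Delete copies the key, so the iterator's buffer may be reused.
		if batch.Len() >= maxBatch {
			if err := db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	if err := iter.Error(); err != nil {
		return err
	}
	if batch.Len() > 0 {
		return db.Write(batch, nil)
	}
	return nil
}

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := deleteByPrefix(db, []byte("cache:"), 1000); err != nil {
		log.Fatal(err)
	}
}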