Example #1
func ldbDropFolder(db *leveldb.DB, folder []byte) {
	runtime.GC()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	l.Debugf("created snapshot %p", snap)
	defer func() {
		l.Debugf("close snapshot %p", snap)
		snap.Release()
	}()

	// Remove all items related to the given folder from the device->file bucket
	dbi := snap.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil)
	for dbi.Next() {
		itemFolder := deviceKeyFolder(dbi.Key())
		if bytes.Equal(folder, itemFolder) {
			db.Delete(dbi.Key(), nil)
		}
	}
	dbi.Release()

	// Remove all items related to the given folder from the global bucket
	dbi = snap.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
	for dbi.Next() {
		itemFolder := globalKeyFolder(dbi.Key())
		if bytes.Equal(folder, itemFolder) {
			db.Delete(dbi.Key(), nil)
		}
	}
	dbi.Release()
}
Example #2
// childFileIds returns a map containing IDs of all Files that have parent
// refs to the given file.  The returned map keys are IDs, and the map values
// indicate if the child is a directory.
func (d *DriveDB) childFileIds(fileId string) (map[string]bool, error) {
	ids := make(map[string]bool)
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKeyPrefix(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		if gdriveFile, err := d.FileById(cid); err != nil {
			log.Printf("unknown fileId %v: %v", fileId, err)
			batch.Delete(iter.Key())
		} else {
			ids[cid] = gdriveFile.MimeType == driveFolderMimeType
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example #3
// Recent returns up to n of the most recently completed jobs (including
// failed ones).
func (d *DB) Recent(n int) ([]*Job, error) {
	it := d.db.NewIterator(util.BytesPrefix([]byte(finishPrefix)), nil)
	defer it.Release()

	// the jobs iterated over last are the most recent
	ids := []JobId{}
	for it.Next() {
		var id JobId
		copy(id[:], it.Value())
		ids = append(ids, id)
	}
	if err := it.Error(); err != nil {
		return nil, err
	}

	if len(ids) > n {
		ids = ids[len(ids)-n:]
	}

	jobs := make([]*Job, len(ids))
	for i, id := range ids {
		j, err := d.Get(id)
		if err != nil {
			return nil, err
		}
		jobs[i] = j
	}

	return jobs, nil
}
Example #4
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	for dbi.Next() {
		device := db.deviceKeyDevice(dbi.Key())
		var f FileInfoTruncated
		err := f.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}

		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			t.removeFromGlobal(folder, device, nil, nil)
			t.Delete(dbi.Key())
			t.checkFlush()
			continue
		}

		if cont := fn(device, f); !cont {
			return
		}
	}
}
Example #5
func main() {
	//Initialization
	flag.Parse()
	logFile, err := os.OpenFile(logfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalln("Failed to open log file", err)
	}
	logHandle := io.MultiWriter(logFile, os.Stdout)
	Init(logHandle)

	db, err := leveldb.OpenFile(dbname, nil)
	if err != nil {
		log.Fatalln("Failed to open database", err)
	}
	defer db.Close()
	var key []byte
	var count int
	iter := db.NewIterator(util.BytesPrefix([]byte("h-")), nil)
	for iter.Next() {
		// Remember that the contents of the returned slice
		// should not be modified, and are only valid until the next call to Next.
		key = iter.Key()
		lognbit := strings.Split(string(key[:]), "-")[1]
		if lognbit != "Inf" {
			//fmt.Println(string(key[:]))
			count += 1
			//mapChan <- &Map{string(key[:]), &value}
		}
	}
	iter.Release()

	Info.Println("Total count", count)
}
Example #6
// Current returns all jobs from the database that aren't completed - e.g.
// queued or running.
func (d *DB) Current() ([]*Job, error) {
	it := d.db.NewIterator(util.BytesPrefix([]byte(currPrefix)), nil)
	defer it.Release()

	ids := []JobId{}
	for it.Next() {
		var id JobId
		copy(id[:], it.Value())
		ids = append(ids, id)
	}
	if err := it.Error(); err != nil {
		return nil, err
	}

	jobs := make([]*Job, len(ids))
	for i, id := range ids {
		j, err := d.Get(id)
		if err != nil {
			return nil, err
		}
		jobs[i] = j
	}

	return jobs, nil
}
Example #7
// ChildFileIds returns the IDs of all Files that have parent refs to the given file.
func (d *DriveDB) ChildFileIds(fileId string) ([]string, error) {
	var ids []string
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKey(fileId)), nil)
	for iter.Next() {
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		found, err := d.db.Has(fileKey(cid), nil)
		if err == nil && found {
			ids = append(ids, cid)
		} else {
			batch.Delete(iter.Key())
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
Example #8
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
	threshold := time.Now().Add(-nodeDBNodeExpiration)

	// Find discovered nodes that are older than the allowance
	it := db.lvl.NewIterator(nil, nil)
	defer it.Release()

	for it.Next() {
		// Skip the item if not a discovery node
		id, field := splitKey(it.Key())
		if field != nodeDBDiscoverRoot {
			continue
		}
		// Skip the node if not expired yet
		if seen := db.lastPong(id); seen.After(threshold) {
			continue
		}
		// Otherwise delete all associated information
		deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
		for deleter.Next() {
			if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #9
func RevsAt(path string) (revs []string, err error) {
	seqs := db.Sub(REV_STORE)

	basepath := path + "::"
	lenbase := len(basepath)
	already := make(map[string]bool)
	iter := seqs.NewIterator(util.BytesPrefix([]byte(basepath)), nil)
	for iter.Next() {
		rev := string(iter.Key())[lenbase:]
		revid := strings.Split(rev, "-")[0]
		if already[revid] {
			// Keys are sorted, so revs of the same revid are adjacent:
			// keep only the most recent rev for this revid.
			revs[len(revs)-1] = rev
		} else {
			revs = append(revs, rev)
			already[revid] = true
		}
	}
	iter.Release()
	err = iter.Error()
	if err != nil {
		return revs, err
	}

	return revs, nil
}
Example #10
func ListChangesAt(path string, since uint64) ([]Change, error) {
	seqs := db.Sub(BY_SEQ)

	res := make([]Change, 0)

	basepath := path + "::"
	baselength := len(basepath)
	iter := seqs.NewIterator(util.BytesPrefix([]byte(basepath)), nil)
	for iter.Next() {
		seqstr := string(iter.Key())[baselength:]
		seq, _ := strconv.ParseUint(seqstr, 10, 64)
		valp := strings.Split(string(iter.Value()), "::")
		subpath := valp[0]
		rev := valp[1]

		if seq <= since {
			continue
		}

		res = append(res, Change{
			Id:      subpath,
			Seq:     seq,
			Changes: []justRev{justRev{rev}},
		})
	}
	iter.Release()
	err := iter.Error()
	if err != nil {
		return nil, err
	}

	return res, nil
}
Example #11
func (server *Server) newMessage(uid *[32]byte, envelope []byte) (*[32]byte, error) {
	// TODO: check that user exists
	var fuzzyTimestamp uint64
	var r [8]byte
	if _, err := rand.Read(r[:]); err != nil {
		return nil, err
	}

	iter := server.database.NewIterator(util.BytesPrefix(append([]byte{'m'}, uid[:]...)), nil)
	hasMessages := iter.Last()
	if hasMessages {
		t := iter.Key()[1+32:][:8]
		fuzzyTimestamp = binary.BigEndian.Uint64(t[:]) + 0xffffffff&binary.BigEndian.Uint64(r[:])
	} else {
		fuzzyTimestamp = binary.BigEndian.Uint64(r[:])
	}
	iter.Release()

	var tstmp [8]byte
	binary.BigEndian.PutUint64(tstmp[:], fuzzyTimestamp)

	messageHash := sha256.Sum256(envelope)
	key := append(append(append([]byte{'m'}, uid[:]...), tstmp[:]...), messageHash[:24]...)
	err := server.database.Put(key, envelope, wO_sync)
	if err != nil {
		return nil, err
	}

	msg_id := new([32]byte)
	copy(msg_id[:], append(tstmp[:], messageHash[:24]...))
	server.notifier.Notify(uid, msg_id, append([]byte{}, envelope...))

	return msg_id, nil
}
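The key written above is 'm', the 32-byte uid, an 8-byte big-endian fuzzy timestamp, then 24 bytes of the envelope hash, which is exactly the layout the BytesPrefix scan at the top of the function relies on. As a hedged illustration (a hypothetical helper, assuming the same goleveldb imports and the same key layout), reading a user's message IDs back out could look like:

// listMessageIds is a hypothetical companion to newMessage: it scans the
// 'm' + uid prefix and returns the 32-byte message IDs (8-byte timestamp
// followed by the 24-byte hash prefix) in key order.
func listMessageIds(db *leveldb.DB, uid *[32]byte) ([][32]byte, error) {
	var ids [][32]byte
	iter := db.NewIterator(util.BytesPrefix(append([]byte{'m'}, uid[:]...)), nil)
	defer iter.Release()
	for iter.Next() {
		var id [32]byte
		// Skip the 'm' marker and the uid; copy the remaining 32 bytes.
		copy(id[:], iter.Key()[1+32:])
		ids = append(ids, id)
	}
	return ids, iter.Error()
}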
Example #12
func (self *ldbResultStorage) GetResult(keyPrefix []byte) (*pb.GetResultResponse, error) {
	res := new(pb.GetResultResponse)
	it := self.db.NewIterator(util.BytesPrefix(keyPrefix), nil)
	defer it.Release()

	// Walk the prefix range backwards; if the range is empty, Last() returns
	// false and the loop body is never entered.
	for ok := it.Last(); ok; ok = it.Prev() {
		k := make([]byte, len(it.Key()))
		copy(k, it.Key())
		res.Key = append(res.Key, k)

		br := new(pb.BuildResult)
		if err := proto.Unmarshal(it.Value(), br); err != nil {
			return nil, fmt.Errorf("While unmarshaling proto for key record %s: %s", string(it.Key()), err)
		}
		res.Result = append(res.Result, br)
	}
	if it.Error() != nil {
		return nil, fmt.Errorf("Iteration over %s prefix failed: %s", string(keyPrefix), it.Error())
	}
	return res, nil
}
Example #13
func (ls *LevelDBStore) GetSlice(limit int64, offset int64) (posts []*postmi.Post, e error) {
	iter := ls.db.NewIterator(util.BytesPrefix([]byte("p")), nil)
	// Release the iterator even on the early error returns below.
	defer iter.Release()
	iter.Last()
	var p *postmi.Post
	p, e = postmi.NewPostFromJSON(iter.Value())
	if e != nil {
		return
	}
	posts = append(posts, p)
	for iter.Prev() {
		offset--
		if offset > 0 {
			continue
		}
		p, e = postmi.NewPostFromJSON(iter.Value())
		if e != nil {
			break
		}
		posts = append(posts, p)
		if int64(len(posts)) == limit {
			break
		}
	}
	if e != nil {
		return
	}
	e = iter.Error()
	return
}
Example #14
func (bc *blockchain) loadHeightToHash() error {
	bc.heightToHash = make(map[uint64]coin.Hash)

	maxDifficulty := uint64(0)
	iter := bc.db.NewIterator(util.BytesPrefix([]byte(HeaderBucket)), nil)
	defer iter.Release()
	for iter.Next() {
		// Unmarshal processedHeader
		b := iter.Value()
		var pheader processedHeader
		err := json.Unmarshal(b, &pheader)
		if err != nil {
			return err
		}

		// Add to heightToHash map if header is in main chain
		if pheader.IsMainChain {
			id := pheader.Header.Sum()
			bc.heightToHash[pheader.BlockHeight] = id

			if pheader.TotalDifficulty > maxDifficulty {
				maxDifficulty = pheader.TotalDifficulty
				bc.head = pheader
			}
		}
	}
	if err := iter.Error(); err != nil {
		return err
	}

	// Calculate Difficulty
	diff, err := bc.computeDifficulty(bc.head.Header.Sum())
	if err != nil {
		return err
	}
	bc.currDifficulty = diff

	return nil
}
Example #15
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	for dbi.Next() {
		device := db.deviceKeyDevice(dbi.Key())
		var f FileInfoTruncated
		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		err := f.UnmarshalXDR(append([]byte{}, dbi.Value()...))
		if err != nil {
			panic(err)
		}

		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			t.removeFromGlobal(folder, device, nil, nil)
			t.Delete(dbi.Key())
			t.checkFlush()
			continue
		}

		if cont := fn(device, f); !cont {
			return
		}
	}
}
Example #16
func clearItems(db *leveldb.DB) error {
	snap, err := db.GetSnapshot()
	if err != nil {
		return err
	}
	defer snap.Release()

	// Iterate over k2

	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
	defer it.Release()

	batch := new(leveldb.Batch)
	for it.Next() {
		k1 := it.Key()
		k2 := it.Value()

		// k2 should exist
		_, err := snap.Get(k2, nil)
		if err != nil {
			return err
		}

		// Delete the k1 => k2 mapping first
		batch.Delete(k1)
		// Then the k2 => data mapping
		batch.Delete(k2)
	}
	if testing.Verbose() {
		log.Printf("batch write (clear) %p", batch)
	}
	return db.Write(batch, nil)
}
Example #17
func (db *Instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, prefix)[:keyPrefixLen+keyFolderLen+keyDeviceLen+len(prefix)]), nil)
	defer dbi.Release()

	slashedPrefix := prefix
	if !bytes.HasSuffix(prefix, []byte{'/'}) {
		slashedPrefix = append(slashedPrefix, '/')
	}

	for dbi.Next() {
		name := db.deviceKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.Equal(name, prefix) && !bytes.HasPrefix(name, slashedPrefix) {
			return
		}

		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		f, err := unmarshalTrunc(append([]byte{}, dbi.Value()...), truncate)
		if err != nil {
			panic(err)
		}
		if cont := fn(f); !cont {
			return
		}
	}
}
Example #18
// Get returns the ShortCommit info for the given branch, target, and buildID.
func (i *info) Get(branch, target, buildID string) (*gitinfo.ShortCommit, error) {
	// Get the list of targets and confirm that this target is in it, otherwise add it to the list of targets.
	branchtargets := i.branchtargets()
	branchtarget := fmt.Sprintf("%s:%s", branch, target)
	if !util.In(branchtarget, branchtargets) {
		// If we aren't currently scanning results for this (branch, target) pair
		// then add it to the list.
		branchtargets = append(branchtargets, branchtarget)
		err := i.db.Put([]byte(TARGETS_KEY), []byte(strings.Join(branchtargets, " ")), nil)
		if err != nil {
			glog.Errorf("Failed to add new target %s: %s", branchtarget, err)
		}
		// Always try to fetch the information from the Android Build API directly if
		// we don't have it yet.
		return i.single_get(branch, target, buildID)
	} else {
		key, err := toKey(branch, target, buildID)
		if err != nil {
			return nil, fmt.Errorf("Can't Get with an invalid build ID %q: %s", buildID, err)
		}
		// Scan backwards through the build info until we find a buildID that is equal to or
		// comes before the buildID we are looking for.
		iter := i.db.NewIterator(lutil.BytesPrefix([]byte(toPrefix(branch, target))), nil)
		defer iter.Release()
		if found := iter.Seek([]byte(key)); found {
			value := &gitinfo.ShortCommit{}
			if err := json.Unmarshal(iter.Value(), value); err != nil {
				return nil, fmt.Errorf("Unable to deserialize value: %s", err)
			}
			return value, nil
		} else {
			return i.single_get(branch, target, buildID)
		}
	}
}
Example #19
func (daemon *Daemon) DeleteVolumeId(podId string) error {
	key := fmt.Sprintf("vol-%s", podId)
	iter := (daemon.db).NewIterator(util.BytesPrefix([]byte(key)), nil)
	for iter.Next() {
		value := iter.Key()
		if string(value)[4:18] == podId {
			fields := strings.Split(string(iter.Value()), ":")
			dev_id, _ := strconv.Atoi(fields[1])
			if err := dm.DeleteVolume(daemon.Storage.DmPoolData, dev_id); err != nil {
				glog.Error(err.Error())
				return err
			}
		}
		err := (daemon.db).Delete(value, nil)
		if err != nil {
			return err
		}
	}
	iter.Release()
	err := iter.Error()
	if err != nil {
		return err
	}
	return nil
}
Example #20
func ldbListFolders(db *leveldb.DB) []string {
	runtime.GC()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	l.Debugf("created snapshot %p", snap)
	defer func() {
		l.Debugf("close snapshot %p", snap)
		snap.Release()
	}()

	dbi := snap.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
	defer dbi.Release()

	folderExists := make(map[string]bool)
	for dbi.Next() {
		folder := string(globalKeyFolder(dbi.Key()))
		folderExists[folder] = true
	}

	folders := make([]string, 0, len(folderExists))
	for k := range folderExists {
		folders = append(folders, k)
	}

	sort.Strings(folders)
	return folders
}
Example #21
func (d *DaemonDB) PrefixList2Chan(prefix []byte, keyFilter KeyFilter) chan *KVPair {
	ch := make(chan *KVPair, 128)
	go func() {
		iter := d.db.NewIterator(util.BytesPrefix(prefix), nil)
		for iter.Next() {
			glog.V(3).Infof("got key from leveldb %s", string(iter.Key()))
			if keyFilter == nil || keyFilter(iter.Key()) {
				k := make([]byte, len(iter.Key()))
				v := make([]byte, len(iter.Value()))
				copy(k, iter.Key())
				copy(v, iter.Value())
				ch <- &KVPair{k, v}
			}
		}
		iter.Release()
		if err := iter.Error(); err != nil {
			ch <- nil
			glog.Error("Error occurs while iterate db with %v", prefix)
		}
		close(ch)
	}()

	return ch
}
Example #22
func (q *WebService) GetCPU() {
	iter := q.LevelDB.NewIterator(util.BytesPrefix([]byte("cpu")), nil)
	for iter.Next() {
		// Use key/value.
	}
	iter.Release()
	iter.Error()
}
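The loop body above is only the library's placeholder comment. For reference, a self-contained sketch of the same prefix scan with the boilerplate filled in (hypothetical database path, error handling added, values assumed to be printable) might look like:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("stats.db", nil) // hypothetical path
	if err != nil {
		log.Fatalln("open db:", err)
	}
	defer db.Close()

	iter := db.NewIterator(util.BytesPrefix([]byte("cpu")), nil)
	for iter.Next() {
		// Key() and Value() return slices that are reused on the next call
		// to Next, so copy them if they need to outlive this iteration.
		log.Printf("%s = %s", iter.Key(), iter.Value())
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Println("iteration error:", err)
	}
}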
Example #23
func dbEmpty(db *Instance) bool {
	iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
	defer iter.Release()
	if iter.Next() {
		return false
	}
	return true
}
Example #24
func ldbWithGlobal(db *leveldb.DB, folder, prefix []byte, truncate bool, fn Iterator) {
	runtime.GC()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()

	dbi := snap.NewIterator(util.BytesPrefix(globalKey(folder, prefix)), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if len(vl.versions) == 0 {
			l.Debugln(dbi.Key())
			panic("no versions?")
		}
		name := globalKeyName(dbi.Key())
		fk = deviceKeyInto(fk[:cap(fk)], folder, vl.versions[0].device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err != nil {
			l.Debugf("folder: %q (%x)", folder, folder)
			l.Debugf("key: %q (%x)", dbi.Key(), dbi.Key())
			l.Debugf("vl: %v", vl)
			l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
			l.Debugf("name: %q (%x)", name, name)
			l.Debugf("fk: %q", fk)
			l.Debugf("fk: %x %x %x", fk[1:1+64], fk[1+64:1+64+32], fk[1+64+32:])
			panic(err)
		}

		f, err := unmarshalTrunc(bs, truncate)
		if err != nil {
			panic(err)
		}

		if cont := fn(f); !cont {
			return
		}
	}
}
Example #25
// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
	deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
	for deleter.Next() {
		if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
			return err
		}
	}
	return nil
}
Example #26
func (db *RedicoDB) keyStart(k string) []string {
	var keys []string
	iter := db.leveldb.NewIterator(util.BytesPrefix([]byte(k)), nil)
	for iter.Next() {
		keys = append(keys, string(iter.Key()))
	}
	iter.Release()
	return keys
}
Example #27
func (self dbSync) loadFromDbPrefix(p string) map[string]string {
	res := make(map[string]string, 0)
	iter := self.Db.NewIterator(util.BytesPrefix([]byte(p)), nil)
	for iter.Next() {
		// Use key/value.
		res[string(iter.Key())] = string(iter.Value())
	}
	iter.Release()
	return res
}
Example #28
func (r *Reader) PrefixIterator(prefix []byte) store.KVIterator {
	byteRange := util.BytesPrefix(prefix)
	iter := r.snapshot.NewIterator(byteRange, r.store.defaultReadOptions)
	iter.First()
	rv := Iterator{
		store:    r.store,
		iterator: iter,
	}
	return &rv
}
Example #29
func (d *DaemonDB) PrefixDelete(prefix []byte) error {
	iter := d.db.NewIterator(util.BytesPrefix(prefix), nil)
	for iter.Next() {
		key := iter.Key()
		d.db.Delete(key, nil)
	}
	iter.Release()
	err := iter.Error()
	return err
}
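Examples #2, #7 and #16 above collect their deletions into a leveldb.Batch rather than issuing one Delete per key. A hedged sketch of the same prefix delete written that way (a hypothetical standalone variant, assuming the usual goleveldb imports) could be:

// prefixDeleteBatched is a hypothetical batched variant of PrefixDelete: it
// records every key under the prefix in a batch and applies all the
// deletions in a single write.
func prefixDeleteBatched(db *leveldb.DB, prefix []byte) error {
	iter := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()

	batch := new(leveldb.Batch)
	for iter.Next() {
		// Batch.Delete copies the key bytes, so the iterator's reused
		// slice can be passed directly.
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	return db.Write(batch, nil)
}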
Example #30
func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, prefix)), nil)
	defer dbi.Release()

	slashedPrefix := prefix
	if !bytes.HasSuffix(prefix, []byte{'/'}) {
		slashedPrefix = append(slashedPrefix, '/')
	}

	var fk []byte
	for dbi.Next() {
		var vl VersionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if len(vl.versions) == 0 {
			l.Debugln(dbi.Key())
			panic("no versions?")
		}

		name := db.globalKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.Equal(name, prefix) && !bytes.HasPrefix(name, slashedPrefix) {
			return
		}

		fk = db.deviceKeyInto(fk[:cap(fk)], folder, vl.versions[0].device, name)
		bs, err := t.Get(fk, nil)
		if err != nil {
			l.Debugf("folder: %q (%x)", folder, folder)
			l.Debugf("key: %q (%x)", dbi.Key(), dbi.Key())
			l.Debugf("vl: %v", vl)
			l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
			l.Debugf("name: %q (%x)", name, name)
			l.Debugf("fk: %q", fk)
			l.Debugf("fk: %x %x %x",
				fk[keyPrefixLen:keyPrefixLen+keyFolderLen],
				fk[keyPrefixLen+keyFolderLen:keyPrefixLen+keyFolderLen+keyDeviceLen],
				fk[keyPrefixLen+keyFolderLen+keyDeviceLen:])
			panic(err)
		}

		f, err := unmarshalTrunc(bs, truncate)
		if err != nil {
			panic(err)
		}

		if cont := fn(f); !cont {
			return
		}
	}
}