Code example #1
File: t_hash.go Project: lessos/lessdb
func (db *DB) HashScan(key, cursor, end []byte, limit uint32) *skv.Reply {

	var (
		prefix = skv.RawNsKeyEncode(skv.NsHashEntry, key)
		prelen = len(prefix)
		// clone the prefix before appending so cstart and cend never share
		// its backing array (or each other's)
		cstart = append(dbutil.BytesClone(prefix), cursor...)
		cend   = append(dbutil.BytesClone(prefix), end...)
		rpl    = skv.NewReply("")
	)

	for i := len(cend); i < 256; i++ {
		cend = append(cend, 0xff)
	}

	if limit > skv.ScanLimitMax {
		limit = skv.ScanLimitMax
	}

	iter := db.ldb.NewIterator(&util.Range{Start: cstart, Limit: cend}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		if len(iter.Key()) < prelen {
			continue
		}

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()[prelen:]))
		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Value()))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
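
Usage sketch (not from the project): a hypothetical caller in the same package, assuming an fmt import and the skv.Reply Hash()/Key/Value accessors that code example #5 relies on; the key "profile" and the empty-status success check are illustrative assumptions.

func printHashFields(db *DB) {
	// scan up to 100 field/value pairs of the hash stored under "profile"
	rpl := db.HashScan([]byte("profile"), nil, nil, 100)
	if rpl.Status != "" {
		// HashScan copies the iterator error string into Status
		return
	}
	for _, entry := range rpl.Hash() {
		fmt.Printf("field=%s value=%s\n", entry.Key, entry.Value)
	}
}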
Code example #2
File: t_raw.go Project: lessos/lessdb
func (db *DB) RawRevScan(cursor, end []byte, limit uint32) *skv.Reply {

	rpl := skv.NewReply("")

	if len(cursor) < 1 {
		cursor = end
	}

	for i := len(cursor); i < 256; i++ {
		cursor = append(cursor, 0x00)
	}

	for i := len(end); i < 256; i++ {
		end = append(end, 0xff)
	}

	if limit > skv.ScanLimitMax {
		limit = skv.ScanLimitMax
	}

	iter := db.ldb.NewIterator(&util.Range{Start: cursor, Limit: end}, nil)

	for ok := iter.Last(); ok; ok = iter.Prev() {

		if limit < 1 {
			break
		}

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()))
		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Value()))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
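
Usage sketch (hypothetical, same package): reverse-scan all raw keys under one prefix, highest key first. Passing the prefix for both cursor and end works because RawRevScan pads the cursor with 0x00 and the end with 0xff; the clone keeps the two arguments from sharing a backing array while they are padded.

func lastRawEntries(db *DB, prefix []byte) []skv.ReplyEntry {
	// the iterator walks from Last() backwards, so the highest keys come first
	return db.RawRevScan(dbutil.BytesClone(prefix), prefix, 10).Hash()
}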
Code example #3
File: t_ss.go Project: lessos/lessdb
func (db *DB) SsRange(key []byte, score_start, score_end uint64, limit uint32) *skv.Reply {

	var (
		bs_start = skv.SortSetsNsScorePrefix(key, score_start)
		bs_end   = skv.SortSetsNsScorePrefix(key, score_end)
		rpl      = skv.NewReply("")
	)

	for i := len(bs_end); i < 256; i++ {
		bs_end = append(bs_end, 0xff)
	}

	iter := db.ldb.NewIterator(&util.Range{Start: bs_start, Limit: bs_end}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		if len(iter.Key()) < (len(key) + 10) {
			db.RawDel(iter.Key())
			continue
		}

		ui64 := binary.BigEndian.Uint64(iter.Key()[len(key)+2 : (len(key) + 10)])

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()[(len(key)+10):]))
		rpl.Data = append(rpl.Data, dbutil.BytesClone([]byte(strconv.FormatUint(ui64, 10))))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
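
Usage sketch (hypothetical, same package, fmt imported): list up to 20 members of a sorted set whose scores fall in the range [0, 1000]; SsRange pairs each member name with its score rendered as a decimal string. The set name "ranking" is illustrative only.

func printScoreRange(db *DB) {
	for _, entry := range db.SsRange([]byte("ranking"), 0, 1000, 20).Hash() {
		fmt.Printf("member=%s score=%s\n", entry.Key, entry.Value)
	}
}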
Code example #4
File: t_raw.go Project: lessos/lessdb
func (db *DB) _raw_ssttlat_range(score_start, score_end, limit uint64) *skv.Reply {

	var (
		bs_start = skv.RawTtlQueuePrefix(score_start)
		bs_end   = skv.RawTtlQueuePrefix(score_end)
		rpl      = skv.NewReply("")
	)

	for i := len(bs_end); i < 256; i++ {
		bs_end = append(bs_end, 0xff)
	}

	iter := db.ldb.NewIterator(&util.Range{Start: bs_start, Limit: bs_end}, nil)

	for iter.Next() {

		if limit < 1 {
			break
		}

		if len(iter.Key()) < 10 {
			db.RawDel(iter.Key())
			continue
		}

		ui64 := binary.BigEndian.Uint64(iter.Key()[1:9])

		rpl.Data = append(rpl.Data, dbutil.BytesClone(iter.Key()))
		rpl.Data = append(rpl.Data, []byte(strconv.FormatUint(ui64, 10)))

		limit--
	}

	iter.Release()

	if iter.Error() != nil {
		rpl.Status = iter.Error().Error()
	}

	return rpl
}
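
Usage sketch (hypothetical, same package): since _raw_ssttlat_range is unexported it can only be reached from inside the package, for instance by an expiration sweep; reading the score as an expire-at timestamp is an assumption based on the method and prefix names.

func (db *DB) ttlQueueUpTo(now uint64) *skv.Reply {
	// entries whose TTL-queue score lies between 0 and now, at most 1000 of them
	return db._raw_ssttlat_range(0, now, 1000)
}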
Code example #5
File: t_object_doc.go Project: lessos/lessdb
// TODO btree
//  https://github.com/petar/GoLLRB
//  https://github.com/google/btree
func (db *DB) ObjectDocQuery(fold string, qry *skv.ObjectDocQuerySet) *skv.Reply {

	var (
		rpl  = skv.NewReply(skv.ReplyBadArgument)
		key  = skv.ObjectDocFoldKey(fold)
		skey = string(key)
	)

	schema, ok := _obj_doc_indexes[skey]
	if !ok {
		return rpl
	}

	idxs := map[string]skv.ObjectDocSchemaIndexEntry{}
	for _, idx := range schema.Indexes {

		if qry.SortField == idx.Column && idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			return rpl
		}

		idxs[idx.Column] = idx
	}

	for _, filter := range qry.Filters {
		if _, ok := idxs[filter.Field]; !ok {
			return rpl
		}
	}

	sls := [][]byte{}

	if idx, ok := idxs[qry.SortField]; ok {

		start, end := skv.ObjectDocIndexFieldPrefix(key, idx.Seq), skv.ObjectDocIndexFieldPrefix(key, idx.Seq)

		rs := []skv.ReplyEntry{}

		for {

			if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
				rs = db.RawRevScan(start, end, skv.ObjectDocScanMax).Hash()
			} else {
				rs = db.RawScan(start, end, skv.ObjectDocScanMax).Hash()
			}

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {
					sls = append(sls, dbutil.BytesClone(bkey))
				}

				if qry.SortMode == skv.ObjectDocQuerySortAttrDesc {
					end = skv.ObjectDocBytesDecr(v.Key)
				} else {
					start = skv.ObjectDocBytesIncr(v.Key)
				}
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}
	}

	sls_ok := false
	if len(sls) > 0 {
		sls_ok = true
	}

	for _, filter := range qry.Filters {

		idx, ok := idxs[filter.Field]
		if !ok {
			continue
		}

		if idx.Type != skv.ObjectDocSchemaIndexTypeUint {
			continue
		}

		vstart, vend, values := []byte{}, []byte{}, [][]byte{}

		for _, v := range filter.Values {

			vb := dbutil.SintToBytes(v, idx.Length)

			dup := false
			for _, pvb := range values {

				if bytes.Equal(pvb, vb) {
					dup = true
					break
				}
			}

			if !dup {

				values = append(values, vb)

				if (filter.Type == skv.ObjectDocQueryFilterValues && !filter.Exclude) ||
					filter.Type == skv.ObjectDocQueryFilterRange {

					if len(vstart) < 1 || bytes.Compare(vb, vstart) <= 0 {
						vstart = vb
					}

					if bytes.Compare(vb, vend) > 0 {
						vend = vb
					}
				}
			}
		}

		var (
			kpre = skv.ObjectDocIndexFieldPrefix(key, idx.Seq)
			// clone kpre before appending so start and end never share its backing array
			start   = append(dbutil.BytesClone(kpre), vstart...)
			end     = append(dbutil.BytesClone(kpre), vend...)
			fitkeys = map[string]empty{}
		)

		for {

			rs := db.RawScan(start, end, skv.ObjectDocScanMax).Hash()

			for _, v := range rs {

				if _, bkey, ok := skv.ObjectDocIndexRawKeyExport(v.Key, idx.Length); ok {

					if sls_ok {

						fitkeys[string(bkey)] = empty{}

					} else {
						sls = append(sls, dbutil.BytesClone(bkey))
					}
				}

				start = skv.ObjectDocBytesIncr(v.Key)
			}

			if uint32(len(rs)) < skv.ObjectDocScanMax {
				break
			}
		}

		if sls_ok {

			sls_buf := sls
			sls = [][]byte{}

			for _, gv := range sls_buf {

				if _, ok := fitkeys[string(gv)]; ok {
					sls = append(sls, gv)
				}
			}
		}

		sls_ok = true
	}

	if !sls_ok {

		// TODO
		tls := db.ObjectScan(fold, "", "", uint32(qry.Offset+qry.Limit)).Hash()
		for i := qry.Offset; i < len(tls); i++ {
			rpl.Data = append(rpl.Data, tls[i].Key, tls[i].Value)
		}

		return rpl
	}

	if len(sls) <= qry.Offset {
		return rpl
	}

	cutoff := qry.Offset + qry.Limit
	if cutoff > len(sls) {
		cutoff = len(sls)
	}

	for i := qry.Offset; i < cutoff; i++ {
		if rs := db.ObjectDocGet(fold, dbutil.BytesToHexString(sls[i])); rs.Status == skv.ReplyOK {
			rpl.Data = append(rpl.Data, sls[i], rs.Bytes())
		}
	}

	rpl.Status = skv.ReplyOK

	return rpl
}
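
Usage sketch (hypothetical): the field names SortField, SortMode, Offset and Limit are the ones ObjectDocQuery reads above, but the fold name, the column name and the literal values are illustrative, and Filters is left empty because the filter struct is not shown in this listing. Note that if SortField names an index whose type is not uint, the method returns ReplyBadArgument.

func queryNewestDocs(db *DB) *skv.Reply {
	qry := &skv.ObjectDocQuerySet{
		SortField: "created", // must name a uint-typed indexed column
		SortMode:  skv.ObjectDocQuerySortAttrDesc,
		Offset:    0,
		Limit:     10,
	}
	return db.ObjectDocQuery("articles", qry)
}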