Example #1
// curs builds a queryCursor from (column name, value) pairs: each name is
// parsed as an index column and each value is serialized as a property,
// honoring descending columns by inverting their encoded bytes.
func curs(pairs ...interface{}) queryCursor {
	if len(pairs)%2 != 0 {
		panic("curs() takes only even pairs")
	}
	pre := &bytes.Buffer{}
	if _, err := cmpbin.WriteUint(pre, uint64(len(pairs)/2)); err != nil {
		panic(err)
	}
	post := serialize.Invertible(&bytes.Buffer{})
	for i := 0; i < len(pairs); i += 2 {
		k, v := pairs[i].(string), pairs[i+1]

		col, err := dstore.ParseIndexColumn(k)
		if err != nil {
			panic(err)
		}

		post.SetInvert(col.Descending)
		if err := serialize.WriteIndexColumn(pre, col); err != nil {
			panic(err)
		}
		if err := serialize.WriteProperty(post, serialize.WithoutContext, prop(v)); err != nil {
			panic(err)
		}
	}
	return queryCursor(serialize.Join(pre.Bytes(), post.Bytes()))
}
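A hedged usage sketch (not from the original source; it assumes the prop() helper used above plus some existing *ds.Key value k): building a cursor for an index sorted by an ascending Field column followed by __key__. A leading "-" in a column name would mark that column as descending for dstore.ParseIndexColumn.

// Hypothetical usage: a cursor positioned at Field=10, key=k, for an index
// ordered by (Field ASC, __key__ ASC).
cursor := curs("Field", 10, "__key__", k)
_ = cursor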
Example #2
// toComparableString computes the byte-sortable 'order' string for the given
// key/PropertyMap.
//
//   * start/end are byte sequences which are the inequality bounds of the
//     query, if any. Each is a serialized datastore.Property. If the
//     inequality column is inverted, then start and end are also inverted and
//     swapped with each other.
//   * order is the list of sort orders in the actual executing queries.
//   * k / pm are the data to derive a sortable string for.
//
// The result of this function is the series of serialized properties, one per
// order column, which represents this key/pm's first entry in the composite
// index that would point to it (i.e. the index with the `order` sort orders).
func toComparableString(start, end []byte, order []ds.IndexColumn, k *ds.Key, pm ds.PropertyMap) (row, key []byte) {
	doCmp := true
	soFar := []byte{}
	ps := serialize.PropertyMapPartially(k, nil)
	for _, ord := range order {
		row, ok := ps[ord.Property]
		if !ok {
			if vals, ok := pm[ord.Property]; ok {
				row = serialize.PropertySlice(vals)
			}
		}
		sort.Sort(row)
		foundOne := false
		for _, serialized := range row {
			if ord.Descending {
				serialized = serialize.Invert(serialized)
			}
			if doCmp {
				maybe := serialize.Join(soFar, serialized)
				cmp := bytes.Compare(maybe, start)
				if cmp >= 0 {
					foundOne = true
					soFar = maybe
					doCmp = len(soFar) < len(start)
					break
				}
			} else {
				foundOne = true
				soFar = serialize.Join(soFar, serialized)
				break
			}
		}
		if !foundOne {
			return nil, nil
		}
	}
	if end != nil && bytes.Compare(soFar, end) >= 0 {
		return nil, nil
	}
	return soFar, ps["__key__"][0]
}
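The ord.Descending branch above works because serialize.Invert flips every byte, which reverses sort order while letting a single ascending bytes.Compare drive everything. A small standalone illustration of that property (invert below is a stand-in for serialize.Invert; the strings are chosen so neither is a prefix of the other, since raw byte inversion alone does not handle that case):

package main

import (
	"bytes"
	"fmt"
)

// invert returns b with every byte flipped, so inverted byte strings sort in
// the reverse of their original order (when neither is a prefix of the other).
func invert(b []byte) []byte {
	out := make([]byte, len(b))
	for i, c := range b {
		out[i] = 0xFF - c
	}
	return out
}

func main() {
	a, b := []byte("apple"), []byte("banana")
	fmt.Println(bytes.Compare(a, b) < 0)                 // true: "apple" sorts first
	fmt.Println(bytes.Compare(invert(a), invert(b)) > 0) // true: inverted order is reversed
}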
Example #3
// handle emits one projected query result: it copies the projected columns out
// of the decoded index row, optionally drops the row if a distinct query has
// already produced the same projection, and forwards the assembled PropertyMap
// to the user callback.
func (s *projectionStrategy) handle(rawData [][]byte, decodedProps []ds.Property, key *ds.Key, gc func() (ds.Cursor, error)) error {
	projectedRaw := [][]byte(nil)
	if s.distinct != nil {
		projectedRaw = make([][]byte, len(decodedProps))
	}
	pmap := make(ds.PropertyMap, len(s.project))
	for i, p := range s.project {
		if s.distinct != nil {
			projectedRaw[i] = rawData[p.suffixIndex]
		}
		pmap[p.propertyName] = []ds.Property{decodedProps[p.suffixIndex]}
	}
	if s.distinct != nil {
		if !s.distinct.Add(string(serialize.Join(projectedRaw...))) {
			return nil
		}
	}
	return s.cb(key, pmap, gc)
}
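The only thing handle needs from s.distinct is an Add method that reports whether the joined raw projection bytes are new; a minimal stand-in with those semantics (a hypothetical toy type, not the set implementation the library actually uses):

// stringSet is a toy replacement for the distinct-tracking set: Add returns
// true only the first time a value is seen, so duplicate projection rows are
// dropped by the early return in handle.
type stringSet map[string]struct{}

func (s stringSet) Add(v string) bool {
	if _, ok := s[v]; ok {
		return false
	}
	s[v] = struct{}{}
	return true
}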
Example #4
// multiIterate performs a merge join over the given iterator definitions: it
// repeatedly advances every iterator to the largest suffix seen so far, and
// invokes cb once per suffix that all of the iterators agree on.
func multiIterate(defs []*iterDefinition, cb func(suffix []byte) error) error {
	if len(defs) == 0 {
		return nil
	}

	ts := make([]*iterator, len(defs))
	prefixLens := make([]int, len(defs))
	for i, def := range defs {
		// bind i so that the defer below doesn't get goofed by the loop variable
		i := i
		ts[i] = def.mkIter()
		prefixLens[i] = def.prefixLen
		defer ts[i].stop()
	}

	suffix := []byte(nil)
	skip := -1

	for {
		stop := false
		restart := false

		for idx, it := range ts {
			if skip >= 0 && skip == idx {
				continue
			}
			def := defs[idx]

			pfxLen := prefixLens[idx]
			it.next(serialize.Join(def.prefix[:pfxLen], suffix), func(itm *gkvlite.Item) {
				if itm == nil {
					// We hit the end of this iterator, so the whole query is done.
					stop = true
					return
				}

				sfxRO := itm.Key[pfxLen:]

				if bytes.Compare(sfxRO, suffix) > 0 {
					// this row has a higher suffix than anything we've seen before. Make
					// this index the one to skip, and restart this loop from the top.
					suffix = append(suffix[:0], sfxRO...)
					skip = idx
					if idx != 0 {
						// no point to restarting on the 0th index
						restart = true
					}
				}
			})
			if stop || restart {
				break
			}
		}
		if stop {
			return nil
		}
		if restart {
			continue
		}

		if err := cb(suffix); err != nil {
			if err == datastore.Stop {
				return nil
			}
			return err
		}
		suffix = nil
		skip = -1
	}
}
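The loop above is a zigzag-style merge: whenever one iterator yields a suffix larger than the current candidate, that suffix becomes the new target and every other iterator has to catch up before cb fires. The same idea over plain sorted string slices, as a hedged standalone sketch (it assumes the inputs are sorted and duplicate-free):

// intersectSorted returns the values present in every sorted, duplicate-free
// slice, mirroring multiIterate's "advance everyone to the current maximum"
// strategy.
func intersectSorted(lists ...[]string) (out []string) {
	if len(lists) == 0 {
		return nil
	}
	idx := make([]int, len(lists))
	for {
		if idx[0] >= len(lists[0]) {
			return
		}
		cur := lists[0][idx[0]]
		agreed := true
		for i := 1; i < len(lists); i++ {
			// advance list i until it reaches or passes the current candidate
			for idx[i] < len(lists[i]) && lists[i][idx[i]] < cur {
				idx[i]++
			}
			if idx[i] >= len(lists[i]) {
				return // one input is exhausted; nothing further can be common
			}
			if lists[i][idx[i]] != cur {
				// this list skipped ahead: adopt its value as the new candidate,
				// catch list 0 up, and restart from the top
				cur = lists[i][idx[i]]
				for idx[0] < len(lists[0]) && lists[0][idx[0]] < cur {
					idx[0]++
				}
				agreed = false
				break
			}
		}
		if agreed {
			out = append(out, cur)
			for i := range idx {
				idx[i]++
			}
		}
	}
}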
Example #5
// mkIter starts a goroutine that walks the collection rows sharing def's
// prefix, bounded below by start and (optionally) above by end, and returns an
// iterator whose commands, sent over its channel, drive that walk one item at
// a time.
func (def *iterDefinition) mkIter() *iterator {
	cmdChan := make(chan *cmd)
	ret := &iterator{
		ch: cmdChan,
	}

	prefix := def.prefix
	collection := def.c

	// convert the suffixes from the iterDefinition into full rows for the
	// underlying storage.
	start := serialize.Join(prefix, def.start)

	end := []byte(nil)
	if def.end != nil {
		end = serialize.Join(prefix, def.end)
	}

	go func() {
		c := (*cmd)(nil)
		ensureCmd := func() bool {
			if c == nil {
				c = <-cmdChan
				if c == nil { // stop()
					return false
				}
			}
			return true
		}
		if ensureCmd() {
			if bytes.Compare(c.targ, start) < 0 {
				c.targ = start
			}
		}

		defer ret.stop()
		for {
			if !ensureCmd() {
				return
			}
			terminalCallback := true
			collection.VisitItemsAscend(c.targ, true, func(i *gkvlite.Item) bool {
				if !ensureCmd() {
					return false
				}
				if bytes.Compare(i.Key, c.targ) < 0 {
					// the latest command's target is past this item; bail out and start a
					// fresh ascend from that target
					terminalCallback = false
					return false
				}
				if !bytes.HasPrefix(i.Key, prefix) {
					// we're no longer in prefix, terminate
					return false
				}
				if end != nil && bytes.Compare(i.Key, end) >= 0 {
					// we hit our cap, terminate.
					return false
				}
				c.cb(i)
				c = nil
				return true
			})
			if terminalCallback && ensureCmd() {
				c.cb(nil)
				c = nil
			}
		}
	}()

	return ret
}
Example #6
func TestMultiIteratorSimple(t *testing.T) {
	t.Parallel()

	// Simulate an index with 2 columns (int and int).
	vals := [][]int64{
		{1, 0},
		{1, 2},
		{1, 4},
		{1, 7},
		{1, 9},
		{3, 10},
		{3, 11},
	}

	valBytes := make([][]byte, len(vals))
	for i, nms := range vals {
		numbs := make([][]byte, len(nms))
		for j, n := range nms {
			numbs[j] = mkNum(n)
		}
		valBytes[i] = serialize.Join(numbs...)
	}

	otherVals := [][]int64{
		{3, 0},
		{4, 10},
		{19, 7},
		{20, 2},
		{20, 3},
		{20, 4},
		{20, 8},
		{20, 11},
	}

	otherValBytes := make([][]byte, len(otherVals))
	for i, nms := range otherVals {
		numbs := make([][]byte, len(nms))
		for j, n := range nms {
			numbs[j] = mkNum(n)
		}
		otherValBytes[i] = serialize.Join(numbs...)
	}

	Convey("Test MultiIterator", t, func() {
		s := newMemStore()
		c := s.SetCollection("zup1", nil)
		for _, row := range valBytes {
			c.Set(row, []byte{})
		}
		c2 := s.SetCollection("zup2", nil)
		for _, row := range otherValBytes {
			c2.Set(row, []byte{})
		}

		Convey("can join the same collection twice", func() {
			// get just the (1, *) rows,
			// starting at (1, 2) (i.e. >= 2)
			// and ending before (1, 7) (i.e. < 7)
			defs := []*iterDefinition{
				{c: c, prefix: mkNum(1), prefixLen: len(mkNum(1)), start: mkNum(2), end: mkNum(7)},
				{c: c, prefix: mkNum(1), prefixLen: len(mkNum(1)), start: mkNum(2), end: mkNum(7)},
			}

			i := 1
			So(multiIterate(defs, func(suffix []byte) error {
				So(readNum(suffix), ShouldEqual, vals[i][1])
				i++
				return nil
			}), ShouldBeNil)

			So(i, ShouldEqual, 3)
		})

		Convey("can make empty iteration", func() {
			// get just the (20, *) (doesn't exist)
			defs := []*iterDefinition{
				{c: c, prefix: mkNum(20)},
				{c: c, prefix: mkNum(20)},
			}

			i := 0
			So(multiIterate(defs, func(suffix []byte) error {
				panic("never")
			}), ShouldBeNil)

			So(i, ShouldEqual, 0)
		})

		Convey("can join (other, val, val)", func() {
			// 'other' must start with 20, 'vals' must start with 1
			// no range constraints
			defs := []*iterDefinition{
				{c: c2, prefix: mkNum(20)},
				{c: c, prefix: mkNum(1)},
				{c: c, prefix: mkNum(1)},
			}

			expect := []int64{2, 4}
			i := 0
			So(multiIterate(defs, func(suffix []byte) error {
				So(readNum(suffix), ShouldEqual, expect[i])
				i++
				return nil
			}), ShouldBeNil)
		})

		Convey("Can stop early", func() {
			defs := []*iterDefinition{
				{c: c, prefix: mkNum(1), prefixLen: len(mkNum(1))},
				{c: c, prefix: mkNum(1), prefixLen: len(mkNum(1))},
			}

			i := 0
			So(multiIterate(defs, func(suffix []byte) error {
				So(readNum(suffix), ShouldEqual, vals[i][1])
				i++
				return nil
			}), ShouldBeNil)
			So(i, ShouldEqual, 5)

			i = 0
			So(multiIterate(defs, func(suffix []byte) error {
				So(readNum(suffix), ShouldEqual, vals[i][1])
				i++
				return datastore.Stop
			}), ShouldBeNil)
			So(i, ShouldEqual, 1)
		})

	})

}
Example #7
// generate generates a single iterDefinition for the given index.
func generate(q *reducedQuery, idx *indexDefinitionSortable, c *constraints) *iterDefinition {
	def := &iterDefinition{
		c:     idx.coll,
		start: q.start,
		end:   q.end,
	}
	toJoin := make([][]byte, 0, len(idx.eqFilts))
	for _, sb := range idx.eqFilts {
		val := c.peel(sb.Property)
		if sb.Descending {
			val = serialize.Invert(val)
		}
		toJoin = append(toJoin, val)
	}
	def.prefix = serialize.Join(toJoin...)
	def.prefixLen = len(def.prefix)

	if q.eqFilters["__ancestor__"] != nil && !idx.hasAncestor() {
		// The query requires an ancestor, but the index doesn't explicitly have it
		// as part of the prefix (otherwise it would have been the first eqFilt
		// above). This happens when it's a builtin index, or if it's the primary
		// index (for a kindless query), or if it's the Kind index (for a filterless
		// query).
		//
		// builtin indexes are:
		//   Kind/__key__
		//   Kind/Prop/__key__
		//   Kind/Prop/-__key__
		if len(q.suffixFormat) > 2 || q.suffixFormat[len(q.suffixFormat)-1].Property != "__key__" {
			// This should never happen. One of the previous validators would have
			// selected a different index. But just in case.
			impossible(fmt.Errorf("cannot supply an implicit ancestor for %#v", idx))
		}

		// get the only value out of __ancestor__
		anc, _ := q.eqFilters["__ancestor__"].Peek()

		// Intentionally do NOT update prefixLen. This allows multiIterator to
		// correctly include the entire key in the shared iterator suffix, instead
		// of just the remainder.

		// Chop the terminating null byte off the q.ancestor key so that we accept
		// anything which is a descendant or an exact match. With the terminator
		// still in place, the range would match EXACTLY this key and nothing else.
		chopped := []byte(anc[:len(anc)-1])
		if q.suffixFormat[0].Descending {
			chopped = serialize.Invert(chopped)
		}
		def.prefix = serialize.Join(def.prefix, chopped)

		// Update start and end, since we know that if they contain anything, they
		// contain values for the __key__ field. This is necessary because bytes
		// are shifting from the suffix to the prefix, and start/end should only
		// contain suffix (variable) bytes.
		if def.start != nil {
			if !bytes.HasPrefix(def.start, chopped) {
				// again, shouldn't happen, but if it does, we want to know about it.
				impossible(fmt.Errorf(
					"start suffix for implied ancestor doesn't start with ancestor! start:%v ancestor:%v",
					def.start, chopped))
			}
			def.start = def.start[len(chopped):]
		}
		if def.end != nil {
			if !bytes.HasPrefix(def.end, chopped) {
				impossible(fmt.Errorf(
					"end suffix for implied ancestor doesn't start with ancestor! end:%v ancestor:%v",
					def.end, chopped))
			}
			def.end = def.end[len(chopped):]
		}
	}

	return def
}
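The "chop the terminating byte" trick above is a plain prefix property: with the terminator removed, the ancestor's serialized bytes are a prefix of every descendant key as well as of the ancestor itself, which is exactly what the bytes.HasPrefix check in mkIter needs. A standalone illustration using made-up byte literals (the real encoding comes from serialize; these values only show the shape):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical serialized keys: the ancestor ends with a terminator byte,
	// and a descendant repeats the ancestor's path before adding its own.
	anc := []byte{'K', 1, 0x00}           // ancestor key + terminator
	child := []byte{'K', 1, 'C', 2, 0x00} // descendant key + terminator

	fmt.Println(bytes.HasPrefix(child, anc))              // false: the terminator blocks the match
	fmt.Println(bytes.HasPrefix(child, anc[:len(anc)-1])) // true: chopped prefix matches descendants
	fmt.Println(bytes.HasPrefix(anc, anc[:len(anc)-1]))   // true: and still matches the exact key
}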
Example #8
// executeQuery runs the finalized query against the given index and head
// stores: it merges the matching index rows, honors the query's offset and
// limit, and streams each decoded result, together with a function that can
// build its cursor, to the chosen strategy, which ultimately invokes cb.
func executeQuery(fq *ds.FinalizedQuery, aid, ns string, isTxn bool, idx, head *memStore, cb ds.RawRunCB) error {
	rq, err := reduce(fq, aid, ns, isTxn)
	if err == ds.ErrNullQuery {
		return nil
	}
	if err != nil {
		return err
	}

	idxs, err := getIndexes(rq, idx)
	if err == ds.ErrNullQuery {
		return nil
	}
	if err != nil {
		return err
	}

	strategy := pickQueryStrategy(fq, rq, cb, head)
	if strategy == nil {
		// e.g. the normalStrategy found that there were NO entities in the current
		// namespace.
		return nil
	}

	offset, _ := fq.Offset()
	limit, hasLimit := fq.Limit()

	cursorPrefix := []byte(nil)
	getCursorFn := func(suffix []byte) func() (ds.Cursor, error) {
		return func() (ds.Cursor, error) {
			if cursorPrefix == nil {
				buf := &bytes.Buffer{}
				_, err := cmpbin.WriteUint(buf, uint64(len(rq.suffixFormat)))
				memoryCorruption(err)

				for _, col := range rq.suffixFormat {
					err := serialize.WriteIndexColumn(buf, col)
					memoryCorruption(err)
				}
				cursorPrefix = buf.Bytes()
			}
			// TODO(riannucci): Do we need to decrement suffix instead of increment
			// if we're sorting by __key__ DESCENDING?
			return queryCursor(serialize.Join(cursorPrefix, increment(suffix))), nil
		}
	}

	return multiIterate(idxs, func(suffix []byte) error {
		if offset > 0 {
			offset--
			return nil
		}
		if hasLimit {
			if limit <= 0 {
				return ds.Stop
			}
			limit--
		}

		rawData, decodedProps := parseSuffix(aid, ns, rq.suffixFormat, suffix, -1)

		keyProp := decodedProps[len(decodedProps)-1]
		if keyProp.Type() != ds.PTKey {
			impossible(fmt.Errorf("decoded index row doesn't end with a Key: %#v", keyProp))
		}

		return strategy.handle(
			rawData, decodedProps, keyProp.Value().(*ds.Key),
			getCursorFn(suffix))
	})
}