Example 1
// newProjectionStrategy returns a queryStrategy which serves projection
// queries by mapping each projected property to its column index in the
// query's suffix format; if the query is Distinct, results are additionally
// deduplicated by their projected values.
func newProjectionStrategy(fq *ds.FinalizedQuery, rq *reducedQuery, cb ds.RawRunCB) queryStrategy {
	proj := fq.Project()

	projectionLookups := make([]projectionLookup, len(proj))
	for i, prop := range proj {
		projectionLookups[i].propertyName = prop
		lookupErr := fmt.Errorf("planning a strategy for an unfulfillable query?")
		for j, col := range rq.suffixFormat {
			if col.Property == prop {
				projectionLookups[i].suffixIndex = j
				lookupErr = nil
				break
			}
		}
		// lookupErr is only still set if the projected property has no column in
		// the suffix format, which the query planner should already have ruled out.
		impossible(lookupErr)
	}
	ret := &projectionStrategy{cb: cb, project: projectionLookups}
	if fq.Distinct() {
		ret.distinct = stringset.New(0)
	}
	return ret
}
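
A minimal sketch of the lookup this strategy builds, using hypothetical
stand-in types rather than the real ds/reducedQuery interfaces: each projected
property is resolved to its position in the suffix columns, and a property
with no matching column is treated as a planning bug.

package main

import "fmt"

// projectionLookup pairs a projected property with its column index in the
// suffix format (a hypothetical mirror of the struct used above).
type projectionLookup struct {
	suffixIndex  int
	propertyName string
}

// buildLookups maps every projected property to its suffix column, panicking
// (like impossible() above) if one cannot be found.
func buildLookups(project, suffixCols []string) []projectionLookup {
	out := make([]projectionLookup, len(project))
	for i, prop := range project {
		out[i] = projectionLookup{suffixIndex: -1, propertyName: prop}
		for j, col := range suffixCols {
			if col == prop {
				out[i].suffixIndex = j
				break
			}
		}
		if out[i].suffixIndex < 0 {
			panic(fmt.Sprintf("planning a strategy for an unfulfillable query: %q", prop))
		}
	}
	return out
}

func main() {
	fmt.Println(buildLookups([]string{"Age", "Name"}, []string{"Name", "Age", "__key__"}))
	// [{1 Age} {0 Name}]
}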
Example 2
// fixQuery translates the abstract FinalizedQuery into a concrete
// *datastore.Query for the production AppEngine SDK, converting cursors,
// filters, ordering, projection and the remaining query restrictions one by
// one.
func (d rdsImpl) fixQuery(fq *ds.FinalizedQuery) (*datastore.Query, error) {
	ret := datastore.NewQuery(fq.Kind())

	start, end := fq.Bounds()
	if start != nil {
		ret = ret.Start(start.(datastore.Cursor))
	}
	if end != nil {
		ret = ret.End(end.(datastore.Cursor))
	}

	for prop, vals := range fq.EqFilters() {
		if prop == "__ancestor__" {
			// The ancestor restriction is carried as a magic "__ancestor__"
			// equality filter; convert it back into an Ancestor() call on the
			// SDK query.
			p, err := dsF2RProp(d.aeCtx, vals[0])
			if err != nil {
				return nil, err
			}
			ret = ret.Ancestor(p.Value.(*datastore.Key))
		} else {
			filt := prop + "="
			for _, v := range vals {
				p, err := dsF2RProp(d.aeCtx, v)
				if err != nil {
					return nil, err
				}

				ret = ret.Filter(filt, p.Value)
			}
		}
	}

	if lnam, lop, lprop := fq.IneqFilterLow(); lnam != "" {
		p, err := dsF2RProp(d.aeCtx, lprop)
		if err != nil {
			return nil, err
		}
		ret = ret.Filter(lnam+" "+lop, p.Value)
	}

	if hnam, hop, hprop := fq.IneqFilterHigh(); hnam != "" {
		p, err := dsF2RProp(d.aeCtx, hprop)
		if err != nil {
			return nil, err
		}
		ret = ret.Filter(hnam+" "+hop, p.Value)
	}

	if fq.EventuallyConsistent() {
		ret = ret.EventualConsistency()
	}

	if fq.KeysOnly() {
		ret = ret.KeysOnly()
	}

	if lim, ok := fq.Limit(); ok {
		ret = ret.Limit(int(lim))
	}

	if off, ok := fq.Offset(); ok {
		ret = ret.Offset(int(off))
	}

	for _, o := range fq.Orders() {
		ret = ret.Order(o.String())
	}

	ret = ret.Project(fq.Project()...)
	if fq.Distinct() {
		ret = ret.Distinct()
	}

	return ret, nil
}
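
The same translation pattern in miniature, with hypothetical spec and builder
types standing in for ds.FinalizedQuery and *datastore.Query: walk the
already-validated restrictions and chain one immutable builder call per
restriction, reassigning ret each time.

package main

import "fmt"

// querySpec is a hypothetical, already-validated query description playing
// the role of ds.FinalizedQuery in this sketch.
type querySpec struct {
	kind     string
	eq       map[string][]interface{}
	keysOnly bool
	limit    int32
	hasLimit bool
}

// query is a hypothetical immutable builder playing the role of
// *datastore.Query: every method returns a new value.
type query struct{ clauses []string }

func (q query) add(c string) query {
	q.clauses = append(append([]string(nil), q.clauses...), c)
	return q
}

func (q query) Filter(f string, v interface{}) query {
	return q.add(fmt.Sprintf("FILTER %s %v", f, v))
}
func (q query) KeysOnly() query   { return q.add("KEYS_ONLY") }
func (q query) Limit(n int) query { return q.add(fmt.Sprintf("LIMIT %d", n)) }

// fromSpec mirrors fixQuery's shape: start from the kind, then add one
// builder call per restriction, always reassigning ret.
func fromSpec(s querySpec) query {
	ret := query{clauses: []string{"KIND " + s.kind}}
	for prop, vals := range s.eq {
		for _, v := range vals {
			ret = ret.Filter(prop+"=", v) // one Filter per equality value
		}
	}
	if s.keysOnly {
		ret = ret.KeysOnly()
	}
	if s.hasLimit {
		ret = ret.Limit(int(s.limit))
	}
	return ret
}

func main() {
	q := fromSpec(querySpec{
		kind:     "Person",
		eq:       map[string][]interface{}{"Age": {18}},
		keysOnly: true,
		limit:    10,
		hasLimit: true,
	})
	fmt.Println(q.clauses)
	// [KIND Person FILTER Age= 18 KEYS_ONLY LIMIT 10]
}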
Example 3
// runMergedQueries executes a user query `fq` against the parent datastore as
// well as the in-memory datastore, calling `cb` with the merged result set.
//
// It's expected that the caller of this function will apply limit and offset
// if the query contains those restrictions. This may convert the query to
// an expanded projection query with more data than the user asked for. It's the
// caller's responsibility to prune away the extra data.
//
// See also `dsTxnBuf.Run()`.
func runMergedQueries(fq *ds.FinalizedQuery, sizes *sizeTracker,
	memDS, parentDS ds.RawInterface, cb func(k *ds.Key, data ds.PropertyMap) error) error {

	toRun, err := adjustQuery(fq)
	if err != nil {
		return err
	}

	cmpLower, cmpUpper := memory.GetBinaryBounds(fq)
	cmpOrder := fq.Orders()
	cmpFn := func(i *item) string {
		return i.getCmpRow(cmpLower, cmpUpper, cmpOrder)
	}

	dedup := stringset.Set(nil)
	distinct := stringset.Set(nil)
	distinctOrder := []ds.IndexColumn(nil)
	if len(fq.Project()) > 0 { // the original query was a projection query
		if fq.Distinct() {
			// it was a distinct projection query, so we need to dedup by distinct
			// options.
			distinct = stringset.New(0)
			proj := fq.Project()
			distinctOrder = make([]ds.IndexColumn, len(proj))
			for i, p := range proj {
				distinctOrder[i].Property = p
			}
		}
	} else {
		// the original was a normal or keys-only query, so we need to dedup by keys.
		dedup = stringset.New(0)
	}

	stopChan := make(chan struct{})

	parIter := queryToIter(stopChan, toRun, parentDS)
	memIter := queryToIter(stopChan, toRun, memDS)

	parItemGet := func() (*item, error) {
		for {
			itm, err := parIter()
			if itm == nil || err != nil {
				return nil, err
			}
			encKey := itm.getEncKey()
			if sizes.has(encKey) || (dedup != nil && dedup.Has(encKey)) {
				continue
			}
			return itm, nil
		}
	}
	memItemGet := func() (*item, error) {
		for {
			itm, err := memIter()
			if itm == nil || err != nil {
				return nil, err
			}
			if dedup != nil && dedup.Has(itm.getEncKey()) {
				continue
			}
			return itm, nil
		}
	}

	defer func() {
		// Signal both iterator goroutines to stop, then pull once more from each
		// so a producer blocked on its channel can observe the stop and exit.
		close(stopChan)
		parItemGet()
		memItemGet()
	}()

	pitm, err := parItemGet()
	if err != nil {
		return err
	}

	mitm, err := memItemGet()
	if err != nil {
		return err
	}

	for {
		// the err can be set during the loop below. If we come around the bend and
		// it's set, then we need to return it. We don't check it immediately
		// because it's set after we already have a good result to return to the
		// user.
		if err != nil {
			return err
		}

		usePitm := pitm != nil
		if pitm != nil && mitm != nil {
			usePitm = cmpFn(pitm) < cmpFn(mitm)
		} else if pitm == nil && mitm == nil {
			break
		}

		toUse := (*item)(nil)
		// we check the error at the beginning of the loop.
		if usePitm {
			toUse = pitm
			pitm, err = parItemGet()
		} else {
			toUse = mitm
			mitm, err = memItemGet()
		}

		if dedup != nil {
			if !dedup.Add(toUse.getEncKey()) {
				continue
			}
		}
		if distinct != nil {
			// NOTE: We know that toUse will not be used after this point for
			// comparison purposes, so re-use its cmpRow property for our distinct
			// filter here.
			toUse.cmpRow = ""
			if !distinct.Add(toUse.getCmpRow(nil, nil, distinctOrder)) {
				continue
			}
		}
		if err := cb(toUse.key, toUse.data); err != nil {
			if err == ds.Stop {
				return nil
			}
			return err
		}
	}

	return nil
}
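
The core of the merge loop above, reduced to a standalone sketch with
hypothetical string-keyed rows (not the real item/iterator plumbing): pull
from two already-sorted sources, always emit the row with the smaller
comparison string, prefer the in-memory side on ties, and skip anything whose
key was already emitted. This only models the key-dedup path of a normal
query, not the distinct-projection path.

package main

import "fmt"

// row is a hypothetical stand-in for *item: cmp orders the merged stream,
// key deduplicates it, and data is the payload handed to the callback.
type row struct{ cmp, key, data string }

// mergeSorted merges two cmp-ordered slices, preferring mem on ties (so the
// in-memory copy wins, as in runMergedQueries) and skipping rows whose key
// has already been emitted.
func mergeSorted(par, mem []row, emit func(row) error) error {
	seen := map[string]bool{}
	i, j := 0, 0
	for i < len(par) || j < len(mem) {
		usePar := j >= len(mem) || (i < len(par) && par[i].cmp < mem[j].cmp)
		var r row
		if usePar {
			r, i = par[i], i+1
		} else {
			r, j = mem[j], j+1
		}
		if seen[r.key] {
			continue
		}
		seen[r.key] = true
		if err := emit(r); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	parent := []row{{"a", "k1", "parent copy"}, {"c", "k3", "parent only"}}
	memory := []row{{"a", "k1", "memory copy"}, {"b", "k2", "memory only"}}
	_ = mergeSorted(parent, memory, func(r row) error {
		fmt.Println(r.key, r.data) // k1 memory copy, k2 memory only, k3 parent only
		return nil
	})
}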