// pickQueryStrategy returns the queryStrategy matching the shape of the
// query: keys-only, projection, or the normal full-entity strategy.
func pickQueryStrategy(fq *ds.FinalizedQuery, rq *reducedQuery, cb ds.RawRunCB, head *memStore) queryStrategy {
	if fq.KeysOnly() {
		return &keysOnlyStrategy{cb, stringset.New(0)}
	}
	if len(fq.Project()) > 0 {
		return newProjectionStrategy(fq, rq, cb)
	}
	return newNormalStrategy(rq.aid, rq.ns, cb, head)
}
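The selection above is a fixed precedence: keys-only wins over projection, which wins over the normal full-entity path. Below is a minimal standalone sketch of that precedence; the queryShape type and strategyName helper are hypothetical stand-ins for the relevant bits of ds.FinalizedQuery, not part of the package.

package main

import "fmt"

// queryShape is a hypothetical stand-in for the query attributes consulted
// by pickQueryStrategy.
type queryShape struct {
	keysOnly   bool
	projection []string
}

// strategyName mirrors the precedence in pickQueryStrategy:
// keys-only first, then projection, then the normal strategy.
func strategyName(q queryShape) string {
	switch {
	case q.keysOnly:
		return "keysOnly"
	case len(q.projection) > 0:
		return "projection"
	default:
		return "normal"
	}
}

func main() {
	fmt.Println(strategyName(queryShape{keysOnly: true}))              // keysOnly
	fmt.Println(strategyName(queryShape{projection: []string{"Age"}})) // projection
	fmt.Println(strategyName(queryShape{}))                            // normal
}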
// countQuery counts the results of the query against the given stores.
// Queries which don't need entity data are re-finalized as keys-only so that
// no property data is fetched just to be thrown away.
func countQuery(fq *ds.FinalizedQuery, aid, ns string, isTxn bool, idx, head *memStore) (ret int64, err error) {
	if len(fq.Project()) == 0 && !fq.KeysOnly() {
		fq, err = fq.Original().KeysOnly(true).Finalize()
		if err != nil {
			return
		}
	}

	err = executeQuery(fq, aid, ns, isTxn, idx, head, func(_ *ds.Key, _ ds.PropertyMap, _ ds.CursorCB) error {
		ret++
		return nil
	})
	return
}
func (d *dsTxnBuf) Count(fq *ds.FinalizedQuery) (count int64, err error) {
	// Unfortunately there's no fast-path here. We literally have to run the
	// query and count the results. Fortunately we can optimize to count keys
	// when it's not a projection query, which saves a bit of bandwidth.
	if len(fq.Project()) == 0 && !fq.KeysOnly() {
		fq, err = fq.Original().KeysOnly(true).Finalize()
		if err != nil {
			return
		}
	}

	err = d.Run(fq, func(_ *ds.Key, _ ds.PropertyMap, _ ds.CursorCB) error {
		count++
		return nil
	})
	return
}
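Both countQuery and Count lean on the same trick: when the caller only wants a number, the entity payload is dead weight, so a non-projection query is re-finalized as keys-only and the run callback simply increments a counter. A self-contained sketch of that counting-callback pattern follows; runKeysOnly is a hypothetical helper standing in for the real query execution.

package main

import "fmt"

// runKeysOnly is a hypothetical stand-in for executing a keys-only query;
// it invokes cb once per matching key.
func runKeysOnly(keys []string, cb func(key string) error) error {
	for _, k := range keys {
		if err := cb(k); err != nil {
			return err
		}
	}
	return nil
}

// countKeys mirrors the counting-callback pattern used by countQuery and
// dsTxnBuf.Count: ignore the payload, just bump a counter per result.
func countKeys(keys []string) (count int64, err error) {
	err = runKeysOnly(keys, func(string) error {
		count++
		return nil
	})
	return
}

func main() {
	n, _ := countKeys([]string{"/Kind,1", "/Kind,2", "/Kind,3"})
	fmt.Println(n) // 3
}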
// adjustQuery applies various mutations to the query to make it suitable for
// merging. In general, this removes limits, offsets, and the 'distinct'
// modifier, and it ensures that, if there are sort orders whose data wouldn't
// otherwise appear in the results, the query is transformed into a projection
// query which contains all of that data. A normal (non-projection,
// non-keys-only) query is never transformed in this way.
func adjustQuery(fq *ds.FinalizedQuery) (*ds.FinalizedQuery, error) {
	q := fq.Original()

	// The limit and offset must be done in-memory because otherwise we may
	// request too few entities from the underlying store if many matching
	// entities have been deleted in the buffered transaction.
	q = q.Limit(-1)
	q = q.Offset(-1)

	// De-duplication ('distinct') must be done in-memory, because otherwise
	// there's no way to merge in the effect of the in-flight changes (there's
	// no way to push back to the datastore "yeah, I know you told me that the
	// (1, 2) result came from `/Bob,1`, but would you mind pretending that it
	// didn't and tell me the next one instead?").
	q = q.Distinct(false)

	// Since we need to merge results, we must have all order-related fields
	// in each result. The only time we wouldn't have all the data available is
	// for a keys-only or projection query. To fix this, we convert all
	// Projection and KeysOnly queries to project on /all/ Orders.
	//
	// FinalizedQuery already guarantees that all projected fields show up in
	// the Orders, but the projected fields could be a subset of the orders.
	//
	// Additionally, on a keys-only query any orders other than __key__ require
	// converting the query to a projection query which includes those orders,
	// so that the results can be merged correctly.
	//
	// In both cases, the resulting objects returned to the higher layers of
	// the stack will only include the information requested by the user:
	// keys-only queries will discard all PropertyMap data, and projection
	// queries will discard any field data that the user didn't ask for.
	orders := fq.Orders()
	if len(fq.Project()) > 0 || (fq.KeysOnly() && len(orders) > 1) {
		q = q.KeysOnly(false)

		for _, o := range orders {
			if o.Property == "__key__" {
				continue
			}
			q = q.Project(o.Property)
		}
	}

	return q.Finalize()
}
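To make the transformation concrete, here is a worked, self-contained sketch of the decision described above, using a hypothetical simplified querySpec model rather than the real ds.FinalizedQuery API: a keys-only query sorted by Age then __key__ ends up as a projection on Age, with its limit, offset and distinct flags stripped.

package main

import "fmt"

// querySpec is a hypothetical, simplified model of the query attributes that
// adjustQuery inspects; it is not the real ds.FinalizedQuery API.
type querySpec struct {
	keysOnly bool
	project  []string
	orders   []string // property names, in sort order
	limit    int
	offset   int
	distinct bool
}

// adjustForMerge mirrors the transformation above: drop limit, offset and
// distinct (they are re-applied in-memory after merging), and turn keys-only
// or projection queries into projections over every non-__key__ order so each
// merged result carries the data needed to compare it against the others.
func adjustForMerge(q querySpec) querySpec {
	q.limit, q.offset, q.distinct = -1, -1, false
	if len(q.project) > 0 || (q.keysOnly && len(q.orders) > 1) {
		q.keysOnly = false
		proj := []string{}
		for _, o := range q.orders {
			if o != "__key__" {
				proj = append(proj, o)
			}
		}
		q.project = proj
	}
	return q
}

func main() {
	q := querySpec{keysOnly: true, orders: []string{"Age", "__key__"}, limit: 10, distinct: true}
	fmt.Printf("%+v\n", adjustForMerge(q))
	// {keysOnly:false project:[Age] orders:[Age __key__] limit:-1 offset:-1 distinct:false}
}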
// Run executes the query against both the buffered datastore and the
// underlying (parent) datastore, merging the two result streams and applying
// the query's offset, limit, keys-only and projection constraints in-memory.
func (d *dsTxnBuf) Run(fq *ds.FinalizedQuery, cb ds.RawRunCB) error {
	if start, end := fq.Bounds(); start != nil || end != nil {
		return errors.New("txnBuf filter does not support query cursors")
	}

	limit, limitSet := fq.Limit()
	offset, _ := fq.Offset()
	keysOnly := fq.KeysOnly()
	project := fq.Project()

	bufDS, parentDS, sizes := func() (ds.RawInterface, ds.RawInterface, *sizeTracker) {
		if !d.haveLock {
			d.state.Lock()
			defer d.state.Unlock()
		}
		return d.state.bufDS, d.state.parentDS, d.state.entState.dup()
	}()

	return runMergedQueries(fq, sizes, bufDS, parentDS, func(key *ds.Key, data ds.PropertyMap) error {
		if offset > 0 {
			offset--
			return nil
		}
		if limitSet {
			if limit == 0 {
				return ds.Stop
			}
			limit--
		}

		if keysOnly {
			data = nil
		} else if len(project) > 0 {
			newData := make(ds.PropertyMap, len(project))
			for _, p := range project {
				newData[p] = data[p]
			}
			data = newData
		}

		return cb(key, data, nil)
	})
}
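The callback passed to runMergedQueries applies offset and limit by hand: results are skipped until the offset is exhausted, then forwarded until the limit runs out, at which point iteration stops. A standalone sketch of that skip-then-stop pattern, assuming plain strings in place of real query results:

package main

import "fmt"

// applyOffsetLimit mirrors the in-memory offset/limit handling in the Run
// callback above: skip `offset` results, forward at most `limit` results
// (when limitSet), then stop.
func applyOffsetLimit(items []string, offset, limit int, limitSet bool, cb func(string)) {
	for _, it := range items {
		if offset > 0 {
			offset--
			continue
		}
		if limitSet {
			if limit == 0 {
				return // the real callback returns ds.Stop here
			}
			limit--
		}
		cb(it)
	}
}

func main() {
	got := []string{}
	applyOffsetLimit([]string{"a", "b", "c", "d", "e"}, 1, 2, true, func(s string) {
		got = append(got, s)
	})
	fmt.Println(got) // [b c]
}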
// fixQuery converts a ds.FinalizedQuery into the equivalent datastore.Query
// for the underlying SDK, carrying over cursors, filters, eventual
// consistency, keys-only, limit, offset, ordering, projection and distinct.
func (d rdsImpl) fixQuery(fq *ds.FinalizedQuery) (*datastore.Query, error) {
	ret := datastore.NewQuery(fq.Kind())

	start, end := fq.Bounds()
	if start != nil {
		ret = ret.Start(start.(datastore.Cursor))
	}
	if end != nil {
		ret = ret.End(end.(datastore.Cursor))
	}

	for prop, vals := range fq.EqFilters() {
		if prop == "__ancestor__" {
			p, err := dsF2RProp(d.aeCtx, vals[0])
			if err != nil {
				return nil, err
			}
			ret = ret.Ancestor(p.Value.(*datastore.Key))
		} else {
			filt := prop + "="
			for _, v := range vals {
				p, err := dsF2RProp(d.aeCtx, v)
				if err != nil {
					return nil, err
				}
				ret = ret.Filter(filt, p.Value)
			}
		}
	}

	if lnam, lop, lprop := fq.IneqFilterLow(); lnam != "" {
		p, err := dsF2RProp(d.aeCtx, lprop)
		if err != nil {
			return nil, err
		}
		ret = ret.Filter(lnam+" "+lop, p.Value)
	}
	if hnam, hop, hprop := fq.IneqFilterHigh(); hnam != "" {
		p, err := dsF2RProp(d.aeCtx, hprop)
		if err != nil {
			return nil, err
		}
		ret = ret.Filter(hnam+" "+hop, p.Value)
	}

	if fq.EventuallyConsistent() {
		ret = ret.EventualConsistency()
	}

	if fq.KeysOnly() {
		ret = ret.KeysOnly()
	}

	if lim, ok := fq.Limit(); ok {
		ret = ret.Limit(int(lim))
	}

	if off, ok := fq.Offset(); ok {
		ret = ret.Offset(int(off))
	}

	for _, o := range fq.Orders() {
		ret = ret.Order(o.String())
	}

	ret = ret.Project(fq.Project()...)
	if fq.Distinct() {
		ret = ret.Distinct()
	}

	return ret, nil
}
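One detail worth calling out in fixQuery: a single equality-filter property with several values fans out into one Filter call per value, each using the "<prop>=" filter string. A self-contained sketch of that fan-out, collecting hypothetical (filter string, value) pairs instead of calling the SDK, and omitting the "__ancestor__" special case:

package main

import (
	"fmt"
	"sort"
)

// filterPair is a hypothetical record of one Filter(filterStr, value) call.
type filterPair struct {
	filterStr string
	value     interface{}
}

// expandEqFilters mirrors the equality-filter loop in fixQuery: every value
// for a property becomes its own "prop=" filter.
func expandEqFilters(eq map[string][]interface{}) []filterPair {
	props := make([]string, 0, len(eq))
	for p := range eq {
		props = append(props, p)
	}
	sort.Strings(props) // deterministic order for the example

	out := []filterPair{}
	for _, prop := range props {
		for _, v := range eq[prop] {
			out = append(out, filterPair{prop + "=", v})
		}
	}
	return out
}

func main() {
	pairs := expandEqFilters(map[string][]interface{}{
		"Tag": {"a", "b"},
		"Age": {10},
	})
	fmt.Println(pairs) // [{Age= 10} {Tag= a} {Tag= b}]
}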