// toEncoded returns a list of all of the serialized versions of these keys,
// plus a stringset of all the encoded root keys that `keys` represents.
func toEncoded(keys []*datastore.Key) (full []string, roots stringset.Set) {
	roots = stringset.New(len(keys))
	full = make([]string, len(keys))
	for i, k := range keys {
		roots.Add(string(serialize.ToBytes(k.Root())))
		full[i] = string(serialize.ToBytes(k))
	}
	return
}
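// Illustrative usage sketch (not from the original source): two keys that
// share a root yield two serialized keys but a single root entry. The
// mkKey helper below is hypothetical; the exact constructor depends on the
// datastore package version in use.
//
//	root := mkKey("aid", "ns", "Parent", 1)             // hypothetical helper
//	child := mkKey("aid", "ns", "Parent", 1, "Kid", 2)
//	full, roots := toEncoded([]*datastore.Key{root, child})
//	// len(full) == 2; roots.Len() == 1, since both keys share the same root.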
func TestDatastoreQueries(t *testing.T) {
	Convey("Datastore Query support", t, func() {
		c := Use(context.Background())
		ds := dsS.Get(c)
		So(ds, ShouldNotBeNil)

		Convey("can create good queries", func() {
			q := ds.NewQuery("Foo").Filter("farnsworth >", 20).KeysOnly().Limit(10).Offset(39)

			// normally you can only get cursors from inside of the memory
			// implementation, so this construction is just for testing.
			start := queryCursor(bjoin(
				mkNum(2),
				serialize.ToBytes(dsS.IndexColumn{Property: "farnsworth"}),
				serialize.ToBytes(dsS.IndexColumn{Property: "__key__"}),
				serialize.ToBytes(prop(200)),
				serialize.ToBytes(prop(ds.NewKey("Foo", "id", 0, nil)))))

			So(start.String(), ShouldEqual,
				`gYAAZzFdTeeb3d9zOxsAAF-v221Xy32_AIGHyIgAAUc32-AFabMAAA==`)

			end := queryCursor(bjoin(
				mkNum(2),
				serialize.ToBytes(dsS.IndexColumn{Property: "farnsworth"}),
				serialize.ToBytes(dsS.IndexColumn{Property: "__key__"}),
				serialize.ToBytes(prop(3000)),
				serialize.ToBytes(prop(ds.NewKey("Foo", "zeta", 0, nil)))))

			q = q.Start(start).End(end)
			So(q, ShouldNotBeNil)
			So(q.(*queryImpl).err, ShouldBeNil)
			rq, err := q.(*queryImpl).reduce("", false)
			So(rq, ShouldNotBeNil)
			So(err, ShouldBeNil)
		})

		Convey("ensures orders make sense", func() {
			q := ds.NewQuery("Cool")
			q = q.Filter("cat =", 19).Filter("bob =", 10).Order("bob").Order("bob")

			Convey("removes dups and equality orders", func() {
				q = q.Order("wat")
				qi := q.(*queryImpl)
				So(qi.err, ShouldBeNil)
				rq, err := qi.reduce("", false)
				So(err, ShouldBeNil)
				So(rq.suffixFormat, ShouldResemble, []dsS.IndexColumn{
					{Property: "wat"}, {Property: "__key__"}})
			})

			Convey("if we equality-filter on __key__, that's just silly", func() {
				q = q.Order("wat").Filter("__key__ =", ds.NewKey("Foo", "wat", 0, nil))
				_, err := q.(*queryImpl).reduce("", false)
				So(err, ShouldErrLike, "query equality filter on __key__ is silly")
			})
		})
	})
}
func addIndex(store *memStore, ns string, compIdx []*ds.IndexDefinition) {
	normalized := make([]*ds.IndexDefinition, len(compIdx))
	idxColl := store.SetCollection("idx", nil)
	for i, idx := range compIdx {
		normalized[i] = idx.Normalize()
		idxColl.Set(serialize.ToBytes(*normalized[i].PrepForIdxTable()), []byte{})
	}

	if allEnts := store.GetCollection("ents:" + ns); allEnts != nil {
		allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool {
			pm, err := rpmWoCtx(i.Val, ns)
			memoryCorruption(err)

			prop, err := serialize.ReadProperty(
				bytes.NewBuffer(i.Key), serialize.WithoutContext, globalAppID, ns)
			memoryCorruption(err)

			k := prop.Value().(ds.Key)

			sip := partiallySerialize(k, pm)

			mergeIndexes(ns, store, newMemStore(), sip.indexEntries(ns, normalized))
			return true
		})
	}
}
// walkCompIdxs walks the table of compound indexes in the store. If `endsWith`
// is provided, this will only walk over compound indexes which match
// Kind, Ancestor, and whose SortBy has `endsWith.SortBy` as a suffix.
func walkCompIdxs(store *memStore, endsWith *ds.IndexDefinition, cb func(*ds.IndexDefinition) bool) {
	idxColl := store.GetCollection("idx")
	if idxColl == nil {
		return
	}
	itrDef := iterDefinition{c: idxColl}

	if endsWith != nil {
		full := serialize.ToBytes(*endsWith.Flip())
		// chop off the null terminating byte
		itrDef.prefix = full[:len(full)-1]
	}

	it := itrDef.mkIter()
	defer it.stop()
	for !it.stopped {
		it.next(nil, func(i *gkvlite.Item) {
			if i == nil {
				return
			}
			qi, err := serialize.ReadIndexDefinition(bytes.NewBuffer(i.Key))
			memoryCorruption(err)
			if !cb(qi.Flip()) {
				it.stop()
			}
		})
	}
}
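// Usage sketch (illustrative, not from the original source): enumerate the
// compound indexes matching a required suffix, stopping at the first hit.
// suffixIdx is a hypothetical *ds.IndexDefinition describing the needed
// Kind/Ancestor and trailing SortBy columns.
//
//	found := (*ds.IndexDefinition)(nil)
//	walkCompIdxs(store, suffixIdx, func(id *ds.IndexDefinition) bool {
//		found = id
//		return false // returning false stops the walk
//	})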
func (d *dataStoreData) putMulti(keys []ds.Key, vals []ds.PropertyMap, cb ds.PutMultiCB) {
	for i, k := range keys {
		pmap, _ := vals[i].Save(false)
		dataBytes := serialize.ToBytes(pmap)

		k, err := func() (ret ds.Key, err error) {
			d.rwlock.Lock()
			defer d.rwlock.Unlock()

			ents, ret := d.entsKeyLocked(k)
			incrementLocked(ents, groupMetaKey(ret))

			old := ents.Get(keyBytes(ret))
			oldPM := ds.PropertyMap(nil)
			if old != nil {
				if oldPM, err = rpmWoCtx(old, ret.Namespace()); err != nil {
					return
				}
			}
			updateIndexes(d.head, ret, oldPM, pmap)
			ents.Set(keyBytes(ret), dataBytes)
			return
		}()
		if cb != nil {
			cb(k, err)
		}
	}
}
func incrementLocked(ents *memCollection, key []byte) int64 {
	ret := curVersion(ents, key) + 1
	ents.Set(key, serialize.ToBytes(ds.PropertyMap{
		"__version__": {ds.MkPropertyNI(ret)},
	}))
	return ret
}
func HashKey(k datastore.Key) string {
	dgst := sha1.Sum(serialize.ToBytes(k))
	buf := bytes.Buffer{}
	enc := base64.NewEncoder(base64.StdEncoding, &buf)
	_, _ = enc.Write(dgst[:])
	enc.Close()
	return buf.String()[:buf.Len()-Sha1B64Padding]
}
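// Usage sketch (assumption, not from the original source): HashKey gives a
// stable, fixed-length token for any key, suitable for use as a cache key.
// Sha1B64Padding is presumed to be the number of trailing '=' characters
// base64 appends to a 20-byte SHA-1 digest (one, for 28 encoded characters),
// so the result is always 27 characters.
//
//	token := HashKey(someKey)
//	// len(token) == 27, regardless of how long the underlying key path is.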
func (q *queryImpl) addEqFilt(prop string, p ds.Property) {
	binVal := string(serialize.ToBytes(p))
	if cur, ok := q.eqFilters[prop]; !ok {
		q.eqFilters[prop] = stringSet{binVal: {}}
	} else {
		cur.add(binVal)
	}
}
// GetBinaryBounds gets the binary encoding of the upper and lower bounds of
// the inequality filter on fq, if any is defined. If a bound does not exist,
// it is nil.
//
// NOTE: if fq specifies a descending sort order for the inequality, the bounds
// will be inverted, incremented, and flipped.
func GetBinaryBounds(fq *ds.FinalizedQuery) (lower, upper []byte) {
	// Pick up the start/end range from the inequalities, if any.
	//
	// start and end in the reducedQuery are normalized so that
	// `start >= X < end`. Because of that, we need to tweak the inequality
	// filters contained in the query if they use the > or <= operators.
	if ineqProp := fq.IneqFilterProp(); ineqProp != "" {
		_, startOp, startV := fq.IneqFilterLow()
		if startOp != "" {
			lower = serialize.ToBytes(startV)
			if startOp == ">" {
				lower = increment(lower)
			}
		}

		_, endOp, endV := fq.IneqFilterHigh()
		if endOp != "" {
			upper = serialize.ToBytes(endV)
			if endOp == "<=" {
				upper = increment(upper)
			}
		}

		// The inequality is specified in natural (ascending) order in the
		// query's Filter syntax, but the order information may indicate to use
		// a descending index column for it. If that's the case, then we must
		// invert, swap and increment the inequality endpoints.
		//
		// Invert so that the desired numbers are represented correctly in the
		// index. Swap so that our iterators still go from >= start to < end.
		// Increment so that >= and < get correctly bounded (since the iterator
		// is still using natural bytes ordering).
		if fq.Orders()[0].Descending {
			hi, lo := []byte(nil), []byte(nil)
			if len(lower) > 0 {
				lo = increment(serialize.Invert(lower))
			}
			if len(upper) > 0 {
				hi = increment(serialize.Invert(upper))
			}
			upper, lower = lo, hi
		}
	}
	return
}
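// Worked example (illustrative, not from the original source): for the query
// `Filter("X >", 3).Filter("X <", 10)` with X sorted ascending, the bounds
// come back as:
//
//	lower = increment(serialize.ToBytes(prop(3)))  // ">" becomes ">=" of the next value
//	upper = serialize.ToBytes(prop(10))            // "<" is already exclusive
//
// If the first order is instead X descending, each bound is byte-inverted so
// it sorts correctly against the inverted index column, incremented to keep
// the >=/< convention, and the two bounds swap roles.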
func partiallySerialize(k ds.Key, pm ds.PropertyMap) (ret serializedIndexablePmap) {
	ret = make(serializedIndexablePmap, len(pm)+2)
	if k == nil {
		impossible(fmt.Errorf("key to partiallySerialize is nil"))
	}
	ret["__key__"] = [][]byte{serialize.ToBytes(ds.MkProperty(k))}
	for k != nil {
		ret["__ancestor__"] = append(ret["__ancestor__"], serialize.ToBytes(ds.MkProperty(k)))
		k = k.Parent()
	}
	// note: this `k` is the property name, shadowing the ds.Key above (which
	// is nil by this point anyway).
	for k, vals := range pm {
		newVals := serializeRow(vals)
		if len(newVals) > 0 {
			ret[k] = newVals
		}
	}
	return
}
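// Shape sketch (illustrative): for a key K = /Parent,1/Child,2 and a
// PropertyMap {"A": [1, 2]}, partiallySerialize produces roughly:
//
//	{
//		"__key__":      [ser(K)],
//		"__ancestor__": [ser(K), ser(K.Parent())], // K plus every ancestor
//		"A":            [ser(1), ser(2)],          // deduped, indexed values only
//	}
//
// where ser is shorthand for serialize.ToBytes(ds.MkProperty(...)).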
func incrementLocked(ents *memCollection, key []byte, amt int) int64 {
	if amt <= 0 {
		panic(fmt.Errorf("incrementLocked called with bad `amt`: %d", amt))
	}
	ret := curVersion(ents, key) + 1
	ents.Set(key, serialize.ToBytes(ds.PropertyMap{
		"__version__": {ds.MkPropertyNI(ret + int64(amt-1))},
	}))
	return ret
}
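// Semantics note (illustrative): incrementLocked reserves a contiguous block
// of `amt` version numbers and returns the first one. For example, with the
// current stored version at 5:
//
//	first := incrementLocked(ents, key, 3)
//	// first == 6, and the collection now records __version__ == 8,
//	// so the caller owns versions 6, 7, and 8.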
func TestCompoundIndexes(t *testing.T) {
	t.Parallel()

	idxKey := func(def dsS.IndexDefinition) string {
		So(def, ShouldNotBeNil)
		return "idx::" + string(serialize.ToBytes(*def.PrepForIdxTable()))
	}

	numItms := func(c *memCollection) uint64 {
		ret, _ := c.GetTotals()
		return ret
	}

	Convey("Test Compound indexes", t, func() {
		type Model struct {
			ID int64 `gae:"$id"`

			Field1 []string
			Field2 []int64
		}

		c := Use(context.Background())
		ds := dsS.Get(c)
		t := ds.Testable().(*dsImpl)
		head := t.data.head

		So(ds.Put(&Model{1, []string{"hello", "world"}, []int64{10, 11}}), ShouldBeNil)

		idx := dsS.IndexDefinition{
			Kind: "Model",
			SortBy: []dsS.IndexColumn{
				{Property: "Field2"},
			},
		}

		coll := head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 2)

		idx.SortBy[0].Property = "Field1"
		coll = head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 2)

		idx.SortBy = append(idx.SortBy, dsS.IndexColumn{Property: "Field1"})
		So(head.GetCollection(idxKey(idx)), ShouldBeNil)

		t.AddIndexes(&idx)
		coll = head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 4)
	})
}
func serializeRow(vals []ds.Property) serializedPvals {
	dups := map[string]struct{}{}
	ret := make(serializedPvals, 0, len(vals))
	for _, v := range vals {
		if v.IndexSetting() == ds.NoIndex {
			continue
		}
		data := serialize.ToBytes(v.ForIndex())
		dataS := string(data)
		if _, ok := dups[dataS]; ok {
			continue
		}
		dups[dataS] = struct{}{}
		ret = append(ret, data)
	}
	return ret
}
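// Behavior sketch (illustrative): serializeRow drops NoIndex values and
// collapses duplicate serialized forms, so a multi-valued property like
// ["a", "b", "a"] produces only two index rows:
//
//	rows := serializeRow([]ds.Property{mkP("a"), mkP("b"), mkP("a")})
//	// len(rows) == 2 -- the second "a" is skipped. (mkP is a hypothetical
//	// helper constructing an indexed ds.Property.)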
func (sip serializedIndexablePmap) indexEntries(ns string, idxs []*ds.IndexDefinition) *memStore {
	ret := newMemStore()
	idxColl := ret.SetCollection("idx", nil)

	mtch := matcher{}
	for _, idx := range idxs {
		idx = idx.Normalize()
		if irg, ok := mtch.match(idx.GetFullSortOrder(), sip); ok {
			idxBin := serialize.ToBytes(*idx.PrepForIdxTable())
			idxColl.Set(idxBin, []byte{})
			coll := ret.SetCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin), nil)
			irg.permute(coll.Set)
		}
	}

	return ret
}
func (d *dataStoreData) putMulti(keys []*ds.Key, vals []ds.PropertyMap, cb ds.PutMultiCB) error {
	ns := keys[0].Namespace()

	for i, k := range keys {
		pmap, _ := vals[i].Save(false)
		dataBytes := serialize.ToBytes(pmap)

		k, err := func() (ret *ds.Key, err error) {
			d.Lock()
			defer d.Unlock()

			ents := d.mutableEntsLocked(ns)

			ret, err = d.fixKeyLocked(ents, k)
			if err != nil {
				return
			}
			if !d.disableSpecialEntities {
				incrementLocked(ents, groupMetaKey(ret), 1)
			}

			old := ents.Get(keyBytes(ret))
			oldPM := ds.PropertyMap(nil)
			if old != nil {
				if oldPM, err = rpm(old); err != nil {
					return
				}
			}
			ents.Set(keyBytes(ret), dataBytes)
			updateIndexes(d.head, ret, oldPM, pmap)
			return
		}()
		if cb != nil {
			if err := cb(k, err); err != nil {
				if err == ds.Stop {
					return nil
				}
				return err
			}
		}
	}
	return nil
}
func (i *item) getEncKey() string {
	if i.encKey == "" {
		i.encKey = string(serialize.ToBytes(i.key))
	}
	return i.encKey
}
// maybeAddDefinition possibly adds a new IndexDefinitionSortable to this
// slice. It's only added if it could be useful in servicing q, otherwise
// this function is a noop.
//
// This returns true iff the proposed index is OK and depletes missingTerms
// to empty.
//
// If the proposed index is PERFECT (e.g. contains enough columns to cover
// all equality filters, and also has the correct suffix), idxs will be
// replaced with JUST that index, and this will return true.
func (idxs *IndexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore,
	missingTerms stringSet, id *ds.IndexDefinition) bool {
	// Kindless queries are handled elsewhere.
	if id.Kind != q.kind {
		impossible(
			fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q",
				id.Kind, q.kind))
	}

	// If we're an ancestor query, and the index is compound, but doesn't
	// include an Ancestor field, it doesn't work. Builtin indexes can be used
	// for ancestor queries (and have !Ancestor), assuming that it's only
	// equality filters (plus inequality on __key__), or a single inequality.
	if q.eqFilters["__ancestor__"] != nil && !id.Ancestor && !id.Builtin() {
		impossible(
			fmt.Errorf("maybeAddDefinition given compound index with wrong ancestor info: %s %#v",
				id, q))
	}

	// add __ancestor__ if necessary
	sortBy := id.GetFullSortOrder()

	// If the index has fewer fields than we need for the suffix, it can't
	// possibly help.
	if len(sortBy) < len(q.suffixFormat) {
		return false
	}

	numEqFilts := len(sortBy) - len(q.suffixFormat)
	// make sure the orders are precisely the same
	for i, sb := range sortBy[numEqFilts:] {
		if q.suffixFormat[i] != sb {
			return false
		}
	}

	if id.Builtin() && numEqFilts == 0 {
		if len(q.eqFilters) > 1 || (len(q.eqFilters) == 1 && q.eqFilters["__ancestor__"] == nil) {
			return false
		}
	}

	// Make sure the equalities section doesn't contain any properties we don't
	// want in our query.
	//
	// numByProp && totalEqFilts will be used to see if this is a perfect
	// match later.
	numByProp := make(map[string]int, len(q.eqFilters))
	totalEqFilts := 0

	eqFilts := sortBy[:numEqFilts]
	for _, p := range eqFilts {
		if _, ok := q.eqFilters[p.Property]; !ok {
			return false
		}
		numByProp[p.Property]++
		totalEqFilts++
	}

	// ok, we can actually use this

	// Grab the collection for convenience later. We don't want to invalidate
	// this index's potential just because the collection doesn't exist. If
	// it's a builtin and it doesn't exist, it still needs to be one of the
	// 'possible' indexes... it just means that the user's query will end up
	// with no results.
	coll := s.GetCollection(
		fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable())))

	// First, see if it's a perfect match. If it is, then our search is over.
	//
	// A perfect match contains ALL the equality filter columns (or more,
	// since we can use residuals to fill in the extras).
	toAdd := IndexDefinitionSortable{coll: coll}
	toAdd.eqFilts = eqFilts
	for _, sb := range toAdd.eqFilts {
		missingTerms.rm(sb.Property)
	}

	perfect := false
	if len(sortBy) == q.numCols {
		perfect = true
		for k, num := range numByProp {
			if num < len(q.eqFilters[k]) {
				perfect = false
				break
			}
		}
	}
	if perfect {
		*idxs = IndexDefinitionSortableSlice{toAdd}
	} else {
		*idxs = append(*idxs, toAdd)
	}

	return len(missingTerms) == 0
}
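// Example (illustrative, not from the original source): for a query with
// equality filters {A: {1, 2}} and suffix (B, __key__), an index on
// (A, A, B, __key__) is a perfect match: it has exactly q.numCols columns
// (2 equality slots + 2 suffix columns) and at least as many A columns as
// there are distinct A values. An index on (A, B, __key__) is merely usable;
// it is appended to idxs but leaves one A value to be satisfied by another
// index or by residual filtering.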
{"silly inequality (=> v <=)", nq().Gte("bob", 10).Lte("bob", 10), nil, nil}, {"cursors get smooshed into the inquality range", (nq().Gt("Foo", 3).Lt("Foo", 10). Start(curs("Foo", 2, "__key__", key("Something", 1))). End(curs("Foo", 20, "__key__", key("Something", 20)))), nil, &reducedQuery{ "dev~app", "ns", "Foo", map[string]stringset.Set{}, []dstore.IndexColumn{ {Property: "Foo"}, {Property: "__key__"}, }, increment(serialize.ToBytes(dstore.MkProperty(3))), serialize.ToBytes(dstore.MkProperty(10)), 2, }}, {"cursors could cause the whole query to be useless", (nq().Gt("Foo", 3).Lt("Foo", 10). Start(curs("Foo", 200, "__key__", key("Something", 1))). End(curs("Foo", 1, "__key__", key("Something", 20)))), dstore.ErrNullQuery, nil}, } func TestQueries(t *testing.T) { t.Parallel()
func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query {
	prop := ""
	op := qInvalid
	p := ds.Property{}
	return q.checkMutateClone(
		func() error {
			var err error
			prop, op, err = parseFilter(fStr)
			if err != nil {
				return err
			}

			if q.kind == "" && prop != "__key__" {
				// https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries
				return fmt.Errorf(
					"kindless queries can only filter on __key__, got %q", fStr)
			}

			err = p.SetValue(val, ds.ShouldIndex)
			if err != nil {
				return err
			}

			if p.Type() == ds.PTKey {
				if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) {
					return ds.ErrInvalidKey
				}
			}

			if prop == "__key__" {
				if op == qEqual {
					return fmt.Errorf(
						"query equality filter on __key__ is silly: %q", fStr)
				}
				if p.Type() != ds.PTKey {
					return fmt.Errorf("__key__ filter value is not a key: %T", val)
				}
			} else if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") {
				return fmt.Errorf("filter on reserved property: %q", prop)
			}

			if op != qEqual {
				if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop {
					return fmt.Errorf(
						"inequality filters on multiple properties: %q and %q",
						q.ineqFilter.prop, prop)
				}
				if len(q.order) > 0 && q.order[0].Property != prop {
					return fmt.Errorf(
						"first sort order must match inequality filter: %q v %q",
						q.order[0].Property, prop)
				}
			} else {
				for _, p := range q.project {
					if p == prop {
						return fmt.Errorf(
							"cannot project on field which is used in an equality filter: %q",
							prop)
					}
				}
			}
			return err
		},
		func(q *queryImpl) {
			if op == qEqual {
				// add it to eq filters
				q.addEqFilt(prop, p)

				// remove it from sort orders.
				// https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters
				toRm := -1
				for i, o := range q.order {
					if o.Property == prop {
						toRm = i
						break
					}
				}
				if toRm >= 0 {
					q.order = append(q.order[:toRm], q.order[toRm+1:]...)
				}
			} else {
				q.ineqFilter.prop = prop
				if q.ineqFilter.constrain(op, serialize.ToBytes(p)) {
					q.err = errQueryDone
				}
			}
		})
}
func keyBytes(key *ds.Key) []byte {
	return serialize.ToBytes(ds.MkProperty(key))
}
func TestDSCache(t *testing.T) {
	t.Parallel()

	zeroTime, err := time.Parse("2006-01-02T15:04:05.999999999Z", "2006-01-02T15:04:05.999999999Z")
	if err != nil {
		panic(err)
	}

	Convey("Test dscache", t, func() {
		c := mathrand.Set(context.Background(), rand.New(rand.NewSource(1)))
		clk := testclock.New(zeroTime)
		c = clock.Set(c, clk)
		c = memory.Use(c)

		dsUnder := datastore.Get(c)
		mc := memcache.Get(c)

		shardsForKey := func(k *datastore.Key) int {
			last := k.LastTok()
			if last.Kind == "shardObj" {
				return int(last.IntID)
			}
			if last.Kind == "noCacheObj" {
				return 0
			}
			return DefaultShards
		}

		numMemcacheItems := func() uint64 {
			stats, err := mc.Stats()
			So(err, ShouldBeNil)
			return stats.Items
		}

		Convey("enabled cases", func() {
			c = FilterRDS(c, shardsForKey)
			ds := datastore.Get(c)
			So(dsUnder, ShouldNotBeNil)
			So(ds, ShouldNotBeNil)
			So(mc, ShouldNotBeNil)

			Convey("basically works", func() {
				pm := datastore.PropertyMap{
					"BigData": {datastore.MkProperty([]byte(""))},
					"Value":   {datastore.MkProperty("hi")},
				}
				encoded := append([]byte{0}, serialize.ToBytes(pm)...)

				o := object{ID: 1, Value: "hi"}
				So(ds.Put(&o), ShouldBeNil)

				o = object{ID: 1}
				So(dsUnder.Get(&o), ShouldBeNil)
				So(o.Value, ShouldEqual, "hi")

				itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&o)))
				So(err, ShouldEqual, memcache.ErrCacheMiss)

				o = object{ID: 1}
				So(ds.Get(&o), ShouldBeNil)
				So(o.Value, ShouldEqual, "hi")

				itm, err = mc.Get(itm.Key())
				So(err, ShouldBeNil)
				So(itm.Value(), ShouldResemble, encoded)

				Convey("now we don't need the datastore!", func() {
					o := object{ID: 1}

					// delete it, bypassing the cache filter. Don't do this in
					// production unless you want a crappy cache.
					So(dsUnder.Delete(ds.KeyForObj(&o)), ShouldBeNil)

					itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&o)))
					So(err, ShouldBeNil)
					So(itm.Value(), ShouldResemble, encoded)

					So(ds.Get(&o), ShouldBeNil)
					So(o.Value, ShouldEqual, "hi")
				})

				Convey("deleting it properly records that fact, however", func() {
					o := object{ID: 1}

					So(ds.Delete(ds.KeyForObj(&o)), ShouldBeNil)
					itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&o)))
					So(err, ShouldEqual, memcache.ErrCacheMiss)
					So(ds.Get(&o), ShouldEqual, datastore.ErrNoSuchEntity)

					itm, err = mc.Get(itm.Key())
					So(err, ShouldBeNil)
					So(itm.Value(), ShouldResemble, []byte{})

					// this one hits memcache
					So(ds.Get(&o), ShouldEqual, datastore.ErrNoSuchEntity)
				})
			})

			Convey("compression works", func() {
				o := object{ID: 2, Value: `¯\_(ツ)_/¯`}
				data := make([]byte, 4000)
				for i := range data {
					const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()"
					data[i] = alpha[i%len(alpha)]
				}
				o.BigData = data

				So(ds.Put(&o), ShouldBeNil)
				So(ds.Get(&o), ShouldBeNil)

				itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&o)))
				So(err, ShouldBeNil)
				So(itm.Value()[0], ShouldEqual, ZlibCompression)
				So(len(itm.Value()), ShouldEqual, 653) // a bit smaller than 4k

				// ensure the next Get comes from the cache
				So(dsUnder.Delete(ds.KeyForObj(&o)), ShouldBeNil)

				o = object{ID: 2}
				So(ds.Get(&o), ShouldBeNil)
				So(o.Value, ShouldEqual, `¯\_(ツ)_/¯`)
				So(o.BigData, ShouldResemble, data)
			})

			Convey("transactions", func() {
				Convey("work", func() {
					// populate an object @ ID1
					So(ds.Put(&object{ID: 1, Value: "something"}), ShouldBeNil)
					So(ds.Get(&object{ID: 1}), ShouldBeNil)

					So(ds.Put(&object{ID: 2, Value: "nurbs"}), ShouldBeNil)
					So(ds.Get(&object{ID: 2}), ShouldBeNil)

					// memcache now has the wrong value (simulated race)
					So(dsUnder.Put(&object{ID: 1, Value: "else"}), ShouldBeNil)
					So(ds.RunInTransaction(func(c context.Context) error {
						ds := datastore.Get(c)
						o := &object{ID: 1}
						So(ds.Get(o), ShouldBeNil)
						So(o.Value, ShouldEqual, "else")
						o.Value = "txn"
						So(ds.Put(o), ShouldBeNil)

						So(ds.Delete(ds.KeyForObj(&object{ID: 2})), ShouldBeNil)
						return nil
					}, &datastore.TransactionOptions{XG: true}), ShouldBeNil)

					_, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&object{ID: 1})))
					So(err, ShouldEqual, memcache.ErrCacheMiss)
					_, err = mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&object{ID: 2})))
					So(err, ShouldEqual, memcache.ErrCacheMiss)
					o := &object{ID: 1}
					So(ds.Get(o), ShouldBeNil)
					So(o.Value, ShouldEqual, "txn")
				})

				Convey("errors don't invalidate", func() {
					// populate an object @ ID1
					So(ds.Put(&object{ID: 1, Value: "something"}), ShouldBeNil)
					So(ds.Get(&object{ID: 1}), ShouldBeNil)
					So(numMemcacheItems(), ShouldEqual, 1)

					So(ds.RunInTransaction(func(c context.Context) error {
						ds := datastore.Get(c)
						o := &object{ID: 1}
						So(ds.Get(o), ShouldBeNil)
						So(o.Value, ShouldEqual, "something")
						o.Value = "txn"
						So(ds.Put(o), ShouldBeNil)
						return errors.New("OH NOES")
					}, nil).Error(), ShouldContainSubstring, "OH NOES")

					// memcache still has the original
					So(numMemcacheItems(), ShouldEqual, 1)
					So(dsUnder.Delete(ds.KeyForObj(&object{ID: 1})), ShouldBeNil)
					o := &object{ID: 1}
					So(ds.Get(o), ShouldBeNil)
					So(o.Value, ShouldEqual, "something")
				})
			})

			Convey("control", func() {
				Convey("per-model bypass", func() {
					type model struct {
						ID         string           `gae:"$id"`
						UseDSCache datastore.Toggle `gae:"$dscache.enable,false"`
						Value      string
					}

					itms := []model{
						{ID: "hi", Value: "something"},
						{ID: "there", Value: "else", UseDSCache: datastore.On},
					}

					So(ds.PutMulti(itms), ShouldBeNil)
					So(ds.GetMulti(itms), ShouldBeNil)

					So(numMemcacheItems(), ShouldEqual, 1)
				})

				Convey("per-key shard count", func() {
					s := &shardObj{ID: 4, Value: "hi"}
					So(ds.Put(s), ShouldBeNil)
					So(ds.Get(s), ShouldBeNil)
					So(numMemcacheItems(), ShouldEqual, 1)

					for i := 0; i < 20; i++ {
						So(ds.Get(s), ShouldBeNil)
					}
					So(numMemcacheItems(), ShouldEqual, 4)
				})

				Convey("per-key cache disablement", func() {
					n := &noCacheObj{ID: "nurbs", Value: true}
					So(ds.Put(n), ShouldBeNil)
					So(ds.Get(n), ShouldBeNil)
					So(numMemcacheItems(), ShouldEqual, 0)
				})

				Convey("per-model expiration", func() {
					type model struct {
						ID         int64 `gae:"$id"`
						DSCacheExp int64 `gae:"$dscache.expiration,7"`
						Value      string
					}

					So(ds.Put(&model{ID: 1, Value: "mooo"}), ShouldBeNil)
					So(ds.Get(&model{ID: 1}), ShouldBeNil)

					itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(&model{ID: 1})))
					So(err, ShouldBeNil)

					clk.Add(10 * time.Second)

					_, err = mc.Get(itm.Key())
					So(err, ShouldEqual, memcache.ErrCacheMiss)
				})
			})

			Convey("screw cases", func() {
				Convey("memcache contains bogus value (simulated failed AddMulti)", func() {
					o := &object{ID: 1, Value: "spleen"}
					So(ds.Put(o), ShouldBeNil)

					sekret := []byte("I am a banana")
					itm := mc.NewItem(MakeMemcacheKey(0, ds.KeyForObj(o))).SetValue(sekret)
					So(mc.Set(itm), ShouldBeNil)

					o = &object{ID: 1}
					So(ds.Get(o), ShouldBeNil)
					So(o.Value, ShouldEqual, "spleen")

					itm, err := mc.Get(itm.Key())
					So(err, ShouldBeNil)
					So(itm.Flags(), ShouldEqual, ItemUKNONWN)
					So(itm.Value(), ShouldResemble, sekret)
				})

				Convey("memcache contains bogus value (corrupt entry)", func() {
					o := &object{ID: 1, Value: "spleen"}
					So(ds.Put(o), ShouldBeNil)

					sekret := []byte("I am a banana")
					itm := (mc.NewItem(MakeMemcacheKey(0, ds.KeyForObj(o))).
						SetValue(sekret).
						SetFlags(uint32(ItemHasData)))
					So(mc.Set(itm), ShouldBeNil)

					o = &object{ID: 1}
					So(ds.Get(o), ShouldBeNil)
					So(o.Value, ShouldEqual, "spleen")

					itm, err := mc.Get(itm.Key())
					So(err, ShouldBeNil)
					So(itm.Flags(), ShouldEqual, ItemHasData)
					So(itm.Value(), ShouldResemble, sekret)
				})

				Convey("other entity has the lock", func() {
					o := &object{ID: 1, Value: "spleen"}
					So(ds.Put(o), ShouldBeNil)

					sekret := []byte("r@vmarod!#)%9T")
					itm := (mc.NewItem(MakeMemcacheKey(0, ds.KeyForObj(o))).
						SetValue(sekret).
						SetFlags(uint32(ItemHasLock)))
					So(mc.Set(itm), ShouldBeNil)

					o = &object{ID: 1}
					So(ds.Get(o), ShouldBeNil)
					So(o.Value, ShouldEqual, "spleen")

					itm, err := mc.Get(itm.Key())
					So(err, ShouldBeNil)
					So(itm.Flags(), ShouldEqual, ItemHasLock)
					So(itm.Value(), ShouldResemble, sekret)
				})

				Convey("massive entities can't be cached", func() {
					o := &object{ID: 1, Value: "spleen"}
					mr := mathrand.Get(c)
					numRounds := (internalValueSizeLimit / 8) * 2
					buf := bytes.Buffer{}
					for i := 0; i < numRounds; i++ {
						So(binary.Write(&buf, binary.LittleEndian, mr.Int63()), ShouldBeNil)
					}
					o.BigData = buf.Bytes()
					So(ds.Put(o), ShouldBeNil)

					o.BigData = nil
					So(ds.Get(o), ShouldBeNil)

					itm, err := mc.Get(MakeMemcacheKey(0, ds.KeyForObj(o)))
					So(err, ShouldBeNil)

					// Is locked until the next put, forcing all access to the
					// datastore.
					So(itm.Value(), ShouldResemble, []byte{})
					So(itm.Flags(), ShouldEqual, ItemHasLock)

					o.BigData = []byte("hi :)")
					So(ds.Put(o), ShouldBeNil)
					So(ds.Get(o), ShouldBeNil)

					itm, err = mc.Get(itm.Key())
					So(err, ShouldBeNil)
					So(itm.Flags(), ShouldEqual, ItemHasData)
				})

				Convey("failure on Setting memcache locks is a hard stop", func() {
					c, fb := featureBreaker.FilterMC(c, nil)
					fb.BreakFeatures(nil, "SetMulti")
					ds := datastore.Get(c)
					So(ds.Put(&object{ID: 1}).Error(), ShouldContainSubstring, "SetMulti")
				})

				Convey("failure on Setting memcache locks in a transaction is a hard stop", func() {
					c, fb := featureBreaker.FilterMC(c, nil)
					fb.BreakFeatures(nil, "SetMulti")
					ds := datastore.Get(c)
					So(ds.RunInTransaction(func(c context.Context) error {
						So(datastore.Get(c).Put(&object{ID: 1}), ShouldBeNil)
						// no problems here... memcache operations happen after
						// the function body quits.
						return nil
					}, nil).Error(), ShouldContainSubstring, "SetMulti")
				})
			})

			Convey("misc", func() {
				Convey("verify numShards caps at MaxShards", func() {
					sc := supportContext{shardsForKey: shardsForKey}
					So(sc.numShards(ds.KeyForObj(&shardObj{ID: 9001})), ShouldEqual, MaxShards)
				})

				Convey("CompressionType.String", func() {
					So(NoCompression.String(), ShouldEqual, "NoCompression")
					So(ZlibCompression.String(), ShouldEqual, "ZlibCompression")
					So(CompressionType(100).String(), ShouldEqual, "UNKNOWN_CompressionType(100)")
				})
			})
		})

		Convey("disabled cases", func() {
			defer func() { globalEnabled = true }()

			So(IsGloballyEnabled(c), ShouldBeTrue)

			So(SetGlobalEnable(c, false), ShouldBeNil)
			// twice is a nop
			So(SetGlobalEnable(c, false), ShouldBeNil)

			// but it takes 5 minutes to kick in
			So(IsGloballyEnabled(c), ShouldBeTrue)
			clk.Add(time.Minute*5 + time.Second)
			So(IsGloballyEnabled(c), ShouldBeFalse)

			So(mc.Set(mc.NewItem("test").SetValue([]byte("hi"))), ShouldBeNil)
			So(numMemcacheItems(), ShouldEqual, 1)
			So(SetGlobalEnable(c, true), ShouldBeNil)
			// memcache gets flushed as a side effect
			So(numMemcacheItems(), ShouldEqual, 0)

			// Still takes 5 minutes to kick in
			So(IsGloballyEnabled(c), ShouldBeFalse)
			clk.Add(time.Minute*5 + time.Second)
			So(IsGloballyEnabled(c), ShouldBeTrue)
		})
	})
}
func reduce(fq *ds.FinalizedQuery, aid, ns string, isTxn bool) (*reducedQuery, error) {
	if err := fq.Valid(aid, ns); err != nil {
		return nil, err
	}
	if isTxn && fq.Ancestor() == nil {
		return nil, fmt.Errorf("queries within a transaction must include an Ancestor filter")
	}
	if num := numComponents(fq); num > MaxQueryComponents {
		return nil, fmt.Errorf(
			"gae/memory: query is too large. may not have more than "+
				"%d filters + sort orders + ancestor total: had %d",
			MaxQueryComponents, num)
	}

	ret := &reducedQuery{
		aid:          aid,
		ns:           ns,
		kind:         fq.Kind(),
		suffixFormat: fq.Orders(),
	}

	eqFilts := fq.EqFilters()
	ret.eqFilters = make(map[string]stringset.Set, len(eqFilts))
	for prop, vals := range eqFilts {
		sVals := stringset.New(len(vals))
		for _, v := range vals {
			sVals.Add(string(serialize.ToBytes(v)))
		}
		ret.eqFilters[prop] = sVals
	}

	startD, endD := GetBinaryBounds(fq)

	// Now we check the start and end cursors.
	//
	// Cursors are composed of a list of IndexColumns at the beginning, followed
	// by the raw bytes to use for the suffix. The cursor is only valid if all
	// of its IndexColumns match our proposed suffixFormat, as calculated above.
	//
	// Cursors are mutually exclusive with the start/end we picked up from the
	// inequality. In a well formed query, they indicate a subset of results
	// bounded by the inequality. Technically if the start cursor is not >= the
	// low bound, or the end cursor is < the high bound, it's an error, but for
	// simplicity we just cap to the narrowest intersection of the inequality
	// and cursors.
	ret.start = startD
	ret.end = endD
	if start, end := fq.Bounds(); start != nil || end != nil {
		if start != nil {
			if c, ok := start.(queryCursor); ok {
				startCols, startD, err := c.decode()
				if err != nil {
					return nil, err
				}

				if !sortOrdersEqual(startCols, ret.suffixFormat) {
					return nil, errors.New("gae/memory: start cursor is invalid for this query")
				}
				if ret.start == nil || bytes.Compare(ret.start, startD) < 0 {
					ret.start = startD
				}
			} else {
				return nil, errors.New("gae/memory: bad cursor type")
			}
		}

		if end != nil {
			if c, ok := end.(queryCursor); ok {
				endCols, endD, err := c.decode()
				if err != nil {
					return nil, err
				}

				if !sortOrdersEqual(endCols, ret.suffixFormat) {
					return nil, errors.New("gae/memory: end cursor is invalid for this query")
				}
				if ret.end == nil || bytes.Compare(endD, ret.end) < 0 {
					ret.end = endD
				}
			} else {
				return nil, errors.New("gae/memory: bad cursor type")
			}
		}
	}

	// Finally, verify that we could even /potentially/ do work. If we have
	// overlapping range ends, then we don't have anything to do.
	if ret.end != nil && bytes.Compare(ret.start, ret.end) >= 0 {
		return nil, ds.ErrNullQuery
	}

	ret.numCols = len(ret.suffixFormat)
	for prop, vals := range ret.eqFilters {
		if len(ret.suffixFormat) == 1 && prop == "__ancestor__" {
			continue
		}
		ret.numCols += vals.Len()
	}

	return ret, nil
}
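// Usage sketch (assumption, not from the original source): reduce is the
// gateway from a finalized user query to the index-scanning machinery. A
// caller might do roughly:
//
//	rq, err := reduce(fq, aid, ns, inTxn)
//	switch err {
//	case nil:
//		// scan indexes between rq.start and rq.end using rq.suffixFormat
//	case ds.ErrNullQuery:
//		// provably-empty query: return zero results without touching indexes
//	default:
//		// invalid query: propagate the error to the user
//	}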