Example #1
// walkCompIdxs walks the table of compound indexes in the store. If `endsWith`
// is provided, this will only walk over compound indexes which match
// Kind, Ancestor, and whose SortBy has `endsWith.SortBy` as a suffix.
func walkCompIdxs(store *memStore, endsWith *ds.IndexDefinition, cb func(*ds.IndexDefinition) bool) {
	idxColl := store.GetCollection("idx")
	if idxColl == nil {
		return
	}
	itrDef := iterDefinition{c: idxColl}

	if endsWith != nil {
		full := serialize.ToBytes(*endsWith.Flip())
		// chop off the terminating null byte
		itrDef.prefix = full[:len(full)-1]
	}

	it := itrDef.mkIter()
	defer it.stop()
	for !it.stopped {
		it.next(nil, func(i *gkvlite.Item) {
			if i == nil {
				return
			}
			qi, err := serialize.ReadIndexDefinition(bytes.NewBuffer(i.Key))
			memoryCorruption(err)
			if !cb(qi.Flip()) {
				it.stop()
			}
		})
	}
}
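
A minimal usage sketch for walkCompIdxs, assuming a *memStore named store is in scope inside this package; the suffix definition and callback body are purely illustrative. It visits only the compound indexes on "Model" whose sort order ends with an ascending Field1 column, and stops the walk early by returning false from the callback:

// Illustrative sketch: `store` is assumed to be a *memStore; the suffix and
// callback are made up for this example.
suffix := &ds.IndexDefinition{
	Kind: "Model",
	SortBy: []ds.IndexColumn{
		{Property: "Field1"}, // ascending by default
	},
}

var matched []*ds.IndexDefinition
walkCompIdxs(store, suffix, func(def *ds.IndexDefinition) bool {
	matched = append(matched, def)
	return len(matched) < 10 // returning false stops the iteration
})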
Example #2
func TestCompoundIndexes(t *testing.T) {
	t.Parallel()

	idxKey := func(def dsS.IndexDefinition) string {
		So(def, ShouldNotBeNil)
		return "idx::" + string(serialize.ToBytes(*def.PrepForIdxTable()))
	}

	numItms := func(c *memCollection) uint64 {
		ret, _ := c.GetTotals()
		return ret
	}

	Convey("Test Compound indexes", t, func() {
		type Model struct {
			ID int64 `gae:"$id"`

			Field1 []string
			Field2 []int64
		}

		c := Use(context.Background())
		ds := dsS.Get(c)
		t := ds.Testable().(*dsImpl)
		head := t.data.head

		So(ds.Put(&Model{1, []string{"hello", "world"}, []int64{10, 11}}), ShouldBeNil)

		idx := dsS.IndexDefinition{
			Kind: "Model",
			SortBy: []dsS.IndexColumn{
				{Property: "Field2"},
			},
		}

		coll := head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 2)

		idx.SortBy[0].Property = "Field1"
		coll = head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 2)

		idx.SortBy = append(idx.SortBy, dsS.IndexColumn{Property: "Field1"})
		So(head.GetCollection(idxKey(idx)), ShouldBeNil)

		t.AddIndexes(&idx)
		coll = head.GetCollection(idxKey(idx))
		So(coll, ShouldNotBeNil)
		So(numItms(coll), ShouldEqual, 4)
	})
}
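
The expected row counts above follow from how multi-valued properties expand into index rows: every combination of values across the indexed columns becomes one row. A hedged sketch of that arithmetic (the expectedRows helper is illustrative, not part of the package):

// expectedRows multiplies the number of values each indexed property has on
// the entity; each combination of values becomes one index row.
// Illustrative helper only.
func expectedRows(valueCounts ...int) int {
	rows := 1
	for _, n := range valueCounts {
		rows *= n
	}
	return rows
}

// For the Model above: Field1 has 2 values and Field2 has 2 values, so each
// single-column index holds expectedRows(2) == 2 rows, while the compound
// index on (Field2, Field1) holds expectedRows(2, 2) == 4 rows.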
Example #3
// maybeAddDefinition possibly adds a new IndexDefinitionSortable to this slice.
// It's only added if it could be useful in servicing q, otherwise this function
// is a noop.
//
// This returns true iff the proposed index is OK and depletes missingTerms to
// empty.
//
// If the proposed index is PERFECT (i.e. it contains enough columns to cover
// all equality filters, and also has the correct suffix), idxs will be
// replaced with JUST that index, and this will return true.
func (idxs *IndexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringSet, id *ds.IndexDefinition) bool {
	// Kindless queries are handled elsewhere.
	if id.Kind != q.kind {
		impossible(
			fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind))
	}

	// If this is an ancestor query and the index is compound but doesn't
	// include an Ancestor field, it can't work. Builtin indexes (which have
	// !Ancestor) can still serve ancestor queries, as long as the query has
	// only equality filters (plus an inequality on __key__), or a single
	// inequality.
	if q.eqFilters["__ancestor__"] != nil && !id.Ancestor && !id.Builtin() {
		impossible(
			fmt.Errorf("maybeAddDefinition given compound index with wrong ancestor info: %s %#v", id, q))
	}

	// GetFullSortOrder adds __ancestor__ to the sort order if necessary.
	sortBy := id.GetFullSortOrder()

	// If the index has fewer fields than we need for the suffix, it can't
	// possibly help.
	if len(sortBy) < len(q.suffixFormat) {
		return false
	}

	numEqFilts := len(sortBy) - len(q.suffixFormat)
	// make sure the orders are precisely the same
	for i, sb := range sortBy[numEqFilts:] {
		if q.suffixFormat[i] != sb {
			return false
		}
	}

	if id.Builtin() && numEqFilts == 0 {
		if len(q.eqFilters) > 1 || (len(q.eqFilters) == 1 && q.eqFilters["__ancestor__"] == nil) {
			return false
		}
	}

	// Make sure the equalities section doesn't contain any properties we don't
	// want in our query.
	//
	// numByProp and totalEqFilts count the equality columns; numByProp is used
	// below to decide whether this index is a perfect match.
	numByProp := make(map[string]int, len(q.eqFilters))
	totalEqFilts := 0

	eqFilts := sortBy[:numEqFilts]
	for _, p := range eqFilts {
		if _, ok := q.eqFilters[p.Property]; !ok {
			return false
		}
		numByProp[p.Property]++
		totalEqFilts++
	}

	// ok, we can actually use this

	// Grab the collection for convenience later. We don't want to invalidate this
	// index's potential just because the collection doesn't exist. If it's
	// a builtin and it doesn't exist, it still needs to be one of the 'possible'
	// indexes... it just means that the user's query will end up with no results.
	coll := s.GetCollection(
		fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable())))

	// First, see if it's a perfect match. If it is, then our search is over.
	//
	// A perfect match contains ALL the equality filter columns (or more, since
	// we can use residuals to fill in the extras).
	toAdd := IndexDefinitionSortable{coll: coll}
	toAdd.eqFilts = eqFilts
	for _, sb := range toAdd.eqFilts {
		missingTerms.rm(sb.Property)
	}

	perfect := false
	if len(sortBy) == q.numCols {
		perfect = true
		for k, num := range numByProp {
			if num < len(q.eqFilters[k]) {
				perfect = false
				break
			}
		}
	}
	if perfect {
		*idxs = IndexDefinitionSortableSlice{toAdd}
	} else {
		*idxs = append(*idxs, toAdd)
	}
	return len(missingTerms) == 0
}
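
The shape check in maybeAddDefinition boils down to: the candidate index must end with exactly the query's suffix columns, and every column before that must correspond to an equality filter in the query. A self-contained sketch of that check, using an illustrative column type rather than the package's real ds.IndexColumn:

package main

import "fmt"

// column is an illustrative stand-in for ds.IndexColumn.
type column struct {
	Property   string
	Descending bool
}

// usable mirrors the shape check in maybeAddDefinition: the index's full sort
// order must be at least as long as the query's suffix, must end with exactly
// that suffix, and must spend all of its leading columns on properties that
// have equality filters.
func usable(sortBy, suffix []column, eq map[string]bool) bool {
	if len(sortBy) < len(suffix) {
		return false
	}
	numEq := len(sortBy) - len(suffix)
	for i, sb := range sortBy[numEq:] {
		if sb != suffix[i] {
			return false
		}
	}
	for _, sb := range sortBy[:numEq] {
		if !eq[sb.Property] {
			return false
		}
	}
	return true
}

func main() {
	suffix := []column{{Property: "__key__"}}
	eq := map[string]bool{"Field1": true}
	fmt.Println(usable([]column{{Property: "Field1"}, {Property: "__key__"}}, suffix, eq)) // true
	fmt.Println(usable([]column{{Property: "Field2"}, {Property: "__key__"}}, suffix, eq)) // false
}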
Example #4
func TestSerializationReadMisc(t *testing.T) {
	t.Parallel()

	Convey("Misc Serialization tests", t, func() {
		Convey("GeoPoint", func() {
			buf := mkBuf(nil)
			cmpbin.WriteFloat64(buf, 10)
			cmpbin.WriteFloat64(buf, 20)
			So(string(ToBytes(ds.GeoPoint{Lat: 10, Lng: 20})), ShouldEqual, buf.String())
		})

		Convey("IndexColumn", func() {
			buf := mkBuf(nil)
			buf.WriteByte(1)
			cmpbin.WriteString(buf, "hi")
			So(string(ToBytes(ds.IndexColumn{Property: "hi", Direction: ds.DESCENDING})),
				ShouldEqual, buf.String())
		})

		Convey("KeyTok", func() {
			buf := mkBuf(nil)
			cmpbin.WriteString(buf, "foo")
			buf.WriteByte(byte(ds.PTInt))
			cmpbin.WriteInt(buf, 20)
			So(string(ToBytes(ds.KeyTok{Kind: "foo", IntID: 20})),
				ShouldEqual, buf.String())
		})

		Convey("Property", func() {
			buf := mkBuf(nil)
			buf.WriteByte(0x80 | byte(ds.PTString))
			cmpbin.WriteString(buf, "nerp")
			So(string(ToBytes(mp("nerp"))),
				ShouldEqual, buf.String())
		})

		Convey("Time", func() {
			tp := mp(time.Now().UTC())
			So(string(ToBytes(tp.Value())), ShouldEqual, string(ToBytes(tp)[1:]))
		})

		Convey("Bad ToBytes", func() {
			So(func() { ToBytes(100.7) }, ShouldPanic)
			So(func() { ToBytesWithContext(100.7) }, ShouldPanic)
		})

		Convey("ReadKey", func() {
			Convey("good cases", func() {
				Convey("w/ ctx decodes normally w/ ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytesWithContext(k)
					dk, err := ReadKey(mkBuf(data), WithContext, "", "")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, k)
				})
				Convey("w/ ctx decodes normally w/o ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytesWithContext(k)
					dk, err := ReadKey(mkBuf(data), WithoutContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("spam", "nerd", "knd", "yo", "other", 10))
				})
				Convey("w/o ctx decodes normally w/ ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytes(k)
					dk, err := ReadKey(mkBuf(data), WithContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("", "", "knd", "yo", "other", 10))
				})
				Convey("w/o ctx decodes normally w/o ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytes(k)
					dk, err := ReadKey(mkBuf(data), WithoutContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("spam", "nerd", "knd", "yo", "other", 10))
				})
				Convey("IntIDs always sort before StringIDs", func() {
					// -1 writes as almost all 1's in the first byte under cmpbin, even
					// though it's technically not a valid key.
					k := mkKey("aid", "ns", "knd", -1)
					data := ToBytes(k)

					k = mkKey("aid", "ns", "knd", "hat")
					data2 := ToBytes(k)

					So(string(data), ShouldBeLessThan, string(data2))
				})
			})

			Convey("err cases", func() {
				buf := mkBuf(nil)
				Convey("nil", func() {
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("str", func() {
					buf.WriteString("sup")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "expected actualCtx")
				})
				Convey("truncated 1", func() {
					buf.WriteByte(1) // actualCtx == 1
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("truncated 2", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("truncated 3", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("huge key", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					for i := 1; i < 60; i++ {
						buf.WriteByte(1)
						WriteKeyTok(buf, ds.KeyTok{Kind: "sup", IntID: int64(i)})
					}
					buf.WriteByte(0)
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "huge key")
				})
				Convey("insufficient tokens", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					cmpbin.WriteUint(buf, 2)
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("partial token 1", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					buf.WriteByte(1)
					cmpbin.WriteString(buf, "hi")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("partial token 2", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					buf.WriteByte(1)
					cmpbin.WriteString(buf, "hi")
					buf.WriteByte(byte(ds.PTString))
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("bad token (invalid type)", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					buf.WriteByte(1)
					cmpbin.WriteString(buf, "hi")
					buf.WriteByte(byte(ds.PTBlobKey))
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "invalid type PTBlobKey")
				})
				Convey("bad token (invalid IntID)", func() {
					buf.WriteByte(1) // actualCtx == 1
					cmpbin.WriteString(buf, "aid")
					cmpbin.WriteString(buf, "ns")
					buf.WriteByte(1)
					cmpbin.WriteString(buf, "hi")
					buf.WriteByte(byte(ds.PTInt))
					cmpbin.WriteInt(buf, -2)
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "zero/negative")
				})
			})
		})

		Convey("ReadGeoPoint", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				_, err := ReadGeoPoint(buf)
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc 2", func() {
				cmpbin.WriteFloat64(buf, 100)
				_, err := ReadGeoPoint(buf)
				So(err, ShouldEqual, io.EOF)
			})
			Convey("invalid", func() {
				cmpbin.WriteFloat64(buf, 100)
				cmpbin.WriteFloat64(buf, 1000)
				_, err := ReadGeoPoint(buf)
				So(err, ShouldErrLike, "invalid GeoPoint")
			})
		})

		Convey("WriteTime", func() {
			Convey("in non-UTC!", func() {
				pst, err := time.LoadLocation("America/Los_Angeles")
				So(err, ShouldBeNil)
				So(func() {
					WriteTime(mkBuf(nil), time.Now().In(pst))
				}, ShouldPanic)
			})
		})

		Convey("ReadTime", func() {
			Convey("trunc 1", func() {
				_, err := ReadTime(mkBuf(nil))
				So(err, ShouldEqual, io.EOF)
			})
		})

		Convey("ReadProperty", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				p, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
				So(p.Type(), ShouldEqual, ds.PTNull)
				So(p.Value(), ShouldBeNil)
			})
			Convey("trunc (PTBytes)", func() {
				buf.WriteByte(byte(ds.PTBytes))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc (PTBlobKey)", func() {
				buf.WriteByte(byte(ds.PTBlobKey))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("invalid type", func() {
				buf.WriteByte(byte(ds.PTUnknown + 1))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldErrLike, "unknown type!")
			})
		})

		Convey("ReadPropertyMap", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("too many rows", func() {
				cmpbin.WriteUint(buf, 1000000)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldErrLike, "huge number of rows")
			})
			Convey("trunc 2", func() {
				cmpbin.WriteUint(buf, 10)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc 3", func() {
				cmpbin.WriteUint(buf, 10)
				cmpbin.WriteString(buf, "ohai")
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("too many values", func() {
				cmpbin.WriteUint(buf, 10)
				cmpbin.WriteString(buf, "ohai")
				cmpbin.WriteUint(buf, 100000)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldErrLike, "huge number of properties")
			})
			Convey("trunc 4", func() {
				cmpbin.WriteUint(buf, 10)
				cmpbin.WriteString(buf, "ohai")
				cmpbin.WriteUint(buf, 10)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
		})

		Convey("IndexDefinition", func() {
			id := ds.IndexDefinition{Kind: "kind"}
			data := ToBytes(*id.PrepForIdxTable())
			newID, err := ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "prop"})
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "other", Direction: ds.DESCENDING})
			id.Ancestor = true
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			// invalid: an empty property name still round-trips through serialization
			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "", Direction: ds.DESCENDING})
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			Convey("too many", func() {
				id := ds.IndexDefinition{Kind: "wat"}
				for i := 0; i < MaxIndexColumns+1; i++ {
					id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "Hi", Direction: ds.ASCENDING})
				}
				data := ToBytes(*id.PrepForIdxTable())
				newID, err = ReadIndexDefinition(mkBuf(data))
				So(err, ShouldErrLike, "over 64 sort orders")
			})
		})
	})
}