Example #1
func TestSerializationReadMisc(t *testing.T) {
	t.Parallel()

	Convey("Misc Serialization tests", t, func() {
		Convey("GeoPoint", func() {
			buf := mkBuf(nil)
			wf(buf, 10)
			wf(buf, 20)
			So(string(ToBytes(ds.GeoPoint{Lat: 10, Lng: 20})), ShouldEqual, buf.String())
		})

		Convey("IndexColumn", func() {
			buf := mkBuf(nil)
			die(buf.WriteByte(1))
			ws(buf, "hi")
			So(string(ToBytes(ds.IndexColumn{Property: "hi", Descending: true})),
				ShouldEqual, buf.String())
		})

		Convey("KeyTok", func() {
			buf := mkBuf(nil)
			ws(buf, "foo")
			die(buf.WriteByte(byte(ds.PTInt)))
			wi(buf, 20)
			So(string(ToBytes(ds.KeyTok{Kind: "foo", IntID: 20})),
				ShouldEqual, buf.String())
		})

		Convey("Property", func() {
			buf := mkBuf(nil)
			die(buf.WriteByte(0x80 | byte(ds.PTString)))
			ws(buf, "nerp")
			So(string(ToBytes(mp("nerp"))),
				ShouldEqual, buf.String())
		})

		Convey("Time", func() {
			tp := mp(time.Now().UTC())
			So(string(ToBytes(tp.Value())), ShouldEqual, string(ToBytes(tp)[1:]))
		})

		Convey("Zero time", func() {
			buf := mkBuf(nil)
			So(WriteTime(buf, time.Time{}), ShouldBeNil)
			t, err := ReadTime(mkBuf(buf.Bytes()))
			So(err, ShouldBeNil)
			So(t.Equal(time.Time{}), ShouldBeTrue)
		})

		Convey("ReadKey", func() {
			Convey("good cases", func() {
				Convey("w/ ctx decodes normally w/ ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytesWithContext(k)
					dk, err := ReadKey(mkBuf(data), WithContext, "", "")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, k)
				})
				Convey("w/ ctx decodes normally w/o ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytesWithContext(k)
					dk, err := ReadKey(mkBuf(data), WithoutContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("spam", "nerd", "knd", "yo", "other", 10))
				})
				Convey("w/o ctx decodes normally w/ ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytes(k)
					dk, err := ReadKey(mkBuf(data), WithContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("", "", "knd", "yo", "other", 10))
				})
				Convey("w/o ctx decodes normally w/o ctx", func() {
					k := mkKey("aid", "ns", "knd", "yo", "other", 10)
					data := ToBytes(k)
					dk, err := ReadKey(mkBuf(data), WithoutContext, "spam", "nerd")
					So(err, ShouldBeNil)
					So(dk, ShouldEqualKey, mkKey("spam", "nerd", "knd", "yo", "other", 10))
				})
				Convey("IntIDs always sort before StringIDs", func() {
					// -1 writes as almost all 1's in the first byte under cmpbin, even
					// though it's technically not a valid key.
					k := mkKey("aid", "ns", "knd", -1)
					data := ToBytes(k)

					k = mkKey("aid", "ns", "knd", "hat")
					data2 := ToBytes(k)

					So(string(data), ShouldBeLessThan, string(data2))
				})
			})

			Convey("err cases", func() {
				buf := mkBuf(nil)
				Convey("nil", func() {
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("str", func() {
					_, err := buf.WriteString("sup")
					die(err)
					_, err = ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "expected actualCtx")
				})
				Convey("truncated 1", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("truncated 2", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("truncated 3", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("huge key", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					for i := 1; i < 60; i++ {
						die(buf.WriteByte(1))
						die(WriteKeyTok(buf, ds.KeyTok{Kind: "sup", IntID: int64(i)}))
					}
					die(buf.WriteByte(0))
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "huge key")
				})
				Convey("insufficient tokens", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					wui(buf, 2)
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("partial token 1", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					die(buf.WriteByte(1))
					ws(buf, "hi")
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("partial token 2", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					die(buf.WriteByte(1))
					ws(buf, "hi")
					die(buf.WriteByte(byte(ds.PTString)))
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldEqual, io.EOF)
				})
				Convey("bad token (invalid type)", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					die(buf.WriteByte(1))
					ws(buf, "hi")
					die(buf.WriteByte(byte(ds.PTBlobKey)))
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "invalid type PTBlobKey")
				})
				Convey("bad token (invalid IntID)", func() {
					die(buf.WriteByte(1)) // actualCtx == 1
					ws(buf, "aid")
					ws(buf, "ns")
					die(buf.WriteByte(1))
					ws(buf, "hi")
					die(buf.WriteByte(byte(ds.PTInt)))
					wi(buf, -2)
					_, err := ReadKey(buf, WithContext, "", "")
					So(err, ShouldErrLike, "zero/negative")
				})
			})
		})

		Convey("ReadGeoPoint", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				_, err := ReadGeoPoint(buf)
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc 2", func() {
				wf(buf, 100)
				_, err := ReadGeoPoint(buf)
				So(err, ShouldEqual, io.EOF)
			})
			Convey("invalid", func() {
				wf(buf, 100)
				wf(buf, 1000)
				_, err := ReadGeoPoint(buf)
				So(err, ShouldErrLike, "invalid GeoPoint")
			})
		})

		Convey("WriteTime", func() {
			Convey("in non-UTC!", func() {
				pst, err := time.LoadLocation("America/Los_Angeles")
				So(err, ShouldBeNil)
				So(func() {
					die(WriteTime(mkBuf(nil), time.Now().In(pst)))
				}, ShouldPanic)
			})
		})

		Convey("ReadTime", func() {
			Convey("trunc 1", func() {
				_, err := ReadTime(mkBuf(nil))
				So(err, ShouldEqual, io.EOF)
			})
		})

		Convey("ReadProperty", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				p, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
				So(p.Type(), ShouldEqual, ds.PTNull)
				So(p.Value(), ShouldBeNil)
			})
			Convey("trunc (PTBytes)", func() {
				die(buf.WriteByte(byte(ds.PTBytes)))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc (PTBlobKey)", func() {
				die(buf.WriteByte(byte(ds.PTBlobKey)))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("invalid type", func() {
				die(buf.WriteByte(byte(ds.PTUnknown + 1)))
				_, err := ReadProperty(buf, WithContext, "", "")
				So(err, ShouldErrLike, "unknown type!")
			})
		})

		Convey("ReadPropertyMap", func() {
			buf := mkBuf(nil)
			Convey("trunc 1", func() {
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("too many rows", func() {
				wui(buf, 1000000)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldErrLike, "huge number of rows")
			})
			Convey("trunc 2", func() {
				wui(buf, 10)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("trunc 3", func() {
				wui(buf, 10)
				ws(buf, "ohai")
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
			Convey("too many values", func() {
				wui(buf, 10)
				ws(buf, "ohai")
				wui(buf, 100000)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldErrLike, "huge number of properties")
			})
			Convey("trunc 4", func() {
				wui(buf, 10)
				ws(buf, "ohai")
				wui(buf, 10)
				_, err := ReadPropertyMap(buf, WithContext, "", "")
				So(err, ShouldEqual, io.EOF)
			})
		})

		Convey("IndexDefinition", func() {
			id := ds.IndexDefinition{Kind: "kind"}
			data := ToBytes(*id.PrepForIdxTable())
			newID, err := ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "prop"})
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "other", Descending: true})
			id.Ancestor = true
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			// Invalid: an empty property name, but it should still round-trip.
			id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "", Descending: true})
			data = ToBytes(*id.PrepForIdxTable())
			newID, err = ReadIndexDefinition(mkBuf(data))
			So(err, ShouldBeNil)
			So(newID.Flip(), ShouldResemble, id.Normalize())

			Convey("too many", func() {
				id := ds.IndexDefinition{Kind: "wat"}
				for i := 0; i < MaxIndexColumns+1; i++ {
					id.SortBy = append(id.SortBy, ds.IndexColumn{Property: "Hi", Descending: true})
				}
				data := ToBytes(*id.PrepForIdxTable())
				newID, err = ReadIndexDefinition(mkBuf(data))
				So(err, ShouldErrLike, "over 64 sort orders")
			})
		})
	})
}
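
The test above leans on a handful of unexported helpers (mkBuf, die, wf, ws, wi, wui, mp, mkKey) defined elsewhere in the package's test files, plus the ShouldEqualKey / ShouldErrLike custom assertions. Below is a minimal sketch of what those helpers might look like; the exact signatures, buffer type, and import paths are assumptions for illustration, the real package defines its own versions on top of the same cmpbin encoding that ToBytes uses.

// Hypothetical sketch of the unexported test helpers, assuming cmpbin-based
// encoding. Import paths may differ depending on the luci-go / gae version.
package serialize

import (
	"bytes"

	"go.chromium.org/luci/common/data/cmpbin"
	ds "go.chromium.org/luci/gae/service/datastore"
)

// die panics on a non-nil error so the buffer-building code stays terse.
func die(err error) {
	if err != nil {
		panic(err)
	}
}

// mkBuf wraps raw bytes in a buffer the Read*/Write* functions can consume.
// (Assumption: *bytes.Buffer satisfies whatever reader/writer interface the
// serialize package expects.)
func mkBuf(data []byte) *bytes.Buffer {
	return bytes.NewBuffer(data)
}

// wf, ws, wi and wui write a float64, string, int64 and uint64 using the
// order-preserving cmpbin encoding, so the tests can hand-build the byte
// strings they expect ToBytes to produce.
func wf(buf *bytes.Buffer, v float64) { _, err := cmpbin.WriteFloat64(buf, v); die(err) }
func ws(buf *bytes.Buffer, s string)  { _, err := cmpbin.WriteString(buf, s); die(err) }
func wi(buf *bytes.Buffer, i int64)   { _, err := cmpbin.WriteInt(buf, i); die(err) }
func wui(buf *bytes.Buffer, i uint64) { _, err := cmpbin.WriteUint(buf, i); die(err) }

// mp and mkKey are thin wrappers over the datastore constructors.
var mp = ds.MkProperty

func mkKey(aid, ns string, elems ...interface{}) *ds.Key {
	return ds.MakeKey(aid, ns, elems...)
}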
Example #2
// maybeAddDefinition possibly adds a new indexDefinitionSortable to this slice.
// It's only added if it could be useful in servicing q; otherwise this function
// is a no-op.
//
// This returns true iff the proposed index is OK and depletes missingTerms to
// empty.
//
// If the proposed index is PERFECT (i.e. contains enough columns to cover all
// equality filters, and also has the correct suffix), idxs will be replaced
// with JUST that index, and this will return true.
func (idxs *indexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringset.Set, id *ds.IndexDefinition) bool {
	// Kindless queries are handled elsewhere.
	if id.Kind != q.kind {
		impossible(
			fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind))
	}

	// If we're an ancestor query, and the index is compound, but doesn't include
	// an Ancestor field, it doesn't work. Builtin indexes can be used for
	// ancestor queries (and have !Ancestor), assuming that it's only equality
	// filters (plus inequality on __key__), or a single inequality.
	if q.eqFilters["__ancestor__"] != nil && !id.Ancestor && !id.Builtin() {
		impossible(
			fmt.Errorf("maybeAddDefinition given compound index with wrong ancestor info: %s %#v", id, q))
	}

	// add __ancestor__ if necessary
	sortBy := id.GetFullSortOrder()

	// If the index has fewer fields than we need for the suffix, it can't
	// possibly help.
	if len(sortBy) < len(q.suffixFormat) {
		return false
	}

	numEqFilts := len(sortBy) - len(q.suffixFormat)
	// make sure the orders are precisely the same
	for i, sb := range sortBy[numEqFilts:] {
		if q.suffixFormat[i] != sb {
			return false
		}
	}

	if id.Builtin() && numEqFilts == 0 {
		if len(q.eqFilters) > 1 || (len(q.eqFilters) == 1 && q.eqFilters["__ancestor__"] == nil) {
			return false
		}
		if len(sortBy) > 1 && q.eqFilters["__ancestor__"] != nil {
			return false
		}
	}

	// Make sure the equalities section doesn't contain any properties we don't
	// want in our query.
	//
	// numByProp and totalEqFilts will be used later to see if this is a perfect
	// match.
	numByProp := make(map[string]int, len(q.eqFilters))
	totalEqFilts := 0

	eqFilts := sortBy[:numEqFilts]
	for _, p := range eqFilts {
		if _, ok := q.eqFilters[p.Property]; !ok {
			return false
		}
		numByProp[p.Property]++
		totalEqFilts++
	}

	// ok, we can actually use this

	// Grab the collection for convenience later. We don't want to invalidate this
	// index's potential just because the collection doesn't exist. If it's
	// a builtin and it doesn't exist, it still needs to be one of the 'possible'
	// indexes... it just means that the user's query will end up with no results.
	coll := s.GetCollection(
		fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable())))

	// First, see if it's a perfect match. If it is, then our search is over.
	//
	// A perfect match contains ALL the equality filter columns (or more, since
	// we can use residuals to fill in the extras).
	toAdd := indexDefinitionSortable{coll: coll}
	toAdd.eqFilts = eqFilts
	for _, sb := range toAdd.eqFilts {
		missingTerms.Del(sb.Property)
	}

	perfect := false
	if len(sortBy) == q.numCols {
		perfect = true
		for k, num := range numByProp {
			if num < q.eqFilters[k].Len() {
				perfect = false
				break
			}
		}
	}
	if perfect {
		*idxs = indexDefinitionSortableSlice{toAdd}
	} else {
		*idxs = append(*idxs, toAdd)
	}
	return missingTerms.Len() == 0
}
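
The heart of the check above is the split of sortBy into an equality-filter prefix and a suffix that must match q.suffixFormat column-for-column; a perfect index additionally covers every equality-filter value. The snippet below is a simplified, self-contained illustration of that split using made-up stand-in types (Column, usableFor). It is not the real memory-datastore API, just the shape of the logic.

// Simplified stand-in for the index-vs-query suffix check; Column and
// usableFor are hypothetical, not part of the real package.
package main

import "fmt"

type Column struct {
	Property   string
	Descending bool
}

// usableFor reports whether an index with the given sort order could service
// a query whose equality-filtered properties are eqProps and whose required
// suffix (inequality filter + user sort orders) is suffix.
func usableFor(sortBy []Column, eqProps map[string]bool, suffix []Column) bool {
	// Too few columns: the suffix can't possibly fit.
	if len(sortBy) < len(suffix) {
		return false
	}
	numEqFilts := len(sortBy) - len(suffix)

	// The trailing columns must match the suffix exactly, in order and
	// direction.
	for i, sb := range sortBy[numEqFilts:] {
		if suffix[i] != sb {
			return false
		}
	}

	// The leading columns may only mention equality-filtered properties.
	for _, sb := range sortBy[:numEqFilts] {
		if !eqProps[sb.Property] {
			return false
		}
	}
	return true
}

func main() {
	eq := map[string]bool{"kind_prop": true}
	suffix := []Column{{Property: "created", Descending: true}, {Property: "__key__"}}

	good := []Column{{Property: "kind_prop"}, {Property: "created", Descending: true}, {Property: "__key__"}}
	bad := []Column{{Property: "other"}, {Property: "created", Descending: true}, {Property: "__key__"}}

	fmt.Println(usableFor(good, eq, suffix)) // true
	fmt.Println(usableFor(bad, eq, suffix))  // false: "other" is not equality-filtered
}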