func parseSuffix(aid, ns string, suffixFormat []ds.IndexColumn, suffix []byte, count int) (raw [][]byte, decoded []ds.Property) {
	buf := serialize.Invertible(bytes.NewBuffer(suffix))
	decoded = make([]ds.Property, len(suffixFormat))
	raw = make([][]byte, len(suffixFormat))

	err := error(nil)
	for i := range decoded {
		if count >= 0 && i >= count {
			break
		}
		needInvert := suffixFormat[i].Descending

		buf.SetInvert(needInvert)
		decoded[i], err = serialize.ReadProperty(buf, serialize.WithoutContext, aid, ns)
		memoryCorruption(err)

		offset := len(suffix) - buf.Len()
		raw[i] = suffix[:offset]
		suffix = suffix[offset:]
		if needInvert {
			raw[i] = serialize.Invert(raw[i])
		}
	}

	return
}
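// exampleMeasureConsumption is an illustrative sketch (hypothetical, not used
// by this package) of the "measure what the reader consumed" trick in
// parseSuffix above. Given a concatenation of self-delimiting chunks, each
// chunk's raw bytes are recovered by comparing the buffer length before and
// after each decode, exactly as parseSuffix does with len(suffix) - buf.Len().
// The decoder here is a toy stand-in for serialize.ReadProperty: one length
// byte followed by that many payload bytes.
func exampleMeasureConsumption(stream []byte) (raw [][]byte) {
	buf := bytes.NewBuffer(stream)
	for buf.Len() > 0 {
		// Toy decode step: consume one length byte, then the payload.
		n, _ := buf.ReadByte()
		buf.Next(int(n))

		// The raw bytes of this item are however much the decode consumed.
		offset := len(stream) - buf.Len()
		raw = append(raw, stream[:offset])
		stream = stream[offset:]
	}
	return
}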
// GetBinaryBounds gets the binary encoding of the upper and lower bounds of
// the inequality filter on fq, if any is defined. If a bound does not exist,
// it is nil.
//
// NOTE: if fq specifies a descending sort order for the inequality, the bounds
// will be inverted, incremented, and flipped.
func GetBinaryBounds(fq *ds.FinalizedQuery) (lower, upper []byte) {
	// Pick up the start/end range from the inequalities, if any.
	//
	// start and end in the reducedQuery are normalized so that
	// `start <= X < end`. Because of that, we need to tweak the inequality
	// filters contained in the query if they use the > or <= operators.
	if ineqProp := fq.IneqFilterProp(); ineqProp != "" {
		_, startOp, startV := fq.IneqFilterLow()
		if startOp != "" {
			lower = serialize.ToBytes(startV)
			if startOp == ">" {
				lower = increment(lower)
			}
		}

		_, endOp, endV := fq.IneqFilterHigh()
		if endOp != "" {
			upper = serialize.ToBytes(endV)
			if endOp == "<=" {
				upper = increment(upper)
			}
		}

		// The inequality is specified in natural (ascending) order in the
		// query's Filter syntax, but the order information may indicate to use
		// a descending index column for it. If that's the case, then we must
		// invert, swap and increment the inequality endpoints.
		//
		// Invert so that the desired numbers are represented correctly in the
		// index.
		// Swap so that our iterators still go from >= start to < end.
		// Increment so that >= and < get correctly bounded (since the iterator
		// is still using natural byte ordering).
		if fq.Orders()[0].Descending {
			hi, lo := []byte(nil), []byte(nil)
			if len(lower) > 0 {
				lo = increment(serialize.Invert(lower))
			}
			if len(upper) > 0 {
				hi = increment(serialize.Invert(upper))
			}
			upper, lower = lo, hi
		}
	}
	return
}
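// exampleDescendingBounds is an illustrative sketch (hypothetical helpers,
// not this package's serialize.Invert or increment) of the
// invert/swap/increment dance above. If a descending column stores invert(X),
// then the ascending range [lower, upper) becomes
// [inc(invert(upper)), inc(invert(lower))) in index space: inversion reverses
// the byte ordering (so the endpoints swap roles), and the increments restore
// the >=/< open/closed-ness of each end.
func exampleDescendingBounds() {
	invert := func(b []byte) []byte { // stand-in: XOR every byte with 0xff
		out := make([]byte, len(b))
		for i, c := range b {
			out[i] = c ^ 0xff
		}
		return out
	}
	inc := func(b []byte) []byte { // stand-in: big-endian +1 on a copy
		out := append([]byte(nil), b...)
		for i := len(out) - 1; i >= 0; i-- {
			out[i]++
			if out[i] != 0 {
				return out
			}
		}
		return append([]byte{1}, out...) // every byte overflowed
	}

	lower, upper := []byte{0x30}, []byte{0x40} // ascending: 0x30 <= X < 0x40
	lo := inc(invert(upper))                   // new lower bound in index space
	hi := inc(invert(lower))                   // new upper bound in index space

	for _, x := range [][]byte{{0x2f}, {0x30}, {0x3f}, {0x40}} {
		ix := invert(x) // what a descending index column stores
		in := bytes.Compare(ix, lo) >= 0 && bytes.Compare(ix, hi) < 0
		fmt.Printf("X=%x: inAscendingRange=%v invertedInRange=%v\n",
			x, x[0] >= 0x30 && x[0] < 0x40, in)
	}
}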
// toComparableString computes the byte-sortable 'order' string for the given
// key/PropertyMap.
//
// * start/end are byte sequences which are the inequality bounds of the
//   query, if any. These are a serialized datastore.Property. If the
//   inequality column is inverted, then start and end are also inverted and
//   swapped with each other.
// * order is the list of sort orders in the actual executing queries.
// * k / pm are the data to derive a sortable string for.
//
// The result of this function is the series of serialized properties, one per
// order column, which represent this key/pm's first entry in the composite
// index that would point to it (i.e. the index whose sort orders are `order`).
func toComparableString(start, end []byte, order []ds.IndexColumn, k *ds.Key, pm ds.PropertyMap) (row, key []byte) {
	doCmp := true
	soFar := []byte{}
	ps := serialize.PropertyMapPartially(k, nil)
	for _, ord := range order {
		row, ok := ps[ord.Property]
		if !ok {
			if vals, ok := pm[ord.Property]; ok {
				row = serialize.PropertySlice(vals)
			}
		}

		sort.Sort(row)
		foundOne := false
		for _, serialized := range row {
			if ord.Descending {
				serialized = serialize.Invert(serialized)
			}
			if doCmp {
				maybe := serialize.Join(soFar, serialized)
				cmp := bytes.Compare(maybe, start)
				if cmp >= 0 {
					foundOne = true
					soFar = maybe
					doCmp = len(soFar) < len(start)
					break
				}
			} else {
				foundOne = true
				soFar = serialize.Join(soFar, serialized)
				break
			}
		}
		if !foundOne {
			return nil, nil
		}
	}
	if end != nil && bytes.Compare(soFar, end) >= 0 {
		return nil, nil
	}
	return soFar, ps["__key__"][0]
}
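// exampleDoCmpInvariant is an illustrative sketch (hypothetical, not used
// above) of why toComparableString can stop comparing against `start` once it
// has accumulated at least len(start) bytes (the doCmp flag). If soFar >= start
// and soFar is at least as long as start, every extension of soFar also
// compares >= start, so only the first few order columns ever need the
// comparison. While soFar is still shorter than start, an extension could
// still fall below start, which is why doCmp stays true until then.
func exampleDoCmpInvariant() {
	start := []byte{0x10, 0x20}
	soFar := []byte{0x10, 0x20} // >= start, and len(soFar) >= len(start)
	for _, ext := range [][]byte{{0x00}, {0x99}, {}} {
		extended := append(append([]byte(nil), soFar...), ext...)
		fmt.Printf("%x >= start: %v\n", extended, bytes.Compare(extended, start) >= 0)
	}
}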
// generate generates a single iterDefinition for the given index.
func generate(q *reducedQuery, idx *indexDefinitionSortable, c *constraints) *iterDefinition {
	def := &iterDefinition{
		c:     idx.coll,
		start: q.start,
		end:   q.end,
	}
	toJoin := make([][]byte, len(idx.eqFilts))
	for i, sb := range idx.eqFilts {
		val := c.peel(sb.Property)
		if sb.Descending {
			val = serialize.Invert(val)
		}
		toJoin[i] = val
	}
	def.prefix = serialize.Join(toJoin...)
	def.prefixLen = len(def.prefix)

	if q.eqFilters["__ancestor__"] != nil && !idx.hasAncestor() {
		// The query requires an ancestor, but the index doesn't explicitly have
		// it as part of the prefix (otherwise it would have been the first
		// eqFilt above). This happens when it's a builtin index, or if it's the
		// primary index (for a kindless query), or if it's the Kind index (for
		// a filterless query).
		//
		// builtin indexes are:
		//   Kind/__key__
		//   Kind/Prop/__key__
		//   Kind/Prop/-__key__
		if len(q.suffixFormat) > 2 || q.suffixFormat[len(q.suffixFormat)-1].Property != "__key__" {
			// This should never happen. One of the previous validators would
			// have selected a different index. But just in case.
			impossible(fmt.Errorf("cannot supply an implicit ancestor for %#v", idx))
		}

		// get the only value out of __ancestor__
		anc, _ := q.eqFilters["__ancestor__"].Peek()

		// Intentionally do NOT update prefixLen. This allows multiIterator to
		// correctly include the entire key in the shared iterator suffix,
		// instead of just the remainder.

		// chop the terminal null byte off the q.ancestor key... we can accept
		// anything which is a descendant or an exact match. Removing the last
		// byte from the key (the terminating null) allows this trick to work.
		// Otherwise it would be a closed range of EXACTLY this key.
		chopped := []byte(anc[:len(anc)-1])
		if q.suffixFormat[0].Descending {
			chopped = serialize.Invert(chopped)
		}
		def.prefix = serialize.Join(def.prefix, chopped)

		// Update start and end, since we know that if they contain anything,
		// they contain values for the __key__ field. This is necessary because
		// bytes are shifting from the suffix to the prefix, and start/end
		// should only contain suffix (variable) bytes.
		if def.start != nil {
			if !bytes.HasPrefix(def.start, chopped) {
				// again, shouldn't happen, but if it does, we want to know
				// about it.
				impossible(fmt.Errorf(
					"start suffix for implied ancestor doesn't start with ancestor! start:%v ancestor:%v",
					def.start, chopped))
			}
			def.start = def.start[len(chopped):]
		}
		if def.end != nil {
			if !bytes.HasPrefix(def.end, chopped) {
				impossible(fmt.Errorf(
					"end suffix for implied ancestor doesn't start with ancestor! end:%v ancestor:%v",
					def.end, chopped))
			}
			def.end = def.end[len(chopped):]
		}
	}

	return def
}
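// exampleAncestorChop is an illustrative sketch (toy encoding, not the real
// key serialization) of the "chop the terminal null byte" trick above. If each
// key element is written with a non-null lead byte and a complete key ends
// with a 0x00 terminator, then a prefix match against the ancestor encoding
// *with* its final null matches only that exact key, while dropping the final
// null also matches every descendant (whose encodings continue past it with
// more elements instead of the terminator).
func exampleAncestorChop() {
	// Toy encoding: each element is {0x01, name}; a complete key ends in 0x00.
	ancestor := []byte{0x01, 'a', 0x00}
	child := []byte{0x01, 'a', 0x01, 'b', 0x00}
	other := []byte{0x01, 'z', 0x00}

	chopped := ancestor[:len(ancestor)-1] // drop the terminating null

	for _, k := range [][]byte{ancestor, child, other} {
		fmt.Printf("%x: exactOnly=%v descendantOrSelf=%v\n",
			k, bytes.HasPrefix(k, ancestor), bytes.HasPrefix(k, chopped))
	}
}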