Example 1
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func prev(ba proto.BatchRequest, k proto.Key) proto.Key {
	candidate := proto.KeyMin
	for _, union := range ba.Requests {
		h := union.GetValue().(proto.Request).Header()
		addr := keys.KeyAddress(h.Key)
		eAddr := keys.KeyAddress(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate
}
Example 2
// Add the specified timestamp to the cache as covering the range of
// keys from start to end. If end is nil, the range covers the start
// key only. txnID is nil for no transaction. readOnly specifies
// whether the command adding this timestamp was read-only or not.
func (tc *TimestampCache) Add(start, end proto.Key, timestamp proto.Timestamp, txnID []byte, readOnly bool) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	if tc.latest.Less(timestamp) {
		tc.latest = timestamp
	}
	// Only add to the cache if the timestamp is more recent than the
	// low water mark.
	if tc.lowWater.Less(timestamp) {
		// Check existing, overlapping entries. Remove superseded
		// entries or return without adding this entry if necessary.
		key := tc.cache.NewKey(start, end)
		for _, o := range tc.cache.GetOverlaps(start, end) {
			ce := o.Value.(cacheEntry)
			if ce.readOnly != readOnly {
				continue
			}
			if o.Key.Contains(key) && !ce.timestamp.Less(timestamp) {
				return // don't add this key; there's already a cache entry with >= timestamp.
			} else if key.Contains(o.Key) && !timestamp.Less(ce.timestamp) {
				tc.cache.Del(o.Key) // delete existing key; this cache entry supersedes.
			}
		}
		ce := cacheEntry{timestamp: timestamp, txnID: txnID, readOnly: readOnly}
		tc.cache.Add(key, ce)
	}
}
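The trick at the top of Add stores a single-key interval without allocating two independent keys: Next() produces a fresh slice one byte longer than the original, and the start key is then re-derived as a prefix of that same slice. Below is a minimal, self-contained sketch of the idea using a hypothetical Key type whose Next appends a zero byte (mirroring what proto.Key.Next does); it is an illustration of the memory-sharing trick, not CockroachDB code.

package main

import "fmt"

// Key is a stand-in for proto.Key: an ordinary byte slice.
type Key []byte

// Next returns the next key in lexicographic sort order by appending a
// zero byte; the result is a fresh slice whose first len(k) bytes equal k.
func (k Key) Next() Key {
	return Key(append(append([]byte(nil), k...), 0))
}

func main() {
	start := Key("a")
	// A single-key interval [start, start.Next()) backed by one allocation:
	end := start.Next()      // "a\x00", newly allocated
	start = end[:len(start)] // reslice of the same array, still "a"
	fmt.Printf("start=%q end=%q shared array: %t\n",
		start, end, &start[0] == &end[0])
}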
Example 3
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey proto.Key, desc *proto.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) { // True for some unittests.
		return
	}
	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, an existing KeyMin->KeyMax descriptor
	// would be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*proto.RangeDescriptor)
		if !key.Less(descriptor.StartKey) && !descriptor.EndKey.Less(key) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from the range meta key
	// after RangeMetaKey(desc.StartKey) to the range meta key for desc.EndKey.
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*proto.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(keys.RangeMetaKey(desc.StartKey).Next()),
		rangeCacheKey(keys.RangeMetaKey(desc.EndKey)))
}
Example 4
// getConfig retrieves the configuration for the specified key. If the
// key is empty, all configurations are returned. Otherwise, the
// leading "/" path delimiter is stripped and the configuration
// matching the remainder is retrieved. Note that this will retrieve
// the default config if "key" is equal to "/", and will list all
// configs if "key" is equal to "". The body result contains a listing
// of keys and retrieval of a config. The output format is determined
// by the request header.
func getConfig(db *client.DB, configPrefix proto.Key, config gogoproto.Message,
	path string, r *http.Request) (body []byte, contentType string, err error) {
	// Scan all configs if the key is empty.
	if len(path) == 0 {
		var rows []client.KeyValue
		if rows, err = db.Scan(configPrefix, configPrefix.PrefixEnd(), maxGetResults); err != nil {
			return
		}
		if len(rows) == maxGetResults {
			log.Warningf("retrieved maximum number of results (%d); some may be missing", maxGetResults)
		}
		var prefixes []string
		for _, row := range rows {
			trimmed := bytes.TrimPrefix(row.Key, configPrefix)
			prefixes = append(prefixes, url.QueryEscape(string(trimmed)))
		}
		// Encode the response.
		body, contentType, err = util.MarshalResponse(r, prefixes, util.AllEncodings)
	} else {
		configkey := keys.MakeKey(configPrefix, proto.Key(path[1:]))
		if err = db.GetProto(configkey, config); err != nil {
			return
		}
		body, contentType, err = util.MarshalResponse(r, config, util.AllEncodings)
	}

	return
}
Example 5
// getDescriptors looks up the range descriptor to use for a query over the
// key range [from,to), with the given lookupOptions. The range descriptor
// which contains the range in which the request should start its query is
// returned first; the returned bool is true if the given key range extends
// beyond the first descriptor.
// In case either of the descriptors is discovered stale, the returned closure
// should be called; it evicts the cache appropriately.
// Note that `from` and `to` are not necessarily Key and EndKey from a
// RequestHeader; it's assumed that they've been translated to key addresses
// already (via KeyAddress).
func (ds *DistSender) getDescriptors(from, to proto.Key, options lookupOptions) (*proto.RangeDescriptor, bool, func(), error) {
	var desc *proto.RangeDescriptor
	var err error
	var descKey proto.Key
	if !options.useReverseScan {
		descKey = from
	} else {
		descKey = to
	}
	desc, err = ds.rangeCache.LookupRangeDescriptor(descKey, options)

	if err != nil {
		return nil, false, nil, err
	}

	// needAnother returns true if the next range descriptor must also be fetched.
	needAnother := func(desc *proto.RangeDescriptor, isReverse bool) bool {
		if isReverse {
			return from.Less(desc.StartKey)
		}
		return desc.EndKey.Less(to)
	}

	evict := func() {
		ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, options.useReverseScan)
	}

	return desc, needAnother(desc, options.useReverseScan), evict, nil
}
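needAnother above is pure key comparison. The following standalone sketch restates the same predicate over plain byte slices, with hypothetical descStart/descEnd standing in for the descriptor's bounds; it is illustrative only.

package main

import (
	"bytes"
	"fmt"
)

// needAnother reports whether a scan over [from, to) must consult a further
// descriptor after the one covering [descStart, descEnd).
func needAnother(from, to, descStart, descEnd []byte, reverse bool) bool {
	if reverse {
		// A reverse scan starts at `to` and walks down; it needs more
		// descriptors while `from` lies before the current descriptor.
		return bytes.Compare(from, descStart) < 0
	}
	// A forward scan needs more descriptors while the current descriptor
	// ends before `to`.
	return bytes.Compare(descEnd, to) < 0
}

func main() {
	// Descriptor ["c", "m") queried with span ["d", "z"): the forward scan
	// must continue past "m".
	fmt.Println(needAnother([]byte("d"), []byte("z"), []byte("c"), []byte("m"), false)) // true
	// Reverse scan over ["a", "k") starting in ["c", "m"): it must continue
	// below "c" to cover "a".
	fmt.Println(needAnother([]byte("a"), []byte("k"), []byte("c"), []byte("m"), true)) // true
}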
Example 6
// ValidateRangeMetaKey validates that the given key is a valid Range Metadata
// key.
func ValidateRangeMetaKey(key proto.Key) error {
	// KeyMin is a valid key.
	if key.Equal(proto.KeyMin) {
		return nil
	}
	// Key must be at least as long as Meta1Prefix.
	if len(key) < len(Meta1Prefix) {
		return NewInvalidRangeMetaKeyError("too short", key)
	}

	prefix, body := proto.Key(key[:len(Meta1Prefix)]), proto.Key(key[len(Meta1Prefix):])

	if prefix.Equal(Meta2Prefix) {
		if body.Less(proto.KeyMax) {
			return nil
		}
		return NewInvalidRangeMetaKeyError("body of meta2 range lookup is >= KeyMax", key)
	}

	if prefix.Equal(Meta1Prefix) {
		if proto.KeyMax.Less(body) {
			return NewInvalidRangeMetaKeyError("body of meta1 range lookup is > KeyMax", key)
		}
		return nil
	}
	return NewInvalidRangeMetaKeyError("not a meta key", key)
}
Example 7
// writeDescriptor takes a Table or Database descriptor and writes it
// if needed, incrementing the descriptor counter.
func (p *planner) writeDescriptor(key proto.Key, descriptor descriptorProto, ifNotExists bool) error {
	// Check whether key exists.
	gr, err := p.db.Get(key)
	if err != nil {
		return err
	}

	if gr.Exists() {
		if ifNotExists {
			// Noop.
			return nil
		}
		// Key exists, but we don't want it to: error out.
		// TODO(marc): prettify the error (strip stuff off the type name)
		return fmt.Errorf("%T \"%s\" already exists", descriptor, key.String())
	}

	// Increment unique descriptor counter.
	if ir, err := p.db.Inc(keys.DescIDGenerator, 1); err == nil {
		descriptor.SetID(uint32(ir.ValueInt() - 1))
	} else {
		return err
	}

	// TODO(pmattis): The error currently returned below is likely going to be
	// difficult to interpret.
	// TODO(pmattis): Need to handle if-not-exists here as well.
	descKey := keys.MakeDescMetadataKey(descriptor.GetID())
	return p.db.Txn(func(txn *client.Txn) error {
		b := &client.Batch{}
		b.CPut(key, descKey, nil)
		b.CPut(descKey, descriptor, nil)
		return txn.Commit(b)
	})
}
Example 8
// Add adds a command to the queue which affects the specified key
// range. If end is empty, it is set to start.Next(), meaning the
// command affects a single key. The returned interface is the key for
// the command queue and must be re-supplied on subsequent invocation
// of Remove().
//
// Add should be invoked after waiting on already-executing,
// overlapping commands via the WaitGroup initialized through
// GetWait().
func (cq *CommandQueue) Add(start, end proto.Key, readOnly bool) interface{} {
	if len(end) == 0 {
		end = start.Next()
	}
	key := cq.cache.NewKey(start, end)
	cq.cache.Add(key, &cmd{readOnly: readOnly})
	return key
}
Example 9
// Get searches the kv list for 'key' and returns its
// raw byte value if found. ok is true only if the key is found.
func (s *SystemConfig) Get(key proto.Key) ([]byte, bool) {
	l := len(s.Values)
	index := sort.Search(l, func(i int) bool {
		return bytes.Compare(s.Values[i].Key, key) >= 0
	})
	if index == l || !key.Equal(s.Values[index].Key) {
		return nil, false
	}
	// TODO(marc): I'm pretty sure a Value returned by MVCCScan can
	// never be nil. Should check.
	return s.Values[index].Value.Bytes, true
}
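Get is a lower-bound binary search followed by an equality check: sort.Search returns the index of the first key not less than the target, which is a match only if it compares equal. A self-contained sketch of the same lookup over a hypothetical sorted key/value slice:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type kv struct {
	key, value []byte
}

// get returns the value for key in a slice sorted by key, using a
// lower-bound binary search followed by an equality check.
func get(values []kv, key []byte) ([]byte, bool) {
	i := sort.Search(len(values), func(j int) bool {
		return bytes.Compare(values[j].key, key) >= 0
	})
	if i == len(values) || !bytes.Equal(values[i].key, key) {
		return nil, false
	}
	return values[i].value, true
}

func main() {
	values := []kv{
		{[]byte("a"), []byte("1")},
		{[]byte("c"), []byte("2")},
	}
	v, ok := get(values, []byte("c"))
	fmt.Println(string(v), ok) // 2 true
	_, ok = get(values, []byte("b"))
	fmt.Println(ok) // false
}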
Example 10
// ComputeSplitKeys takes a start and end key and returns an array of keys
// at which to split the span [start, end).
// The only required splits are at each user table prefix.
func (s *SystemConfig) ComputeSplitKeys(startKey, endKey proto.Key) []proto.Key {
	if TestingDisableTableSplits {
		return nil
	}

	tableStart := proto.Key(keys.UserTableDataMin)
	if !tableStart.Less(endKey) {
		// This range is before the user tables span: no required splits.
		return nil
	}

	startID, ok := ObjectIDForKey(startKey)
	if !ok || startID <= keys.MaxReservedDescID {
		// The start key is either:
		// - not part of the structured data span
		// - part of the system span
		// In either case, start looking for splits at the first ID usable
		// by the user data span.
		startID = keys.MaxReservedDescID + 1
	} else {
		// The start key is either already a split key, or after the split
		// key for its ID. We can skip straight to the next one.
		startID++
	}

	// Find the largest object ID.
	// We can't keep splitting until we reach endKey as it could be proto.KeyMax.
	endID, err := s.GetLargestObjectID()
	if err != nil {
		log.Errorf("unable to determine largest object ID from system config: %s", err)
		return nil
	}

	// Build key prefixes for sequential table IDs until we reach endKey.
	var splitKeys proto.KeySlice
	var key proto.Key
	// endID could be smaller than startID if we don't have user tables.
	for id := startID; id <= endID; id++ {
		key = keys.MakeTablePrefix(id)
		// Skip if the range starts on a split key.
		if !startKey.Less(key) {
			continue
		}
		// Handle the case where EndKey is already a table prefix.
		if !key.Less(endKey) {
			break
		}
		splitKeys = append(splitKeys, key)
	}

	return splitKeys
}
Example 11
// GetIndex searches the kv list for 'key' and returns its index if found.
func (s *SystemConfig) GetIndex(key proto.Key) (int, bool) {
	if s == nil {
		return 0, false
	}

	l := len(s.Values)
	index := sort.Search(l, func(i int) bool {
		return !s.Values[i].Key.Less(key)
	})
	if index == l || !key.Equal(s.Values[index].Key) {
		return 0, false
	}
	return index, true
}
Example 12
// GetWait initializes the supplied wait group with the number of
// executing commands which overlap the specified key range. If end is
// empty, end is set to start.Next(), meaning the command affects a
// single key. The caller should call wg.Wait() to wait for
// confirmation that all gating commands have completed or
// failed. readOnly is true if the requester is a read-only command;
// false for read-write.
func (cq *CommandQueue) GetWait(start, end proto.Key, readOnly bool, wg *sync.WaitGroup) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	for _, c := range cq.cache.GetOverlaps(start, end) {
		c := c.Value.(*cmd)
		// Only add to the wait group if one of the commands isn't read-only.
		if !readOnly || !c.readOnly {
			c.pending = append(c.pending, wg)
			wg.Add(1)
		}
	}
}
Example 13
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, nodes map[string]proto.RangeTreeNode, testName string, node *proto.RangeTreeNode, keyMin, keyMax proto.Key) {
	if node == nil {
		return
	}
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	// We need the extra check since proto.KeyMin is actually a range start key.
	if !keyMin.Less(node.Key) && !node.Key.Equal(proto.KeyMin) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}
	left, right := getLeftAndRight(t, nodes, testName, node)
	verifyBinarySearchTree(t, nodes, testName, left, keyMin, node.Key)
	verifyBinarySearchTree(t, nodes, testName, right, node.Key, keyMax)
}
Example 14
// getDescriptor looks up the descriptor at `key`, validates it,
// and unmarshals it into `descriptor`.
func (p *planner) getDescriptor(key proto.Key, descriptor descriptorProto) error {
	gr, err := p.db.Get(key)
	if err != nil {
		return err
	}
	if !gr.Exists() {
		// TODO(marc): prettify the error (strip stuff off the type name)
		return fmt.Errorf("%T \"%s\" does not exist", descriptor, key.String())
	}

	descKey := gr.ValueBytes()
	if err := p.db.GetProto(descKey, descriptor); err != nil {
		return err
	}

	return descriptor.Validate()
}
Example 15
// ObjectIDForKey returns the object ID (table or database) for 'key',
// or (_, false) if not within the structured key space.
func ObjectIDForKey(key proto.Key) (uint32, bool) {
	if key.Equal(proto.KeyMax) {
		return 0, false
	}
	if key.Equal(keys.TableDataPrefix) {
		// TODO(marc): this should eventually return SystemDatabaseID.
		return 0, false
	}
	remaining := bytes.TrimPrefix(key, keys.TableDataPrefix)
	if len(remaining) == len(key) {
		// TrimPrefix returns the input untouched if the prefix doesn't match.
		return 0, false
	}

	// Consume first encoded int.
	_, id64, err := encoding.DecodeUvarint(remaining)
	return uint32(id64), err == nil
}
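The len(remaining) == len(key) check works because bytes.TrimPrefix returns its input unchanged when the prefix is absent. A short illustration of that idiom (the prefixes below are made up for the example, not real CockroachDB key prefixes):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	prefix := []byte("/table/")

	withPrefix := []byte("/table/42")
	trimmed := bytes.TrimPrefix(withPrefix, prefix)
	fmt.Println(len(trimmed) == len(withPrefix)) // false: prefix was stripped

	withoutPrefix := []byte("/other/42")
	trimmed = bytes.TrimPrefix(withoutPrefix, prefix)
	fmt.Println(len(trimmed) == len(withoutPrefix)) // true: input returned untouched
}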
Example 16
// GetMax returns the maximum read and write timestamps which overlap
// the interval spanning from start to end. Cached timestamps matching
// the specified txnID are not considered. If no part of the specified
// range is overlapped by timestamps in the cache, the low water
// timestamp is returned for both read and write timestamps.
//
// The txn ID prevents restarts with a pattern like: read("a"),
// write("a"). The read adds a timestamp for "a". Then the write (for
// the same transaction) would get that as the max timestamp and be
// forced to increment it. This allows timestamps from the same txn
// to be ignored.
func (tc *TimestampCache) GetMax(start, end proto.Key, txnID []byte) (proto.Timestamp, proto.Timestamp) {
	if len(end) == 0 {
		end = start.Next()
	}
	maxR := tc.lowWater
	maxW := tc.lowWater
	for _, o := range tc.cache.GetOverlaps(start, end) {
		ce := o.Value.(cacheEntry)
		if ce.txnID == nil || txnID == nil || !proto.TxnIDEqual(txnID, ce.txnID) {
			if ce.readOnly && maxR.Less(ce.timestamp) {
				maxR = ce.timestamp
			} else if !ce.readOnly && maxW.Less(ce.timestamp) {
				maxW = ce.timestamp
			}
		}
	}
	return maxR, maxW
}
Example 17
// MetaReverseScanBounds returns the range [start,end) within which the desired
// meta record can be found by means of a reverse engine scan. The given key
// must be a valid RangeMetaKey as defined by ValidateRangeMetaKey.
func MetaReverseScanBounds(key proto.Key) (proto.Key, proto.Key, error) {
	if key.Equal(proto.KeyMin) || key.Equal(Meta1Prefix) {
		return nil, nil, NewInvalidRangeMetaKeyError("KeyMin and Meta1Prefix can't be used as the key of reverse scan", key)
	}
	if key.Equal(Meta2Prefix) {
		// Special case Meta2Prefix: this is the first key in Meta2, and the scan
		// interval covers all of Meta1.
		return Meta1Prefix, key.Next(), nil
	}
	// Otherwise find the first entry greater than the given key and the last entry
	// in the same prefix. Since the endKey of an MVCCReverseScan is exclusive, to
	// find the range descriptor that the given key specifies we need to use
	// key.Next() as the MVCCReverseScan's endKey. For example:
	// If we have ranges ["", "f") and ["f", "z"), then we'll have corresponding meta records
	// at "f" and "z". If you're looking for the meta record for key "f", then you want the
	// second record (exclusive in MVCCReverseScan), hence key.Next() below.
	return key[:len(Meta1Prefix)], key.Next(), nil
}
Example 18
// getCachedRangeDescriptorLocked is a helper function to retrieve the
// descriptor of the range which contains the given key, if present in the
// cache. It is assumed that the caller holds a read lock on rdc.rangeCacheMu.
func (rdc *rangeDescriptorCache) getCachedRangeDescriptorLocked(key proto.Key) (
	rangeCacheKey, *proto.RangeDescriptor) {
	// The cache is indexed using the end-key of the range, but the
	// end-key is non-inclusive, so we access the cache using key.Next().
	metaKey := keys.RangeMetaKey(key.Next())

	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if !ok {
		return nil, nil
	}
	metaEndKey := k.(rangeCacheKey)
	rd := v.(*proto.RangeDescriptor)

	// Check that key actually belongs to the range.
	if !rd.ContainsKey(keys.KeyAddress(key)) {
		return nil, nil
	}
	return metaEndKey, rd
}
Example 19
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba proto.BatchRequest, k proto.Key) proto.Key {
	candidate := proto.KeyMax
	for _, union := range ba.Requests {
		h := union.GetValue().(proto.Request).Header()
		addr := keys.KeyAddress(h.Key)
		if addr.Less(k) {
			if eAddr := keys.KeyAddress(h.EndKey); k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate
}
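prev (Example 1) and next bound where a batch may be truncated around a split key. The simplified, self-contained sketch below restates next over plain [start, end) spans; the span type is hypothetical, and key addressing and the proto types are omitted.

package main

import (
	"bytes"
	"fmt"
)

type span struct{ start, end []byte } // [start, end); end may be nil for point requests

// next returns the left boundary of the union of all spans that don't
// affect keys less than k (mirroring next above).
func next(spans []span, k []byte) []byte {
	candidate := []byte("\xff\xff") // stand-in for KeyMax
	for _, s := range spans {
		if bytes.Compare(s.start, k) < 0 {
			if bytes.Compare(k, s.end) < 0 {
				return k // span straddles k: must stay at k
			}
			continue // span lies entirely below k
		}
		if bytes.Compare(s.start, candidate) < 0 {
			candidate = s.start
		}
	}
	return candidate
}

func main() {
	spans := []span{
		{[]byte("a"), []byte("c")},
		{[]byte("p"), []byte("q")},
	}
	fmt.Printf("%q\n", next(spans, []byte("f"))) // "p": the first span lies below "f"
	fmt.Printf("%q\n", next(spans, []byte("b"))) // "b": the first span straddles "b"
}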
Example 20
// getDescriptor returns up to three consecutive range descriptors, starting
// with the one containing key.
func (db *testDescriptorDB) getDescriptor(key proto.Key) []proto.RangeDescriptor {
	response := make([]proto.RangeDescriptor, 0, 3)
	for i := 0; i < 3; i++ {
		v := db.data.Ceil(testDescriptorNode{
			&proto.RangeDescriptor{
				EndKey: key.Next(),
			},
		})
		if v == nil {
			break
		}
		response = append(response, *(v.(testDescriptorNode).RangeDescriptor))
		// Break to keep from skidding off the end of the available ranges.
		if response[i].EndKey.Equal(proto.KeyMax) {
			break
		}
		key = proto.Key(response[i].EndKey).Next()
	}
	return response
}
Example 21
// loadConfigMap scans the config entries under keyPrefix and
// instantiates/returns a config map and its sha256 hash. Prefix
// configuration maps include accounting, permissions, and zones.
func loadConfigMap(eng engine.Engine, keyPrefix proto.Key, configI interface{}) (PrefixConfigMap, []byte, error) {
	// TODO(tschottdorf): Currently this does not handle intents well.
	kvs, _, err := engine.MVCCScan(eng, keyPrefix, keyPrefix.PrefixEnd(), 0, proto.MaxTimestamp, true /* consistent */, nil)
	if err != nil {
		return nil, nil, err
	}
	var configs []*PrefixConfig
	sha := sha256.New()
	for _, kv := range kvs {
		// Instantiate an instance of the config type by unmarshalling
		// proto encoded config from the Value into a new instance of configI.
		config := reflect.New(reflect.TypeOf(configI)).Interface().(gogoproto.Message)
		if err := gogoproto.Unmarshal(kv.Value.Bytes, config); err != nil {
			return nil, nil, util.Errorf("unable to unmarshal config key %s: %s", string(kv.Key), err)
		}
		configs = append(configs, &PrefixConfig{Prefix: bytes.TrimPrefix(kv.Key, keyPrefix), Config: config})
		sha.Write(kv.Value.Bytes)
	}
	m, err := NewPrefixConfigMap(configs)
	return m, sha.Sum(nil), err
}
Example 22
// getCachedRangeDescriptorLocked is a helper function to retrieve the
// descriptor of the range which contains the given key, if present in the
// cache. It is assumed that the caller holds a read lock on rdc.rangeCacheMu.
func (rdc *rangeDescriptorCache) getCachedRangeDescriptorLocked(key proto.Key, inclusive bool) (
	rangeCacheKey, *proto.RangeDescriptor) {
	// The cache is indexed using the end-key of the range, but the
	// end-key is non-inclusive by default.
	var metaKey proto.Key
	if !inclusive {
		metaKey = keys.RangeMetaKey(key.Next())
	} else {
		metaKey = keys.RangeMetaKey(key)
	}

	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if !ok {
		return nil, nil
	}
	metaEndKey := k.(rangeCacheKey)
	rd := v.(*proto.RangeDescriptor)

	// Check that key actually belongs to the range.
	if !rd.ContainsKey(key) {
		// The key is the EndKey and we're inclusive, so just return the range descriptor.
		if inclusive && key.Equal(rd.EndKey) {
			return metaEndKey, rd
		}
		return nil, nil
	}

	// The key equals the StartKey: with an inclusive lookup we need the
	// previous range descriptor, and this cache entry is not it.
	if inclusive && key.Equal(rd.StartKey) {
		return nil, nil
	}
	return metaEndKey, rd
}
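Both versions of getCachedRangeDescriptorLocked lean on the same idea: descriptors are cached under (the meta key of) their end key, so the ceiling of the lookup key yields the only candidate, which must then be verified to actually contain the key. A reduced sketch with a sorted slice standing in for the ordered cache; the types are hypothetical, and meta-key encoding and the inclusive case are omitted.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type rangeDesc struct {
	startKey, endKey []byte
}

// lookup returns the cached descriptor containing key, or nil. descs is
// sorted by endKey, mimicking a cache indexed by range end keys.
func lookup(descs []rangeDesc, key []byte) *rangeDesc {
	// End keys are exclusive, so take the first descriptor whose end key is
	// strictly greater than key (the "ceiling" of key.Next()).
	i := sort.Search(len(descs), func(j int) bool {
		return bytes.Compare(descs[j].endKey, key) > 0
	})
	if i == len(descs) {
		return nil
	}
	d := &descs[i]
	// The ceiling is only a candidate: verify key lies inside [start, end).
	if bytes.Compare(key, d.startKey) < 0 {
		return nil
	}
	return d
}

func main() {
	descs := []rangeDesc{
		{[]byte("a"), []byte("g")},
		{[]byte("m"), []byte("z")},
	}
	fmt.Println(lookup(descs, []byte("c")) != nil) // true: inside ["a","g")
	fmt.Println(lookup(descs, []byte("h")) != nil) // false: gap between cached ranges
}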
Example 23
// addKeyRange adds the specified key range to the interval cache,
// taking care not to add this range if existing entries already
// completely cover the range.
func (tm *txnMetadata) addKeyRange(start, end proto.Key) {
	// This gives us a memory-efficient end key if end is empty.
	// The most common case for keys in the intents interval map
	// is for single keys. However, the interval cache requires
	// a non-empty interval, so we create two key slices which
	// share the same underlying byte array.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	key := tm.keys.NewKey(start, end)
	for _, o := range tm.keys.GetOverlaps(start, end) {
		if o.Key.Contains(key) {
			return
		} else if key.Contains(o.Key) {
			tm.keys.Del(o.Key)
		}
	}

	// Since no existing key range fully covered this range, add it now.
	tm.keys.Add(key, nil)
}
Example 24
// MatchByPrefix returns the longest matching PrefixConfig. If the key
// specified does not match an existing prefix, a panic will
// result. Based on the comments in NewPrefixConfigMap, that example
// will have a final list of PrefixConfig entries which look like:
//
//   "/":          config1
//   "/db1":       config2
//   "/db1/table": config3
//   "/db1/tablf": config2
//   "/db2":       config1
//   "/db3":       config4
//   "/db4":       config1
//
// To find the longest matching prefix, we take the lower bound of the
// specified key.
func (p PrefixConfigMap) MatchByPrefix(key proto.Key) *PrefixConfig {
	n := sort.Search(len(p), func(i int) bool {
		return key.Compare(p[i].Prefix) < 0
	})
	if n == 0 || n > len(p) {
		panic("should never match a key outside of default range")
	}
	// If the matched prefix config is already canonical, return it immediately.
	pc := p[n-1]
	if pc.Canonical == nil {
		return pc
	}
	// Otherwise, search for the canonical prefix config.
	n = sort.Search(len(p), func(i int) bool {
		return pc.Canonical.Compare(p[i].Prefix) <= 0
	})
	// Should find an exact match every time.
	if n >= len(p) || !pc.Canonical.Equal(p[n].Prefix) {
		panic(fmt.Sprintf("canonical lookup for key %q failed", string(pc.Canonical)))
	}
	return p[n]
}
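The lookup above assumes the list has been augmented by NewPrefixConfigMap so that the entry just before the upper bound is always the longest matching prefix. A reduced sketch of that lower-bound step over plain strings, using the augmented list from the comment above; the canonical-redirect step and error handling are omitted, and the types are hypothetical.

package main

import (
	"fmt"
	"sort"
)

type prefixConfig struct {
	prefix string
	config string
}

// matchByPrefix returns the config of the longest prefix of key, assuming
// prefixes is sorted and already augmented with the "end of prefix" entries
// that NewPrefixConfigMap would add.
func matchByPrefix(prefixes []prefixConfig, key string) string {
	n := sort.Search(len(prefixes), func(i int) bool {
		return key < prefixes[i].prefix
	})
	return prefixes[n-1].config
}

func main() {
	// The augmented list from the comment above.
	prefixes := []prefixConfig{
		{"/", "config1"},
		{"/db1", "config2"},
		{"/db1/table", "config3"},
		{"/db1/tablf", "config2"},
		{"/db2", "config1"},
		{"/db3", "config4"},
		{"/db4", "config1"},
	}
	fmt.Println(matchByPrefix(prefixes, "/db1/table/key")) // config3
	fmt.Println(matchByPrefix(prefixes, "/db1/xyz"))       // config2
	fmt.Println(matchByPrefix(prefixes, "/db5"))           // config1
}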
Example 25
// insert performs the insertion of a new range into the RangeTree. It will
// recursively call insert until it finds the correct location. It will not
// overwrite an already existing key, but that case should not occur.
func (tc *treeContext) insert(node *proto.RangeTreeNode, key proto.Key) (*proto.RangeTreeNode, error) {
	if node == nil {
		// Insert the new node here.
		node = &proto.RangeTreeNode{
			Key: key,
		}
		tc.setNode(node)
	} else if key.Less(node.Key) {
		// Walk down the tree to the left.
		left, err := tc.getNode(node.LeftKey)
		if err != nil {
			return nil, err
		}
		left, err = tc.insert(left, key)
		if err != nil {
			return nil, err
		}
		if node.LeftKey == nil || !(*node.LeftKey).Equal(left.Key) {
			node.LeftKey = &left.Key
			tc.setNode(node)
		}
	} else {
		// Walk down the tree to the right.
		right, err := tc.getNode(node.RightKey)
		if err != nil {
			return nil, err
		}
		right, err = tc.insert(right, key)
		if err != nil {
			return nil, err
		}
		if node.RightKey == nil || !(*node.RightKey).Equal(right.Key) {
			node.RightKey = &right.Key
			tc.setNode(node)
		}
	}
	return tc.walkUpRot23(node)
}
Example 26
// ObjectIDForKey returns the object ID (table or database) for 'key',
// or (_, false) if not within the structured key space.
func ObjectIDForKey(key proto.Key) (uint32, bool) {
	if key.Equal(proto.KeyMax) {
		return 0, false
	}
	if key.Equal(keys.TableDataPrefix) {
		// TODO(marc): this should eventually return SystemDatabaseID.
		return 0, false
	}
	remaining := bytes.TrimPrefix(key, keys.TableDataPrefix)
	if len(remaining) == len(key) {
		// TrimPrefix returns the input untouched if the prefix doesn't match.
		return 0, false
	}

	// Consume first encoded int.
	defer func() {
		// Nothing to do, default return values mean "could not decode", which is
		// definitely the case if DecodeUvarint panics.
		_ = recover()
	}()
	_, id64 := encoding.DecodeUvarint(remaining)
	return uint32(id64), true
}
Example 27
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, tc *treeContext, testName string, node *proto.RangeTreeNode, keyMin, keyMax proto.Key) {
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	if !keyMin.Less(node.Key) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	if node.LeftKey != nil {
		left, err := tc.getNode(node.LeftKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, left, keyMin, node.Key)
	}
	if node.RightKey != nil {
		right, err := tc.getNode(node.RightKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, right, node.Key, keyMax)
	}
}
Example 28
// MetaScanBounds returns the start and end keys of the range within which the
// desired meta record can be found by means of an engine scan. The given key
// must be a valid RangeMetaKey as defined by ValidateRangeMetaKey.
func MetaScanBounds(key proto.Key) (proto.Key, proto.Key) {
	if key.Equal(proto.KeyMin) {
		// Special case KeyMin: find the first entry in meta1.
		return Meta1Prefix, Meta1Prefix.PrefixEnd()
	}
	if key.Equal(Meta1KeyMax) {
		// Special case Meta1KeyMax: this is the last key in Meta1, we don't want
		// to start at Next().
		return key, Meta1Prefix.PrefixEnd()
	}
	// Otherwise find the first entry greater than the given key in the same meta prefix.
	return key.Next(), proto.Key(key[:len(Meta1Prefix)]).PrefixEnd()
}
Example 29
// MetaScanBounds returns the range [start,end) within which the desired meta
// record can be found by means of an engine scan. The given key must be a
// valid RangeMetaKey as defined by validateRangeMetaKey.
func MetaScanBounds(key proto.Key) (proto.Key, proto.Key, error) {
	if err := validateRangeMetaKey(key); err != nil {
		return nil, nil, err
	}

	if key.Equal(Meta2KeyMax) {
		return nil, nil, NewInvalidRangeMetaKeyError("Meta2KeyMax can't be used as the key of scan", key)
	}

	if key.Equal(proto.KeyMin) {
		// Special case KeyMin: find the first entry in meta1.
		return Meta1Prefix, Meta1Prefix.PrefixEnd(), nil
	}
	if key.Equal(Meta1KeyMax) {
		// Special case Meta1KeyMax: this is the last key in Meta1, we don't want
		// to start at Next().
		return key, Meta1Prefix.PrefixEnd(), nil
	}
	// Otherwise find the first entry greater than the given key in the same meta prefix.
	return key.Next(), proto.Key(key[:len(Meta1Prefix)]).PrefixEnd(), nil
}
Example 30
// VisitPrefixes invokes the visitor function for each prefix overlapped
// by the specified key range [start, end). If visitor returns done=true
// or an error, the visitation is halted.
func (p PrefixConfigMap) VisitPrefixes(start, end proto.Key,
	visitor func(start, end proto.Key, config gogoproto.Message) (bool, error)) error {
	comp := start.Compare(end)
	if comp > 0 {
		return util.Errorf("start key %q not less than or equal to end key %q", start, end)
	}
	startIdx := sort.Search(len(p), func(i int) bool {
		return start.Compare(p[i].Prefix) < 0
	})
	// Common case of start == end.
	endIdx := startIdx
	if comp != 0 {
		endIdx = sort.Search(len(p), func(i int) bool {
			return end.Compare(p[i].Prefix) < 0
		})
	}

	if startIdx > len(p) || endIdx > len(p) {
		return util.Errorf("start and/or end keys (%q, %q) fall outside prefix range; "+
			"startIdx: %d, endIdx: %d, len(p): %d", start, end, startIdx, endIdx, len(p))
	}

	if startIdx == endIdx {
		_, err := visitor(start, end, p[startIdx-1].Config)
		return err
	}
	for i := startIdx; i < endIdx; i++ {
		done, err := visitor(start, p[i].Prefix, p[i-1].Config)
		if done || err != nil {
			return err
		}
		if p[i].Prefix.Equal(end) {
			return nil
		}
		start = p[i].Prefix
	}
	done, err := visitor(start, end, p[endIdx-1].Config)
	if done || err != nil {
		return err
	}

	return nil
}