// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey roachpb.Key, desc *roachpb.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) {
		// True for some unittests.
		return
	}
	// Clear out any descriptor which subsumes the key we're going to
	// cache. For example, an existing KeyMin->KeyMax descriptor should be
	// cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*roachpb.RangeDescriptor)
		if !key.Less(descriptor.StartKey) && !descriptor.EndKey.Less(key) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from the range meta key
	// after RangeMetaKey(desc.StartKey) to the range meta key for desc.EndKey.
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*roachpb.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(keys.RangeMetaKey(desc.StartKey).Next()),
		rangeCacheKey(keys.RangeMetaKey(desc.EndKey)))
}
// getDescriptors looks up the range descriptor to use for a query over the
// key range [from,to), with the given lookupOptions. The range descriptor
// which contains the range in which the request should start its query is
// returned first; the returned bool is true in case the given range reaches
// outside the first descriptor.
// In case either of the descriptors is discovered stale, the returned closure
// should be called; it evicts the cache appropriately.
// Note that `from` and `to` are not necessarily Key and EndKey from a
// RequestHeader; it's assumed that they've been translated to key addresses
// already (via KeyAddress).
func (ds *DistSender) getDescriptors(from, to roachpb.Key, options lookupOptions) (*roachpb.RangeDescriptor, bool, func(), *roachpb.Error) {
	var desc *roachpb.RangeDescriptor
	var err error
	var descKey roachpb.Key
	if !options.useReverseScan {
		descKey = from
	} else {
		descKey = to
	}
	desc, err = ds.rangeCache.LookupRangeDescriptor(descKey, options)
	if err != nil {
		return nil, false, nil, roachpb.NewError(err)
	}

	// Checks whether the requested span extends past the descriptor; if so,
	// the caller needs to fetch another descriptor.
	needAnother := func(desc *roachpb.RangeDescriptor, isReverse bool) bool {
		if isReverse {
			return from.Less(desc.StartKey)
		}
		return desc.EndKey.Less(to)
	}

	evict := func() {
		ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, options.useReverseScan)
	}

	return desc, needAnother(desc, options.useReverseScan), evict, nil
}
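// Illustrative sketch (not part of the DistSender code): the needAnother
// closure above reduces to two key comparisons. The standalone example
// below uses plain byte slices instead of roachpb keys and a hypothetical
// descriptor/span pair to show when a second descriptor lookup is needed
// for a forward and for a reverse scan.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical descriptor covering ["c", "m") and request span ["e", "r").
	descStartKey, descEndKey := []byte("c"), []byte("m")
	from, to := []byte("e"), []byte("r")

	// Forward scan: another descriptor is needed when the descriptor ends
	// before the requested span does (desc.EndKey < to).
	fmt.Println(bytes.Compare(descEndKey, to) < 0) // true: "m" < "r"

	// Reverse scan: another descriptor is needed when the requested span
	// starts before the descriptor does (from < desc.StartKey).
	fmt.Println(bytes.Compare(from, descStartKey) < 0) // false: "e" >= "c"
}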
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func prev(ba roachpb.BatchRequest, k roachpb.Key) roachpb.Key {
	candidate := roachpb.KeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.KeyAddress(h.Key)
		eAddr := keys.KeyAddress(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate; a request ending below k
		// contributes its end key.
		if candidate.Less(eAddr) {
			candidate = eAddr
		}
	}
	return candidate
}
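// Illustrative sketch (not part of the real code): prev over a batch holding
// the spans ["a","c") and ["e","g") behaves as shown below. The simplified
// helper reimplements the same boundary computation on plain byte-slice
// spans; the span type and helper name are hypothetical.
package main

import (
	"bytes"
	"fmt"
)

type span struct{ key, endKey []byte }

// prevBoundary mirrors prev above: the right boundary of the union of all
// spans that don't affect keys >= k.
func prevBoundary(spans []span, k []byte) []byte {
	candidate := []byte("") // stands in for roachpb.KeyMin
	for _, s := range spans {
		end := s.endKey
		if len(end) == 0 {
			end = append(append([]byte{}, s.key...), 0) // analogous to Key.Next()
		}
		if bytes.Compare(end, k) >= 0 {
			if bytes.Compare(k, s.key) >= 0 {
				// Span contains k, so we can't go lower.
				return k
			}
			continue // span lies entirely at or above k
		}
		// Keep the largest surviving end key.
		if bytes.Compare(candidate, end) < 0 {
			candidate = end
		}
	}
	return candidate
}

func main() {
	spans := []span{{[]byte("a"), []byte("c")}, {[]byte("e"), []byte("g")}}
	fmt.Printf("%s\n", prevBoundary(spans, []byte("d"))) // "c": only ["a","c") ends below "d"
	fmt.Printf("%s\n", prevBoundary(spans, []byte("f"))) // "f": ["e","g") contains "f"
}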
// ComputeSplitKeys takes a start and end key and returns an array of keys
// at which to split the span [start, end).
// The only required splits are at each user table prefix.
func (s *SystemConfig) ComputeSplitKeys(startKey, endKey roachpb.Key) []roachpb.Key {
	if TestingDisableTableSplits {
		return nil
	}

	tableStart := roachpb.Key(keys.UserTableDataMin)
	if !tableStart.Less(endKey) {
		// This range is before the user tables span: no required splits.
		return nil
	}

	startID, ok := ObjectIDForKey(startKey)
	if !ok || startID <= keys.MaxReservedDescID {
		// The start key is either:
		// - not part of the structured data span
		// - part of the system span
		// In either case, start looking for splits at the first ID usable
		// by the user data span.
		startID = keys.MaxReservedDescID + 1
	} else {
		// The start key is either already a split key, or after the split
		// key for its ID. We can skip straight to the next one.
		startID++
	}

	// Find the largest object ID. We can't simply split until we reach
	// endKey, as endKey could be roachpb.KeyMax.
	endID, err := s.GetLargestObjectID()
	if err != nil {
		log.Errorf("unable to determine largest object ID from system config: %s", err)
		return nil
	}

	// Build key prefixes for sequential table IDs until we reach endKey.
	var splitKeys roachpb.KeySlice
	var key roachpb.Key
	// endID could be smaller than startID if we don't have user tables.
	for id := startID; id <= endID; id++ {
		key = keys.MakeTablePrefix(id)
		// Skip if the range already starts at or after this split key.
		if !startKey.Less(key) {
			continue
		}
		// Stop once we reach or pass endKey; this also handles the case
		// where endKey is itself a table prefix.
		if !key.Less(endKey) {
			break
		}
		splitKeys = append(splitKeys, key)
	}

	return splitKeys
}
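// Illustrative sketch (not part of the real code): a toy version of the
// split-key loop above, using hypothetical table IDs and a made-up prefix
// encoding ("/Table/<id>") instead of keys.MakeTablePrefix. It shows that a
// range spanning the user-table keyspace gets one split per table prefix
// lying strictly inside (startKey, endKey).
package main

import (
	"bytes"
	"fmt"
)

func main() {
	tablePrefix := func(id uint32) []byte { return []byte(fmt.Sprintf("/Table/%03d", id)) }

	// Hypothetical range and table IDs: user tables 051..053 exist.
	startKey, endKey := []byte("/Table/051"), []byte("/Table/999")
	startID, endID := uint32(51), uint32(53)

	var splitKeys [][]byte
	for id := startID; id <= endID; id++ {
		key := tablePrefix(id)
		if bytes.Compare(startKey, key) >= 0 { // range already starts at/after this prefix
			continue
		}
		if bytes.Compare(key, endKey) >= 0 { // reached the end of the span
			break
		}
		splitKeys = append(splitKeys, key)
	}
	for _, k := range splitKeys {
		fmt.Println(string(k)) // /Table/052, /Table/053
	}
}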
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, nodes map[string]roachpb.RangeTreeNode, testName string, node *roachpb.RangeTreeNode, keyMin, keyMax roachpb.Key) {
	if node == nil {
		return
	}
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	// We need the extra check since roachpb.KeyMin is actually a range start key.
	if !keyMin.Less(node.Key) && !node.Key.Equal(roachpb.KeyMin) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	left, right := getLeftAndRight(t, nodes, testName, node)
	verifyBinarySearchTree(t, nodes, testName, left, keyMin, node.Key)
	verifyBinarySearchTree(t, nodes, testName, right, node.Key, keyMax)
}
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.Key) roachpb.Key {
	candidate := roachpb.KeyMax
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.KeyAddress(h.Key)
		if addr.Less(k) {
			if eAddr := keys.KeyAddress(h.EndKey); k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate
}
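// Illustrative sketch (not part of the real code): the same toy spans used
// in the prev sketch above, run through a simplified version of next. The
// span type and helper name are hypothetical.
package main

import (
	"bytes"
	"fmt"
)

type span struct{ key, endKey []byte }

// nextBoundary mirrors next above: the left boundary of the union of all
// spans that don't affect keys < k.
func nextBoundary(spans []span, k []byte) []byte {
	candidate := []byte("\xff\xff") // stands in for roachpb.KeyMax
	for _, s := range spans {
		if bytes.Compare(s.key, k) < 0 {
			if bytes.Compare(k, s.endKey) < 0 {
				// Starts below k but continues beyond it; stay at k.
				return k
			}
			continue // affects only keys below k
		}
		// Keep the smallest surviving start key.
		if bytes.Compare(s.key, candidate) < 0 {
			candidate = s.key
		}
	}
	return candidate
}

func main() {
	spans := []span{{[]byte("a"), []byte("c")}, {[]byte("e"), []byte("g")}}
	fmt.Printf("%s\n", nextBoundary(spans, []byte("d"))) // "e": only ["e","g") affects keys >= "d"
	fmt.Printf("%s\n", nextBoundary(spans, []byte("b"))) // "b": ["a","c") straddles "b"
}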
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, tc *treeContext, testName string, node *roachpb.RangeTreeNode, keyMin, keyMax roachpb.Key) {
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	if !keyMin.Less(node.Key) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	if node.LeftKey != nil {
		left, err := tc.getNode(node.LeftKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, left, keyMin, node.Key)
	}
	if node.RightKey != nil {
		right, err := tc.getNode(node.RightKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, right, node.Key, keyMax)
	}
}