// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) roachpb.RKey {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.Addr(h.Key)
		eAddr := keys.Addr(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate
}
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate, nil
}
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
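For illustration, here is a minimal, self-contained sketch of the same boundary computation, assuming plain byte-slice keys compared with bytes.Compare in place of the roachpb/keys machinery; the span type and prevBoundary helper are hypothetical simplifications, not the real API. A point request is widened to a single-key span by appending a zero byte, mirroring addr.Next().

package main

import (
	"bytes"
	"fmt"
)

// span is a hypothetical stand-in for a request's key span; a point request
// has an empty end key.
type span struct {
	key, endKey []byte
}

// prevBoundary mirrors prev above on plain byte slices: it returns the right
// boundary of the union of all spans that don't affect keys at or above k,
// i.e. the largest start key still lying strictly below k.
func prevBoundary(spans []span, k []byte) []byte {
	candidate := []byte{} // analogous to roachpb.RKeyMin
	for _, s := range spans {
		end := s.endKey
		if len(end) == 0 {
			// Point request: treat as the single-key span [key, key+"\x00").
			end = append(append([]byte{}, s.key...), 0)
		}
		if bytes.Compare(end, k) >= 0 {
			if bytes.Compare(k, s.key) >= 0 {
				// Span contains k, so we can't go lower.
				return k
			}
			// Span is disjoint from [KeyMin, k).
			continue
		}
		// Keep the largest surviving candidate.
		if bytes.Compare(candidate, s.key) < 0 {
			candidate = s.key
		}
	}
	return candidate
}

func main() {
	spans := []span{
		{key: []byte("a"), endKey: []byte("c")},
		{key: []byte("x")}, // point request
	}
	fmt.Printf("%q\n", prevBoundary(spans, []byte("m"))) // "a": only [a,c) lies below "m"
	fmt.Printf("%q\n", prevBoundary(spans, []byte("b"))) // "b": [a,c) contains "b"
}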
// getDescriptors looks up the range descriptor to use for a query over the
// key range [from,to), with the given lookupOptions. The range descriptor
// which contains the range in which the request should start its query is
// returned first; the returned bool is true in case the given range reaches
// outside the first descriptor.
// In case the returned descriptor is discovered stale, the returned closure
// should be called; it evicts the cache appropriately.
// Note that `from` and `to` are not necessarily Key and EndKey from a
// RequestHeader; it's assumed that they've been translated to key addresses
// already (via KeyAddress).
func (ds *DistSender) getDescriptors(from, to roachpb.RKey, options lookupOptions) (*roachpb.RangeDescriptor, bool, func(), *roachpb.Error) {
	var desc *roachpb.RangeDescriptor
	var err error
	var descKey roachpb.RKey
	if !options.useReverseScan {
		descKey = from
	} else {
		descKey = to
	}
	desc, err = ds.rangeCache.LookupRangeDescriptor(descKey, options)
	if err != nil {
		return nil, false, nil, roachpb.NewError(err)
	}

	// Checks whether we need to get the next range descriptor; returns true if so.
	needAnother := func(desc *roachpb.RangeDescriptor, isReverse bool) bool {
		if isReverse {
			return from.Less(desc.StartKey)
		}
		return desc.EndKey.Less(to)
	}

	evict := func() {
		ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, options.useReverseScan)
	}

	return desc, needAnother(desc, options.useReverseScan), evict, nil
}
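A hedged sketch of just the needAnother predicate above, with a hypothetical descriptor struct standing in for roachpb.RangeDescriptor; it only illustrates the forward/reverse boundary comparison, not the cache lookup or eviction.

package main

import (
	"bytes"
	"fmt"
)

// descriptor is a hypothetical stand-in holding only the bounds needed to
// decide whether a request over [from, to) spills past the current range.
type descriptor struct {
	startKey, endKey []byte
}

// needAnother mirrors the closure in getDescriptors: a forward request needs a
// further descriptor when the current one ends before to; a reverse request
// needs one when the current one starts after from.
func needAnother(d descriptor, from, to []byte, isReverse bool) bool {
	if isReverse {
		return bytes.Compare(from, d.startKey) < 0
	}
	return bytes.Compare(d.endKey, to) < 0
}

func main() {
	d := descriptor{startKey: []byte("c"), endKey: []byte("m")}
	fmt.Println(needAnother(d, []byte("d"), []byte("z"), false)) // true: "m" < "z"
	fmt.Println(needAnother(d, []byte("d"), []byte("k"), false)) // false: range covers [d,k)
	fmt.Println(needAnother(d, []byte("a"), []byte("k"), true))  // true: "a" < "c"
}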
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey roachpb.RKey, desc *roachpb.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) { // True for some unittests.
		return
	}
	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, an existing KeyMin->KeyMax descriptor
	// should be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*roachpb.RangeDescriptor)
		if !key.Less(descriptor.StartKey) && !descriptor.EndKey.Less(key) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from the range meta key
	// after RangeMetaKey(desc.StartKey) to the range meta key for desc.EndKey.
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*roachpb.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(meta(desc.StartKey).Next()), rangeCacheKey(meta(desc.EndKey)))
}
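As a rough illustration of the invariant this method maintains, here is a hypothetical sketch over plain [startKey, endKey) intervals: before caching a new descriptor, every cached entry that intersects it (whether it subsumes the new one or is subsumed by it) is evicted. The rng type and evictOverlapping helper are simplifications; the real cache is keyed by range meta keys and uses Ceil/DoRange rather than a linear scan.

package main

import (
	"bytes"
	"fmt"
)

// rng is a hypothetical [startKey, endKey) interval standing in for a cached
// RangeDescriptor.
type rng struct {
	startKey, endKey []byte
}

// overlaps reports whether two half-open key intervals intersect.
func overlaps(a, b rng) bool {
	return bytes.Compare(a.startKey, b.endKey) < 0 && bytes.Compare(b.startKey, a.endKey) < 0
}

// evictOverlapping returns the cache contents with every entry overlapping
// newDesc removed, leaving room to insert newDesc without conflicts.
func evictOverlapping(cache []rng, newDesc rng) []rng {
	var kept []rng
	for _, cached := range cache {
		if !overlaps(cached, newDesc) {
			kept = append(kept, cached)
		}
	}
	return kept
}

func main() {
	cache := []rng{
		{[]byte(""), []byte("m")},  // subsumes the new descriptor; must go
		{[]byte("m"), []byte("s")}, // disjoint; survives
	}
	newDesc := rng{[]byte("c"), []byte("f")}
	fmt.Println(len(evictOverlapping(cache, newDesc))) // 1: only [m, s) survives
}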
// ComputeSplitKeys takes a start and end key and returns an array of keys
// at which to split the span [start, end).
// The only required splits are at each user table prefix.
func (s SystemConfig) ComputeSplitKeys(startKey, endKey roachpb.RKey) []roachpb.RKey {
	testingLock.Lock()
	tableSplitsDisabled := testingDisableTableSplits
	testingLock.Unlock()
	if tableSplitsDisabled {
		return nil
	}

	tableStart := roachpb.RKey(keys.UserTableDataMin)
	if !tableStart.Less(endKey) {
		// This range is before the user tables span: no required splits.
		return nil
	}

	startID, ok := ObjectIDForKey(startKey)
	if !ok || startID <= keys.MaxReservedDescID {
		// The start key is either:
		// - not part of the structured data span
		// - part of the system span
		// In either case, start looking for splits at the first ID usable
		// by the user data span.
		startID = keys.MaxReservedDescID + 1
	} else {
		// The start key is either already a split key, or after the split
		// key for its ID. We can skip straight to the next one.
		startID++
	}

	// Find the largest object ID.
	// We can't keep splitting until we reach endKey as it could be roachpb.KeyMax.
	endID, err := s.GetLargestObjectID()
	if err != nil {
		log.Errorf("unable to determine largest object ID from system config: %s", err)
		return nil
	}

	// Build key prefixes for sequential table IDs until we reach endKey.
	var splitKeys []roachpb.RKey
	var key roachpb.RKey
	// endID could be smaller than startID if we don't have user tables.
	for id := startID; id <= endID; id++ {
		key = keys.MakeTablePrefix(id)
		// Skip if the range starts on a split key.
		if !startKey.Less(key) {
			continue
		}
		// Handle the case where EndKey is already a table prefix.
		if !key.Less(endKey) {
			break
		}
		splitKeys = append(splitKeys, key)
	}
	return splitKeys
}
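A simplified sketch of the split-key loop above, assuming human-readable string prefixes in place of the binary encoding produced by keys.MakeTablePrefix; makeTablePrefix and computeSplitKeys here are hypothetical stand-ins, and lexicographic string comparison only matches the real key ordering for same-width IDs.

package main

import "fmt"

// makeTablePrefix is a hypothetical, human-readable stand-in for
// keys.MakeTablePrefix; the real function produces an encoded binary prefix.
func makeTablePrefix(id uint32) string {
	return fmt.Sprintf("/Table/%d", id)
}

// computeSplitKeys mirrors the loop in ComputeSplitKeys over string keys:
// emit one split at each table prefix strictly inside (startKey, endKey).
func computeSplitKeys(startID, endID uint32, startKey, endKey string) []string {
	var splitKeys []string
	for id := startID; id <= endID; id++ {
		key := makeTablePrefix(id)
		if key <= startKey {
			// The range already starts at (or after) this split key.
			continue
		}
		if key >= endKey {
			// EndKey is itself a table prefix (or earlier); stop here.
			break
		}
		splitKeys = append(splitKeys, key)
	}
	return splitKeys
}

func main() {
	// Splits for tables 52 and 53 inside a range covering /Table/51 through /Table/54.
	fmt.Println(computeSplitKeys(51, 54, "/Table/51", "/Table/54"))
	// Output: [/Table/52 /Table/53]
}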
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, nodes map[string]roachpb.RangeTreeNode, testName string, node *roachpb.RangeTreeNode, keyMin, keyMax roachpb.RKey) {
	if node == nil {
		return
	}
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	// We need the extra check since roachpb.KeyMin is actually a range start key.
	if !keyMin.Less(node.Key) && !node.Key.Equal(roachpb.RKeyMin) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	left, right := getLeftAndRight(t, nodes, testName, node)
	verifyBinarySearchTree(t, nodes, testName, left, keyMin, node.Key)
	verifyBinarySearchTree(t, nodes, testName, right, node.Key, keyMax)
}
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, nodes map[string]storage.RangeTreeNode, testName string, node storage.RangeTreeNode, keyMin, keyMax roachpb.RKey) {
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	// We need the extra check since roachpb.KeyMin is actually a range start key.
	if !keyMin.Less(node.Key) && !node.Key.Equal(roachpb.RKeyMin) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	if left, ok := getNode(t, nodes, testName, node.LeftKey); ok {
		verifyBinarySearchTree(t, nodes, testName, left, keyMin, node.Key)
	}
	if right, ok := getNode(t, nodes, testName, node.RightKey); ok {
		verifyBinarySearchTree(t, nodes, testName, right, node.Key, keyMax)
	}
}
// verifyBinarySearchTree checks to ensure that all keys to the left of the root
// node are less than it, and all nodes to the right of the root node are
// greater than it. It recursively walks the tree to perform this same check.
func verifyBinarySearchTree(t *testing.T, tc *treeContext, testName string, node *roachpb.RangeTreeNode, keyMin, keyMax roachpb.RKey) {
	if !node.Key.Less(keyMax) {
		t.Errorf("%s: Failed Property BST - The key %s is not less than %s.", testName, node.Key, keyMax)
	}
	if !keyMin.Less(node.Key) {
		t.Errorf("%s: Failed Property BST - The key %s is not greater than %s.", testName, node.Key, keyMin)
	}

	if node.LeftKey != nil {
		left, err := tc.getNode(node.LeftKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, left, keyMin, node.Key)
	}
	if node.RightKey != nil {
		right, err := tc.getNode(node.RightKey)
		if err != nil {
			t.Fatal(err)
		}
		verifyBinarySearchTree(t, tc, testName, right, node.Key, keyMax)
	}
}
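The three variants above check the same invariant against different representations of the tree. A minimal sketch of that invariant on a hypothetical in-memory treeNode (ignoring the RKeyMin special case and the t.Errorf reporting) might look like this.

package main

import (
	"bytes"
	"fmt"
)

// treeNode is a hypothetical in-memory node; the real tests resolve child
// nodes by key from a map, the storage layer, or a treeContext instead of
// following pointers.
type treeNode struct {
	key         []byte
	left, right *treeNode
}

// verifyBST reports whether every node's key lies strictly inside the
// (keyMin, keyMax) window inherited from its ancestors, which is exactly the
// property the recursive walk above asserts.
func verifyBST(node *treeNode, keyMin, keyMax []byte) bool {
	if node == nil {
		return true
	}
	if bytes.Compare(node.key, keyMax) >= 0 {
		return false // key is not less than the upper bound
	}
	if bytes.Compare(node.key, keyMin) <= 0 {
		return false // key is not greater than the lower bound
	}
	return verifyBST(node.left, keyMin, node.key) &&
		verifyBST(node.right, node.key, keyMax)
}

func main() {
	root := &treeNode{
		key:   []byte("m"),
		left:  &treeNode{key: []byte("c")},
		right: &treeNode{key: []byte("t")},
	}
	fmt.Println(verifyBST(root, []byte(""), []byte("\xff"))) // true
	root.right.key = []byte("a")                             // violates the BST property
	fmt.Println(verifyBST(root, []byte(""), []byte("\xff"))) // false
}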
// ComputeSplitKeys takes a start and end key and returns an array of keys
// at which to split the span [start, end).
// The only required splits are at each user table prefix.
func (s SystemConfig) ComputeSplitKeys(startKey, endKey roachpb.RKey) []roachpb.RKey {
	if TestingTableSplitsDisabled() {
		return nil
	}

	tableStart := roachpb.RKey(keys.ReservedTableDataMin)
	if !tableStart.Less(endKey) {
		// This range is before the user tables span: no required splits.
		return nil
	}

	startID, ok := ObjectIDForKey(startKey)
	if !ok || startID <= keys.MaxSystemDescID {
		// The start key is either:
		// - not part of the structured data span
		// - part of the system span
		// In either case, start looking for splits at the first ID usable
		// by the user data span.
		startID = keys.MaxSystemDescID + 1
	} else {
		// The start key is either already a split key, or after the split
		// key for its ID. We can skip straight to the next one.
		startID++
	}

	// Build key prefixes for sequential table IDs until we reach endKey. Note
	// that there are two disjoint sets of sequential keys: non-system reserved
	// tables have sequential IDs, as do user tables, but the two ranges contain
	// a gap.
	var splitKeys []roachpb.RKey
	var key roachpb.RKey

	// appendSplitKeys generates all possible split keys between the given range
	// of IDs and adds them to splitKeys.
	appendSplitKeys := func(startID, endID uint32) {
		// endID could be smaller than startID if we don't have user tables.
		for id := startID; id <= endID; id++ {
			key = keys.MakeNonColumnKey(keys.MakeTablePrefix(id))
			// Skip if this ID matches the startKey passed to ComputeSplitKeys.
			if !startKey.Less(key) {
				continue
			}
			// Handle the case where EndKey is already a table prefix.
			if !key.Less(endKey) {
				break
			}
			splitKeys = append(splitKeys, key)
		}
	}

	// If the startKey falls within the non-system reserved range, compute those
	// keys first.
	if startID <= keys.MaxReservedDescID {
		endID, err := s.GetLargestObjectID(keys.MaxReservedDescID)
		if err != nil {
			log.Errorf("unable to determine largest reserved object ID from system config: %s", err)
			return nil
		}
		appendSplitKeys(startID, endID)
		startID = keys.MaxReservedDescID + 1
	}

	// Append keys in the user space.
	endID, err := s.GetLargestObjectID(0)
	if err != nil {
		log.Errorf("unable to determine largest object ID from system config: %s", err)
		return nil
	}
	appendSplitKeys(startID, endID)
	return splitKeys
}
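A small sketch of the two-pass ID walk that distinguishes this version from the earlier one, using hypothetical constants in place of keys.MaxSystemDescID and keys.MaxReservedDescID; it only shows how the reserved and user ID runs are visited across the gap, not the key encoding or the start/end key filtering.

package main

import "fmt"

// Hypothetical constants standing in for keys.MaxSystemDescID and
// keys.MaxReservedDescID; the real values come from the keys package.
const (
	maxSystemDescID   = 10
	maxReservedDescID = 49
)

// splitIDs mirrors the two-pass structure of ComputeSplitKeys above: IDs in
// the reserved span (system < id <= maxReservedDescID) and IDs in the user
// span (> maxReservedDescID) are walked as two separate sequential runs,
// since the two ranges contain a gap.
func splitIDs(startID, largestReservedID, largestUserID uint32) []uint32 {
	var ids []uint32
	appendIDs := func(from, to uint32) {
		for id := from; id <= to; id++ {
			ids = append(ids, id)
		}
	}
	if startID <= maxReservedDescID {
		appendIDs(startID, largestReservedID)
		startID = maxReservedDescID + 1
	}
	appendIDs(startID, largestUserID)
	return ids
}

func main() {
	// Reserved tables up to ID 12 and user tables up to ID 52: the walk skips
	// the gap between 12 and 50.
	fmt.Println(splitIDs(maxSystemDescID+1, 12, 52))
	// Output: [11 12 50 51 52]
}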