Example #1
// TestRangeSplitMeta executes various splits (including at meta addressing)
// and checks that all created intents are resolved. This includes both intents
// which are resolved synchronously with EndTransaction and those resolved via RPC.
func TestRangeSplitMeta(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	splitKeys := []roachpb.Key{roachpb.Key("G"), keys.RangeMetaKey(roachpb.Key("F")),
		keys.RangeMetaKey(roachpb.Key("K")), keys.RangeMetaKey(roachpb.Key("H"))}

	// Execute the consecutive splits.
	for _, splitKey := range splitKeys {
		log.Infof("starting split at key %q...", splitKey)
		if err := s.DB.AdminSplit(splitKey); err != nil {
			t.Fatal(err)
		}
		log.Infof("split at key %q complete", splitKey)
	}

	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
Example #2
// Scan scans the key range specified by start key through end key up
// to some maximum number of results. The last key of the iteration is
// returned with the reply.
func (r *Range) Scan(batch engine.Engine, args proto.ScanRequest) (proto.ScanResponse, []proto.Intent, error) {
	var reply proto.ScanResponse

	rows, intents, err := engine.MVCCScan(batch, args.Key, args.EndKey, args.MaxResults, args.Timestamp, args.ReadConsistency == proto.CONSISTENT, args.Txn)
	reply.Rows = rows
	return reply, intents, err
}
Example #3
// Get looks up the latest sequence number recorded for this transaction ID.
// The latest entry is that with the highest epoch (and then, highest
// sequence). On a miss, zero is returned for both. If an entry is found and a
// SequenceCacheEntry is provided, it is populated from the found value.
func (sc *SequenceCache) Get(e engine.Engine, id []byte, dest *roachpb.SequenceCacheEntry) (uint32, uint32, error) {
	if len(id) == 0 {
		return 0, 0, errEmptyTxnID
	}

	// Pull response from disk and read into reply if available. Sequence
	// number sorts in decreasing order, so this gives us the largest entry or
	// an entry which isn't ours. To avoid encoding an end key for the scan,
	// we just scan and check via a simple prefix check whether we read a
	// key for "our" cache id.
	prefix := keys.SequenceCacheKeyPrefix(sc.rangeID, id)
	kvs, _, err := engine.MVCCScan(e, prefix, sc.max, 1, /* num */
		roachpb.ZeroTimestamp, true /* consistent */, nil /* txn */)
	if err != nil || len(kvs) == 0 || !bytes.HasPrefix(kvs[0].Key, prefix) {
		return 0, 0, err
	}
	_, epoch, seq, err := decodeSequenceCacheKey(kvs[0].Key, sc.scratchBuf[:0])
	if err != nil {
		return 0, 0, err
	}
	if dest != nil {
		dest.Reset()
		// Caller wants to have the unmarshaled value.
		if err := kvs[0].Value.GetProto(dest); err != nil {
			return 0, 0, err
		}
	}
	return epoch, seq, nil
}
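
The doc comment above defines a miss as a zero epoch and zero sequence. A minimal caller-side sketch of consuming that contract (the helper name checkSequenceCache is hypothetical and not part of the source):

// checkSequenceCache is a hypothetical helper illustrating SequenceCache.Get usage:
// it reports whether any entry was recorded for the given transaction ID, relying on
// the documented behavior that a miss returns zero for both epoch and sequence.
func checkSequenceCache(sc *SequenceCache, e engine.Engine, txnID []byte) (bool, error) {
	var entry roachpb.SequenceCacheEntry
	epoch, seq, err := sc.Get(e, txnID, &entry)
	if err != nil {
		return false, err
	}
	// A zero epoch and zero sequence means nothing was found for this transaction ID.
	return epoch != 0 || seq != 0, nil
}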
Example #4
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	setTestRetryOptions()

	// Rewrite a zone config with low max bytes.
	zoneConfig := &proto.ZoneConfig{
		ReplicaAttrs: []proto.Attributes{
			{},
			{},
			{},
		},
		RangeMinBytes: 1 << 8,
		RangeMaxBytes: 1 << 18,
	}
	if err := s.DB.Put(keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin), zoneConfig); err != nil {
		t.Fatal(err)
	}

	// Start a test writer writing about 32K per key so that not too many writes are necessary to split the 256K range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in the allotted time.
	if err := util.IsTrueWithin(func() bool {
		// Scan the meta2 records.
		rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		return len(rows) >= 5
	}, 6*time.Second); err != nil {
		t.Errorf("failed to split 5 times: %s", err)
	}
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using an IsTrueWithin construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, proto.KeyMax, 0, proto.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
Example #5
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18
	defer config.TestingSetDefaultZoneConfig(cfg)()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()

	// Start a test writer writing about 32K per key so that not too many writes are necessary to split the 256K range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in the allotted time.
	util.SucceedsSoon(t, func() error {
		// Scan the meta2 records.
		rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return util.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return util.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	util.SucceedsSoon(t, func() error {
		if _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, 0, hlc.MaxTimestamp, true, nil); err != nil {
			return util.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example #6
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Override default zone config.
	previousMaxBytes := config.DefaultZoneConfig.RangeMaxBytes
	config.DefaultZoneConfig.RangeMaxBytes = 1 << 18
	defer func() { config.DefaultZoneConfig.RangeMaxBytes = previousMaxBytes }()

	s := createTestDB(t)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()
	setTestRetryOptions()

	// Start a test writer writing about 32K per key so that not too many writes are necessary to split the 256K range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in the allotted time.
	if err := util.IsTrueWithin(func() bool {
		// Scan the meta2 records.
		rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		return len(rows) >= 5
	}, 6*time.Second); err != nil {
		t.Errorf("failed to split 5 times: %s", err)
	}
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using an IsTrueWithin construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, cleanMVCCScanTimeout); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
Example #7
// getActualData returns the actual value of all time series keys in the
// underlying engine. Data is returned as a map of strings to roachpb.Values.
func (tm *testModel) getActualData() map[string]roachpb.Value {
	// Scan over all TS keys stored in the engine.
	startKey := keys.TimeseriesPrefix
	endKey := startKey.PrefixEnd()
	keyValues, _, err := engine.MVCCScan(context.Background(), tm.Eng, startKey, endKey, 0, tm.Clock.Now(), true, nil)
	if err != nil {
		tm.t.Fatalf("error scanning TS data from engine: %s", err.Error())
	}

	kvMap := make(map[string]roachpb.Value)
	for _, kv := range keyValues {
		kvMap[string(kv.Key)] = kv.Value
	}

	return kvMap
}
Example #8
// getActualData returns the actual value of all time series keys in the
// underlying engine. Data is returned as a map of strings to proto.Values.
func (tm *testModel) getActualData() map[string]*proto.Value {
	// Scan over all TS keys stored in the engine.
	startKey := keyDataPrefix
	endKey := keyDataPrefix.PrefixEnd()
	keyValues, _, err := engine.MVCCScan(tm.Eng, startKey, endKey, 0, tm.Clock.Now(), true, nil)
	if err != nil {
		tm.t.Fatalf("error scanning TS data from engine: %s", err.Error())
	}

	kvMap := make(map[string]*proto.Value)
	for _, kv := range keyValues {
		val := kv.Value
		kvMap[string(kv.Key)] = &val
	}

	return kvMap
}
Example #9
// loadConfigMap scans the config entries under keyPrefix and
// instantiates/returns a config map and its sha256 hash. Prefix
// configuration maps include accounting, permissions, and zones.
func loadConfigMap(eng engine.Engine, keyPrefix proto.Key, configI interface{}) (PrefixConfigMap, []byte, error) {
	// TODO(tschottdorf): Currently this does not handle intents well.
	kvs, _, err := engine.MVCCScan(eng, keyPrefix, keyPrefix.PrefixEnd(), 0, proto.MaxTimestamp, true /* consistent */, nil)
	if err != nil {
		return nil, nil, err
	}
	var configs []*PrefixConfig
	sha := sha256.New()
	for _, kv := range kvs {
		// Instantiate an instance of the config type by unmarshalling
		// proto encoded config from the Value into a new instance of configI.
		config := reflect.New(reflect.TypeOf(configI)).Interface().(gogoproto.Message)
		if err := gogoproto.Unmarshal(kv.Value.Bytes, config); err != nil {
			return nil, nil, util.Errorf("unable to unmarshal config key %s: %s", string(kv.Key), err)
		}
		configs = append(configs, &PrefixConfig{Prefix: bytes.TrimPrefix(kv.Key, keyPrefix), Config: config})
		sha.Write(kv.Value.Bytes)
	}
	m, err := NewPrefixConfigMap(configs)
	return m, sha.Sum(nil), err
}
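
Since loadConfigMap instantiates the config type via reflection from the configI argument, callers pass a value (not a pointer) of the concrete config type. A hedged usage sketch reusing keys.ConfigZonePrefix and proto.ZoneConfig from the other examples above (the wrapper name loadZoneMap is hypothetical):

// loadZoneMap is a hypothetical wrapper showing loadConfigMap in use: it scans the
// zone config entries under keys.ConfigZonePrefix and lets loadConfigMap unmarshal
// each value into a freshly allocated proto.ZoneConfig via reflection.
func loadZoneMap(eng engine.Engine) (PrefixConfigMap, []byte, error) {
	return loadConfigMap(eng, keys.ConfigZonePrefix, proto.ZoneConfig{})
}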
Example #10
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in memory engine.
func TestBootstrapCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, stopper)
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.NewTxnMetrics(metric.NewRegistry())); err != nil {
		t.Fatal(err)
	}

	// Scan the complete contents of the local database directly from the engine.
	rows, _, err := engine.MVCCScan(context.Background(), e, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil)
	if err != nil {
		t.Fatal(err)
	}
	var foundKeys keySlice
	for _, kv := range rows {
		foundKeys = append(foundKeys, kv.Key)
	}
	var expectedKeys = keySlice{
		testutils.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax),
		testutils.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax),
		roachpb.Key("\x04node-idgen"),
		roachpb.Key("\x04range-tree-root"),
		roachpb.Key("\x04store-idgen"),
	}
	// Add the initial keys for sql.
	for _, kv := range GetBootstrapSchema().GetInitialValues() {
		expectedKeys = append(expectedKeys, kv.Key)
	}
	// Resort the list. The sql values are not sorted.
	sort.Sort(expectedKeys)

	if !reflect.DeepEqual(foundKeys, expectedKeys) {
		t.Errorf("expected keys mismatch:\n%s\n  -- vs. -- \n\n%s",
			formatKeys(foundKeys), formatKeys(expectedKeys))
	}

	// TODO(spencer): check values.
}
Example #11
// TestRangeSplitMeta executes various splits (including at meta addressing)
// and checks that all created intents are resolved. This includes both intents
// which are resolved synchronously with EndTransaction and those resolved via RPC.
func TestRangeSplitMeta(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()

	splitKeys := []roachpb.RKey{roachpb.RKey("G"), mustMeta(roachpb.RKey("F")),
		mustMeta(roachpb.RKey("K")), mustMeta(roachpb.RKey("H"))}

	// Execute the consecutive splits.
	for _, splitKey := range splitKeys {
		log.Infof("starting split at key %q...", splitKey)
		if pErr := s.DB.AdminSplit(roachpb.Key(splitKey)); pErr != nil {
			t.Fatal(pErr)
		}
		log.Infof("split at key %q complete", splitKey)
	}

	util.SucceedsSoon(t, func() error {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, roachpb.KeyMax, 0, roachpb.MaxTimestamp, true, nil); err != nil {
			return util.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example #12
// GetAllTransactionID returns all the key-value pairs for the given transaction ID from
// the engine.
func (sc *SequenceCache) GetAllTransactionID(e engine.Engine, id []byte) ([]roachpb.KeyValue, error) {
	prefix := keys.SequenceCacheKeyPrefix(sc.rangeID, id)
	kvs, _, err := engine.MVCCScan(e, prefix, prefix.PrefixEnd(), 0, /* max */
		roachpb.ZeroTimestamp, true /* consistent */, nil /* txn */)
	return kvs, err
}
Example #13
// InternalRangeLookup is used to look up RangeDescriptors - a RangeDescriptor
// is a metadata structure which describes the key range and replica locations
// of a distinct range in the cluster.
//
// RangeDescriptors are stored as values in the cockroach cluster's key-value
// store. However, they are always stored using special "Range Metadata keys",
// which are "ordinary" keys with a special prefix prepended. The Range Metadata
// Key for an ordinary key can be generated with the `keys.RangeMetaKey(key)`
// function. The RangeDescriptor for the range which contains a given key can be
// retrieved by generating its Range Metadata Key and dispatching it to
// InternalRangeLookup.
//
// Note that the Range Metadata Key sent to InternalRangeLookup is NOT the key
// at which the desired RangeDescriptor is stored. Instead, this method returns
// the RangeDescriptor stored at the _lowest_ existing key which is _greater_
// than the given key. The returned RangeDescriptor will thus contain the
// ordinary key which was originally used to generate the Range Metadata Key
// sent to InternalRangeLookup.
//
// The "Range Metadata Key" for a range is built by appending the end key of
// the range to the meta[12] prefix because the RocksDB iterator only supports
// a Seek() interface which acts as a Ceil(). Using the start key of the range
// would cause Seek() to find the key after the meta indexing record we're
// looking for, which would result in having to back the iterator up, an option
// which is both less efficient and not available in all cases.
//
// This method has an important optimization: instead of just returning the
// requested RangeDescriptor, it also returns a slice of additional range
// descriptors immediately consecutive to the desired RangeDescriptor. This is
// intended to serve as a sort of caching pre-fetch, so that the requesting
// nodes can aggressively cache RangeDescriptors which are likely to be desired
// by their current workload.
func (r *Range) InternalRangeLookup(batch engine.Engine, args *proto.InternalRangeLookupRequest, reply *proto.InternalRangeLookupResponse) []proto.Intent {
	if err := keys.ValidateRangeMetaKey(args.Key); err != nil {
		reply.SetGoError(err)
		return nil
	}

	rangeCount := int64(args.MaxRanges)
	if rangeCount < 1 {
		reply.SetGoError(util.Errorf(
			"Range lookup specified invalid maximum range count %d: must be > 0", rangeCount))
		return nil
	}
	if args.IgnoreIntents {
		rangeCount = 1 // simplify lookup because we may have to retry to read new
	}

	// We want to search for the metadata key just greater than args.Key. Scan
	// for both the requested key and the keys immediately afterwards, up to
	// MaxRanges.
	startKey, endKey := keys.MetaScanBounds(args.Key)
	// Scan inconsistently. Any intents encountered are bundled up, but
	// otherwise ignored.
	kvs, intents, err := engine.MVCCScan(batch, startKey, endKey, rangeCount,
		args.Timestamp, false /* !consistent */, args.Txn)
	if err != nil {
		// An error here would likely amount to something seriously going
		// wrong.
		reply.SetGoError(err)
		return nil
	}
	if args.IgnoreIntents && len(intents) > 0 {
		// NOTE (subtle): in general, we want to try to clean up dangling
		// intents on meta records. However, if we're in the process of
		// cleaning up a dangling intent on a meta record by pushing the
		// transaction, we don't want to create an infinite loop:
		//
		// intent! -> push-txn -> range-lookup -> intent! -> etc...
		//
		// Instead we want:
		//
		// intent! -> push-txn -> range-lookup -> ignore intent, return old/new ranges
		//
		// On the range-lookup from a push transaction, we therefore
		// want to suppress WriteIntentErrors and return a value
		// anyway. But which value? We don't know whether the range
		// update succeeded or failed, but if we don't return the
		// correct range descriptor we may not be able to find the
		// transaction to push. Since we cannot know the correct answer,
		// we choose randomly between the pre- and post- transaction
		// values. If we guess wrong, the client will try again and get
		// the other value (within a few tries).
		if rand.Intn(2) == 0 {
			key, txn := intents[0].Key, &intents[0].Txn
			val, _, err := engine.MVCCGet(batch, key, txn.Timestamp, true, txn)
			if err != nil {
				reply.SetGoError(err)
				return nil
			}
			kvs = []proto.KeyValue{{Key: key, Value: *val}}
		}
	}

	if len(kvs) == 0 {
		// No matching results were returned from the scan. This could
		// indicate a very bad system error, but for now we will just
		// treat it as a retryable Key Mismatch error.
		err := proto.NewRangeKeyMismatchError(args.Key, args.EndKey, r.Desc())
		reply.SetGoError(err)
		log.Errorf("InternalRangeLookup dispatched to correct range, but no matching RangeDescriptor was found. %s", err)
		return nil
	}

	// Decode all scanned range descriptors, stopping if a range is encountered
	// which does not have the same metadata prefix as the queried key.
	rds := make([]proto.RangeDescriptor, len(kvs))
	for i := range kvs {
		// TODO(tschottdorf) Candidate for a ReplicaCorruptionError, once we
		// introduce that.
		if err = gogoproto.Unmarshal(kvs[i].Value.Bytes, &rds[i]); err != nil {
			reply.SetGoError(err)
			return nil
		}
	}

	reply.Ranges = rds
	return intents
}
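
To make the addressing scheme described above concrete, here is a hedged caller-side sketch (the helper name lookupDescriptorKVs is hypothetical; keys.RangeMetaKey and keys.MetaScanBounds are used only as they appear in these examples): because each range indexes its descriptor under its end key, the descriptor covering an ordinary key is found by scanning forward from that key's range metadata key.

// lookupDescriptorKVs is a hypothetical sketch of the lookup pattern described above:
// compute the range metadata key for an ordinary key, derive the meta scan bounds,
// and return the first key/value found, which holds the descriptor stored at the
// lowest existing meta key greater than the computed key.
func lookupDescriptorKVs(batch engine.Engine, key proto.Key, ts proto.Timestamp) ([]proto.KeyValue, error) {
	startKey, endKey := keys.MetaScanBounds(keys.RangeMetaKey(key))
	kvs, _, err := engine.MVCCScan(batch, startKey, endKey, 1 /* maxResults */, ts, true /* consistent */, nil /* txn */)
	return kvs, err
}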
Example #14
// Scan scans the key range specified by start key through end key up
// to some maximum number of results. The last key of the iteration is
// returned with the reply.
func (r *Range) Scan(batch engine.Engine, args *proto.ScanRequest, reply *proto.ScanResponse) []proto.Intent {
	kvs, intents, err := engine.MVCCScan(batch, args.Key, args.EndKey, args.MaxResults, args.Timestamp, args.ReadConsistency == proto.CONSISTENT, args.Txn)
	reply.Rows = kvs
	reply.SetGoError(err)
	return intents
}
Example #15
// TestUpdateRangeAddressing verifies range addressing records are
// correctly updated on creation of new range descriptors.
func TestUpdateRangeAddressing(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	// When split is false, merging treats the right range as the merged
	// range. With merging, leftExpNew indicates the addressing keys we
	// expect to be removed.
	testCases := []struct {
		split                   bool
		leftStart, leftEnd      roachpb.RKey
		rightStart, rightEnd    roachpb.RKey
		leftExpNew, rightExpNew [][]byte
	}{
		// Start out with whole range.
		{false, roachpb.RKeyMin, roachpb.RKeyMax, roachpb.RKeyMin, roachpb.RKeyMax,
			[][]byte{}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKeyMax)}},
		// Split KeyMin-KeyMax at key "a".
		{true, roachpb.RKeyMin, roachpb.RKey("a"), roachpb.RKey("a"), roachpb.RKeyMax,
			[][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Split "a"-KeyMax at key "z".
		{true, roachpb.RKey("a"), roachpb.RKey("z"), roachpb.RKey("z"), roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("z"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Split "a"-"z" at key "m".
		{true, roachpb.RKey("a"), roachpb.RKey("m"), roachpb.RKey("m"), roachpb.RKey("z"),
			[][]byte{meta2Key(roachpb.RKey("m"))}, [][]byte{meta2Key(roachpb.RKey("z"))}},
		// Split KeyMin-"a" at meta2(m).
		{true, roachpb.RKeyMin, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("m")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("m"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Split meta2(m)-"a" at meta2(z).
		{true, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")), metaKey(roachpb.RKey("z")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("z"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Split meta2(m)-meta2(z) at meta2(r).
		{true, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("z")),
			[][]byte{meta1Key(roachpb.RKey("r"))}, [][]byte{meta1Key(roachpb.RKey("z"))}},

		// Now, merge all of our splits backwards...

		// Merge meta2(m)-meta2(z).
		{false, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("r")), metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")),
			[][]byte{meta1Key(roachpb.RKey("r"))}, [][]byte{meta1Key(roachpb.RKey("z"))}},
		// Merge meta2(m)-"a".
		{false, metaKey(roachpb.RKey("m")), metaKey(roachpb.RKey("z")), metaKey(roachpb.RKey("m")), roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("z"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Merge KeyMin-"a".
		{false, roachpb.RKeyMin, metaKey(roachpb.RKey("m")), roachpb.RKeyMin, roachpb.RKey("a"),
			[][]byte{meta1Key(roachpb.RKey("m"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKey("a"))}},
		// Merge "a"-"z".
		{false, roachpb.RKey("a"), roachpb.RKey("m"), roachpb.RKey("a"), roachpb.RKey("z"),
			[][]byte{meta2Key(roachpb.RKey("m"))}, [][]byte{meta2Key(roachpb.RKey("z"))}},
		// Merge "a"-KeyMax.
		{false, roachpb.RKey("a"), roachpb.RKey("z"), roachpb.RKey("a"), roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("z"))}, [][]byte{meta2Key(roachpb.RKeyMax)}},
		// Merge KeyMin-KeyMax.
		{false, roachpb.RKeyMin, roachpb.RKey("a"), roachpb.RKeyMin, roachpb.RKeyMax,
			[][]byte{meta2Key(roachpb.RKey("a"))}, [][]byte{meta1Key(roachpb.RKeyMax), meta2Key(roachpb.RKeyMax)}},
	}
	expMetas := metaSlice{}

	for i, test := range testCases {
		left := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i * 2), StartKey: test.leftStart, EndKey: test.leftEnd}
		right := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i*2 + 1), StartKey: test.rightStart, EndKey: test.rightEnd}
		b := &client.Batch{}
		if test.split {
			if err := splitRangeAddressing(b, left, right); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := mergeRangeAddressing(b, left, right); err != nil {
				t.Fatal(err)
			}
		}
		if err := store.DB().Run(b); err != nil {
			t.Fatal(err)
		}
		// Scan meta keys directly from engine.
		kvs, _, err := engine.MVCCScan(store.Engine(), keys.MetaPrefix, keys.MetaMax, 0, roachpb.MaxTimestamp, true, nil)
		if err != nil {
			t.Fatal(err)
		}
		metas := metaSlice{}
		for _, kv := range kvs {
			scannedDesc := &roachpb.RangeDescriptor{}
			if err := proto.Unmarshal(kv.Value.GetRawBytes(), scannedDesc); err != nil {
				t.Fatal(err)
			}
			metas = append(metas, metaRecord{key: kv.Key, desc: scannedDesc})
		}

		// Continue to build up the expected metas slice, replacing any earlier
		// version of the same key.
		addOrRemoveNew := func(keys [][]byte, desc *roachpb.RangeDescriptor, add bool) {
			for _, n := range keys {
				found := -1
				for i := range expMetas {
					if expMetas[i].key.Equal(roachpb.Key(n)) {
						found = i
						expMetas[i].desc = desc
						break
					}
				}
				if found == -1 && add {
					expMetas = append(expMetas, metaRecord{key: n, desc: desc})
				} else if found != -1 && !add {
					expMetas = append(expMetas[:found], expMetas[found+1:]...)
				}
			}
		}
		addOrRemoveNew(test.leftExpNew, left, test.split /* on split, add; on merge, remove */)
		addOrRemoveNew(test.rightExpNew, right, true)
		sort.Sort(expMetas)

		if test.split {
			if log.V(1) {
				log.Infof("test case %d: split %q-%q at %q", i, left.StartKey, right.EndKey, left.EndKey)
			}
		} else {
			if log.V(1) {
				log.Infof("test case %d: merge %q-%q + %q-%q", i, left.StartKey, left.EndKey, left.EndKey, right.EndKey)
			}
		}
		for _, meta := range metas {
			if log.V(1) {
				log.Infof("%q", meta.key)
			}
		}

		if !reflect.DeepEqual(expMetas, metas) {
			t.Errorf("expected metas don't match")
			if len(expMetas) != len(metas) {
				t.Errorf("len(expMetas) != len(metas); %d != %d", len(expMetas), len(metas))
			} else {
				for j, meta := range expMetas {
					if !meta.key.Equal(metas[j].key) {
						fmt.Printf("%d: expected %q vs %q\n", j, meta.key, metas[j].key)
					}
					if !reflect.DeepEqual(meta.desc, metas[j].desc) {
						fmt.Printf("%d: expected %q vs %q and %s vs %s\n", j, meta.key, metas[j].key, meta.desc, metas[j].desc)
					}
				}
			}
		}
	}
}