Example #1
// TestSingleRangeReverseScan verifies that ReverseScan gets the right results
// on a single range.
func TestSingleRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := initReverseScanTestEnv(t)
	defer s.Stop()

	// Case 1: Request.EndKey is in the middle of the range.
	if rows, err := db.ReverseScan("b", "d", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 2: Request.EndKey is equal to the EndKey of the range.
	if rows, err := db.ReverseScan("e", "g", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 3: Test roachpb.KeyMax
	// This span covers the system DB keys.
	wanted := 1 + len(sql.MakeMetadataSchema().GetInitialValues())
	if rows, err := db.ReverseScan("g", roachpb.KeyMax, 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != wanted {
		t.Errorf("expected %d rows; got %d", wanted, l)
	}
	// Case 4: Test keys.SystemMax
	// This span covers the system DB keys. Note sql.GetInitialSystemValues
	// returns one key before keys.SystemMax, but our scan includes one key
	// (\xffa) created for the test.
	if rows, err := db.ReverseScan(keys.SystemMax, "b", 0); err != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", err)
	} else if l := len(rows); l != 1 {
		t.Errorf("expected 1 row; got %d", l)
	}
}
Example #2
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unittest purposes. Caller should close the returned
// client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	stores := storage.NewStores()
	sender := kv.NewTxnCoordSender(stores, ctx.Clock, false, nil, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}
		// Create the first range, writing directly to the engine. Note this
		// does not create the range itself, just its data. Only do this for
		// the first store.
		if i == 0 {
			initialValues := sql.MakeMetadataSchema().GetInitialValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}

		stores.AddStore(s)

		// Initialize node and store ids.  Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
Example #3
func TestInitialKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const nonSystemDesc = 2
	const keysPerDesc = 2
	const nonDescKeys = 2

	ms := sql.MakeMetadataSchema()
	kv := ms.GetInitialValues()
	expected := nonDescKeys + keysPerDesc*(nonSystemDesc+sql.NumSystemDescriptors)
	if actual := len(kv); actual != expected {
		t.Fatalf("Wrong number of initial sql kv pairs: %d, wanted %d", actual, expected)
	}

	// Add an additional table.
	ms.AddTable(keys.MaxSystemConfigDescID+1,
		"CREATE TABLE testdb.x (val INTEGER PRIMARY KEY)",
		privilege.List{privilege.ALL})
	kv = ms.GetInitialValues()
	expected = nonDescKeys + keysPerDesc*ms.DescriptorCount()
	if actual := len(kv); actual != expected {
		t.Fatalf("Wrong number of initial sql kv pairs: %d, wanted %d", actual, expected)
	}

	// Verify that IDGenerator value is correct.
	found := false
	var idgenkv roachpb.KeyValue
	for _, v := range kv {
		if v.Key.Equal(keys.DescIDGenerator) {
			idgenkv = v
			found = true
			break
		}
	}

	if !found {
		t.Fatal("Could not find descriptor ID generator in initial key set")
	}
	// Expect 2 non-reserved IDs to have been allocated.
	i, err := idgenkv.Value.GetInt()
	if err != nil {
		t.Fatal(err)
	}
	if a, e := i, int64(keys.MaxReservedDescID+1); a != e {
		t.Fatalf("Expected next descriptor ID to be %d, was %d", e, a)
	}
}
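
The expected-count arithmetic in this test (nonDescKeys + keysPerDesc * number of descriptors) is easy to misread, so here is a minimal, self-contained sketch of the same bookkeeping with every constant inlined. The value used for the number of system descriptors is a hypothetical stand-in, not the real sql.NumSystemDescriptors.

package main

import "fmt"

// Illustrative only: the counting convention used by TestInitialKeys above.
// Each descriptor contributes keysPerDesc keys (per the test's constants),
// plus nonDescKeys bookkeeping keys such as the descriptor ID generator.
func main() {
	const (
		numSystemDescriptors = 6 // hypothetical stand-in for sql.NumSystemDescriptors
		nonSystemDesc        = 2 // the testdb database and its table, as in the test
		keysPerDesc          = 2 // keys written per descriptor
		nonDescKeys          = 2 // keys not tied to any descriptor
	)
	expected := nonDescKeys + keysPerDesc*(nonSystemDesc+numSystemDescriptors)
	fmt.Println("expected initial sql kv pairs:", expected) // 2 + 2*(2+6) = 18
}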
Example #4
func TestInitialKeys(t *testing.T) {
	defer leaktest.AfterTest(t)

	ms := sql.MakeMetadataSchema()
	// IDGenerator + 2 for each system object in the default schema.
	kv := ms.GetInitialValues()
	if actual, expected := len(kv), 1+2*sql.NumSystemDescriptors; actual != expected {
		t.Fatalf("Wrong number of initial sql kv pairs: %d, wanted %d", actual, expected)
	}

	// Add a non-system database and table.
	db := sql.MakeMetadataDatabase("testdb", sql.NewPrivilegeDescriptor(security.RootUser, privilege.List{privilege.ALL}))
	db.AddTable("CREATE TABLE testdb.x (val INTEGER PRIMARY KEY)", sql.NewPrivilegeDescriptor(security.RootUser, privilege.List{privilege.ALL}))
	ms.AddDatabase(db)
	kv = ms.GetInitialValues()
	// IDGenerator + 2 for each system object + 2 for testdb + 2 for test table
	if actual, expected := len(kv), 1+2*sql.NumSystemDescriptors+4; actual != expected {
		t.Fatalf("Wrong number of initial sql kv pairs: %d, wanted %d", actual, expected)
	}

	// Verify that IDGenerator value is correct.
	found := false
	var idgenkv roachpb.KeyValue
	for _, v := range kv {
		if v.Key.Equal(keys.DescIDGenerator) {
			idgenkv = v
			found = true
			break
		}
	}

	if !found {
		t.Fatal("Could not find descriptor ID generator in initial key set")
	}
	// Expect 2 non-reserved IDs to have been allocated.
	i, err := idgenkv.Value.GetInt()
	if err != nil {
		t.Fatal(err)
	}
	if a, e := i, int64(keys.MaxReservedDescID+1); a != e {
		t.Fatalf("Expected next descriptor ID to be %d, was %d", e, a)
	}
}
Example #5
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in-memory engine.
func TestBootstrapCluster(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, stopper)
	localDB, err := BootstrapCluster("cluster-1", []engine.Engine{e}, stopper)
	if err != nil {
		t.Fatal(err)
	}
	defer stopper.Stop()

	// Scan the complete contents of the local database.
	rows, err := localDB.Scan(keys.LocalMax, roachpb.KeyMax, 0)
	if err != nil {
		t.Fatal(err)
	}
	var foundKeys keySlice
	for _, kv := range rows {
		foundKeys = append(foundKeys, kv.Key)
	}
	var expectedKeys = keySlice{
		// Range addressing records (meta1 "\x02" and meta2 "\x03" prefixes)
		// for the single bootstrap range, followed by the system ("\x04")
		// ID-generator and range-tree keys.
		roachpb.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax),
		roachpb.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax),
		roachpb.Key("\x04node-idgen"),
		roachpb.Key("\x04range-tree-root"),
		roachpb.Key("\x04store-idgen"),
	}
	// Add the initial keys for sql.
	for _, kv := range sql.MakeMetadataSchema().GetInitialValues() {
		expectedKeys = append(expectedKeys, kv.Key)
	}
	// Re-sort the list; the sql values are not sorted.
	sort.Sort(expectedKeys)

	if !reflect.DeepEqual(foundKeys, expectedKeys) {
		t.Errorf("expected keys mismatch:\n%s\n  -- vs. -- \n\n%s",
			formatKeys(foundKeys), formatKeys(expectedKeys))
	}

	// TODO(spencer): check values.
}
Example #6
// TestStoreRangeSystemSplits verifies that splits are based on the
// contents of the SystemDB span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	initialSystemValues := sql.MakeMetadataSchema().GetInitialValues()
	numInitialValues := len(initialSystemValues)
	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for X user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		for i, kv := range initialSystemValues {
			bytes, err := kv.Value.GetBytes()
			if err != nil {
				log.Info(err)
				continue
			}
			if err := txn.Put(kv.Key, bytes); err != nil {
				return err
			}

			descID := keys.MaxReservedDescID + i + 1

			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(descID))
			if err := txn.Put(k, bytes); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	verifySplitsAtTablePrefixes := func(maxTableID int) {
		// We expect splits at each of the user tables, but not at the system
		// table boundaries.
		expKeys := make([]roachpb.Key, 0, maxTableID+1)
		for i := 1; i <= maxTableID; i++ {
			expKeys = append(expKeys,
				keys.MakeKey(keys.Meta2Prefix, keys.MakeTablePrefix(keys.MaxReservedDescID+uint32(i))),
			)
		}
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))

		util.SucceedsWithinDepth(1, t, 5*time.Second, func() error {
			rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
			if err != nil {
				return err
			}
			keys := make([]roachpb.Key, 0, len(expKeys))
			for _, r := range rows {
				keys = append(keys, r.Key)
			}
			if !reflect.DeepEqual(keys, expKeys) {
				return util.Errorf("expected split keys:\n%v\nbut found:\n%v", expKeys, keys)
			}
			return nil
		})
	}

	verifySplitsAtTablePrefixes(len(initialSystemValues))

	numTotalValues := numInitialValues + 5

	// Write another, disjoint descriptor for a user table.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every intervening ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + numTotalValues))
		return txn.Put(k, &sql.TableDescriptor{})
	}); err != nil {
		t.Fatal(err)
	}

	verifySplitsAtTablePrefixes(numTotalValues)
}
Example #7
func TestComputeSplits(t *testing.T) {
	defer leaktest.AfterTest(t)

	const (
		start         = keys.MaxReservedDescID + 1
		reservedStart = keys.MaxSystemConfigDescID + 1
	)

	schema := sql.MakeMetadataSchema()
	// Real SQL system tables only.
	baseSql := schema.GetInitialValues()
	// Real SQL system tables plus some user stuff.
	userSql := append(schema.GetInitialValues(),
		descriptor(start), descriptor(start+1), descriptor(start+5))
	// Real SQL system with reserved non-system tables.
	schema.AddTable(reservedStart+1, "CREATE TABLE system.test1 (i INT PRIMARY KEY)",
		privilege.List{privilege.ALL})
	schema.AddTable(reservedStart+2, "CREATE TABLE system.test2 (i INT PRIMARY KEY)",
		privilege.List{privilege.ALL})
	reservedSql := schema.GetInitialValues()
	// Real SQL system with reserved non-system and user database.
	allSql := append(schema.GetInitialValues(),
		descriptor(start), descriptor(start+1), descriptor(start+5))

	allUserSplits := []uint32{start, start + 1, start + 2, start + 3, start + 4, start + 5}
	allReservedSplits := []uint32{reservedStart, reservedStart + 1, reservedStart + 2}
	allSplits := append(allReservedSplits, allUserSplits...)

	testCases := []struct {
		values     []roachpb.KeyValue
		start, end roachpb.RKey
		// Use ints in the test case definitions; they are more readable.
		splits []uint32
	}{
		// No data.
		{nil, roachpb.RKeyMin, roachpb.RKeyMax, nil},
		{nil, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{nil, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{nil, roachpb.RKeyMin, keys.MakeTablePrefix(start + 10), nil},

		// No user data.
		{baseSql, roachpb.RKeyMin, roachpb.RKeyMax, allReservedSplits[:1]},
		{baseSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{baseSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{baseSql, roachpb.RKeyMin, keys.MakeTablePrefix(start + 10), allReservedSplits[:1]},

		// User descriptors.
		{userSql, keys.MakeTablePrefix(start - 1), roachpb.RKeyMax, allUserSplits},
		{userSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, allUserSplits[1:]},
		{userSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), allUserSplits[1:]},
		{userSql, keys.MakeTablePrefix(start - 1), keys.MakeTablePrefix(start + 10), allUserSplits},
		{userSql, keys.MakeTablePrefix(start + 4), keys.MakeTablePrefix(start + 10), allUserSplits[5:]},
		{userSql, keys.MakeTablePrefix(start + 5), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeTablePrefix(start + 6), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeTablePrefix(start + 10), allUserSplits[1:]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeTablePrefix(start + 5), allUserSplits[1:5]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+5), roachpb.RKey("bar")), allUserSplits[1:5]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("morefoo")), nil},

		// Reserved descriptors.
		{reservedSql, roachpb.RKeyMin, roachpb.RKeyMax, allReservedSplits},
		{reservedSql, keys.MakeTablePrefix(reservedStart), roachpb.RKeyMax, allReservedSplits[1:]},
		{reservedSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{reservedSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(start + 10), allReservedSplits[1:]},
		{reservedSql, roachpb.RKeyMin, keys.MakeTablePrefix(reservedStart + 2), allReservedSplits[:2]},
		{reservedSql, roachpb.RKeyMin, keys.MakeTablePrefix(reservedStart + 10), allReservedSplits},
		{reservedSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(reservedStart + 2), allReservedSplits[1:2]},
		{reservedSql, keys.MakeKey(keys.MakeTablePrefix(reservedStart), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+10), roachpb.RKey("foo")), allReservedSplits[1:]},

		// Reserved/User mix.
		{allSql, roachpb.RKeyMin, roachpb.RKeyMax, allSplits},
		{allSql, keys.MakeTablePrefix(reservedStart + 1), roachpb.RKeyMax, allSplits[2:]},
		{allSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, allSplits[4:]},
		{allSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(start + 10), allSplits[1:]},
		{allSql, roachpb.RKeyMin, keys.MakeTablePrefix(start + 2), allSplits[:5]},
		{allSql, keys.MakeKey(keys.MakeTablePrefix(reservedStart), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+5), roachpb.RKey("foo")), allSplits[1:8]},
	}

	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		splits := cfg.ComputeSplitKeys(tc.start, tc.end)
		if len(splits) == 0 && len(tc.splits) == 0 {
			continue
		}

		// Convert ints to actual keys.
		expected := []roachpb.RKey{}
		for _, s := range tc.splits {
			expected = append(expected, keys.MakeNonColumnKey(keys.MakeTablePrefix(s)))
		}
		if !reflect.DeepEqual(splits, expected) {
			t.Errorf("#%d: bad splits:\ngot: %v\nexpected: %v", tcNum, splits, expected)
		}
	}
}
Example #8
func TestGetLargestID(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		values  []roachpb.KeyValue
		largest uint32
		maxID   uint32
		errStr  string
	}{
		// No data.
		{nil, 0, 0, "descriptor table not found"},

		// Some data, but not from the system span.
		{[]roachpb.KeyValue{plainKV("a", "b")}, 0, 0, "descriptor table not found"},

		// Some real data, but no descriptors.
		{[]roachpb.KeyValue{
			sqlKV(keys.NamespaceTableID, 1, 1),
			sqlKV(keys.NamespaceTableID, 1, 2),
			sqlKV(keys.UsersTableID, 1, 3),
		}, 0, 0, "descriptor table not found"},

		// Single correct descriptor entry.
		{[]roachpb.KeyValue{sqlKV(keys.DescriptorTableID, 1, 1)}, 1, 0, ""},

		// Surrounded by other data.
		{[]roachpb.KeyValue{
			sqlKV(keys.NamespaceTableID, 1, 20),
			sqlKV(keys.NamespaceTableID, 1, 30),
			sqlKV(keys.DescriptorTableID, 1, 8),
			sqlKV(keys.ZonesTableID, 1, 40),
		}, 8, 0, ""},

		// Descriptors with holes. Index ID does not matter.
		{[]roachpb.KeyValue{
			sqlKV(keys.DescriptorTableID, 1, 1),
			sqlKV(keys.DescriptorTableID, 2, 5),
			sqlKV(keys.DescriptorTableID, 3, 8),
			sqlKV(keys.DescriptorTableID, 4, 12),
		}, 12, 0, ""},

		// Real SQL layout.
		{sql.MakeMetadataSchema().GetInitialValues(), keys.MaxSystemConfigDescID + 1, 0, ""},

		// Test non-zero max.
		{[]roachpb.KeyValue{
			sqlKV(keys.DescriptorTableID, 1, 1),
			sqlKV(keys.DescriptorTableID, 2, 5),
			sqlKV(keys.DescriptorTableID, 3, 8),
			sqlKV(keys.DescriptorTableID, 4, 12),
		}, 8, 8, ""},

		// Test non-zero max.
		{[]roachpb.KeyValue{
			sqlKV(keys.DescriptorTableID, 1, 1),
			sqlKV(keys.DescriptorTableID, 2, 5),
			sqlKV(keys.DescriptorTableID, 3, 8),
			sqlKV(keys.DescriptorTableID, 4, 12),
		}, 5, 7, ""},
	}

	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		ret, err := cfg.GetLargestObjectID(tc.maxID)
		if tc.errStr == "" {
			if err != nil {
				t.Errorf("#%d: error: %v", tcNum, err)
				continue
			}
		} else if !testutils.IsError(err, tc.errStr) {
			t.Errorf("#%d: expected err=%s, got %v", tcNum, tc.errStr, err)
			continue
		}
		if ret != tc.largest {
			t.Errorf("#%d: expected largest=%d, got %d", tcNum, tc.largest, ret)
		}
	}
}
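
To see the "Real SQL layout" case from the table above in isolation, here is a hedged sketch of a standalone test that runs GetLargestObjectID directly against the bootstrap values. It assumes the same test file and imports as the example above; the test name is invented, and the assertion mirrors the table's expectation of keys.MaxSystemConfigDescID + 1.

// A minimal sketch, not from the repository: the "Real SQL layout" case above
// written as a standalone test. TestGetLargestIDBootstrapValues is a
// hypothetical name.
func TestGetLargestIDBootstrapValues(t *testing.T) {
	defer leaktest.AfterTest(t)
	cfg := config.SystemConfig{}
	cfg.Values = sql.MakeMetadataSchema().GetInitialValues()
	// A maxID of 0 appears to mean "no upper bound", judging from the cases above.
	id, err := cfg.GetLargestObjectID(0)
	if err != nil {
		t.Fatal(err)
	}
	if expected := uint32(keys.MaxSystemConfigDescID + 1); id != expected {
		t.Errorf("expected largest object ID %d, got %d", expected, id)
	}
}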
Example #9
// GetBootstrapSchema returns the schema which will be used to bootstrap a new
// server.
func GetBootstrapSchema() sql.MetadataSchema {
	schema := sql.MakeMetadataSchema()
	storage.AddEventLogToMetadataSchema(&schema)
	sql.AddEventLogToMetadataSchema(&schema)
	return schema
}
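
As a quick illustration of how the schema returned by GetBootstrapSchema might be consumed, the sketch below (not from the repository) simply walks the initial key/value pairs. It assumes the same package and imports as the function above, and the helper name is hypothetical.

// Illustrative sketch only: enumerate the keys with which GetBootstrapSchema
// would seed a new cluster.
func logBootstrapKeys() {
	schema := GetBootstrapSchema()
	for _, kv := range schema.GetInitialValues() {
		log.Infof("bootstrap key: %s", kv.Key)
	}
}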
Example #10
// TestStoreRangeSystemSplits verifies that splits are based on the contents of
// the SystemConfig span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)()
	store, stopper, _ := createTestStore(t)
	defer stopper.Stop()

	schema := sql.MakeMetadataSchema()
	initialSystemValues := schema.GetInitialValues()
	var userTableMax int
	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for X user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if pErr := store.DB().Txn(func(txn *client.Txn) *roachpb.Error {
		prefix := keys.MakeTablePrefix(keys.DescriptorTableID)
		txn.SetSystemConfigTrigger()
		for i, kv := range initialSystemValues {
			if !bytes.HasPrefix(kv.Key, prefix) {
				continue
			}
			bytes, err := kv.Value.GetBytes()
			if err != nil {
				log.Info(err)
				continue
			}
			if pErr := txn.Put(kv.Key, bytes); pErr != nil {
				return pErr
			}

			descID := keys.MaxReservedDescID + i + 1
			userTableMax = i + 1

			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(descID))
			if pErr := txn.Put(k, bytes); pErr != nil {
				return pErr
			}
		}
		return nil
	}); pErr != nil {
		t.Fatal(pErr)
	}

	verifySplitsAtTablePrefixes := func(maxTableID int) {
		// We expect splits at each of the user tables, but not at the system
		// table boundaries.
		expKeys := make([]roachpb.Key, 0, maxTableID+2)

		// We can't simply set numReservedTables to schema.TableCount(), because
		// some system tables are created at cluster bootstrap time. So, before the
		// cluster bootstrap, TableCount() will return a value that's too low.
		numReservedTables := schema.MaxTableID() - keys.MaxSystemConfigDescID
		for i := 1; i <= int(numReservedTables); i++ {
			expKeys = append(expKeys,
				testutils.MakeKey(keys.Meta2Prefix,
					keys.MakeTablePrefix(keys.MaxSystemConfigDescID+uint32(i))),
			)
		}
		for i := 1; i <= maxTableID; i++ {
			expKeys = append(expKeys,
				testutils.MakeKey(keys.Meta2Prefix, keys.MakeTablePrefix(keys.MaxReservedDescID+uint32(i))),
			)
		}
		expKeys = append(expKeys, testutils.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))

		util.SucceedsSoonDepth(1, t, func() error {
			rows, pErr := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
			if pErr != nil {
				return pErr.GoError()
			}
			keys := make([]roachpb.Key, 0, len(expKeys))
			for _, r := range rows {
				keys = append(keys, r.Key)
			}
			if !reflect.DeepEqual(keys, expKeys) {
				return util.Errorf("expected split keys:\n%v\nbut found:\n%v", expKeys, keys)
			}
			return nil
		})
	}

	verifySplitsAtTablePrefixes(userTableMax)

	numTotalValues := keys.MaxSystemConfigDescID + 5

	// Write another, disjoint descriptor for a user table.
	if pErr := store.DB().Txn(func(txn *client.Txn) *roachpb.Error {
		txn.SetSystemConfigTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every intervening ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + numTotalValues))
		return txn.Put(k, &sql.TableDescriptor{})
	}); pErr != nil {
		t.Fatal(pErr)
	}

	verifySplitsAtTablePrefixes(numTotalValues)
}