Example #1
// TestStoreRangeSplitBetweenConfigPrefix verifies that a range can be split
// between config prefixes and that both resulting ranges gossip their configs correctly.
func TestStoreRangeSplitBetweenConfigPrefix(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	key := keys.MakeKey(keys.SystemPrefix, []byte("tsd"))

	args := adminSplitArgs(proto.KeyMin, key, 1, store.StoreID())
	_, err := store.ExecuteCmd(context.Background(), &args)
	if err != nil {
		t.Fatalf("%q: split unexpected error: %s", key, err)
	}

	// Update configs to trigger gossip in both of the ranges.
	acctConfig := &config.AcctConfig{}
	key = keys.MakeKey(keys.ConfigAccountingPrefix, proto.KeyMin)
	if err = store.DB().Put(key, acctConfig); err != nil {
		t.Fatal(err)
	}
	zoneConfig := &config.ZoneConfig{}
	key = keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin)
	if err = store.DB().Put(key, zoneConfig); err != nil {
		t.Fatal(err)
	}
}
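Nearly every example on this page builds a key by joining a well-known prefix with a suffix via keys.MakeKey. For reference, here is a minimal standalone sketch of what such a helper presumably does (plain byte-slice concatenation into a fresh slice); the real keys package may differ in details.

package main

import "fmt"

// makeKey is an illustrative stand-in for a MakeKey-style helper: it joins
// all pieces into one key, copying into a fresh slice so callers never share
// backing arrays.
func makeKey(pieces ...[]byte) []byte {
	n := 0
	for _, p := range pieces {
		n += len(p)
	}
	key := make([]byte, 0, n)
	for _, p := range pieces {
		key = append(key, p...)
	}
	return key
}

func main() {
	meta2Prefix := []byte("\x00\x00meta2") // literal value seen in expected keys elsewhere on this page
	fmt.Printf("%q\n", makeKey(meta2Prefix, []byte("db1"))) // "\x00\x00meta2db1"
}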
Example #2
func newRangeDataIterator(d *proto.RangeDescriptor, e engine.Engine) *rangeDataIterator {
	// The first range in the keyspace starts at KeyMin, which includes the node-local
	// space. We need the original StartKey to find the range metadata, but the
	// actual data starts at LocalMax.
	dataStartKey := d.StartKey
	if d.StartKey.Equal(proto.KeyMin) {
		dataStartKey = keys.LocalMax
	}
	ri := &rangeDataIterator{
		ranges: []keyRange{
			{
				start: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangeIDPrefix, encoding.EncodeUvarint(nil, uint64(d.RangeID)))),
				end:   engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangeIDPrefix, encoding.EncodeUvarint(nil, uint64(d.RangeID+1)))),
			},
			{
				start: engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangePrefix, encoding.EncodeBytes(nil, d.StartKey))),
				end:   engine.MVCCEncodeKey(keys.MakeKey(keys.LocalRangePrefix, encoding.EncodeBytes(nil, d.EndKey))),
			},
			{
				start: engine.MVCCEncodeKey(dataStartKey),
				end:   engine.MVCCEncodeKey(d.EndKey),
			},
		},
		iter: e.NewIterator(),
	}
	ri.iter.Seek(ri.ranges[ri.curIndex].start)
	ri.advance()
	return ri
}
Example #3
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := server.StartTestServer(t)
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())

	// Create an intent on the meta1 record by writing directly to the
	// engine.
	key := keys.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	now := s.Clock().Now()
	txn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, roachpb.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(s.Ctx.Engines[0], nil, key, now, txn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}

	// Now, with an intent pending, attempt to read from an arbitrary
	// key. This will cause the distributed sender to
	// do a range lookup, which will encounter the intent. We're
	// verifying here that the range lookup doesn't fail with a write
	// intent error. If it did, it would go into a deadloop attempting
	// to push the transaction, which in turn requires another range
	// lookup, etc, ad nauseam.
	if _, err := db.Get("a"); err != nil {
		t.Fatal(err)
	}
}
Example #4
// getConfig retrieves the configuration for the specified key. If the
// key is empty, all configurations are returned. Otherwise, the
// leading "/" path delimiter is stripped and the configuration
// matching the remainder is retrieved. Note that this will retrieve
// the default config if "key" is equal to "/", and will list all
// configs if "key" is equal to "". The body result contains either a
// listing of keys or the retrieved config. The output format is determined
// by the request header.
func getConfig(db *client.DB, configPrefix proto.Key, config gogoproto.Message,
	path string, r *http.Request) (body []byte, contentType string, err error) {
	// Scan all configs if the key is empty.
	if len(path) == 0 {
		var rows []client.KeyValue
		if rows, err = db.Scan(configPrefix, configPrefix.PrefixEnd(), maxGetResults); err != nil {
			return
		}
		if len(rows) == maxGetResults {
			log.Warningf("retrieved maximum number of results (%d); some may be missing", maxGetResults)
		}
		var prefixes []string
		for _, row := range rows {
			trimmed := bytes.TrimPrefix(row.Key, configPrefix)
			prefixes = append(prefixes, url.QueryEscape(string(trimmed)))
		}
		// Encode the response.
		body, contentType, err = util.MarshalResponse(r, prefixes, util.AllEncodings)
	} else {
		configKey := keys.MakeKey(configPrefix, proto.Key(path[1:]))
		if err = db.GetProto(configKey, config); err != nil {
			return
		}
		body, contentType, err = util.MarshalResponse(r, config, util.AllEncodings)
	}

	return
}
Example #5
func newTestDescriptorDB() *testDescriptorDB {
	db := &testDescriptorDB{}
	db.data.Insert(testDescriptorNode{
		&proto.RangeDescriptor{
			StartKey: keys.MakeKey(keys.Meta2Prefix, proto.KeyMin),
			EndKey:   keys.MakeKey(keys.Meta2Prefix, proto.KeyMax),
		},
	})
	db.data.Insert(testDescriptorNode{
		&proto.RangeDescriptor{
			StartKey: proto.KeyMin,
			EndKey:   proto.KeyMax,
		},
	})
	return db
}
Example #6
func TestObjectIDForKey(t *testing.T) {
	defer leaktest.AfterTest(t)

	testCases := []struct {
		key     roachpb.RKey
		success bool
		id      uint32
	}{
		// Before the structured span.
		{roachpb.RKeyMin, false, 0},

		// Boundaries of structured span.
		{roachpb.RKeyMax, false, 0},

		// Valid, even if there are things after the ID.
		{keys.MakeKey(keys.MakeTablePrefix(42), roachpb.RKey("\xff")), true, 42},
		{keys.MakeTablePrefix(0), true, 0},
		{keys.MakeTablePrefix(999), true, 999},
	}

	for tcNum, tc := range testCases {
		id, success := config.ObjectIDForKey(tc.key)
		if success != tc.success {
			t.Errorf("#%d: expected success=%t", tcNum, tc.success)
			continue
		}
		if id != tc.id {
			t.Errorf("#%d: expected id=%d, got %d", tcNum, tc.id, id)
		}
	}
}
Example #7
// getPermConfig fetches the permissions config for 'prefix'.
func getPermConfig(db *client.DB, prefix string) (*config.PermConfig, error) {
	config := &config.PermConfig{}
	if err := db.GetProto(keys.MakeKey(keys.ConfigPermissionPrefix, proto.Key(prefix)), config); err != nil {
		return nil, err
	}

	return config, nil
}
Example #8
// TestStoreRangeSplitOnConfigs verifies that config changes to both
// accounting and zone configs cause ranges to be split along prefix
// boundaries.
func TestStoreRangeSplitOnConfigs(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	acctConfig := &config.AcctConfig{}
	zoneConfig := &config.ZoneConfig{}

	// Write zone configs for db3 & db4.
	b := &client.Batch{}
	for _, k := range []string{"db4", "db3"} {
		b.Put(keys.MakeKey(keys.ConfigZonePrefix, proto.Key(k)), zoneConfig)
	}
	// Write accounting configs for db1 & db2.
	for _, k := range []string{"db2", "db1"} {
		b.Put(keys.MakeKey(keys.ConfigAccountingPrefix, proto.Key(k)), acctConfig)
	}
	if err := store.DB().Run(b); err != nil {
		t.Fatal(err)
	}
	log.Infof("wrote updated configs")

	// Check that we split into expected ranges in allotted time.
	expKeys := []proto.Key{
		proto.Key("\x00\x00meta2db1"),
		proto.Key("\x00\x00meta2db2"),
		proto.Key("\x00\x00meta2db3"),
		proto.Key("\x00\x00meta2db4"),
		proto.Key("\x00\x00meta2db5"),
		keys.MakeKey(proto.Key("\x00\x00meta2"), proto.KeyMax),
	}
	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []proto.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 500*time.Millisecond); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}
}
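The trailing "\x00\x00meta2db5" boundary above exists because the config written for "db4" presumably covers the span up to that prefix's end key, which is conventionally computed by incrementing the prefix's last byte (PrefixEnd). A small sketch of that computation, assuming PrefixEnd behaves this way:

package main

import "fmt"

// prefixEnd sketches the usual PrefixEnd computation: copy the prefix and
// increment its last non-0xff byte, truncating anything after it. (The real
// keys package may treat edge cases such as an all-0xff prefix differently.)
func prefixEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return prefix // all bytes are 0xff: no distinct end key
}

func main() {
	fmt.Printf("%q\n", prefixEnd([]byte("db4"))) // "db5": the boundary that appears in expKeys above
}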
Example #9
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	setTestRetryOptions()

	// Rewrite a zone config with low max bytes.
	zoneConfig := &proto.ZoneConfig{
		ReplicaAttrs: []proto.Attributes{
			{},
			{},
			{},
		},
		RangeMinBytes: 1 << 8,
		RangeMaxBytes: 1 << 18,
	}
	if err := s.DB.Put(keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin), zoneConfig); err != nil {
		t.Fatal(err)
	}

	// Start a test writer writing roughly 32K per key so it doesn't take too many writes to trigger range splits.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in allotted time.
	if err := util.IsTrueWithin(func() bool {
		// Scan the txn records.
		rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		return len(rows) >= 5
	}, 6*time.Second); err != nil {
		t.Errorf("failed to split 5 times: %s", err)
	}
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using an IsTrueWithin construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	if err := util.IsTrueWithin(func() bool {
		if _, _, err := engine.MVCCScan(s.Eng, keys.LocalMax, proto.KeyMax, 0, proto.MaxTimestamp, true, nil); err != nil {
			log.Infof("mvcc scan should be clean: %s", err)
			return false
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Error("failed to verify no dangling intents within 500ms")
	}
}
Example #10
// TestStoreRangeSplitAtIllegalKeys verifies a range cannot be split
// at illegal keys.
func TestStoreRangeSplitAtIllegalKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	for _, key := range []proto.Key{
		keys.Meta1Prefix,
		keys.MakeKey(keys.Meta1Prefix, []byte("a")),
		keys.MakeKey(keys.Meta1Prefix, proto.KeyMax),
		keys.MakeKey(keys.ConfigZonePrefix, []byte("a")),
	} {
		args := adminSplitArgs(proto.KeyMin, key, 1, store.StoreID())
		_, err := store.ExecuteCmd(context.Background(), &args)
		if err == nil {
			t.Fatalf("%q: split succeeded unexpectedly", key)
		}
	}
}
Example #11
// deleteConfig removes the config specified by key.
func deleteConfig(db *client.DB, configPrefix proto.Key, path string, r *http.Request) error {
	if len(path) == 0 {
		return util.Errorf("no path specified for config Delete")
	}
	if path == "/" {
		return util.Errorf("the default configuration cannot be deleted")
	}
	configKey := keys.MakeKey(configPrefix, proto.Key(path[1:]))
	return db.Del(configKey)
}
Example #12
func (mdb mockRangeDescriptorDB) rangeLookup(key roachpb.RKey, options lookupOptions, _ *roachpb.RangeDescriptor) ([]roachpb.RangeDescriptor, error) {
	if bytes.HasPrefix(key, keys.Meta2Prefix) {
		return mdb(key[len(keys.Meta1Prefix):], options)
	}
	if bytes.HasPrefix(key, keys.Meta1Prefix) {
		return mdb(keys.MakeKey(keys.Meta2Prefix, key[len(keys.Meta1Prefix):]), options)
	}
	// First range.
	return mdb(nil, options)
}
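Note that the meta2 branch above slices off len(keys.Meta1Prefix) bytes from a Meta2-prefixed key; this works only because the two prefixes have equal length (their literal values, "\x00\x00meta1" and "\x00\x00meta2", appear in expected keys elsewhere on this page). A standalone sketch of the same dispatch with plain byte slices, for illustration only:

package main

import (
	"bytes"
	"fmt"
)

var (
	meta1Prefix = []byte("\x00\x00meta1") // assumed literal; same length as meta2Prefix
	meta2Prefix = []byte("\x00\x00meta2")
)

// lookupKey mirrors the dispatch in rangeLookup above: a meta2 key is
// stripped down to the user key it addresses, a meta1 key is mapped into the
// meta2 keyspace, and anything else falls through to the first range (nil).
func lookupKey(key []byte) []byte {
	switch {
	case bytes.HasPrefix(key, meta2Prefix):
		return key[len(meta1Prefix):] // same as len(meta2Prefix)
	case bytes.HasPrefix(key, meta1Prefix):
		return append(append([]byte(nil), meta2Prefix...), key[len(meta1Prefix):]...)
	default:
		return nil
	}
}

func main() {
	fmt.Printf("%q\n", lookupKey([]byte("\x00\x00meta2db1"))) // "db1"
	fmt.Printf("%q\n", lookupKey([]byte("\x00\x00meta1db1"))) // "\x00\x00meta2db1"
	fmt.Printf("%q\n", lookupKey([]byte("db1")))              // "" (first range)
}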
Example #13
func (mdb mockRangeDescriptorDB) RangeLookup(key roachpb.RKey, _ *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	if bytes.HasPrefix(key, keys.Meta2Prefix) {
		return mdb(key[len(keys.Meta1Prefix):], considerIntents, useReverseScan)
	}
	if bytes.HasPrefix(key, keys.Meta1Prefix) {
		return mdb(keys.MakeKey(keys.Meta2Prefix, key[len(keys.Meta1Prefix):]), considerIntents, useReverseScan)
	}
	// First range.
	return mdb(nil, considerIntents, useReverseScan)
}
Example #14
// WritePermissionConfig writes the passed-in 'cfg' permissions config
// for the 'path' key prefix.
func (ts *TestServer) WritePermissionConfig(path string, cfg *proto.PermConfig) error {
	// The testserver is running as "node". However, things like config changes are generally
	// done as root.
	db, err := client.Open(ts.Ctx.RequestScheme() + "://root@" + ts.ServingAddr() + "?certs=test_certs")
	if err != nil {
		return err
	}
	key := keys.MakeKey(keys.ConfigPermissionPrefix, proto.Key(path))
	return db.Put(key, cfg)
}
Example #15
// TestStoreRangeSplitAtIllegalKeys verifies a range cannot be split
// at illegal keys.
func TestStoreRangeSplitAtIllegalKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	for _, key := range []roachpb.Key{
		keys.Meta1Prefix,
		keys.MakeKey(keys.Meta1Prefix, []byte("a")),
		keys.MakeKey(keys.Meta1Prefix, roachpb.RKeyMax),
		keys.Meta2KeyMax,
		keys.MakeTablePrefix(10 /* system descriptor ID */),
	} {
		args := adminSplitArgs(roachpb.KeyMin, key)
		_, err := client.SendWrapped(rg1(store), nil, &args)
		if err == nil {
			t.Fatalf("%q: split succeeded unexpectedly", key)
		}
	}
}
Example #16
// rangeAddressing updates or deletes the range addressing metadata
// for the range specified by desc. The action to take is specified by
// the supplied metaAction function.
//
// The rules for meta1 and meta2 records are as follows:
//
//  1. If desc.StartKey or desc.EndKey is meta1:
//     - ERROR
//  2. If desc.EndKey is meta2:
//     - meta1(desc.EndKey)
//  3. If desc.EndKey is normal user key:
//     - meta2(desc.EndKey)
//     3a. If desc.StartKey is KeyMin or meta2:
//         - meta1(KeyMax)
func rangeAddressing(b *client.Batch, desc *proto.RangeDescriptor, action metaAction) error {
	// 1. handle illegal case of start or end key being meta1.
	if bytes.HasPrefix(desc.EndKey, keys.Meta1Prefix) ||
		bytes.HasPrefix(desc.StartKey, keys.Meta1Prefix) {
		return util.Errorf("meta1 addressing records cannot be split: %+v", desc)
	}
	// 2. the case of the range ending with a meta2 prefix. This means
	// the range is full of meta2. We must update the relevant meta1
	// entry pointing to the end of this range.
	if bytes.HasPrefix(desc.EndKey, keys.Meta2Prefix) {
		action(b, keys.RangeMetaKey(desc.EndKey), desc)
	} else {
		// 3. the range ends with a normal user key, so we must update the
		// relevant meta2 entry pointing to the end of this range.
		action(b, keys.MakeKey(keys.Meta2Prefix, desc.EndKey), desc)
		// 3a. the range starts with KeyMin or a meta2 addressing record,
		// update the meta1 entry for KeyMax.
		if bytes.Equal(desc.StartKey, proto.KeyMin) ||
			bytes.HasPrefix(desc.StartKey, keys.Meta2Prefix) {
			action(b, keys.MakeKey(keys.Meta1Prefix, proto.KeyMax), desc)
		}
	}
	return nil
}
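To make rules 1 through 3a concrete, here is a self-contained walk-through that applies them to a few sample descriptors using the literal meta prefixes seen elsewhere on this page. The helpers and the KeyMin/KeyMax placeholders are illustrative stand-ins, not the real keys package.

package main

import (
	"bytes"
	"fmt"
)

var (
	keyMin      = []byte{}                // placeholder for KeyMin
	keyMax      = []byte("\xff\xff")      // placeholder for KeyMax
	meta1Prefix = []byte("\x00\x00meta1") // assumed literal values
	meta2Prefix = []byte("\x00\x00meta2")
)

// addressingKeys returns the meta record keys that rules 1-3a above would
// touch for a range [start, end).
func addressingKeys(start, end []byte) ([][]byte, error) {
	// Rule 1: meta1 ranges cannot be split.
	if bytes.HasPrefix(start, meta1Prefix) || bytes.HasPrefix(end, meta1Prefix) {
		return nil, fmt.Errorf("meta1 addressing records cannot be split")
	}
	concat := func(a, b []byte) []byte { return append(append([]byte(nil), a...), b...) }
	// Rule 2: a range ending in meta2 updates meta1(end).
	if bytes.HasPrefix(end, meta2Prefix) {
		return [][]byte{concat(meta1Prefix, end[len(meta2Prefix):])}, nil
	}
	// Rule 3: a range ending in a normal user key updates meta2(end).
	out := [][]byte{concat(meta2Prefix, end)}
	// Rule 3a: the first range (or one starting in meta2) also owns meta1(KeyMax).
	if bytes.Equal(start, keyMin) || bytes.HasPrefix(start, meta2Prefix) {
		out = append(out, concat(meta1Prefix, keyMax))
	}
	return out, nil
}

func main() {
	userRange := [2][]byte{[]byte("a"), []byte("c")}                                   // -> meta2("c")
	firstRange := [2][]byte{keyMin, []byte("c")}                                       // -> meta2("c"), meta1(KeyMax)
	meta2Range := [2][]byte{keyMin, append(append([]byte(nil), meta2Prefix...), 'x')}  // -> meta1("x")
	for _, r := range [][2][]byte{userRange, firstRange, meta2Range} {
		ks, err := addressingKeys(r[0], r[1])
		fmt.Printf("%q (err: %v)\n", ks, err)
	}
}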
Example #17
// putConfig writes a config for the specified key prefix (which is
// treated as a key). The config is parsed from the input "body". The
// config is stored proto-encoded. The specified body must validly
// parse into a config struct and must pass a given validation check (if
// validate is not nil).
func putConfig(db *client.DB, configPrefix proto.Key, config gogoproto.Message,
	path string, body []byte, r *http.Request,
	validate func(gogoproto.Message) error) error {
	if len(path) == 0 {
		return util.Errorf("no path specified for Put")
	}
	if err := util.UnmarshalRequest(r, body, config, util.AllEncodings); err != nil {
		return util.Errorf("config has invalid format: %+v: %s", config, err)
	}
	if validate != nil {
		if err := validate(config); err != nil {
			return err
		}
	}
	key := keys.MakeKey(configPrefix, proto.Key(path[1:]))
	return db.Put(key, config)
}
Example #18
// TestStoreRangeSplitBetweenConfigPrefix verifies that a range can be split
// between config prefixes and that both resulting ranges gossip their configs correctly.
func TestStoreRangeSplitBetweenConfigPrefix(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	key := keys.MakeKey(keys.SystemPrefix, []byte("tsd"))

	args, reply := adminSplitArgs(proto.KeyMin, key, 1, store.StoreID())
	err := store.ExecuteCmd(context.Background(), proto.Call{Args: args, Reply: reply})
	if err != nil {
		t.Fatalf("%q: split unexpected error: %s", key, err)
	}

	if err := store.MaybeGossipConfigs(); err != nil {
		t.Fatalf("error gossiping configs: %s", err)
	}
}
Example #19
// TestStoreSetRangesMaxBytes creates a set of ranges via splitting
// and then sets the config zone to a custom max bytes value to
// verify the ranges' max bytes are updated appropriately.
func TestStoreSetRangesMaxBytes(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()

	testData := []struct {
		rng         *Range
		expMaxBytes int64
	}{
		{store.LookupRange(proto.KeyMin, nil), 64 << 20},
		{splitTestRange(store, proto.KeyMin, proto.Key("a"), t), 1 << 20},
		{splitTestRange(store, proto.Key("a"), proto.Key("aa"), t), 1 << 20},
		{splitTestRange(store, proto.Key("aa"), proto.Key("b"), t), 64 << 20},
	}

	// Now set a new zone config for the prefix "a" with a different max bytes.
	zoneConfig := &proto.ZoneConfig{
		ReplicaAttrs:  []proto.Attributes{{}, {}, {}},
		RangeMinBytes: 1 << 8,
		RangeMaxBytes: 1 << 20,
	}
	data, err := gogoproto.Marshal(zoneConfig)
	if err != nil {
		t.Fatal(err)
	}
	key := keys.MakeKey(keys.ConfigZonePrefix, proto.Key("a"))
	pArgs := putArgs(key, data, 1, store.StoreID())
	pArgs.Timestamp = store.ctx.Clock.Now()
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatal(err)
	}

	if err := util.IsTrueWithin(func() bool {
		for _, test := range testData {
			if test.rng.GetMaxBytes() != test.expMaxBytes {
				return false
			}
		}
		return true
	}, 500*time.Millisecond); err != nil {
		t.Errorf("range max bytes values did not change as expected: %s", err)
	}
}
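The expected values follow from prefix-based config matching: the zone written for prefix "a" covers the key span from "a" up to that prefix's end key "b", so ranges starting at "a" and "aa" pick up the 1<<20 limit, while ranges starting at KeyMin and "b" keep the 64<<20 default. A toy model of that lookup (not the real PrefixConfigMap, and with a simplified prefix-end computation):

package main

import (
	"bytes"
	"fmt"
)

// zoneMaxBytes is a toy model: each zone entry covers the span
// [prefix, prefixEnd(prefix)), and a range takes the limit of the entry
// covering its start key, falling back to the default otherwise.
func zoneMaxBytes(startKey []byte, zones map[string]int64, def int64) int64 {
	for prefix, limit := range zones {
		p := []byte(prefix)
		end := append([]byte(nil), p...)
		end[len(end)-1]++ // simplified prefix end; ignores 0xff carry
		if bytes.Compare(startKey, p) >= 0 && bytes.Compare(startKey, end) < 0 {
			return limit
		}
	}
	return def
}

func main() {
	zones := map[string]int64{"a": 1 << 20}
	for _, start := range []string{"", "a", "aa", "b"} {
		// Mirrors testData above: KeyMin and "b" keep the default, "a" and "aa" get 1<<20.
		fmt.Printf("%q -> %d\n", start, zoneMaxBytes([]byte(start), zones, 64<<20))
	}
}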
Example #20
// TestStoreZoneUpdateAndRangeSplit verifies that modifying the zone
// configuration changes range max bytes and Range.maybeSplit() takes
// max bytes into account when deciding whether to enqueue a range for
// splitting. It further verifies that the range is in fact split on
// exceeding zone's RangeMaxBytes.
func TestStoreZoneUpdateAndRangeSplit(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	maxBytes := int64(1 << 16)
	rng := store.LookupReplica(proto.KeyMin, nil)
	fillRange(store, rng.Desc().RangeID, proto.Key("test"), maxBytes, t)

	// Rewrite zone config with range max bytes set to 64K.
	// This will cause the split queue to split the range in the background.
	// This must happen after fillRange() because that function is not using
	// a full-fledged client and cannot handle running concurrently with splits.
	zoneConfig := &config.ZoneConfig{
		ReplicaAttrs: []proto.Attributes{
			{},
			{},
			{},
		},
		RangeMinBytes: 1 << 8,
		RangeMaxBytes: maxBytes,
	}
	key := keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin)
	if err := store.DB().Put(key, zoneConfig); err != nil {
		t.Fatal(err)
	}

	// See if the range's max bytes is modified via gossip callback within 50ms.
	if err := util.IsTrueWithin(func() bool {
		return rng.GetMaxBytes() == zoneConfig.RangeMaxBytes
	}, 50*time.Millisecond); err != nil {
		t.Fatalf("failed to notice range max bytes update: %s", err)
	}

	// Verify that the range is in fact split (give it a second for very slow test machines).
	if err := util.IsTrueWithin(func() bool {
		newRng := store.LookupReplica(proto.Key("\xff\x00"), nil)
		return newRng != rng
	}, time.Second); err != nil {
		t.Errorf("expected range to split within 1s")
	}
}
Example #21
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := server.StartTestServer(t)
	defer s.Stop()
	db := createTestClient(t, s.ServingAddr())

	// Create an intent on the meta1 record by writing directly to the
	// engine.
	key := keys.MakeKey(keys.Meta1Prefix, proto.KeyMax)
	now := s.Clock().Now()
	txn := proto.NewTransaction("txn", proto.Key("foobar"), 0, proto.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(s.Ctx.Engines[0], nil, key, now, txn, &proto.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}

	// Now, with an intent pending, attempt (asynchronously) to read
	// from an arbitrary key. This will cause the distributed sender to
	// do a range lookup, which will encounter the intent. We're
	// verifying here that the range lookup doesn't fail with a write
	// intent error. If it did, it would go into a deadloop attempting
	// to push the transaction, which in turn requires another range
	// lookup, etc, ad nauseam.
	success := make(chan struct{})
	go func() {
		if _, err := db.Get("a"); err != nil {
			// Use t.Error rather than t.Fatal: Fatal must not be called from a
			// goroutine other than the one running the test.
			t.Error(err)
			return
		}
		close(success)
	}()

	select {
	case <-success:
		// Hurrah!
	case <-time.After(5 * time.Second):
		t.Errorf("get request did not succeed in face of range metadata intent")
	}
}
Example #22
// TestStoreRangeSplitWithMaxBytesUpdate tests a scenario where a new
// zone config that updates the max bytes is set and triggers a range
// split.
func TestStoreRangeSplitWithMaxBytesUpdate(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	origRng := store.LookupReplica(proto.KeyMin, nil)

	// Set the maxBytes and trigger a range split.
	key := keys.MakeKey(keys.ConfigZonePrefix, proto.Key("db1"))
	maxBytes := int64(1 << 16)
	zoneConfig := &config.ZoneConfig{
		ReplicaAttrs: []proto.Attributes{
			{},
			{},
			{},
		},
		RangeMinBytes: 1 << 8,
		RangeMaxBytes: maxBytes,
	}
	if err := store.DB().Put(key, zoneConfig); err != nil {
		t.Fatal(err)
	}

	// Verify that the range is split and the new range has the correct max bytes.
	util.SucceedsWithin(t, time.Second, func() error {
		newRng := store.LookupReplica(proto.Key("db1"), nil)
		if newRng.Desc().RangeID == origRng.Desc().RangeID {
			return util.Error("expected new range created by split")
		}
		if newRng.GetMaxBytes() != maxBytes {
			return util.Errorf("expected %d max bytes for the new range, but got %d",
				maxBytes, newRng.GetMaxBytes())
		}
		return nil
	})
}
Example #23
// TestStoreRangeSystemSplits verifies that splits are based on the
// contents of the SystemDB span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	initialSystemValues := sql.MakeMetadataSchema().GetInitialValues()
	numInitialValues := len(initialSystemValues)
	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for X user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		for i, kv := range initialSystemValues {
			bytes, err := kv.Value.GetBytes()
			if err != nil {
				log.Info(err)
				continue
			}
			if err := txn.Put(kv.Key, bytes); err != nil {
				return err
			}

			descID := keys.MaxReservedDescID + i + 1

			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(descID))
			if err := txn.Put(k, bytes); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	verifySplitsAtTablePrefixes := func(maxTableID int) {
		// We expect splits at each of the user tables, but not at the system
		// tables boundaries.
		expKeys := make([]roachpb.Key, 0, maxTableID+1)
		for i := 1; i <= maxTableID; i++ {
			expKeys = append(expKeys,
				keys.MakeKey(keys.Meta2Prefix, keys.MakeTablePrefix(keys.MaxReservedDescID+uint32(i))),
			)
		}
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))

		util.SucceedsWithinDepth(1, t, 5*time.Second, func() error {
			rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
			if err != nil {
				return err
			}
			keys := make([]roachpb.Key, 0, len(expKeys))
			for _, r := range rows {
				keys = append(keys, r.Key)
			}
			if !reflect.DeepEqual(keys, expKeys) {
				return util.Errorf("expected split keys:\n%v\nbut found:\n%v", expKeys, keys)
			}
			return nil
		})
	}

	verifySplitsAtTablePrefixes(len(initialSystemValues))

	numTotalValues := numInitialValues + 5

	// Write another, disjoint descriptor for a user table.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every intervening ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + numTotalValues))
		return txn.Put(k, &sql.TableDescriptor{})
	}); err != nil {
		t.Fatal(err)
	}

	verifySplitsAtTablePrefixes(numTotalValues)
}
Example #24
func TestComputeSplits(t *testing.T) {
	defer leaktest.AfterTest(t)

	const (
		start         = keys.MaxReservedDescID + 1
		reservedStart = keys.MaxSystemConfigDescID + 1
	)

	schema := sql.MakeMetadataSchema()
	// Real SQL system tables only.
	baseSql := schema.GetInitialValues()
	// Real SQL system tables plus some user stuff.
	userSql := append(schema.GetInitialValues(),
		descriptor(start), descriptor(start+1), descriptor(start+5))
	// Real SQL system with reserved non-system tables.
	schema.AddTable(reservedStart+1, "CREATE TABLE system.test1 (i INT PRIMARY KEY)",
		privilege.List{privilege.ALL})
	schema.AddTable(reservedStart+2, "CREATE TABLE system.test2 (i INT PRIMARY KEY)",
		privilege.List{privilege.ALL})
	reservedSql := schema.GetInitialValues()
	// Real SQL system with reserved non-system and user database.
	allSql := append(schema.GetInitialValues(),
		descriptor(start), descriptor(start+1), descriptor(start+5))

	allUserSplits := []uint32{start, start + 1, start + 2, start + 3, start + 4, start + 5}
	allReservedSplits := []uint32{reservedStart, reservedStart + 1, reservedStart + 2}
	allSplits := append(allReservedSplits, allUserSplits...)

	testCases := []struct {
		values     []roachpb.KeyValue
		start, end roachpb.RKey
		// Use ints in the testcase definitions; they're more readable.
		splits []uint32
	}{
		// No data.
		{nil, roachpb.RKeyMin, roachpb.RKeyMax, nil},
		{nil, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{nil, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{nil, roachpb.RKeyMin, keys.MakeTablePrefix(start + 10), nil},

		// No user data.
		{baseSql, roachpb.RKeyMin, roachpb.RKeyMax, allReservedSplits[:1]},
		{baseSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{baseSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), nil},
		{baseSql, roachpb.RKeyMin, keys.MakeTablePrefix(start + 10), allReservedSplits[:1]},

		// User descriptors.
		{userSql, keys.MakeTablePrefix(start - 1), roachpb.RKeyMax, allUserSplits},
		{userSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, allUserSplits[1:]},
		{userSql, keys.MakeTablePrefix(start), keys.MakeTablePrefix(start + 10), allUserSplits[1:]},
		{userSql, keys.MakeTablePrefix(start - 1), keys.MakeTablePrefix(start + 10), allUserSplits},
		{userSql, keys.MakeTablePrefix(start + 4), keys.MakeTablePrefix(start + 10), allUserSplits[5:]},
		{userSql, keys.MakeTablePrefix(start + 5), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeTablePrefix(start + 6), keys.MakeTablePrefix(start + 10), nil},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeTablePrefix(start + 10), allUserSplits[1:]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeTablePrefix(start + 5), allUserSplits[1:5]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+5), roachpb.RKey("bar")), allUserSplits[1:5]},
		{userSql, keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start), roachpb.RKey("morefoo")), nil},

		// Reserved descriptors.
		{reservedSql, roachpb.RKeyMin, roachpb.RKeyMax, allReservedSplits},
		{reservedSql, keys.MakeTablePrefix(reservedStart), roachpb.RKeyMax, allReservedSplits[1:]},
		{reservedSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, nil},
		{reservedSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(start + 10), allReservedSplits[1:]},
		{reservedSql, roachpb.RKeyMin, keys.MakeTablePrefix(reservedStart + 2), allReservedSplits[:2]},
		{reservedSql, roachpb.RKeyMin, keys.MakeTablePrefix(reservedStart + 10), allReservedSplits},
		{reservedSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(reservedStart + 2), allReservedSplits[1:2]},
		{reservedSql, keys.MakeKey(keys.MakeTablePrefix(reservedStart), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+10), roachpb.RKey("foo")), allReservedSplits[1:]},

		// Reserved/User mix.
		{allSql, roachpb.RKeyMin, roachpb.RKeyMax, allSplits},
		{allSql, keys.MakeTablePrefix(reservedStart + 1), roachpb.RKeyMax, allSplits[2:]},
		{allSql, keys.MakeTablePrefix(start), roachpb.RKeyMax, allSplits[4:]},
		{allSql, keys.MakeTablePrefix(reservedStart), keys.MakeTablePrefix(start + 10), allSplits[1:]},
		{allSql, roachpb.RKeyMin, keys.MakeTablePrefix(start + 2), allSplits[:5]},
		{allSql, keys.MakeKey(keys.MakeTablePrefix(reservedStart), roachpb.RKey("foo")),
			keys.MakeKey(keys.MakeTablePrefix(start+5), roachpb.RKey("foo")), allSplits[1:8]},
	}

	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		splits := cfg.ComputeSplitKeys(tc.start, tc.end)
		if len(splits) == 0 && len(tc.splits) == 0 {
			continue
		}

		// Convert ints to actual keys.
		expected := []roachpb.RKey{}
		for _, s := range tc.splits {
			expected = append(expected, keys.MakeNonColumnKey(keys.MakeTablePrefix(s)))
		}
		if !reflect.DeepEqual(splits, expected) {
			t.Errorf("#%d: bad splits:\ngot: %v\nexpected: %v", tcNum, splits, expected)
		}
	}
}
Example #25
// TestStoreVerifyKeys checks that key length is enforced and
// that end keys must sort >= start.
func TestStoreVerifyKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	tooLongKey := proto.Key(strings.Repeat("x", proto.KeyMaxLength+1))

	// Start with a too-long key on a get.
	gArgs := getArgs(tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for key too long")
	}
	// Try a start key == KeyMax.
	gArgs.Key = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start key == KeyMax")
	}
	// Try a get with an end key specified (get requires only a start key and should fail).
	gArgs.EndKey = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key specified on a non-range-based operation")
	}
	// Try a scan with too-long EndKey.
	sArgs := scanArgs(proto.KeyMin, tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key too long")
	}
	// Try a scan with end key < start key.
	sArgs.Key = []byte("b")
	sArgs.EndKey = []byte("a")
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key < start")
	}
	// Try a scan with start key == end key.
	sArgs.Key = []byte("a")
	sArgs.EndKey = sArgs.Key
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start == end key")
	}
	// Try a put to meta2 key which would otherwise exceed maximum key
	// length, but is accepted because of the meta prefix.
	meta2KeyMax := keys.MakeKey(keys.Meta2Prefix, proto.KeyMax)
	pArgs := putArgs(meta2KeyMax, []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to meta2 value: %s", err)
	}
	// Try to put a range descriptor record for a start key which is
	// maximum length.
	key := append([]byte{}, proto.KeyMax...)
	key[len(key)-1] = 0x01
	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", err)
	}
	// Try a put to txn record for a meta2 key (note that this doesn't
	// actually happen in practice, as txn records are not put directly,
	// but are instead manipulated only through txn methods).
	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, []byte(uuid.NewUUID4())),
		[]byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to txn meta2 value: %s", err)
	}
}
Example #26
// TestStoreRangeManySplits splits many ranges at once.
func TestStoreRangeManySplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Write zone configs to trigger the first round of splits.
	numDbs := 20
	zoneConfig := &config.ZoneConfig{}
	b := &client.Batch{}
	for i := 0; i < numDbs; i++ {
		key := proto.Key(fmt.Sprintf("db%02d", 20-i))
		b.Put(keys.MakeKey(keys.ConfigZonePrefix, key), zoneConfig)
	}
	if err := store.DB().Run(b); err != nil {
		t.Fatal(err)
	}

	// Check that we finish splitting in allotted time.
	expKeys := []proto.Key{}
	// Expect numDbs+1 keys as the zone config for "db20" creates
	// "meta2db20" and "meta2db21" as start/end keys.
	for i := 1; i <= numDbs+1; i++ {
		if i%10 == 0 {
			expKeys = append(expKeys, proto.Key(fmt.Sprintf("\x00\x00meta2db%d:", i/10-1)))
		}
		expKeys = append(expKeys, proto.Key(fmt.Sprintf("\x00\x00meta2db%02d", i)))
	}
	expKeys = append(expKeys, keys.MakeKey(proto.Key("\x00\x00meta2"), proto.KeyMax))
	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []proto.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}

	// Then start the second round of splits.
	acctConfig := &config.AcctConfig{}
	b = &client.Batch{}
	for i := 0; i < numDbs; i++ {
		key := proto.Key(fmt.Sprintf("db%02d/table", 20-i))
		b.Put(keys.MakeKey(keys.ConfigZonePrefix, key), acctConfig)
	}
	if err := store.DB().Run(b); err != nil {
		t.Fatal(err)
	}

	// Check the result of splits again.
	expKeys = []proto.Key{}
	for i := 1; i <= numDbs; i++ {
		if i%10 == 0 {
			expKeys = append(expKeys, proto.Key(fmt.Sprintf("\x00\x00meta2db%d:", i/10-1)))
		}
		expKeys = append(expKeys,
			proto.Key(fmt.Sprintf("\x00\x00meta2db%02d", i)),
			proto.Key(fmt.Sprintf("\x00\x00meta2db%02d/table", i)),
			proto.Key(fmt.Sprintf("\x00\x00meta2db%02d/tablf", i)))
	}
	expKeys = append(expKeys,
		proto.Key("\x00\x00meta2db21"),
		keys.MakeKey(proto.Key("\x00\x00meta2"), proto.KeyMax))
	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []proto.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}
}
Example #27
func meta2Key(key roachpb.RKey) []byte {
	return keys.MakeKey(keys.Meta2Prefix, key)
}
Example #28
// TestStoreVerifyKeys checks that key length is enforced and
// that end keys must sort >= start.
func TestStoreVerifyKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	tooLongKey := roachpb.Key(strings.Repeat("x", roachpb.KeyMaxLength+1))

	// Start with a too-long key on a get.
	gArgs := getArgs(tooLongKey, 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "exceeded") {
		t.Fatalf("unexpected error for key too long: %v", err)
	}
	// Try a start key == KeyMax.
	gArgs.Key = roachpb.KeyMax
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "must be less than KeyMax") {
		t.Fatalf("expected error for start key == KeyMax: %v", err)
	}
	// Try a get with an end key specified (get requires only a start key and should fail).
	gArgs.EndKey = roachpb.KeyMax
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "must be less than KeyMax") {
		t.Fatalf("unexpected error for end key specified on a non-range-based operation: %v", err)
	}
	// Try a scan with too-long EndKey.
	sArgs := scanArgs(roachpb.KeyMin, tooLongKey, 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "length exceeded") {
		t.Fatalf("unexpected error for end key too long: %v", err)
	}
	// Try a scan with end key < start key.
	sArgs.Key = []byte("b")
	sArgs.EndKey = []byte("a")
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "must be greater than") {
		t.Fatalf("unexpected error for end key < start: %v", err)
	}
	// Try a scan with start key == end key.
	sArgs.Key = []byte("a")
	sArgs.EndKey = sArgs.Key
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "must be greater than") {
		t.Fatalf("unexpected error for start == end key: %v", err)
	}
	// Try a scan with range-local start key, but "regular" end key.
	sArgs.Key = keys.MakeRangeKey([]byte("test"), []byte("sffx"), nil)
	sArgs.EndKey = []byte("z")
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "range-local") {
		t.Fatalf("unexpected error for local start, non-local end key: %v", err)
	}

	// Try a put to meta2 key which would otherwise exceed maximum key
	// length, but is accepted because of the meta prefix.
	meta2KeyMax := keys.MakeKey(keys.Meta2Prefix, roachpb.KeyMax)
	pArgs := putArgs(meta2KeyMax, []byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to meta2 value: %s", err)
	}
	// Try to put a range descriptor record for a start key which is
	// maximum length.
	key := append([]byte{}, roachpb.KeyMax...)
	key[len(key)-1] = 0x01
	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", err)
	}
	// Try a put to txn record for a meta2 key (note that this doesn't
	// actually happen in practice, as txn records are not put directly,
	// but are instead manipulated only through txn methods).
	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, []byte(uuid.NewUUID4())),
		[]byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to txn meta2 value: %s", err)
	}
}
Example #29
// TestStoreRangeSystemSplits verifies that splits are based on the
// contents of the SystemDB span.
func TestStoreRangeSystemSplits(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()

	// Write the initial sql values to the system DB as well
	// as the equivalent of table descriptors for five user tables.
	// This does two things:
	// - descriptor IDs are used to determine split keys
	// - the write triggers a SystemConfig update and gossip.
	// We should end up with splits at each user table prefix.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		for _, kv := range sql.GetInitialSystemValues() {
			if err := txn.Put(kv.Key, kv.Value.GetRawBytes()); err != nil {
				return err
			}
		}
		for i := 1; i <= 5; i++ {
			// We don't care about the values, just the keys.
			k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + i))
			v, err := txn.Get(k)
			if err != nil {
				return err
			}
			if err := txn.Put(k, v.ValueBytes()); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	expKeys := []roachpb.Key{}
	for i := 1; i <= 5; i++ {
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix,
			keys.MakeTablePrefix(uint32(keys.MaxReservedDescID+i))))
	}
	expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))

	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []roachpb.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}

	// Write more descriptors for user tables.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemDBTrigger()
		// This time, only write the last table descriptor. Splits
		// still occur for every ID.
		// We don't care about the values, just the keys.
		k := sql.MakeDescMetadataKey(sql.ID(keys.MaxReservedDescID + 10))
		v, err := txn.Get(k)
		if err != nil {
			return err
		}
		return txn.Put(k, v.ValueBytes())
	}); err != nil {
		t.Fatal(err)
	}

	expKeys = []roachpb.Key{}
	for i := 1; i <= 10; i++ {
		expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix,
			keys.MakeTablePrefix(uint32(keys.MaxReservedDescID+i))))
	}
	expKeys = append(expKeys, keys.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax))

	if err := util.IsTrueWithin(func() bool {
		rows, err := store.DB().Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			t.Fatalf("failed to scan meta2 keys: %s", err)
		}
		var keys []roachpb.Key
		for _, r := range rows {
			keys = append(keys, r.Key)
		}
		return reflect.DeepEqual(keys, expKeys)
	}, 5*time.Second); err != nil {
		t.Errorf("expected splits not found: %s", err)
	}
}
Example #30
// putPermConfig writes the permissions config for 'prefix'.
func putPermConfig(db *client.DB, prefix string, config *config.PermConfig) error {
	return db.Put(keys.MakeKey(keys.ConfigPermissionPrefix, proto.Key(prefix)), config)
}