Example 1
// TestCoordinatorGC verifies that the coordinator garbage collects
// extant transactions once the time since lastUpdateTS exceeds the
// client timeout.
func TestCoordinatorGC(t *testing.T) {
	db, _, manual := createTestDB(t)
	defer db.Close()

	// Set heartbeat interval to 1ms for testing.
	db.coordinator.heartbeatInterval = 1 * time.Millisecond

	txnID := engine.Key("txn")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))

	// Now, advance clock past the default client timeout.
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	*manual = hlc.ManualClock(defaultClientTimeout.Nanoseconds() + 1)
	db.coordinator.Unlock()

	if err := util.IsTrueWithin(func() bool {
		// Locking the coordinator to prevent a data race.
		db.coordinator.Lock()
		_, ok := db.coordinator.txns[string(txnID)]
		db.coordinator.Unlock()
		return !ok
	}, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
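The test above polls the coordinator through util.IsTrueWithin. A minimal sketch of such a polling helper, assuming only the behavior visible here (retry a condition until it returns true or the duration elapses); the real util.IsTrueWithin may differ in polling interval and error details:

// isTrueWithin retries cond until it returns true or d elapses. This is only
// a sketch of the helper the tests rely on, not the actual util implementation.
func isTrueWithin(cond func() bool, d time.Duration) error {
	deadline := time.Now().Add(d)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(d / 100) // poll at a small fraction of the overall budget
	}
	return fmt.Errorf("condition failed to evaluate true within %s", d)
}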
Example 2
// TestRangeGossipConfigUpdates verifies that writes to the
// permission configs cause the updated configs to be re-gossiped.
func TestRangeGossipConfigUpdates(t *testing.T) {
	r, g := createTestRange(createTestEngine(t), t)
	defer r.Stop()
	// Add a permission for a new key prefix.
	db1Perm := proto.PermConfig{
		Read:  []string{"spencer"},
		Write: []string{"spencer"},
	}
	key := engine.MakeKey(engine.KeyConfigPermissionPrefix, engine.Key("/db1"))
	reply := &proto.PutResponse{}

	data, err := gogoproto.Marshal(&db1Perm)
	if err != nil {
		t.Fatal(err)
	}
	r.Put(&proto.PutRequest{RequestHeader: proto.RequestHeader{Key: key}, Value: proto.Value{Bytes: data}}, reply)
	if reply.Error != nil {
		t.Fatal(reply.GoError())
	}

	info, err := g.GetInfo(gossip.KeyConfigPermission)
	if err != nil {
		t.Fatal(err)
	}
	configMap := info.(PrefixConfigMap)
	expConfigs := []*PrefixConfig{
		&PrefixConfig{engine.KeyMin, nil, &testDefaultPermConfig},
		&PrefixConfig{engine.Key("/db1"), nil, &db1Perm},
		&PrefixConfig{engine.Key("/db2"), engine.KeyMin, &testDefaultPermConfig},
	}
	if !reflect.DeepEqual([]*PrefixConfig(configMap), expConfigs) {
		t.Errorf("expected gossiped configs to be equal %s vs %s", configMap, expConfigs)
	}
}
Example 3
func TestReadQueueMultipleReads(t *testing.T) {
	rq := NewReadQueue()
	wg1 := sync.WaitGroup{}
	wg2 := sync.WaitGroup{}
	wg3 := sync.WaitGroup{}

	// Add a write which will overlap all reads.
	wk := rq.AddWrite(engine.Key("a"), engine.Key("d"))
	rq.AddRead(engine.Key("a"), nil, &wg1)
	rq.AddRead(engine.Key("b"), nil, &wg2)
	rq.AddRead(engine.Key("c"), nil, &wg3)
	rd1 := waitForReader(&wg1)
	rd2 := waitForReader(&wg2)
	rd3 := waitForReader(&wg3)

	if testReadDone(rd1, 1*time.Millisecond) ||
		testReadDone(rd2, 1*time.Millisecond) ||
		testReadDone(rd3, 1*time.Millisecond) {
		t.Fatal("no reads should finish with write outstanding")
	}
	rq.RemoveWrite(wk)
	if !testReadDone(rd1, 5*time.Millisecond) ||
		!testReadDone(rd2, 5*time.Millisecond) ||
		!testReadDone(rd3, 5*time.Millisecond) {
		t.Fatal("reads should finish with no writes outstanding")
	}
}
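TestReadQueueMultipleReads depends on two helpers that are not shown in these examples. A plausible sketch, assuming waitForReader simply turns the read's WaitGroup into a channel and testReadDone races that channel against a timer:

// waitForReader converts a read's WaitGroup into a channel that is closed
// once the read queue releases the read (a sketch of the assumed helper).
func waitForReader(wg *sync.WaitGroup) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	return done
}

// testReadDone reports whether the read was released within the given wait.
func testReadDone(done <-chan struct{}, wait time.Duration) bool {
	select {
	case <-done:
		return true
	case <-time.After(wait):
		return false
	}
}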
Example 4
func TestGetFirstRangeDescriptor(t *testing.T) {
	n := gossip.NewSimulationNetwork(3, "unix", gossip.DefaultTestGossipInterval)
	kv := NewDistKV(n.Nodes[0].Gossip)
	if _, err := kv.getFirstRangeDescriptor(); err == nil {
		t.Errorf("expected not to find first range descriptor")
	}
	expectedDesc := &proto.RangeDescriptor{}
	expectedDesc.StartKey = engine.Key("a")
	expectedDesc.EndKey = engine.Key("c")

	// Add the first RangeDescriptor to a node other than the one backing
	// this kv and ensure that this kv learns of it within maxCycles gossip cycles.
	n.Nodes[1].Gossip.AddInfo(
		gossip.KeyFirstRangeMetadata, *expectedDesc, time.Hour)
	maxCycles := 10
	n.SimulateNetwork(func(cycle int, network *gossip.SimulationNetwork) bool {
		desc, err := kv.getFirstRangeDescriptor()
		if err != nil {
			if cycle >= maxCycles {
				t.Errorf("could not get range descriptor after %d cycles", cycle)
				return false
			}
			return true
		}
		if !bytes.Equal(desc.StartKey, expectedDesc.StartKey) ||
			!bytes.Equal(desc.EndKey, expectedDesc.EndKey) {
			t.Errorf("expected first range descriptor %v, instead was %v",
				expectedDesc, desc)
		}
		return false
	})
	n.Stop()
}
Example 5
// TestRangeGossipConfigWithMultipleKeyPrefixes verifies that multiple
// key prefixes for a config are gossiped.
func TestRangeGossipConfigWithMultipleKeyPrefixes(t *testing.T) {
	e := createTestEngine(t)
	// Add a permission for a new key prefix.
	db1Perm := proto.PermConfig{
		Read:  []string{"spencer", "foo", "bar", "baz"},
		Write: []string{"spencer"},
	}
	key := engine.MakeKey(engine.KeyConfigPermissionPrefix, engine.Key("/db1"))
	if err := engine.PutProto(e, key, &db1Perm); err != nil {
		t.Fatal(err)
	}
	r, g := createTestRange(e, t)
	defer r.Stop()

	info, err := g.GetInfo(gossip.KeyConfigPermission)
	if err != nil {
		t.Fatal(err)
	}
	configMap := info.(PrefixConfigMap)
	expConfigs := []*PrefixConfig{
		&PrefixConfig{engine.KeyMin, nil, &testDefaultPermConfig},
		&PrefixConfig{engine.Key("/db1"), nil, &db1Perm},
		&PrefixConfig{engine.Key("/db2"), engine.KeyMin, &testDefaultPermConfig},
	}
	if !reflect.DeepEqual([]*PrefixConfig(configMap), expConfigs) {
		t.Errorf("expected gossiped configs to be equal %s vs %s", configMap, expConfigs)
	}
}
Example 6
// createTestStore creates a test store using an in-memory
// engine. It returns the store and the store clock's manual unix
// nanos time. If createDefaultRange is true, it creates a single
// range from key "a" to key "z" with a default replica descriptor
// (StoreID = 1). The caller is responsible for closing the store
// on exit.
func createTestStore(createDefaultRange bool, t *testing.T) (*Store, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil, nil)
	if err := store.Bootstrap(proto.StoreIdent{StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	db, _ := newTestDB(store)
	store.db = db
	// Create system key range for allocations.
	meta := store.BootstrapRangeMetadata()
	meta.StartKey = engine.KeySystemPrefix
	meta.EndKey = engine.PrefixEndKey(engine.KeySystemPrefix)
	_, err := store.CreateRange(meta)
	if err != nil {
		t.Fatal(err)
	}
	if err := store.Init(); err != nil {
		t.Fatal(err)
	}
	// If requested, create a default range for tests from "a" to "z".
	if createDefaultRange {
		replica := proto.Replica{StoreID: 1}
		_, err := store.CreateRange(store.NewRangeMetadata(engine.Key("a"), engine.Key("z"), []proto.Replica{replica}))
		if err != nil {
			t.Fatal(err)
		}
	}
	return store, &manual
}
Example 7
// TestCoordinatorAddRequest verifies that adding a request creates
// transaction metadata and that adding multiple requests with the
// same transaction ID updates the last update timestamp.
func TestCoordinatorAddRequest(t *testing.T) {
	db, clock, manual := createTestDB(t)
	defer db.Close()

	txnID := engine.Key("txn")
	putReq := createPutRequest(engine.Key("a"), []byte("value"), txnID)

	// Put request will create a new transaction.
	<-db.Put(putReq)
	txnMeta, ok := db.coordinator.txns[string(txnID)]
	if !ok {
		t.Fatal("expected a transaction to be created on coordinator")
	}
	ts := txnMeta.lastUpdateTS
	if !ts.Less(clock.Now()) {
		t.Errorf("expected earlier last update timestamp; got: %+v", ts)
	}

	// Advance time and send another put request.
	// Locking the coordinator to prevent a data race.
	db.coordinator.Lock()
	*manual = hlc.ManualClock(1)
	db.coordinator.Unlock()
	<-db.Put(putReq)
	if len(db.coordinator.txns) != 1 {
		t.Errorf("expected length of transactions map to be 1; got %d", len(db.coordinator.txns))
	}
	txnMeta = db.coordinator.txns[string(txnID)]
	if !ts.Less(txnMeta.lastUpdateTS) || txnMeta.lastUpdateTS.WallTime != int64(*manual) {
		t.Errorf("expected last update time to advance; got %+v", txnMeta.lastUpdateTS)
	}
}
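Several coordinator tests construct their requests through createPutRequest. A hedged sketch of that helper, using only the request fields that appear elsewhere in these examples (Key, TxnID, and Value.Bytes):

// createPutRequest returns a put of value at key issued on behalf of the
// transaction identified by txnID (a sketch of the assumed test helper).
func createPutRequest(key engine.Key, value []byte, txnID engine.Key) *proto.PutRequest {
	return &proto.PutRequest{
		RequestHeader: proto.RequestHeader{
			Key:   key,
			TxnID: txnID,
		},
		Value: proto.Value{Bytes: value},
	}
}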
Example 8
// TestInternalPushTxnPushTimestamp verifies that when args.Abort is
// false (i.e. for a read/write conflict), the pushee txn keeps status
// PENDING, but has its Timestamp moved forward to the pusher's
// Timestamp + 1.
func TestInternalPushTxnPushTimestamp(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	pusher := NewTransaction(engine.Key("a"), 1, proto.SERIALIZABLE, clock)
	pushee := NewTransaction(engine.Key("b"), 1, proto.SERIALIZABLE, clock)
	pusher.Priority = 2
	pushee.Priority = 1 // pusher will win
	pusher.Timestamp = proto.Timestamp{WallTime: 50, Logical: 25}
	pushee.Timestamp = proto.Timestamp{WallTime: 5, Logical: 1}

	// Now, push the transaction with args.Abort=false.
	args, reply := pushTxnArgs(pusher, pushee, false /* abort */, 0)
	if err := rng.ReadWriteCmd("InternalPushTxn", args, reply); err != nil {
		t.Errorf("unexpected error on push: %v", err)
	}
	expTS := pusher.Timestamp
	expTS.Logical++
	if !reply.PusheeTxn.Timestamp.Equal(expTS) {
		t.Errorf("expected timestamp to be pushed to %+v; got %+v", expTS, reply.PusheeTxn.Timestamp)
	}
	if reply.PusheeTxn.Status != proto.PENDING {
		t.Errorf("expected pushed txn to have status PENDING; got %s", reply.PusheeTxn.Status)
	}
}
Example 9
// TestPrefixConfigSort verifies sorting of keys.
func TestPrefixConfigSort(t *testing.T) {
	keys := []engine.Key{
		engine.KeyMax,
		engine.Key("c"),
		engine.Key("a"),
		engine.Key("b"),
		engine.Key("aa"),
		engine.Key("\xfe"),
		engine.KeyMin,
	}
	expKeys := []engine.Key{
		engine.KeyMin,
		engine.Key("a"),
		engine.Key("aa"),
		engine.Key("b"),
		engine.Key("c"),
		engine.Key("\xfe"),
		engine.KeyMax,
	}
	pcc := PrefixConfigMap{}
	for _, key := range keys {
		pcc = append(pcc, &PrefixConfig{key, nil, nil})
	}
	sort.Sort(pcc)
	for i, pc := range pcc {
		if bytes.Compare(pc.Prefix, expKeys[i]) != 0 {
			t.Errorf("order for index %d incorrect; expected %q, got %q", i, expKeys[i], pc.Prefix)
		}
	}
}
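The sort.Sort call above presumes PrefixConfigMap implements sort.Interface with a bytewise ordering on prefixes. A minimal sketch of that implementation, assuming nothing beyond what the test exercises:

// PrefixConfigMap orders its entries by prefix in ascending byte order
// (a sketch of the sort.Interface methods the test above depends on).
func (p PrefixConfigMap) Len() int      { return len(p) }
func (p PrefixConfigMap) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p PrefixConfigMap) Less(i, j int) bool {
	return bytes.Compare(p[i].Prefix, p[j].Prefix) < 0
}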
Example 10
// TestCoordinatorHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestCoordinatorHeartbeat(t *testing.T) {
	db, _, manual := createTestDB(t)
	defer db.Close()

	// Set heartbeat interval to 1ms for testing.
	db.coordinator.heartbeatInterval = 1 * time.Millisecond

	txnID := engine.Key("txn")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))

	// Verify 3 heartbeats.
	var heartbeatTS proto.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(db, engine.MakeKey(engine.KeyLocalTransactionPrefix, txnID))
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the coordinator to prevent a data race.
			db.coordinator.Lock()
			*manual = hlc.ManualClock(*manual + 1)
			db.coordinator.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Errorf("expected heartbeat #%d within 50ms", i+1)
		}
	}
}
Example 11
func doLookup(t *testing.T, rc *RangeMetadataCache, key string) {
	r, err := rc.LookupRangeMetadata(engine.Key(key))
	if err != nil {
		t.Fatalf("Unexpected error from LookupRangeMetadata: %s", err.Error())
	}
	if !r.ContainsKey(engine.Key(key)) {
		t.Fatalf("Returned range did not contain key: %s-%s, %s", r.StartKey, r.EndKey, key)
	}
}
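doLookup relies on the descriptor's ContainsKey method. The check it performs is presumably the usual half-open interval test; here is a sketch written as a free function, since the actual method lives on proto.RangeDescriptor:

// rangeContainsKey mirrors the containment check doLookup relies on: a range
// covers the half-open key interval [StartKey, EndKey). This is a sketch, not
// the actual proto.RangeDescriptor method.
func rangeContainsKey(r *proto.RangeDescriptor, key engine.Key) bool {
	return bytes.Compare(key, r.StartKey) >= 0 && bytes.Compare(key, r.EndKey) < 0
}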
Example 12
// TestInternalPushTxnBadKey verifies that it is an error for args.Key
// to differ from args.PusheeTxn.ID.
func TestInternalPushTxnBadKey(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	pusher := NewTransaction(engine.Key("a"), 1, proto.SERIALIZABLE, clock)
	pushee := NewTransaction(engine.Key("b"), 1, proto.SERIALIZABLE, clock)

	args, reply := pushTxnArgs(pusher, pushee, true, 0)
	args.Key = pusher.ID
	verifyErrorMatches(rng.ReadWriteCmd("InternalPushTxn", args, reply), ".*should match pushee.*", t)
}
Example 13
// createTestStore creates a test store using an in-memory
// engine. It returns the store and the store clock's manual unix
// nanos time. A single range from key "a" to key "z" is set up in
// the store with a default replica descriptor (i.e. StoreID = 0,
// RangeID = 1, etc.). The caller is responsible for closing the
// store on exit.
func createTestStore(t *testing.T) (*Store, *hlc.ManualClock) {
	manual := hlc.ManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := NewStore(clock, eng, nil)
	replica := proto.Replica{RangeID: 1}
	_, err := store.CreateRange(engine.Key("a"), engine.Key("z"), []proto.Replica{replica})
	if err != nil {
		t.Fatal(err)
	}
	return store, &manual
}
Example 14
// ExampleLsZones creates a series of zone configs and verifies
// zone-ls works. First, with no regexp, all zone configs are listed.
// Second, a regexp properly filters the results.
func ExampleLsZones() {
	httpServer := startAdminServer()
	defer httpServer.Close()
	testConfigFn := createTestConfigFile()
	defer os.Remove(testConfigFn)

	keys := []engine.Key{
		engine.KeyMin,
		engine.Key("db1"),
		engine.Key("db2"),
		engine.Key("db3"),
		engine.Key("user"),
	}

	regexps := []string{
		"",
		"db*",
		"db[12]",
	}

	for _, key := range keys {
		prefix := url.QueryEscape(string(key))
		runSetZone(CmdSetZone, []string{prefix, testConfigFn})
	}

	for i, regexp := range regexps {
		fmt.Fprintf(os.Stdout, "test case %d: %q\n", i, regexp)
		if regexp == "" {
			runLsZones(CmdLsZones, []string{})
		} else {
			runLsZones(CmdLsZones, []string{regexp})
		}
	}
	// Output:
	// set zone config for key prefix ""
	// set zone config for key prefix "db1"
	// set zone config for key prefix "db2"
	// set zone config for key prefix "db3"
	// set zone config for key prefix "user"
	// test case 0: ""
	// [default]
	// db1
	// db2
	// db3
	// user
	// test case 1: "db*"
	// db1
	// db2
	// db3
	// test case 2: "db[12]"
	// db1
	// db2
}
Example 15
// TestCoordinatorMultipleTxns verifies correct operation with
// multiple outstanding transactions.
func TestCoordinatorMultipleTxns(t *testing.T) {
	db, _, _ := createTestDB(t)
	defer db.Close()

	txn1ID := engine.Key("txn1")
	txn2ID := engine.Key("txn2")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txn1ID))
	<-db.Put(createPutRequest(engine.Key("b"), []byte("value"), txn2ID))

	if len(db.coordinator.txns) != 2 {
		t.Errorf("expected length of transactions map to be 2; got %d", len(db.coordinator.txns))
	}
}
Example 16
func buildTestPrefixConfigMap() PrefixConfigMap {
	configs := []*PrefixConfig{
		{engine.KeyMin, nil, config1},
		{engine.Key("/db1"), nil, config2},
		{engine.Key("/db1/table"), nil, config3},
		{engine.Key("/db3"), nil, config4},
	}
	pcc, err := NewPrefixConfigMap(configs)
	if err != nil {
		log.Fatalf("unexpected error building config map: %v", err)
	}
	return pcc
}
Example 17
// TestStoreRaftIDAllocation verifies that raft IDs are
// allocated in successive blocks.
func TestStoreRaftIDAllocation(t *testing.T) {
	store, _ := createTestStore(false, t)
	defer store.Close()

	// Raft IDs should be allocated from ID 2 (first alloc'd range)
	// to raftIDAllocCount * 3 + 1.
	for i := 0; i < raftIDAllocCount*3; i++ {
		r := addTestRange(store, engine.Key(fmt.Sprintf("%03d", i)), engine.Key(fmt.Sprintf("%03d", i+1)), t)
		if r.Meta.RaftID != int64(2+i) {
			t.Errorf("expected Raft ID %d; got %d", 2+i, r.Meta.RaftID)
		}
	}
}
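addTestRange is not shown in these examples. Here is a sketch consistent with the NewRangeMetadata and CreateRange calls used in createTestStore above; the *Range return type and the replica literal are assumptions:

// addTestRange creates a range spanning [start, end) on the store, failing
// the test on error (a sketch of the assumed helper).
func addTestRange(store *Store, start, end engine.Key, t *testing.T) *Range {
	replica := proto.Replica{StoreID: 1}
	r, err := store.CreateRange(store.NewRangeMetadata(start, end, []proto.Replica{replica}))
	if err != nil {
		t.Fatal(err)
	}
	return r
}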
Example 18
// TestRangeCache is a simple test which verifies that metadata ranges
// are being cached and retrieved properly. It sets up a fake backing
// store for the cache, and measures how often that backing store is
// accessed when looking up metadata keys through the cache.
func TestRangeCache(t *testing.T) {
	db := newTestMetadataDB()
	for i, char := range "abcdefghijklmnopqrstuvwx" {
		db.splitRange(t, engine.Key(string(char)))
		if i > 0 && i%6 == 0 {
			db.splitRange(t, engine.RangeMetaKey(engine.Key(string(char))))
		}
	}

	rangeCache := NewRangeMetadataCache(db)
	db.cache = rangeCache

	doLookup(t, rangeCache, "aa")
	db.assertHitCount(t, 2)

	// Metadata for the following ranges should be cached
	doLookup(t, rangeCache, "ab")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "ba")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "cz")
	db.assertHitCount(t, 0)

	// The meta2 entries weren't cached, but the covering meta1 range was.
	doLookup(t, rangeCache, "d")
	db.assertHitCount(t, 1)
	doLookup(t, rangeCache, "fa")
	db.assertHitCount(t, 0)

	// The meta2 entries weren't cached; meta1 was already cached aggressively.
	doLookup(t, rangeCache, "ij")
	db.assertHitCount(t, 1)
	doLookup(t, rangeCache, "jk")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "pn")
	db.assertHitCount(t, 1)

	// Totally uncached ranges
	doLookup(t, rangeCache, "vu")
	db.assertHitCount(t, 2)
	doLookup(t, rangeCache, "xx")
	db.assertHitCount(t, 0)

	// Evict clears one level 1 and one level 2 cache
	rangeCache.EvictCachedRangeMetadata(engine.Key("da"))
	doLookup(t, rangeCache, "fa")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "da")
	db.assertHitCount(t, 2)
}
Example 19
// TestCoordinatorEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestCoordinatorEndTxn(t *testing.T) {
	db, _, _ := createTestDB(t)
	defer db.Close()

	txnID := engine.Key("txn")
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))

	db.coordinator.EndTxn(txnID, true)
	if len(db.coordinator.txns) != 0 {
		t.Errorf("expected empty transactions map; got %d", len(db.coordinator.txns))
	}

	// TODO(spencer): need to test that write intents were sent to key "a".
}
Example 20
// TestRangeCache is a simple test which verifies that metadata ranges are being
// cached and retrieved properly.  It sets up a fake backing store for the
// cache, and measures how often that backing store is accessed when looking up
// metadata keys through the cache.
func TestRangeCache(t *testing.T) {
	db := newTestMetadataDB()
	db.splitRange(t, engine.Key("a"))
	db.splitRange(t, engine.Key("b"))
	db.splitRange(t, engine.Key("c"))
	db.splitRange(t, engine.Key("d"))
	db.splitRange(t, engine.Key("e"))
	db.splitRange(t, engine.Key("f"))
	db.splitRange(t, engine.RangeMetaKey(engine.Key("d")))
	db.hitCount = 0

	rangeCache := NewRangeMetadataCache(db)
	db.cache = rangeCache

	doLookup(t, rangeCache, "ba")
	db.assertHitCount(t, 2)
	doLookup(t, rangeCache, "bb")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "ca")
	db.assertHitCount(t, 1)

	// "da" falls under a different meta1 range, so both cache levels miss.
	doLookup(t, rangeCache, "da")
	db.assertHitCount(t, 2)
	doLookup(t, rangeCache, "fa")
	db.assertHitCount(t, 1)

	// Evict clears both level 1 and level 2 cache for a key
	rangeCache.EvictCachedRangeMetadata(engine.Key("da"))
	doLookup(t, rangeCache, "fa")
	db.assertHitCount(t, 0)
	doLookup(t, rangeCache, "da")
	db.assertHitCount(t, 2)
}
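Both TestRangeCache variants measure cache effectiveness through db.assertHitCount. A sketch of that helper, assuming newTestMetadataDB returns a *testMetadataDB whose hitCount field counts lookups that reached the fake backing store and is reset after each assertion:

// assertHitCount verifies how many lookups hit the backing store since the
// previous assertion and resets the counter (a sketch of the assumed helper).
func (db *testMetadataDB) assertHitCount(t *testing.T, expected int) {
	if db.hitCount != expected {
		t.Errorf("expected %d hits to the backing store; got %d", expected, db.hitCount)
	}
	db.hitCount = 0
}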
Example 21
// AddRequest is called on every client request to update the
// lastUpdateTS to prevent live transactions from being considered
// abandoned and garbage collected. Mutating requests have their
// key(s) added to the transaction's keys slice for eventual cleanup
// via write intent resolution.
func (tc *coordinator) AddRequest(method string, header *proto.RequestHeader) {
	// Ignore non-transactional requests.
	if len(header.TxnID) == 0 || !isTransactional(method) {
		return
	}

	tc.Lock()
	defer tc.Unlock()
	if _, ok := tc.txns[string(header.TxnID)]; !ok {
		tc.txns[string(header.TxnID)] = &txnMetadata{
			lastUpdateTS:    tc.clock.Now(),
			timeoutDuration: tc.clientTimeout,
			closer:          make(chan struct{}),
		}

		// TODO(jiajia): Reevaluate this logic of creating a goroutine
		// for each active transaction. Spencer suggests a heap
		// containing next heartbeat timeouts which is processed by a
		// single goroutine.
		go tc.heartbeat(engine.Key(header.TxnID), tc.txns[string(header.TxnID)].closer)
	}
	txnMeta := tc.txns[string(header.TxnID)]
	txnMeta.lastUpdateTS = tc.clock.Now()

	// If the request is read-only, there is nothing more to record.
	if storage.IsReadOnly(method) {
		return
	}
	// Otherwise, append a new key range to the set of affected keys.
	txnMeta.keys = append(txnMeta.keys, engine.KeyRange{
		Start: header.Key,
		End:   header.EndKey,
	})
}
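AddRequest fills in a txnMetadata value whose definition is not included in these examples. Reconstructed from the fields referenced here and in the coordinator tests (the field types are inferred, not confirmed), a sketch of the struct might be:

// txnMetadata holds the coordinator's bookkeeping for a single client
// transaction. This is a sketch assembled from the fields used above; the
// real struct may carry additional state.
type txnMetadata struct {
	// lastUpdateTS is refreshed on every request and compared against
	// timeoutDuration to decide when the transaction is abandoned.
	lastUpdateTS proto.Timestamp
	// timeoutDuration is how long the coordinator waits without updates
	// before garbage collecting the transaction.
	timeoutDuration time.Duration
	// keys collects the key ranges mutated by the transaction so their
	// write intents can be resolved when the transaction ends.
	keys []engine.KeyRange
	// closer is closed to stop the per-transaction heartbeat goroutine.
	closer chan struct{}
}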
Example 22
// ExampleRmZones creates a series of zone configs and verifies that
// zone-rm removes them, listing the remaining entries via zone-ls
// after each removal. It also verifies that the default zone cannot
// be removed.
func ExampleRmZones() {
	httpServer := startAdminServer()
	defer httpServer.Close()
	testConfigFn := createTestConfigFile()
	defer os.Remove(testConfigFn)

	keys := []engine.Key{
		engine.KeyMin,
		engine.Key("db1"),
	}

	for _, key := range keys {
		prefix := url.QueryEscape(string(key))
		runSetZone(CmdSetZone, []string{prefix, testConfigFn})
	}

	for _, key := range keys {
		prefix := url.QueryEscape(string(key))
		runRmZone(CmdRmZone, []string{prefix})
		runLsZones(CmdLsZones, []string{})
	}
	// Output:
	// set zone config for key prefix ""
	// set zone config for key prefix "db1"
	// [default]
	// db1
	// removed zone config for key prefix "db1"
	// [default]
}
Example 23
// TestInternalPushTxnAlreadyCommittedOrAborted verifies success (a
// no-op) in the event that the pushee is already committed or aborted.
func TestInternalPushTxnAlreadyCommittedOrAborted(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	for i, status := range []proto.TransactionStatus{proto.COMMITTED, proto.ABORTED} {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pusher.Priority = 1
		pushee.Priority = 2 // pusher will lose, meaning we shouldn't push unless pushee is already ended.

		// End the pushee's transaction.
		etArgs, etReply := endTxnArgs(pushee, status == proto.COMMITTED, 0)
		etArgs.Timestamp = pushee.Timestamp
		if err := rng.ReadWriteCmd("EndTransaction", etArgs, etReply); err != nil {
			t.Fatal(err)
		}

		// Now try to push what's already committed or aborted.
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		if err := rng.ReadWriteCmd("InternalPushTxn", args, reply); err != nil {
			t.Fatal(err)
		}
		if reply.PusheeTxn.Status != status {
			t.Errorf("expected push txn to return with status == %s; got %+v", status, reply.PusheeTxn)
		}
	}
}
Example 24
func TestKeysAndBodyArePreserved(t *testing.T) {
	encKey := "%00some%2Fkey%20that%20encodes%E4%B8%96%E7%95%8C"
	encBody := "%00some%2FBODY%20that%20encodes"
	s := runHTTPTestFixture(t, []RequestResponse{
		{
			NewRequest("POST", encKey, encBody),
			NewResponse(200),
		},
		{
			NewRequest("GET", encKey),
			NewResponse(200, encBody, "application/octet-stream"),
		},
	})
	gr := <-s.db.Get(&proto.GetRequest{
		RequestHeader: proto.RequestHeader{
			Key:  engine.Key("\x00some/key that encodes世界"),
			User: storage.UserRoot,
		},
	})
	if gr.Error != nil {
		t.Errorf("unable to fetch value from local db: %v", gr.Error)
	}
	if !bytes.Equal(gr.Value.Bytes, []byte(encBody)) {
		t.Errorf("Expected value (%s) but got (%s)", encBody, gr.Value.Bytes)
	}
}
Example 25
// DeleteSchema removes s from the kv store.
func (db *structuredDB) DeleteSchema(s *Schema) error {
	return (<-db.kvDB.Delete(&proto.DeleteRequest{
		RequestHeader: proto.RequestHeader{
			Key: engine.MakeKey(engine.KeySchemaPrefix, engine.Key(s.Key)),
		},
	})).GoError()
}
Example 26
// PutSchema inserts s into the kv store for subsequent
// usage by clients.
func (db *structuredDB) PutSchema(s *Schema) error {
	if err := s.Validate(); err != nil {
		return err
	}
	k := engine.MakeKey(engine.KeySchemaPrefix, engine.Key(s.Key))
	return storage.PutI(db.kvDB, k, s, proto.Timestamp{})
}
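A brief usage sketch tying the two structured-DB methods above together; the helper name is hypothetical and the Schema value is left to the caller, since its fields beyond Key never appear in these examples:

// putThenDeleteSchema stores a schema and then removes it again, returning
// the first error encountered (a hypothetical usage sketch).
func putThenDeleteSchema(db *structuredDB, s *Schema) error {
	if err := db.PutSchema(s); err != nil { // PutSchema validates s first
		return err
	}
	return db.DeleteSchema(s)
}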
Example 27
// TestEndTransactionWithErrors verifies that various error conditions
// are checked, such as the transaction already being committed or
// aborted, or a timestamp or epoch regression.
func TestEndTransactionWithErrors(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	regressTS := clock.Now()
	*mc = hlc.ManualClock(1)
	txn := NewTransaction(engine.Key(""), 1, proto.SERIALIZABLE, clock)

	testCases := []struct {
		key          engine.Key
		existStatus  proto.TransactionStatus
		existEpoch   int32
		existTS      proto.Timestamp
		expErrRegexp string
	}{
		{engine.Key("a"), proto.COMMITTED, txn.Epoch, txn.Timestamp, "txn {.*}: already committed"},
		{engine.Key("b"), proto.ABORTED, txn.Epoch, txn.Timestamp, "txn {.*}: already aborted"},
		{engine.Key("c"), proto.PENDING, txn.Epoch + 1, txn.Timestamp, "txn {.*}: epoch regression: 0"},
		{engine.Key("d"), proto.PENDING, txn.Epoch, regressTS, "txn {.*}: timestamp regression: {WallTime:1 Logical:0 .*}"},
	}
	for _, test := range testCases {
		// Establish existing txn state by writing directly to range engine.
		var existTxn proto.Transaction
		gogoproto.Merge(&existTxn, txn)
		existTxn.ID = test.key
		existTxn.Status = test.existStatus
		existTxn.Epoch = test.existEpoch
		existTxn.Timestamp = test.existTS
		txnKey := engine.MakeKey(engine.KeyLocalTransactionPrefix, test.key)
		if err := engine.PutProto(rng.engine, txnKey, &existTxn); err != nil {
			t.Fatal(err)
		}

		// End the transaction, verify expected error.
		txn.ID = test.key
		args, reply := endTxnArgs(txn, true, 0)
		args.Timestamp = txn.Timestamp
		err := rng.ReadWriteCmd("EndTransaction", args, reply)
		if err == nil {
			t.Errorf("expected error matching %q", test.expErrRegexp)
		} else {
			if matched, regexpErr := regexp.MatchString(test.expErrRegexp, err.Error()); !matched || regexpErr != nil {
				t.Errorf("expected error to match %q (%v): %v", test.expErrRegexp, regexpErr, err.Error())
			}
		}
	}
}
Example 28
// ExampleSetAndGetZone sets zone configs for a variety of key
// prefixes and verifies they can be fetched directly.
func ExampleSetAndGetZone() {
	httpServer := startAdminServer()
	defer httpServer.Close()
	testConfigFn := createTestConfigFile()
	defer os.Remove(testConfigFn)

	testData := []struct {
		prefix engine.Key
		yaml   string
	}{
		{engine.KeyMin, testConfig},
		{engine.Key("db1"), testConfig},
		{engine.Key("db 2"), testConfig},
		{engine.Key("\xfe"), testConfig},
	}

	for _, test := range testData {
		prefix := url.QueryEscape(string(test.prefix))
		runSetZone(CmdSetZone, []string{prefix, testConfigFn})
		runGetZones(CmdGetZone, []string{prefix})
	}
	// Output:
	// set zone config for key prefix ""
	// zone config for key prefix "":
	// replicas: [[dc1, ssd]]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "db1"
	// zone config for key prefix "db1":
	// replicas: [[dc1, ssd]]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "db+2"
	// zone config for key prefix "db+2":
	// replicas: [[dc1, ssd]]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
	//
	// set zone config for key prefix "%FE"
	// zone config for key prefix "%FE":
	// replicas: [[dc1, ssd]]
	// range_min_bytes: 1048576
	// range_max_bytes: 67108864
}
Example 29
// makeKey encodes the range ID and client command ID into a key
// for storage in the underlying engine. Note that the prefix for
// response cache keys sorts them at the very top of the engine's
// keyspace.
// TODO(spencer): going to need to encode the server timestamp
//   for when the value was written for GC.
func (rc *ResponseCache) makeKey(cmdID ClientCmdID) engine.Key {
	return engine.Key(bytes.Join([][]byte{
		engine.KeyLocalRangeResponseCachePrefix,
		encoding.EncodeInt(rc.rangeID),
		encoding.EncodeInt(cmdID.WallTime), // wall time helps sort for locality
		encoding.EncodeInt(cmdID.Random),   // TODO(spencer): encode as Fixed64
	}, []byte{}))
}
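A short usage sketch illustrating the locality property the comment above describes: for a fixed range ID, keys encode first by wall time, so entries from the same instant sort next to each other. The ClientCmdID values are made up for illustration, and the ordering holds only if encoding.EncodeInt is order-preserving:

// responseCacheKeyOrdering demonstrates that makeKey groups response cache
// entries by range ID and then by command wall time (illustrative IDs only).
func responseCacheKeyOrdering(rc *ResponseCache) {
	k1 := rc.makeKey(ClientCmdID{WallTime: 100, Random: 1})
	k2 := rc.makeKey(ClientCmdID{WallTime: 100, Random: 2})
	k3 := rc.makeKey(ClientCmdID{WallTime: 200, Random: 1})
	// Same range ID and wall time: k1 and k2 differ only in the trailing
	// random component; k3 sorts after both.
	fmt.Println(bytes.Compare(k1, k2) < 0, bytes.Compare(k2, k3) < 0)
}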
Example 30
// TestInternalPushTxnUpgradeExistingTxn verifies that pushing
// a transaction record with a new epoch upgrades the pushee's
// epoch and timestamp if greater. In all test cases, the
// priorities are set such that the push will succeed.
func TestInternalPushTxnUpgradeExistingTxn(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()

	ts1 := proto.Timestamp{WallTime: 1}
	ts2 := proto.Timestamp{WallTime: 2}
	testCases := []struct {
		startEpoch, epoch, expEpoch int32
		startTS, ts, expTS          proto.Timestamp
	}{
		// Move epoch forward.
		{0, 1, 1, ts1, ts1, ts1},
		// Move timestamp forward.
		{0, 0, 0, ts1, ts2, ts2},
		// Move epoch backwards (has no effect).
		{1, 0, 1, ts1, ts1, ts1},
		// Move timestamp backwards (has no effect).
		{0, 0, 0, ts2, ts1, ts2},
		// Move both epoch & timestamp forward.
		{0, 1, 1, ts1, ts2, ts2},
		// Move both epoch & timestamp backward (has no effect).
		{1, 0, 1, ts2, ts1, ts2},
	}

	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pushee.Priority = 1
		pusher.Priority = 2 // Pusher will win.

		// First, establish "start" of existing pushee's txn via heartbeat.
		pushee.Epoch = test.startEpoch
		pushee.Timestamp = test.startTS
		hbArgs, hbReply := heartbeatArgs(pushee, 0)
		hbArgs.Timestamp = pushee.Timestamp
		if err := rng.ReadWriteCmd("InternalHeartbeatTxn", hbArgs, hbReply); err != nil {
			t.Fatal(err)
		}

		// Now, attempt to push the transaction using updated values for epoch & timestamp.
		pushee.Epoch = test.epoch
		pushee.Timestamp = test.ts
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		if err := rng.ReadWriteCmd("InternalPushTxn", args, reply); err != nil {
			t.Fatal(err)
		}
		expTxn := gogoproto.Clone(pushee).(*proto.Transaction)
		expTxn.Epoch = test.expEpoch
		expTxn.Timestamp = test.expTS
		expTxn.Status = proto.ABORTED
		expTxn.LastHeartbeat = &test.startTS

		if !reflect.DeepEqual(expTxn, reply.PusheeTxn) {
			t.Errorf("unexpected push txn in trial %d; expected %+v, got %+v", i, expTxn, reply.PusheeTxn)
		}
	}
}