// TestTimestampCacheWithTxnID verifies that timestamps matching
// the specified txn ID are ignored.
func TestTimestampCacheWithTxnID(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)

	// Add two successive txn entries.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("c"), ts1, txn1ID, true)
	ts2 := clock.Now()
	// Add an overlapping entry from txn2 at a later timestamp;
	// entries from different txns coexist in the cache.
	tc.Add(roachpb.Key("b"), roachpb.Key("d"), ts2, txn2ID, true)

	// Fetching with no transaction gets latest value.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, nil); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "1" gets most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn1ID); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "2" skips most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn2ID); !ts.Equal(ts1) {
		t.Errorf("expected %s; got %s", ts1, ts)
	}
}
Example #2
func TestUUID(t *testing.T) {
	uuid1 := uuid.NewUUID4()
	uuid2 := uuid.NewUUID4()
	if bytes.Equal(uuid1, uuid2) {
		t.Errorf("consecutive uuids equal %s", uuid1)
	}
}
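A broader, still probabilistic check of the same property is to draw many IDs and assert they are pairwise distinct; this sketch assumes only (as the test above does) that uuid.NewUUID4 returns a byte slice:

// Sketch: verify v4 UUIDs are pairwise distinct over many draws.
func TestUUIDNoCollisions(t *testing.T) {
	seen := make(map[string]bool, 1000)
	for i := 0; i < 1000; i++ {
		id := string(uuid.NewUUID4())
		if seen[id] {
			t.Fatalf("duplicate uuid after %d draws: %x", i, id)
		}
		seen[id] = true
	}
}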
Example #3
// TestTimestampCacheReadVsWrite verifies that the timestamp cache
// can differentiate between read and write timestamps.
func TestTimestampCacheReadVsWrite(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)

	// Add read-only non-txn entry at current time.
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("b"), ts1, nil, true)

	// Add two successive txn entries; one read-only and one read-write.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts2 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts2, txn1ID, true)
	ts3 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts3, txn2ID, false)

	// Fetching with no transaction gets latest values.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, nil); !rTS.Equal(ts2) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts2, ts3, rTS, wTS)
	}
	// Fetching with txn ID "1" gets original for read and most recent for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn1ID); !rTS.Equal(ts1) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts1, ts3, rTS, wTS)
	}
	// Fetching with txn ID "2" gets ts2 for read and low water mark for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn2ID); !rTS.Equal(ts2) || !wTS.Equal(tc.lowWater) {
		t.Errorf("expected %s %s; got %s %s", ts2, tc.lowWater, rTS, wTS)
	}
}
Example #4
// TestRocksDBCompaction verifies that a garbage collector can be
// installed on a RocksDB engine and will properly compact response
// cache and transaction entries.
func TestRocksDBCompaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	gob.Register(proto.Timestamp{})
	rocksdb := newMemRocksDB(proto.Attributes{Attrs: []string{"ssd"}}, testCacheSize)
	err := rocksdb.Open()
	if err != nil {
		t.Fatalf("could not create new in-memory rocksdb db instance: %v", err)
	}
	rocksdb.SetGCTimeouts(1, 2)
	defer rocksdb.Close()

	cmdID := &proto.ClientCmdID{WallTime: 1, Random: 1}

	// Write two transaction values and two response cache values such
	// that exactly one of each should be GC'd based on our GC timeouts.
	kvs := []proto.KeyValue{
		{
			Key:   keys.ResponseCacheKey(1, cmdID),
			Value: proto.Value{Bytes: encodePutResponse(makeTS(2, 0), t)},
		},
		{
			Key:   keys.ResponseCacheKey(2, cmdID),
			Value: proto.Value{Bytes: encodePutResponse(makeTS(3, 0), t)},
		},
		{
			Key:   keys.TransactionKey(proto.Key("a"), proto.Key(uuid.NewUUID4())),
			Value: proto.Value{Bytes: encodeTransaction(makeTS(1, 0), t)},
		},
		{
			Key:   keys.TransactionKey(proto.Key("b"), proto.Key(uuid.NewUUID4())),
			Value: proto.Value{Bytes: encodeTransaction(makeTS(2, 0), t)},
		},
	}
	for _, kv := range kvs {
		if err := MVCCPut(rocksdb, nil, kv.Key, proto.ZeroTimestamp, kv.Value, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Compact range and scan remaining values to compare.
	rocksdb.CompactRange(nil, nil)
	actualKVs, _, err := MVCCScan(rocksdb, proto.KeyMin, proto.KeyMax,
		0, proto.ZeroTimestamp, true, nil)
	if err != nil {
		t.Fatalf("could not run scan: %v", err)
	}
	var actualKeys []proto.Key
	for _, kv := range actualKVs {
		actualKeys = append(actualKeys, kv.Key)
	}
	expKeys := []proto.Key{
		kvs[1].Key,
		kvs[3].Key,
	}
	if !reflect.DeepEqual(expKeys, actualKeys) {
		t.Errorf("expected keys %+v, got keys %+v", expKeys, actualKeys)
	}
}
Example #5
// TestTimestampCacheReplacements verifies that a newer entry
// in the timestamp cache which completely "covers" an older
// entry will replace it.
func TestTimestampCacheReplacements(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)

	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()

	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts1, nil, true)
	if ts, _ := tc.GetMax(roachpb.Key("a"), nil, nil); !ts.Equal(ts1) {
		t.Errorf("expected %s; got %s", ts1, ts)
	}
	// Write overlapping value with txn1 and verify with txn1--we should get
	// low water mark, not ts1.
	ts2 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts2, txn1ID, true)
	if ts, _ := tc.GetMax(roachpb.Key("a"), nil, txn1ID); !ts.Equal(tc.lowWater) {
		t.Errorf("expected low water (empty) time; got %s", ts)
	}
	// Write range which overlaps "a" with txn2 and verify with txn2--we should
	// get low water mark, not ts2.
	ts3 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("c"), ts3, txn2ID, true)
	if ts, _ := tc.GetMax(roachpb.Key("a"), nil, txn2ID); !ts.Equal(tc.lowWater) {
		t.Errorf("expected low water (empty) time; got %s", ts)
	}
	// Also, verify txn1 sees ts3.
	if ts, _ := tc.GetMax(roachpb.Key("a"), nil, txn1ID); !ts.Equal(ts3) {
		t.Errorf("expected %s; got %s", ts3, ts)
	}
	// Now, write to "b" with a higher timestamp and no txn. Should be
	// visible to all txns.
	ts4 := clock.Now()
	tc.Add(roachpb.Key("b"), nil, ts4, nil, true)
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, nil); !ts.Equal(ts4) {
		t.Errorf("expected %s; got %s", ts4, ts)
	}
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn1ID); !ts.Equal(ts4) {
		t.Errorf("expected %s; got %s", ts4, ts)
	}
	// Finally, write an earlier version of "b"; it should simply be
	// tossed and we should still see ts4.
	tc.Add(roachpb.Key("b"), nil, ts1, nil, true)
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, nil); !ts.Equal(ts4) {
		t.Errorf("expected %s; got %s", ts4, ts)
	}
}
Example #6
func newTestSender(handler func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()
		var writing bool
		switch call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil {
			txn.Writing = writing
		}

		if handler != nil {
			handler(call)
		}
	}
}
Example #7
func newBlockWriter(db *sql.DB) blockWriter {
	source := rand.NewSource(int64(time.Now().UnixNano()))
	return blockWriter{
		db:   db,
		id:   uuid.NewUUID4().String(),
		rand: rand.New(source),
	}
}
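To show why each writer carries its own UUID and rand source, here is a hypothetical write method; the blocks table, its columns, and the writeBlock name are illustrative assumptions, not part of the original:

// Hypothetical sketch: tag each inserted row with this writer's UUID,
// drawing payloads from the writer-private rand source so concurrent
// writers need no shared lock.
func (bw blockWriter) writeBlock() error {
	data := make([]byte, 64)
	bw.rand.Read(data) // math/rand's Rand.Read always returns a nil error
	_, err := bw.db.Exec(
		`INSERT INTO blocks (writer_id, block_num, raw_bytes) VALUES ($1, $2, $3)`,
		bw.id, bw.rand.Int63(), data,
	)
	return err
}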
Example #8
// TestRocksDBCompaction verifies that a garbage collector can be
// installed on a RocksDB engine and will properly compact transaction
// entries.
func TestRocksDBCompaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := newMemRocksDB(roachpb.Attributes{}, testCacheSize, stopper)
	err := rocksdb.Open()
	if err != nil {
		t.Fatalf("could not create new in-memory rocksdb db instance: %v", err)
	}
	rocksdb.SetGCTimeouts(1)

	// Write two transaction values such that exactly one should be GC'd based
	// on our GC timeouts.
	kvs := []roachpb.KeyValue{
		{
			Key:   keys.TransactionKey(roachpb.Key("a"), roachpb.Key(uuid.NewUUID4())),
			Value: roachpb.MakeValueFromBytes(encodeTransaction(makeTS(1, 0), t)),
		},
		{
			Key:   keys.TransactionKey(roachpb.Key("b"), roachpb.Key(uuid.NewUUID4())),
			Value: roachpb.MakeValueFromBytes(encodeTransaction(makeTS(2, 0), t)),
		},
	}
	for _, kv := range kvs {
		if err := MVCCPut(rocksdb, nil, kv.Key, roachpb.ZeroTimestamp, kv.Value, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Compact range and scan remaining values to compare.
	rocksdb.CompactRange(nil, nil)
	actualKVs, _, err := MVCCScan(rocksdb, keyMin, keyMax, 0, roachpb.ZeroTimestamp, true, nil)
	if err != nil {
		t.Fatalf("could not run scan: %v", err)
	}
	var actualKeys []roachpb.Key
	for _, kv := range actualKVs {
		actualKeys = append(actualKeys, kv.Key)
	}
	expKeys := []roachpb.Key{
		kvs[1].Key,
	}
	if !reflect.DeepEqual(expKeys, actualKeys) {
		t.Errorf("expected keys %+v, got keys %+v", expKeys, actualKeys)
	}
}
Example #9
// newTestSender mocks out some of the txn coordinator sender's
// functionality. It responds to PutRequests using testPutResp.
func newTestSender(pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)) SenderFunc {
	txnKey := roachpb.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		ba.UserPriority = 1
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}

		var br *roachpb.BatchResponse
		var pErr *roachpb.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = ba.CreateReply()
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := roachpb.PENDING
		for i, req := range ba.Requests {
			args := req.GetInner()
			if _, ok := args.(*roachpb.PutRequest); ok {
				if !br.Responses[i].SetValue(proto.Clone(testPutResp).(roachpb.Response)) {
					panic("failed to set put response")
				}
			}
			if roachpb.IsTransactionWrite(args) {
				writing = true
			}
		}
		if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := args.(*roachpb.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = roachpb.COMMITTED
			} else {
				status = roachpb.ABORTED
			}
		}
		if ba.Txn != nil {
			txnClone := ba.Txn.Clone()
			br.Txn = &txnClone
			if pErr == nil {
				br.Txn.Writing = writing
				br.Txn.Status = status
			}
		}

		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
Example #10
func TestTxnIDEqual(t *testing.T) {
	txn1, txn2 := uuid.NewUUID4(), uuid.NewUUID4()
	txn1Copy := append([]byte(nil), txn1...)

	testCases := []struct {
		a, b     []byte
		expEqual bool
	}{
		{txn1, txn1, true},
		{txn1, txn2, false},
		{txn1, txn1Copy, true},
	}
	for i, test := range testCases {
		if eq := TxnIDEqual(test.a, test.b); eq != test.expEqual {
			t.Errorf("%d: expected %q == %q: %t; got %t", i, test.a, test.b, test.expEqual, eq)
		}
	}
}
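The cases above pin down the contract: IDs compare by value rather than slice identity, so a copy still matches. A minimal implementation consistent with the test, offered as a sketch rather than the verified source:

// Sketch: txn IDs are equal iff their bytes match.
func TxnIDEqual(a, b []byte) bool {
	return bytes.Equal(a, b)
}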
Example #11
func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		key, expAddress roachpb.Key
	}{
		{roachpb.Key{}, roachpb.KeyMin},
		{roachpb.Key("123"), roachpb.Key("123")},
		{RangeDescriptorKey(roachpb.Key("foo")), roachpb.Key("foo")},
		{TransactionKey(roachpb.Key("baz"), roachpb.Key(uuid.NewUUID4())), roachpb.Key("baz")},
		{TransactionKey(roachpb.KeyMax, roachpb.Key(uuid.NewUUID4())), roachpb.KeyMax},
		{nil, nil},
	}
	for i, test := range testCases {
		result := KeyAddress(test.key)
		if !result.Equal(test.expAddress) {
			t.Errorf("%d: expected address for key %q doesn't match %q", i, test.key, test.expAddress)
		}
	}
}
Example #12
func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		key, expAddress proto.Key
	}{
		{proto.Key{}, proto.KeyMin},
		{proto.Key("123"), proto.Key("123")},
		{MakeKey(ConfigAccountingPrefix, proto.Key("foo")), proto.Key("\x00acctfoo")},
		{RangeDescriptorKey(proto.Key("foo")), proto.Key("foo")},
		{TransactionKey(proto.Key("baz"), proto.Key(uuid.NewUUID4())), proto.Key("baz")},
		{TransactionKey(proto.KeyMax, proto.Key(uuid.NewUUID4())), proto.KeyMax},
		{nil, nil},
	}
	for i, test := range testCases {
		result := KeyAddress(test.key)
		if !result.Equal(test.expAddress) {
			t.Errorf("%d: expected address for key %q doesn't match %q", i, test.key, test.expAddress)
		}
	}
}
Example #13
func newTestSender(pre, post func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		ba.UserPriority = gogoproto.Int32(-1)
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}

		var br *proto.BatchResponse
		var pErr *proto.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = &proto.BatchResponse{}
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := proto.PENDING
		if _, ok := ba.GetArg(proto.Put); ok {
			br.Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := ba.GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		br.Txn = gogoproto.Clone(ba.Txn).(*proto.Transaction)
		if br.Txn != nil && pErr == nil {
			br.Txn.Writing = writing
			br.Txn.Status = status
		}

		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
Example #14
func TestTransactionUpdate(t *testing.T) {
	nodes := NodeList{
		Nodes: []int32{101, 103, 105},
	}
	ts := makeTS(10, 11)

	txn := Transaction{
		Name:          "name",
		Key:           Key("foo"),
		ID:            uuid.NewUUID4(),
		Priority:      957356782,
		Isolation:     SNAPSHOT,
		Status:        COMMITTED,
		Epoch:         2,
		LastHeartbeat: &ts,
		Timestamp:     makeTS(20, 21),
		OrigTimestamp: makeTS(30, 31),
		MaxTimestamp:  makeTS(40, 41),
		CertainNodes:  nodes,
		Writing:       true,
		Sequence:      123,
	}

	noZeroField := func(txn Transaction) error {
		ele := reflect.ValueOf(&txn).Elem()
		eleT := ele.Type()
		for i := 0; i < ele.NumField(); i++ {
			f := ele.Field(i)
			zero := reflect.Zero(f.Type())
			if reflect.DeepEqual(f.Interface(), zero.Interface()) {
				return fmt.Errorf("expected %s field to be non-zero", eleT.Field(i).Name)
			}
		}
		return nil
	}

	if err := noZeroField(txn); err != nil {
		t.Fatal(err)
	}

	var txn2 Transaction
	txn2.Update(&txn)

	if err := noZeroField(txn2); err != nil {
		t.Fatal(err)
	}
}
Example #15
func newTestSender(pre, post func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()

		if pre != nil {
			pre(call)
		}

		var writing bool
		var status proto.TransactionStatus
		switch t := call.Args.(type) {
		case *proto.PutRequest:
			gogoproto.Merge(call.Reply, testPutResp)
			writing = true
		case *proto.EndTransactionRequest:
			writing = true
			if t.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		default:
			// Do nothing.
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil && call.Reply.Header().GoError() == nil {
			txn.Writing = writing
			txn.Status = status
		}

		if post != nil {
			post(call)
		}
	}
}
Example #16
func newTestSender(pre, post func(proto.Call)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, call proto.Call) {
		header := call.Args.Header()
		header.UserPriority = gogoproto.Int32(-1)
		if header.Txn != nil && len(header.Txn.ID) == 0 {
			header.Txn.Key = txnKey
			header.Txn.ID = txnID
		}
		call.Reply.Reset()

		if pre != nil {
			pre(call)
		}

		var writing bool
		status := proto.PENDING
		if _, ok := call.Args.(*proto.BatchRequest).GetArg(proto.Put); ok {
			call.Reply.(*proto.BatchResponse).Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := call.Args.(*proto.BatchRequest).GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		call.Reply.Header().Txn = gogoproto.Clone(call.Args.Header().Txn).(*proto.Transaction)
		if txn := call.Reply.Header().Txn; txn != nil && call.Reply.Header().GoError() == nil {
			txn.Writing = writing
			txn.Status = status
		}

		if post != nil {
			post(call)
		}
	}
}
Example #17
// NewTransaction creates a new transaction. The transaction key is
// composed using the specified baseKey (for locality with data
// affected by the transaction) and a random ID to guarantee
// uniqueness. The specified user-level priority is combined with a
// randomly chosen value to yield a final priority, used to settle
// write conflicts in a way that avoids starvation of long-running
// transactions (see Range.InternalPushTxn).
func NewTransaction(name string, baseKey Key, userPriority int32,
	isolation IsolationType, now Timestamp, maxOffset int64) *Transaction {
	// Compute priority by adjusting based on userPriority factor.
	priority := MakePriority(nil, userPriority)
	// Compute timestamp and max timestamp.
	max := now
	max.WallTime += maxOffset

	return &Transaction{
		Name:          name,
		Key:           baseKey,
		ID:            uuid.NewUUID4(),
		Priority:      priority,
		Isolation:     isolation,
		Timestamp:     now,
		OrigTimestamp: now,
		MaxTimestamp:  max,
	}
}
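A minimal in-package usage sketch. SERIALIZABLE is assumed here as the counterpart of the SNAPSHOT isolation seen in the other examples, and the 250ms offset is an arbitrary illustration:

// Sketch: anchor a new serializable txn at key "a"; MaxTimestamp lands
// 250ms past the supplied timestamp, bounding clock uncertainty.
now := Timestamp{WallTime: time.Now().UnixNano()}
txn := NewTransaction("example", Key("a"), 1, SERIALIZABLE, now, (250 * time.Millisecond).Nanoseconds())
_ = txn.ID // a fresh random UUID from uuid.NewUUID4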
Example #18
// runInit initializes the engine based on the first
// store. The bootstrap engine may not be an in-memory type.
func runInit(cmd *cobra.Command, args []string) {
	// Default user for servers.
	context.User = security.NodeUser

	if err := context.InitStores(); err != nil {
		log.Errorf("failed to initialize stores: %s", err)
		return
	}

	// Generate a new UUID for cluster ID and bootstrap the cluster.
	clusterID := uuid.NewUUID4().String()
	stopper := stop.NewStopper()
	if _, err := server.BootstrapCluster(clusterID, context.Engines, stopper); err != nil {
		log.Errorf("unable to bootstrap cluster: %s", err)
		return
	}
	stopper.Stop()

	log.Infof("cockroach cluster %s has been initialized", clusterID)
}
Example #19
func TestTransactionUpdate(t *testing.T) {
	txn := nonZeroTxn
	if err := util.NoZeroField(txn); err != nil {
		t.Fatal(err)
	}

	var txn2 Transaction
	txn2.Update(&txn)

	if err := util.NoZeroField(txn2); err != nil {
		t.Fatal(err)
	}

	var txn3 Transaction
	txn3.ID = uuid.NewUUID4()
	txn3.Name = "carl"
	txn3.Isolation = SNAPSHOT
	txn3.Update(&txn)

	if err := util.NoZeroField(txn3); err != nil {
		t.Fatal(err)
	}
}
Example #20
func TestTransactionUpdate(t *testing.T) {
	noZeroField := func(txn Transaction) error {
		ele := reflect.ValueOf(&txn).Elem()
		eleT := ele.Type()
		for i := 0; i < ele.NumField(); i++ {
			f := ele.Field(i)
			zero := reflect.Zero(f.Type())
			if reflect.DeepEqual(f.Interface(), zero.Interface()) {
				return fmt.Errorf("expected %s field to be non-zero", eleT.Field(i).Name)
			}
		}
		return nil
	}
	txn := nonZeroTxn

	if err := noZeroField(txn); err != nil {
		t.Fatal(err)
	}

	var txn2 Transaction
	txn2.Update(&txn)

	if err := noZeroField(txn2); err != nil {
		t.Fatal(err)
	}

	var txn3 Transaction
	txn3.ID = uuid.NewUUID4()
	txn3.Name = "carl"
	txn3.Isolation = SNAPSHOT
	txn3.Update(&txn)

	if err := noZeroField(txn3); err != nil {
		t.Fatal(err)
	}

}
Example #21
// TestStoreVerifyKeys checks that key length is enforced and
// that end keys must sort >= start.
func TestStoreVerifyKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	tooLongKey := roachpb.Key(strings.Repeat("x", roachpb.KeyMaxLength+1))

	// Start with a too-long key on a get.
	gArgs := getArgs(tooLongKey, 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "exceeded") {
		t.Fatalf("unexpected error for key too long: %v", err)
	}
	// Try a start key == KeyMax.
	gArgs.Key = roachpb.KeyMax
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "must be less than KeyMax") {
		t.Fatalf("expected error for start key == KeyMax: %v", err)
	}
	// Try a get with an end key specified (get requires only a start key and should fail).
	gArgs.EndKey = roachpb.KeyMax
	if _, err := client.SendWrapped(store, nil, &gArgs); !testutils.IsError(err, "must be less than KeyMax") {
		t.Fatalf("unexpected error for end key specified on a non-range-based operation: %v", err)
	}
	// Try a scan with too-long EndKey.
	sArgs := scanArgs(roachpb.KeyMin, tooLongKey, 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "length exceeded") {
		t.Fatalf("unexpected error for end key too long: %v", err)
	}
	// Try a scan with end key < start key.
	sArgs.Key = []byte("b")
	sArgs.EndKey = []byte("a")
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "must be greater than") {
		t.Fatalf("unexpected error for end key < start: %v", err)
	}
	// Try a scan with start key == end key.
	sArgs.Key = []byte("a")
	sArgs.EndKey = sArgs.Key
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "must be greater than") {
		t.Fatalf("unexpected error for start == end key: %v", err)
	}
	// Try a scan with range-local start key, but "regular" end key.
	sArgs.Key = keys.MakeRangeKey([]byte("test"), []byte("sffx"), nil)
	sArgs.EndKey = []byte("z")
	if _, err := client.SendWrapped(store, nil, &sArgs); !testutils.IsError(err, "range-local") {
		t.Fatalf("unexpected error for local start, non-local end key: %v", err)
	}

	// Try a put to meta2 key which would otherwise exceed maximum key
	// length, but is accepted because of the meta prefix.
	meta2KeyMax := keys.MakeKey(keys.Meta2Prefix, roachpb.KeyMax)
	pArgs := putArgs(meta2KeyMax, []byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to meta2 value: %s", err)
	}
	// Try to put a range descriptor record for a start key which is
	// maximum length.
	key := append([]byte{}, roachpb.KeyMax...)
	key[len(key)-1] = 0x01
	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", err)
	}
	// Try a put to txn record for a meta2 key (note that this doesn't
	// actually happen in practice, as txn records are not put directly,
	// but are instead manipulated only through txn methods).
	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, []byte(uuid.NewUUID4())),
		[]byte("value"), 1, store.StoreID())
	if _, err := client.SendWrapped(store, nil, &pArgs); err != nil {
		t.Fatalf("unexpected error on put to txn meta2 value: %s", err)
	}
}
Example #22
// bootstrapCluster bootstraps multiple stores using the provided
// engines and cluster ID. The first bootstrapped store contains a
// single range spanning all keys. Initial range lookup metadata is
// populated for the range. Returns the cluster ID.
func bootstrapCluster(engines []engine.Engine) (string, error) {
	clusterID := uuid.NewUUID4().String()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	ctx.Tracer = tracing.NewTracer()
	// Create a KV DB with a local sender.
	stores := storage.NewStores(ctx.Clock)
	sender := kv.NewTxnCoordSender(stores, ctx.Clock, false, ctx.Tracer, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return "", util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return "", err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			initialValues := GetBootstrapSchema().GetInitialValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return "", err
			}
		}
		if err := s.Start(stopper); err != nil {
			return "", err
		}

		stores.AddStore(s)

		// Initialize node and store ids.  Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return "", util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return "", util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return clusterID, nil
}
Example #23
// Reconstructed from a truncated snippet: the test name, setup, and
// loop header below are assumptions (the original began mid-function);
// the loop body is unchanged. It checks that NodeList.Add records each
// node exactly once and that Contains then finds it.
func TestNodeListAddAndContains(t *testing.T) {
	sn := NodeList{}
	for i, n := range []NodeID{101, 103, 105} {
		sn.Add(n)
		if nodes := sn.Nodes; len(nodes) != i+1 {
			t.Fatalf("%d: missing values or duplicates: %v",
				i, nodes)
		}
		if !sn.Contains(n) {
			t.Fatalf("%d: false negative hit for %d on slice %v",
				i, n, sn.Nodes)
		}
	}
}

var ts = makeTS(10, 11)
var nonZeroTxn = Transaction{
	Name:          "name",
	Key:           Key("foo"),
	ID:            uuid.NewUUID4(),
	Priority:      957356782,
	Isolation:     SNAPSHOT,
	Status:        COMMITTED,
	Epoch:         2,
	LastHeartbeat: &Timestamp{1, 2},
	Timestamp:     makeTS(20, 21),
	OrigTimestamp: makeTS(30, 31),
	MaxTimestamp:  makeTS(40, 41),
	CertainNodes: NodeList{
		Nodes: []NodeID{101, 103, 105},
	},
	Writing:  true,
	Sequence: 123,
	Intents:  []Span{{Key: []byte("a")}},
}
Example #24
// Truncated excerpt of a SQL builtins map; the entry enclosing the
// fields just below is cut off in the original.
			types:      typeList{},
			returnType: DummyInt,
			impure:     true,
			fn: func(ctx EvalContext, args DTuple) (Datum, error) {
				return generateUniqueInt(ctx.NodeID), nil
			},
		},
	},

	"experimental_uuid_v4": {
		builtin{
			types:      typeList{},
			returnType: DummyBytes,
			impure:     true,
			fn: func(_ EvalContext, args DTuple) (Datum, error) {
				return DBytes(uuid.NewUUID4()), nil
			},
		},
	},

	"greatest": {
		builtin{
			types: nil,
			fn: func(ctx EvalContext, args DTuple) (Datum, error) {
				return pickFromTuple(ctx, true /* greatest */, args)
			},
		},
	},

	"least": {
		builtin{
			// (remainder truncated in the original)
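From a client, the builtin is callable like any other SQL function. A brief sketch, assuming a *sql.DB handle connected to a server that exposes experimental_uuid_v4 (the db variable and error handling are illustrative):

// Sketch: fetch a server-generated v4 UUID (16 raw bytes) over SQL.
var id []byte
if err := db.QueryRow(`SELECT experimental_uuid_v4()`).Scan(&id); err != nil {
	log.Fatal(err)
}
fmt.Printf("uuid: %x\n", id)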
Example #25
// TestStoreVerifyKeys checks that key length is enforced and
// that end keys must sort >= start.
func TestStoreVerifyKeys(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, _, stopper := createTestStore(t)
	defer stopper.Stop()
	tooLongKey := proto.Key(strings.Repeat("x", proto.KeyMaxLength+1))

	// Start with a too-long key on a get.
	gArgs := getArgs(tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for key too long")
	}
	// Try a start key == KeyMax.
	gArgs.Key = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start key == KeyMax")
	}
	// Try a get with an end key specified (get requires only a start key and should fail).
	gArgs.EndKey = proto.KeyMax
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &gArgs, Reply: gArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key specified on a non-range-based operation")
	}
	// Try a scan with too-long EndKey.
	sArgs := scanArgs(proto.KeyMin, tooLongKey, 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key too long")
	}
	// Try a scan with end key < start key.
	sArgs.Key = []byte("b")
	sArgs.EndKey = []byte("a")
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for end key < start")
	}
	// Try a scan with start key == end key.
	sArgs.Key = []byte("a")
	sArgs.EndKey = sArgs.Key
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &sArgs, Reply: sArgs.CreateReply()}); err == nil {
		t.Fatal("expected error for start == end key")
	}
	// Try a put to meta2 key which would otherwise exceed maximum key
	// length, but is accepted because of the meta prefix.
	meta2KeyMax := keys.MakeKey(keys.Meta2Prefix, proto.KeyMax)
	pArgs := putArgs(meta2KeyMax, []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to meta2 value: %s", err)
	}
	// Try to put a range descriptor record for a start key which is
	// maximum length.
	key := append([]byte{}, proto.KeyMax...)
	key[len(key)-1] = 0x01
	pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", err)
	}
	// Try a put to txn record for a meta2 key (note that this doesn't
	// actually happen in practice, as txn records are not put directly,
	// but are instead manipulated only through txn methods).
	pArgs = putArgs(keys.TransactionKey(meta2KeyMax, []byte(uuid.NewUUID4())),
		[]byte("value"), 1, store.StoreID())
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: &pArgs, Reply: pArgs.CreateReply()}); err != nil {
		t.Fatalf("unexpected error on put to txn meta2 value: %s", err)
	}
}