Example #1
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(
	ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	ctx := tc.AnnotateCtx(context.TODO())
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof(ctx, "%s: auto-wrapping in txn and re-executing: ", ba)
	}
	// TODO(bdarnell): need to be able to pass other parts of DBContext
	// through here.
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = ba.UserPriority
	tmpDB := client.NewDBWithContext(tc, dbCtx)
	var br *roachpb.BatchResponse
	err := tmpDB.Txn(ctx, func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := txn.NewBatch()
		b.Header = ba.Header
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.AddRawRequest(req)
		}
		err := txn.CommitInBatch(b)
		br = b.RawResponse()
		return err
	})
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
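The examples in this listing all share one pattern: obtain the defaults with client.DefaultDBContext(), override the fields of interest (UserPriority, TxnRetryOptions, and so on), and hand the result to client.NewDBWithContext together with a Sender. Below is a minimal sketch of that pattern, assuming the same historical CockroachDB client API used in these examples; newDBWithPriority is a hypothetical helper, not part of the codebase.

// newDBWithPriority is a hypothetical helper (assumption, not from the
// codebase) distilling the pattern shared by these examples: start from
// client.DefaultDBContext(), adjust the fields of interest, and build a
// *client.DB around an existing client.Sender.
func newDBWithPriority(sender client.Sender, priority roachpb.UserPriority) *client.DB {
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = priority
	return client.NewDBWithContext(sender, dbCtx)
}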
Example #2
// TestClientRunTransaction verifies some simple transaction isolation
// semantics.
func TestClientRunTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.InitialBackoff = 1 * time.Millisecond
	db := createTestClientForUser(t, s, security.NodeUser, dbCtx)

	for _, commit := range []bool{true, false} {
		value := []byte("value")
		key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit))

		// Use snapshot isolation so the non-transactional read can always push.
		err := db.Txn(context.TODO(), func(txn *client.Txn) error {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}

			// Put transactional value.
			if err := txn.Put(key, value); err != nil {
				return err
			}
			// Attempt to read outside of txn.
			if gr, err := db.Get(context.TODO(), key); err != nil {
				return err
			} else if gr.Value != nil {
				return errors.Errorf("expected nil value; got %+v", gr.Value)
			}
			// Read within the transaction.
			if gr, err := txn.Get(key); err != nil {
				return err
			} else if gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				return errors.Errorf("expected value %q; got %q", value, gr.ValueBytes())
			}
			if !commit {
				return errors.Errorf("purposefully failing transaction")
			}
			return nil
		})

		if commit != (err == nil) {
			t.Errorf("expected success? %t; got %s", commit, err)
		} else if !commit && !testutils.IsError(err, "purposefully failing transaction") {
			t.Errorf("unexpected failure with !commit: %v", err)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr, err := db.Get(context.TODO(), key)
		if commit {
			if err != nil || gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				t.Errorf("expected success reading value: %+v, %s", gr.Value, err)
			}
		} else {
			if err != nil || gr.Value != nil {
				t.Errorf("expected success and nil value: %+v, %s", gr.Value, err)
			}
		}
	}
}
Example #3
// TestClientPermissions verifies permission enforcement.
func TestClientPermissions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// NodeUser certs are required for all KV operations.
	// RootUser has no KV privileges whatsoever.
	nodeClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(),
		security.NodeUser, client.DefaultDBContext())
	rootClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(),
		security.RootUser, client.DefaultDBContext())

	testCases := []struct {
		path    string
		client  *client.DB
		allowed bool
	}{
		{"foo", rootClient, false},
		{"foo", nodeClient, true},

		{testUser + "/foo", rootClient, false},
		{testUser + "/foo", nodeClient, true},

		{testUser + "foo", rootClient, false},
		{testUser + "foo", nodeClient, true},

		{testUser, rootClient, false},
		{testUser, nodeClient, true},

		{"unknown/foo", rootClient, false},
		{"unknown/foo", nodeClient, true},
	}

	value := []byte("value")
	const matchErr = "is not allowed"
	for tcNum, tc := range testCases {
		err := tc.client.Put(context.TODO(), tc.path, value)
		if (err == nil) != tc.allowed || (!tc.allowed && !testutils.IsError(err, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%v", tcNum, tc.allowed, err)
		}
		_, err = tc.client.Get(context.TODO(), tc.path)
		if (err == nil) != tc.allowed || (!tc.allowed && !testutils.IsError(err, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%v", tcNum, tc.allowed, err)
		}
	}
}
Example #4
// createTestNotifyClient creates a new client connected to the given test
// server. The returned notifyingSender wraps the DB's sender and contains a
// waitgroup to allow waiting.
func createTestNotifyClient(
	t *testing.T, s serverutils.TestServerInterface, priority roachpb.UserPriority,
) (*client.DB, *notifyingSender) {
	db := createTestClient(t, s)
	sender := &notifyingSender{wrapped: db.GetSender()}
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = priority
	return client.NewDBWithContext(sender, dbCtx), sender
}
Example #5
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(
	t *testing.T, stopper *stop.Stopper, addr string, priority roachpb.UserPriority,
) (*client.DB, *notifyingSender) {
	db := createTestClient(t, stopper, addr)
	sender := &notifyingSender{wrapped: db.GetSender()}
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = priority
	return client.NewDBWithContext(sender, dbCtx), sender
}
Example #6
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	nc := &base.NodeIDContainer{}
	ambient.AddLogTag("n", nc)

	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}

	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
	ltc.Stopper.AddCloser(ltc.Eng)

	ltc.Stores = storage.NewStores(ambient, ltc.Clock)

	ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	cfg := storage.TestStoreConfig()
	if ltc.RangeRetryOptions != nil {
		cfg.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	cfg.AmbientCtx = ambient
	cfg.Clock = ltc.Clock
	cfg.DB = ltc.DB
	cfg.Gossip = ltc.Gossip
	cfg.Transport = transport
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	nc.Set(context.TODO(), nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
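Examples #8, #9, and #11 below exercise the branch above where a caller pre-populates DBContext before Start, so that client.DefaultDBContext() is used only as a fallback. Below is a minimal sketch of that injection pattern, assuming the surrounding test package provides InitSenderForLocalTestCluster as in those examples; startClusterWithFastRetries is a hypothetical helper and the backoff value is illustrative.

// startClusterWithFastRetries is a hypothetical helper (assumption, not from
// the codebase) showing how a test hands a customized DBContext to
// LocalTestCluster; Start above falls back to client.DefaultDBContext() only
// when DBContext is nil.
func startClusterWithFastRetries(t *testing.T) *localtestcluster.LocalTestCluster {
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.InitialBackoff = 1 * time.Millisecond // illustrative value
	s := &localtestcluster.LocalTestCluster{DBContext: &dbCtx}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s // the caller should defer s.Stop()
}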
Example #7
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18
	defer config.TestingSetDefaultZoneConfig(cfg)()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()

	// Start a test writer that writes about 32K per key so that not too many
	// writes are needed to split the 256K range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we end up with at least five ranges in the allotted time.
	testutils.SucceedsSoon(t, func() error {
		// Scan the meta2 range descriptor records.
		rows, err := s.DB.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return errors.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return errors.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	testutils.SucceedsSoon(t, func() error {
		if _, _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example #8
// TestUncertaintyRestart verifies that a transaction which finds a write in
// its near future will restart exactly once, meaning that it's made a note of
// that node's clock for its new timestamp.
func TestUncertaintyRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 250 * time.Millisecond
	dbCtx := client.DefaultDBContext()
	s := &localtestcluster.LocalTestCluster{
		Clock:     hlc.NewClock(hlc.UnixNano, maxOffset),
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	if err := disableOwnNodeCertain(s); err != nil {
		t.Fatal(err)
	}
	s.Manual.Increment(s.Clock.MaxOffset().Nanoseconds() + 1)

	var key = roachpb.Key("a")

	errChan := make(chan error)
	start := make(chan struct{})
	go func() {
		<-start
		errChan <- s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
			return txn.Put(key, "hi")
		})
	}()

	if err := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
		if txn.Proto.Epoch > 2 {
			t.Fatal("expected only one restart")
		}
		// Issue a read to pick a timestamp.
		if _, err := txn.Get(key.Next()); err != nil {
			t.Fatal(err)
		}
		if txn.Proto.Epoch == 0 {
			close(start) // let someone write into our future
			// when they're done, try to read
			if err := <-errChan; err != nil {
				t.Fatal(err)
			}
		}
		if _, err := txn.Get(key.Next()); err != nil {
			if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok {
				t.Fatalf("unexpected error: %T: %s", err, err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
Example #9
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string, isolations []enginepb.IsolationType, txns []string, verify *verifier, t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s := &localtestcluster.LocalTestCluster{
		DBContext:         &dbCtx,
		RangeRetryOptions: &correctnessTestRetryOptions,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	verifier.run(isolations, s.DB, t)
}
Example #10
// createTestDB creates a local test server and starts it. The caller
// is responsible for stopping the test server.
func createTestDB(t testing.TB) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	return createTestDBWithContext(t, client.DefaultDBContext())
}
Example #11
// TestUncertaintyMaxTimestampForwarding checks that when receiving an
// uncertainty restart on a node, the next attempt to read (at the increased
// timestamp) is free from uncertainty. See roachpb.Transaction for details.
func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
	defer leaktest.AfterTest(t)()

	dbCtx := client.DefaultDBContext()
	s := &localtestcluster.LocalTestCluster{
		// Large offset so that any value in the future is an uncertain read. Also
		// makes sure that the values we write in the future below don't actually
		// wind up in the past.
		Clock:     hlc.NewClock(hlc.UnixNano, 50*time.Second),
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	disableOwnNodeCertain(t, s)

	offsetNS := int64(100)
	keySlow := roachpb.Key("slow")
	keyFast := roachpb.Key("fast")
	valSlow := []byte("wols")
	valFast := []byte("tsaf")

	// Write keySlow at now+offset, keyFast at now+2*offset
	futureTS := s.Clock.Now()
	futureTS.WallTime += offsetNS
	val := roachpb.MakeValueFromBytes(valSlow)
	if err := engine.MVCCPut(context.Background(), s.Eng, nil, keySlow, futureTS, val, nil); err != nil {
		t.Fatal(err)
	}
	futureTS.WallTime += offsetNS
	val.SetBytes(valFast)
	if err := engine.MVCCPut(context.Background(), s.Eng, nil, keyFast, futureTS, val, nil); err != nil {
		t.Fatal(err)
	}

	i := 0
	if tErr := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
		i++
		// The first command serves to start a Txn, fixing the timestamps.
		// There will be a restart, but this is idempotent.
		if _, err := txn.Scan("t", roachpb.Key("t").Next(), 0); err != nil {
			t.Fatal(err)
		}
		// This is a bit of a hack for the sake of this test: By visiting the
		// node above, we've made a note of its clock, which allows us to
		// prevent the restart. But we want to catch the restart, so reset the
		// observed timestamps.
		txn.Proto.ResetObservedTimestamps()

		// The server's clock suddenly jumps ahead of keyFast's timestamp.
		s.Manual.Increment(2*offsetNS + 1)

		// Now read slowKey first. It should read at 0, catch an uncertainty error,
		// and get keySlow's timestamp in that error, but upgrade it to the larger
		// node clock (which is ahead of keyFast as well). If the last part does
		// not happen, the read of keyFast should fail (i.e. read nothing).
		// There will be exactly one restart here.
		if gr, err := txn.Get(keySlow); err != nil {
			if i != 1 {
				t.Fatalf("unexpected transaction error: %s", err)
			}
			return err
		} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), valSlow) {
			t.Fatalf("read of %q returned %v, wanted value %q", keySlow, gr.Value, valSlow)
		}

		// The node should already be certain, so we expect no restart here
		// and to read the correct key.
		if gr, err := txn.Get(keyFast); err != nil {
			t.Fatalf("second Get failed with %s", err)
		} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), valFast) {
			t.Fatalf("read of %q returned %v, wanted value %q", keyFast, gr.Value, valFast)
		}
		return nil
	}); tErr != nil {
		t.Fatal(tErr)
	}
}
Example #12
func createTestClient(t *testing.T, s serverutils.TestServerInterface) *client.DB {
	return createTestClientForUser(t, s, security.NodeUser, client.DefaultDBContext())
}
Example #13
func createTestClient(t *testing.T, stopper *stop.Stopper, addr string) *client.DB {
	return createTestClientForUser(t, stopper, addr, security.NodeUser, client.DefaultDBContext())
}