func TestBadRequest(t *testing.T) { defer leaktest.AfterTest(t) s := server.StartTestServer(t) db := createTestClient(t, s.Stopper(), s.ServingAddr()) defer s.Stop() // Write key "a". if pErr := db.Put("a", "value"); pErr != nil { t.Fatal(pErr) } if _, pErr := db.Scan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") { t.Fatalf("unexpected error on scan with startkey == endkey: %v", pErr) } if _, pErr := db.ReverseScan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") { t.Fatalf("unexpected pError on reverse scan with startkey == endkey: %v", pErr) } if pErr := db.DelRange("x", "a"); !testutils.IsPError(pErr, "truncation resulted in empty batch") { t.Fatalf("unexpected error on deletion on [x, a): %v", pErr) } if pErr := db.DelRange("", "z"); !testutils.IsPError(pErr, "must be greater than LocalMax") { t.Fatalf("unexpected error on deletion on [KeyMin, z): %v", pErr) } }
// TestAuthentication tests authentication for the KV endpoint.
func TestAuthentication(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()

	var b1 client.Batch
	b1.Put("a", "b")

	// Create a node user client and call Run() on it which lets us build our own
	// request, specifying the user.
	db1 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser)
	if pErr := db1.Run(&b1); pErr != nil {
		t.Fatal(pErr)
	}

	var b2 client.Batch
	b2.Put("c", "d")

	// Try again, but this time with certs for a non-node user (even the root
	// user has no KV permissions).
	db2 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.RootUser)
	if pErr := db2.Run(&b2); !testutils.IsPError(pErr, "is not allowed") {
		t.Fatal(pErr)
	}
}
// TestTxnAbortCount verifies that a transaction aborted by an intentional
// error bumps the abort metric exactly once.
func TestTxnAbortCount(t *testing.T) {
	defer leaktest.AfterTest(t)()
	_, sender, cleanupFn := setupMetricsTest(t)
	defer cleanupFn()
	value := []byte("value")
	db := client.NewDB(sender)

	intentionalErrText := "intentional error to cause abort"
	// Test aborted transaction.
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		key := []byte("key-abort")
		if err := txn.SetIsolation(roachpb.SNAPSHOT); err != nil {
			return roachpb.NewError(err)
		}
		if pErr := txn.Put(key, value); pErr != nil {
			t.Fatal(pErr)
		}
		// Returning an error from the closure aborts the txn.
		return roachpb.NewErrorf(intentionalErrText)
	}); !testutils.IsPError(pErr, intentionalErrText) {
		t.Fatalf("unexpected error: %s", pErr)
	}
	teardownHeartbeats(sender)
	// Counts presumably are (commits, abandons, aborts, restarts) — the
	// abort slot is 1; confirm ordering against checkTxnMetrics.
	checkTxnMetrics(t, sender, "abort txn", 0, 0, 1, 0)
}
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request // returns an error but also indicates a Writing transaction, the coordinator // tracks it just like a successful request. func TestTxnCoordSenderErrorWithIntent(t *testing.T) { defer leaktest.AfterTest(t) stopper := stop.NewStopper() manual := hlc.NewManualClock(0) clock := hlc.NewClock(manual.UnixNano) clock.SetMaxOffset(20) ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { txn := ba.Txn.Clone() txn.Writing = true pErr := roachpb.NewError(roachpb.NewTransactionRetryError()) pErr.SetTxn(txn) return nil, pErr }), clock, false, nil, stopper) defer stopper.Stop() var ba roachpb.BatchRequest key := roachpb.Key("test") ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}}) ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}}) ba.Add(&roachpb.EndTransactionRequest{}) ba.Txn = &roachpb.Transaction{Name: "test"} if _, pErr := ts.Send(context.Background(), ba); !testutils.IsPError(pErr, "retry txn") { t.Fatalf("unexpected error: %v", pErr) } defer teardownHeartbeats(ts) ts.Lock() defer ts.Unlock() if len(ts.txns) != 1 { t.Fatalf("expected transaction to be tracked") } }
// Verifies that an inner transaction in a nested transaction strips the transaction
// information in its error when propagating it to an other transaction.
func TestNestedTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup()
	defer s.Stop()

	pErr := db.Txn(func(txn1 *client.Txn) *roachpb.Error {
		if pErr := txn1.Put("a", "1"); pErr != nil {
			t.Fatalf("unexpected put error: %s", pErr)
		}
		// Run an inner txn whose error carries a transaction record.
		iPErr := db.Txn(func(txn2 *client.Txn) *roachpb.Error {
			txnProto := roachpb.NewTransaction("test", roachpb.Key("a"), 1, roachpb.SERIALIZABLE, roachpb.Timestamp{}, 0)
			return roachpb.NewErrorWithTxn(util.Errorf("inner txn error"), txnProto)
		})
		// The inner error must have its txn stripped before it reaches us.
		if iPErr.GetTxn() != nil {
			t.Errorf("error txn must be stripped: %s", iPErr)
		}
		return iPErr
	})
	if pErr == nil {
		t.Fatal("unexpected success of txn")
	}
	// The error text itself must still propagate to the outer txn's caller.
	if !testutils.IsPError(pErr, "inner txn error") {
		t.Errorf("unexpected failure: %s", pErr)
	}
}
// TestTxnAbandonCount verifies that a client-abandoned transaction, detected
// and torn down by the coordinator's heartbeat, bumps the abandon metric.
func TestTxnAbandonCount(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual, sender, cleanupFn := setupMetricsTest(t)
	defer cleanupFn()
	value := []byte("value")
	db := client.NewDB(sender)

	// Test abandoned transaction by making the client timeout ridiculously short. We also set
	// the sender to heartbeat very frequently, because the heartbeat detects and tears down
	// abandoned transactions.
	sender.heartbeatInterval = 2 * time.Millisecond
	sender.clientTimeout = 1 * time.Millisecond
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		key := []byte("key-abandon")
		if err := txn.SetIsolation(roachpb.SNAPSHOT); err != nil {
			return roachpb.NewError(err)
		}
		if pErr := txn.Put(key, value); pErr != nil {
			return pErr
		}

		// Advance the manual clock past the client timeout so the heartbeat
		// declares the txn abandoned.
		manual.Increment(int64(sender.clientTimeout + sender.heartbeatInterval*2))

		checkTxnMetrics(t, sender, "abandon txn", 0, 1, 0, 0)

		return nil
	}); !testutils.IsPError(pErr, "already committed or aborted") {
		t.Fatalf("unexpected error: %s", pErr)
	}
}
// TestTxnDBBasics verifies that a simple transaction can be run and
// either committed or aborted. On commit, mutations are visible; on
// abort, mutations are never visible. During the txn, verify that
// uncommitted writes cannot be read outside of the txn but can be
// read from inside the txn.
func TestTxnDBBasics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	value := []byte("value")

	// Run the scenario once committing and once aborting.
	for _, commit := range []bool{true, false} {
		key := []byte(fmt.Sprintf("key-%t", commit))

		pErr := s.DB.Txn(func(txn *client.Txn) *roachpb.Error {
			// Use snapshot isolation so non-transactional read can always push.
			if err := txn.SetIsolation(roachpb.SNAPSHOT); err != nil {
				return roachpb.NewError(err)
			}

			// Put transactional value.
			if pErr := txn.Put(key, value); pErr != nil {
				return pErr
			}

			// Attempt to read outside of txn.
			if gr, pErr := s.DB.Get(key); pErr != nil {
				return pErr
			} else if gr.Exists() {
				return roachpb.NewErrorf("expected nil value; got %v", gr.Value)
			}

			// Read within the transaction.
			if gr, pErr := txn.Get(key); pErr != nil {
				return pErr
			} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), value) {
				return roachpb.NewErrorf("expected value %q; got %q", value, gr.Value)
			}
			// Returning an error here aborts the txn in the !commit case.
			if !commit {
				return roachpb.NewErrorf("purposefully failing transaction")
			}
			return nil
		})

		if commit != (pErr == nil) {
			t.Errorf("expected success? %t; got %s", commit, pErr)
		} else if !commit && !testutils.IsPError(pErr, "purposefully failing transaction") {
			t.Errorf("unexpected failure with !commit: %s", pErr)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr, pErr := s.DB.Get(key)
		if commit {
			if pErr != nil || !gr.Exists() || !bytes.Equal(gr.ValueBytes(), value) {
				t.Errorf("expected success reading value: %+v, %s", gr.ValueBytes(), pErr)
			}
		} else {
			if pErr != nil || gr.Exists() {
				t.Errorf("expected success and nil value: %s, %s", gr, pErr)
			}
		}
	}
}
// TestClientRunTransaction verifies some simple transaction isolation // semantics. func TestClientRunTransaction(t *testing.T) { defer leaktest.AfterTest(t) s := server.StartTestServer(t) defer s.Stop() defer setTxnRetryBackoff(1 * time.Millisecond)() db := createTestClient(t, s.Stopper(), s.ServingAddr()) for _, commit := range []bool{true, false} { value := []byte("value") key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit)) // Use snapshot isolation so non-transactional read can always push. pErr := db.Txn(func(txn *client.Txn) *roachpb.Error { if pErr := txn.SetIsolation(roachpb.SNAPSHOT); pErr != nil { return pErr } // Put transactional value. if pErr := txn.Put(key, value); pErr != nil { return pErr } // Attempt to read outside of txn. if gr, pErr := db.Get(key); pErr != nil { return pErr } else if gr.Value != nil { return roachpb.NewErrorf("expected nil value; got %+v", gr.Value) } // Read within the transaction. if gr, pErr := txn.Get(key); pErr != nil { return pErr } else if gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) { return roachpb.NewErrorf("expected value %q; got %q", value, gr.ValueBytes()) } if !commit { return roachpb.NewErrorf("purposefully failing transaction") } return nil }) if commit != (pErr == nil) { t.Errorf("expected success? %t; got %s", commit, pErr) } else if !commit && !testutils.IsPError(pErr, "purposefully failing transaction") { t.Errorf("unexpected failure with !commit: %s", pErr) } // Verify the value is now visible on commit == true, and not visible otherwise. gr, pErr := db.Get(key) if commit { if pErr != nil || gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) { t.Errorf("expected success reading value: %+v, %s", gr.Value, pErr) } } else { if pErr != nil || gr.Value != nil { t.Errorf("expected success and nil value: %+v, %s", gr.Value, pErr) } } } }
// TestClientPermissions verifies permission enforcement.
func TestClientPermissions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()

	// NodeUser certs are required for all KV operations.
	// RootUser has no KV privileges whatsoever.
	nodeClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser)
	rootClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.RootUser)

	testCases := []struct {
		path    string
		client  *client.DB
		allowed bool
	}{
		{"foo", rootClient, false},
		{"foo", nodeClient, true},
		{testUser + "/foo", rootClient, false},
		{testUser + "/foo", nodeClient, true},
		{testUser + "foo", rootClient, false},
		{testUser + "foo", nodeClient, true},
		{testUser, rootClient, false},
		{testUser, nodeClient, true},
		{"unknown/foo", rootClient, false},
		{"unknown/foo", nodeClient, true},
	}

	value := []byte("value")
	const matchErr = "is not allowed"
	for tcNum, tc := range testCases {
		// Both writes and reads must obey the same permission rule.
		pErr := tc.client.Put(tc.path, value)
		if (pErr == nil) != tc.allowed || (!tc.allowed && !testutils.IsPError(pErr, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%s", tcNum, tc.allowed, pErr)
		}
		_, pErr = tc.client.Get(tc.path)
		if (pErr == nil) != tc.allowed || (!tc.allowed && !testutils.IsPError(pErr, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%s", tcNum, tc.allowed, pErr)
		}
	}
}
// TestStoreRangeMergeLastRange verifies that merging the last range // fails. func TestStoreRangeMergeLastRange(t *testing.T) { defer leaktest.AfterTest(t) store, stopper := createTestStore(t) defer stopper.Stop() // Merge last range. args := adminMergeArgs(roachpb.KeyMin) if _, pErr := client.SendWrapped(rg1(store), nil, &args); !testutils.IsPError(pErr, "cannot merge final range") { t.Fatalf("expected 'cannot merge final range' error; got %s", pErr) } }
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	// Each case embeds the error the wrapped sender returns plus the text
	// the coordinator's caller is expected to observe.
	testCases := []struct {
		roachpb.Error
		errMsg string
	}{
		{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
		{*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
			TxnMeta: enginepb.TxnMeta{
				ID: uuid.NewV4(),
			}})), "failed to push"},
		{*roachpb.NewErrorf("testError"), "testError"},
	}
	for i, test := range testCases {
		// Wrap each case in a closure so the deferred teardown/unlock run
		// per iteration rather than at function exit.
		func() {
			// The sender fails with the test error but marks the txn as
			// Writing, i.e. an intent has been laid down.
			senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
				txn := ba.Txn.Clone()
				txn.Writing = true
				pErr := &roachpb.Error{}
				*pErr = test.Error
				pErr.SetTxn(&txn)
				return nil, pErr
			}
			ctx := tracing.WithTracer(context.Background(), tracing.NewTracer())
			ts := NewTxnCoordSender(ctx, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics())

			var ba roachpb.BatchRequest
			key := roachpb.Key("test")
			ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.EndTransactionRequest{})
			ba.Txn = &roachpb.Transaction{Name: "test"}
			_, pErr := ts.Send(context.Background(), ba)
			if !testutils.IsPError(pErr, test.errMsg) {
				t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
			}

			defer teardownHeartbeats(ts)
			ts.Lock()
			defer ts.Unlock()
			// Despite the error, the writing txn must be tracked.
			if len(ts.txns) != 1 {
				t.Errorf("%d: expected transaction to be tracked", i)
			}
		}()
	}
}
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	// Disable the split queue so the range layout stays as created.
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()

	// Merge last range. There is no right-hand neighbor, so this must fail.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); !testutils.IsPError(pErr, "cannot merge final range") {
		t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
	}
}
// TestPushTransactionsWithNonPendingIntent verifies that maybePushTransactions
// returns an error when a non-pending intent is passed.
func TestPushTransactionsWithNonPendingIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tc := testContext{}
	tc.Start(t)
	defer tc.Stop()

	// An ABORTED intent is not pending and must be rejected by the resolver.
	intents := []roachpb.Intent{{Span: roachpb.Span{Key: roachpb.Key("a")}, Status: roachpb.ABORTED}}
	if _, pErr := tc.store.intentResolver.maybePushTransactions(
		context.Background(), intents, roachpb.Header{}, roachpb.PUSH_TOUCH, true); !testutils.IsPError(pErr, "unexpected aborted/resolved intent") {
		t.Errorf("expected error on aborted/resolved intent, but got %s", pErr)
	}
}
// TestStoreRangeMergeNonCollocated attempts to merge two ranges // that are not on the same stores. func TestStoreRangeMergeNonCollocated(t *testing.T) { defer leaktest.AfterTest(t)() mtc := startMultiTestContext(t, 4) defer mtc.Stop() store := mtc.stores[0] // Split into 3 ranges argsSplit := adminSplitArgs(roachpb.KeyMin, []byte("d")) if _, pErr := client.SendWrapped(rg1(store), nil, &argsSplit); pErr != nil { t.Fatalf("Can't split range %s", pErr) } argsSplit = adminSplitArgs(roachpb.KeyMin, []byte("b")) if _, pErr := client.SendWrapped(rg1(store), nil, &argsSplit); pErr != nil { t.Fatalf("Can't split range %s", pErr) } rangeA := store.LookupReplica([]byte("a"), nil) rangeADesc := rangeA.Desc() rangeB := store.LookupReplica([]byte("c"), nil) rangeBDesc := rangeB.Desc() rangeC := store.LookupReplica([]byte("e"), nil) rangeCDesc := rangeC.Desc() if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) { log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeBDesc.StartKey) } if bytes.Equal(rangeBDesc.StartKey, rangeCDesc.StartKey) { log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeBDesc.StartKey, rangeCDesc.StartKey) } if bytes.Equal(rangeADesc.StartKey, rangeCDesc.StartKey) { log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeCDesc.StartKey) } // Replicate the ranges to different sets of stores. Ranges A and C // are collocated, but B is different. mtc.replicateRange(rangeA.RangeID, 1, 2) mtc.replicateRange(rangeB.RangeID, 1, 3) mtc.replicateRange(rangeC.RangeID, 1, 2) // Attempt to merge. rangeADesc = rangeA.Desc() argsMerge := adminMergeArgs(roachpb.Key(rangeADesc.StartKey)) if _, pErr := rangeA.AdminMerge(context.Background(), argsMerge, rangeADesc); !testutils.IsPError(pErr, "ranges not collocated") { t.Fatalf("did not got expected error; got %s", pErr) } }
// TestStoreRangeSplitAtIllegalKeys verifies a range cannot be split
// at illegal keys.
func TestStoreRangeSplitAtIllegalKeys(t *testing.T) {
	defer leaktest.AfterTest(t)()
	store, stopper, _ := createTestStore(t)
	defer stopper.Stop()

	// Meta addressing keys and reserved system-table prefixes are all
	// illegal split points.
	for _, key := range []roachpb.Key{
		keys.Meta1Prefix,
		testutils.MakeKey(keys.Meta1Prefix, []byte("a")),
		testutils.MakeKey(keys.Meta1Prefix, roachpb.RKeyMax),
		keys.Meta2KeyMax,
		keys.MakeTablePrefix(10 /* system descriptor ID */),
	} {
		args := adminSplitArgs(roachpb.KeyMin, key)
		_, pErr := client.SendWrapped(rg1(store), nil, &args)
		if !testutils.IsPError(pErr, "cannot split") {
			t.Errorf("%q: unexpected split error %s", key, pErr)
		}
	}
}
// TestKVDBInternalMethods verifies that internal-only request types are
// rejected when submitted through the external KV client.
func TestKVDBInternalMethods(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()

	// Each of these request types is reserved for internal use.
	testCases := []roachpb.Request{
		&roachpb.HeartbeatTxnRequest{},
		&roachpb.GCRequest{},
		&roachpb.PushTxnRequest{},
		&roachpb.RangeLookupRequest{},
		&roachpb.ResolveIntentRequest{},
		&roachpb.ResolveIntentRangeRequest{},
		&roachpb.MergeRequest{},
		&roachpb.TruncateLogRequest{},
		&roachpb.LeaderLeaseRequest{},
		&roachpb.EndTransactionRequest{
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{},
		},
	}
	// Verify internal methods experience bad request errors.
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	for i, args := range testCases {
		// Give each request a valid key (and end key for ranged requests)
		// so rejection is due to the method, not a malformed header.
		{
			header := args.Header()
			header.Key = roachpb.Key("a")
			args.SetHeader(header)
		}
		if roachpb.IsRange(args) {
			header := args.Header()
			header.EndKey = args.Header().Key.Next()
			args.SetHeader(header)
		}
		b := &client.Batch{}
		b.InternalAddRequest(args)
		pErr := db.Run(b)
		if pErr == nil {
			t.Errorf("%d: unexpected success calling %s", i, args.Method())
		} else if !testutils.IsPError(pErr, "contains an internal request|contains commit trigger") {
			t.Errorf("%d: unexpected error for %s: %s", i, args.Method(), pErr)
		}
	}
}
func TestPrimaryKeyUnspecified(t *testing.T) { defer leaktest.AfterTest(t) stmt, err := parser.ParseOneTraditional( "CREATE TABLE foo.test (a INT, b INT, CONSTRAINT c UNIQUE (b))") if err != nil { t.Fatal(err) } create := stmt.(*parser.CreateTable) if err := create.Table.NormalizeTableName(""); err != nil { t.Fatal(err) } desc, pErr := makeTableDesc(create, 1) if pErr != nil { t.Fatal(pErr) } pErr = desc.AllocateIDs() if !testutils.IsPError(pErr, errMissingPrimaryKey.Error()) { t.Fatalf("unexpected error: %s", pErr) } }
// TestTxnReadAfterAbandon checks the fix for the condition in issue #4787:
// after a transaction is abandoned we do a read as part of that transaction
// which should fail.
func TestTxnReadAfterAbandon(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual, sender, cleanupFn := setupMetricsTest(t)
	defer cleanupFn()
	value := []byte("value")
	db := client.NewDB(sender)

	// Test abandoned transaction by making the client timeout ridiculously short. We also set
	// the sender to heartbeat very frequently, because the heartbeat detects and tears down
	// abandoned transactions.
	sender.heartbeatInterval = 2 * time.Millisecond
	sender.clientTimeout = 1 * time.Millisecond

	pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		key := []byte("key-abandon")

		if err := txn.SetIsolation(roachpb.SNAPSHOT); err != nil {
			t.Fatal(err)
		}

		if pErr := txn.Put(key, value); pErr != nil {
			t.Fatal(pErr)
		}

		// Advance the clock past the client timeout so the heartbeat
		// abandons the txn, then verify the metric.
		manual.Increment(int64(sender.clientTimeout + sender.heartbeatInterval*2))

		checkTxnMetrics(t, sender, "abandon txn", 0, 1, 0, 0)

		// A read on the now-abandoned txn must fail.
		_, pErr := txn.Get(key)
		if pErr == nil {
			t.Fatalf("Get succeeded on abandoned txn")
		} else if !testutils.IsPError(pErr, "writing transaction timed out") {
			t.Fatalf("unexpected error from Get on abandoned txn: %s", pErr)
		}
		return pErr
	})

	if pErr == nil {
		t.Fatalf("abandoned txn didn't fail")
	}
}
// Verifies that a nested transaction returns an error if an inner txn
// propagates an error to an outer txn.
func TestNestedTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup()
	defer s.Stop()

	txnProto := roachpb.NewTransaction("test", roachpb.Key("a"), 1, roachpb.SERIALIZABLE, roachpb.Timestamp{}, 0)
	pErr := db.Txn(func(txn1 *client.Txn) *roachpb.Error {
		if pErr := txn1.Put("a", "1"); pErr != nil {
			t.Fatalf("unexpected put error: %s", pErr)
		}
		// Propagate an error carrying a foreign txn record from the inner
		// txn directly to the outer one.
		return db.Txn(func(txn2 *client.Txn) *roachpb.Error {
			return roachpb.NewErrorWithTxn(util.Errorf("err"), txnProto)
		})
	})
	if pErr == nil {
		t.Fatal("unexpected success of txn")
	}
	// The outer txn must detect that the error's txn record is not its own.
	if !testutils.IsPError(pErr, "mismatching transaction record in the error") {
		t.Errorf("unexpected failure: %s", pErr)
	}
}
// TestTxnCoordIdempotentCleanup verifies that cleanupTxn is idempotent.
func TestTxnCoordIdempotentCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	txn := client.NewTxn(*s.DB)
	ba := txn.NewBatch()
	ba.Put(roachpb.Key("a"), []byte("value"))
	if pErr := txn.Run(ba); pErr != nil {
		t.Fatal(pErr)
	}

	// Clean up the txn once explicitly...
	s.Sender.cleanupTxn(context.Background(), txn.Proto)

	// ...then ending the txn triggers a second cleanup, which must not blow
	// up; an "aborted" error is acceptable, anything else is not.
	ba = txn.NewBatch()
	ba.InternalAddRequest(&roachpb.EndTransactionRequest{})
	pErr := txn.Run(ba)
	if pErr != nil && !testutils.IsPError(pErr, "aborted") {
		t.Fatal(pErr)
	}
}
// TestEvictCacheOnError verifies leader-cache and range-descriptor-cache
// eviction behavior for the DistSender across RPC and replica errors.
func TestEvictCacheOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// if rpcError is true, the first attempt gets an RPC error, otherwise
	// the RPC call succeeds but there is an error in the RequestHeader.
	// Currently leader and cached range descriptor are treated equally.
	testCases := []struct{ rpcError, retryable, shouldClearLeader, shouldClearReplica bool }{
		{false, false, false, false}, // non-retryable replica error
		{false, true, false, false},  // retryable replica error
		{true, false, true, true},    // RPC error aka all nodes dead
		{true, true, false, false},   // retryable RPC error
	}

	for i, tc := range testCases {
		g, s := makeTestGossip(t)
		// NOTE(review): this defer is inside the loop, so every gossip's
		// cleanup is postponed until the whole test returns — confirm this
		// is intentional rather than per-iteration cleanup.
		defer s()
		leader := roachpb.ReplicaDescriptor{
			NodeID:  99,
			StoreID: 999,
		}
		first := true

		// The first send returns the configured error; any subsequent send
		// succeeds with an empty reply.
		var testFn rpcSendFn = func(_ SendOptions, _ ReplicaSlice, args roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
			if !first {
				return args.CreateReply(), nil
			}
			first = false
			if tc.rpcError {
				return nil, roachpb.NewSendError("boom", tc.retryable)
			}
			var err error
			if tc.retryable {
				err = &roachpb.RangeKeyMismatchError{}
			} else {
				err = errors.New("boom")
			}
			reply := &roachpb.BatchResponse{}
			reply.Error = roachpb.NewError(err)
			return reply, nil
		}

		ctx := &DistSenderContext{
			RPCSend: testFn,
			RangeDescriptorDB: mockRangeDescriptorDB(func(_ roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
				return []roachpb.RangeDescriptor{testRangeDescriptor}, nil
			}),
		}
		ds := NewDistSender(ctx, g)
		// Seed the leader cache so eviction can be observed.
		ds.updateLeaderCache(1, leader)
		put := roachpb.NewPut(roachpb.Key("a"), roachpb.MakeValueFromString("value")).(*roachpb.PutRequest)

		if _, pErr := client.SendWrapped(ds, nil, put); pErr != nil && !testutils.IsPError(pErr, "boom") {
			t.Errorf("put encountered unexpected error: %s", pErr)
		}
		// An empty descriptor in the leader cache means the entry was evicted.
		if cur := ds.leaderCache.Lookup(1); reflect.DeepEqual(cur, &roachpb.ReplicaDescriptor{}) && !tc.shouldClearLeader {
			t.Errorf("%d: leader cache eviction: shouldClearLeader=%t, but value is %v", i, tc.shouldClearLeader, cur)
		}
		// A nil cached descriptor means the range descriptor was evicted.
		_, cachedDesc := ds.rangeCache.getCachedRangeDescriptor(roachpb.RKey(put.Key), false /* !inclusive */)
		if cachedDesc == nil != tc.shouldClearReplica {
			t.Errorf("%d: unexpected second replica lookup behaviour: wanted=%t", i, tc.shouldClearReplica)
		}
	}
}
// TestServerNodeEventFeed verifies that a test server emits Node-specific // events. func TestServerNodeEventFeed(t *testing.T) { defer leaktest.AfterTest(t) s := server.StartTestServer(t) feed := s.EventFeed() // Start reading events from the feed before starting the stores. ner := nodeEventReader{} ner.readEvents(feed) db, err := client.Open(s.Stopper(), fmt.Sprintf("rpcs://%s@%s?certs=%s", security.NodeUser, s.ServingAddr(), security.EmbeddedCertsDir)) if err != nil { t.Fatal(err) } // Add some data in a transaction. It could restart, but we return nil // intentionally (i.e. we're giving up if we don't succeed immediately, // this is all about generating events and we don't check success). if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error { b := txn.NewBatch() b.Put("a", "asdf") b.Put("c", "jkl;") err := txn.CommitInBatch(b) if err != nil { log.Warning(err) } return nil }); pErr != nil { t.Fatalf("error putting data to db: %s", pErr) } // Get some data, discarding the result. if _, err := db.Get("a"); err != nil { t.Fatalf("error getting data from db: %s", err) } // Scan, which should fail (before it makes it to server, so this won't // be tracked) if _, pErr := db.Scan("b", "a", 0); !testutils.IsPError(pErr, "empty batch") { t.Fatalf("unexpected Scan error: %v", pErr) } if pErr := db.CPut("test", "will", "fail"); !testutils.IsPError(pErr, "unexpected value") { t.Fatalf("unexpected CPut error: %v", pErr) } // Close feed and wait for reader to receive all events. feed.Flush() s.Stop() expectedNodeEvents := map[roachpb.NodeID][]string{ roachpb.NodeID(1): { "BeginTransaction", "Put", "Put", "EndTransaction", "Get", "failed ConditionalPut", }, } // TODO(mtracy): This assertion has been made "fuzzy" in order to account // for the unpredictably ordered events from an asynchronous background // task. A future commit should disable that background task (status // recording) during this test, and exact matching should be restored. 
/* if a, e := ner.perNodeFeeds, expectedNodeEvents; !reflect.DeepEqual(a, e) { t.Errorf("node feed did not match expected value. Actual values have been printed to compare with above expectation.\n") log.Infof("Event feed information:\n%s", ner.eventFeedString()) } */ // The actual results should contain the expected results as an ordered // subset. passed := true for k := range expectedNodeEvents { // Maintain an index into the actual and expected feed slices. actual, expected := ner.perNodeFeeds[k], expectedNodeEvents[k] i, j := 0, 0 // Advance indexes until one or both slices are exhausted. for i < len(expected) && j < len(actual) { // If the current expected value matches the current actual value, // advance both indexes. Otherwise, advance only the actual index. if reflect.DeepEqual(expected[i], actual[j]) { i++ } j++ } // Test succeeded if it advanced over every expected event. if i != len(expected) { passed = false break } } if !passed { t.Fatalf("received unexpected events: %s", ner.eventFeedString()) } }
// TestRequestToUninitializedRange tests the behavior when a request
// is sent to a node which should be a replica of the correct range
// but has not yet received its initial snapshot. This would
// previously panic due to a malformed error response from the server,
// as seen in https://github.com/cockroachdb/cockroach/issues/6027.
//
// Prior to the other changes in the commit that introduced it, this
// test would reliable trigger the panic from #6027. However, it
// relies on some hacky tricks to both trigger the panic and shut down
// cleanly. If this test needs a lot of maintenance in the future we
// should be willing to get rid of it.
func TestRequestToUninitializedRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.TestServer{StoresPerNode: 2}
	if err := s.Start(); err != nil {
		t.Fatalf("Could not start server: %v", err)
	}
	defer s.Stop()

	// Choose a range ID that is much larger than any that would be
	// created by initial splits.
	const rangeID = roachpb.RangeID(1000)

	// Set up a range with replicas on two stores of the same node. This
	// ensures that the DistSender will consider both replicas healthy
	// and will try to talk to both (so we can get a non-retryable error
	// from the second store).
	replica1 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	replica2 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   2,
		ReplicaID: 2,
	}

	// HACK: remove the second store from the node to generate a
	// non-retryable error when we try to talk to it.
	store2, err := s.Stores().GetStore(2)
	if err != nil {
		t.Fatal(err)
	}
	s.Stores().RemoveStore(store2)

	// Create the uninitialized range by sending an isolated raft
	// message to the first store.
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}
	raftClient := storage.NewMultiRaftClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := raftClient.RaftMessage(ctx)
	if err != nil {
		t.Fatal(err)
	}
	msg := storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			To:   1,
		},
	}
	if err := stream.Send(&msg); err != nil {
		t.Fatal(err)
	}

	// Make sure the replica was created (but remains uninitialized,
	// since it never receives a snapshot).
	store1, err := s.Stores().GetStore(1)
	if err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		if replica, err := store1.GetReplica(rangeID); err != nil {
			return util.Errorf("failed to look up replica: %s", err)
		} else if replica.IsInitialized() {
			return util.Errorf("expected replica to be uninitialized")
		}
		return nil
	})

	// Create our own DistSender so we can force some requests to the
	// bogus range. The DistSender needs to be in scope for its own
	// MockRangeDescriptorDB closure.
	var sender *kv.DistSender
	sender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:      s.Clock(),
		RPCContext: s.RPCContext(),
		RangeDescriptorDB: kv.MockRangeDescriptorDB(
			func(key roachpb.RKey, considerIntents, useReverseScan bool,
			) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
				if key.Equal(roachpb.RKeyMin) {
					// Pass through requests for the first range to the real sender.
					desc, err := sender.FirstRange()
					if err != nil {
						return nil, nil, roachpb.NewError(err)
					}
					return []roachpb.RangeDescriptor{*desc}, nil, nil
				}
				// Everything else is routed to the bogus uninitialized range.
				return []roachpb.RangeDescriptor{{
					RangeID:  rangeID,
					StartKey: roachpb.RKey(keys.Meta2Prefix),
					EndKey:   roachpb.RKeyMax,
					Replicas: []roachpb.ReplicaDescriptor{replica1, replica2},
				}}, nil, nil
			}),
	}, s.Gossip())

	// Only inconsistent reads triggered the panic in #6027.
	hdr := roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}
	req := roachpb.NewGet(roachpb.Key("asdf"))
	// Repeat the test a few times: due to the randomization between the
	// two replicas, each attempt only had a 50% chance of triggering
	// the panic.
	for i := 0; i < 5; i++ {
		_, pErr := client.SendWrappedWith(sender, context.Background(), hdr, req)
		// Each attempt fails with "store 2 not found" because that is the
		// non-retryable error.
		if !testutils.IsPError(pErr, "store 2 not found") {
			t.Fatal(pErr)
		}
	}
}