// StartTestCluster starts up a TestCluster made up of `nodes` in-memory testing
// servers.
// The cluster should be stopped using cluster.Stopper().Stop().
func StartTestCluster(t testing.TB, nodes int, args ClusterArgs) *TestCluster {
	// Validate the arguments: join address and per-server stoppers are managed
	// by the cluster itself and must not be supplied by the caller.
	if nodes < 1 {
		t.Fatal("invalid cluster size: ", nodes)
	}
	if args.ServerArgs.JoinAddr != "" {
		t.Fatal("can't specify a join addr when starting a cluster")
	}
	if args.ServerArgs.Stopper != nil {
		t.Fatal("can't set individual server stoppers when starting a cluster")
	}
	storeKnobs := args.ServerArgs.Knobs.Store
	if storeKnobs != nil &&
		(storeKnobs.(*storage.StoreTestingKnobs).DisableSplitQueue ||
			storeKnobs.(*storage.StoreTestingKnobs).DisableReplicateQueue) {
		t.Fatal("can't disable an individual server's queues when starting a cluster; " +
			"the cluster controls replication")
	}

	if args.Stopper == nil {
		args.Stopper = stop.NewStopper()
		// NOTE(review): ServerArgs.Stopper is only wired up when the cluster
		// creates its own stopper; when the caller supplies args.Stopper it is
		// never propagated to ServerArgs — confirm this asymmetry is intended.
		args.ServerArgs.Stopper = args.Stopper
	}

	switch args.ReplicationMode {
	case ReplicationFull:
		// Force all ranges to be replicated everywhere.
		cfg := config.DefaultZoneConfig()
		cfg.ReplicaAttrs = make([]roachpb.Attributes, nodes)
		fn := config.TestingSetDefaultZoneConfig(cfg)
		// Restore the previous default zone config when the cluster stops.
		args.Stopper.AddCloser(stop.CloserFn(fn))
	case ReplicationManual:
		// Disable the automatic queues so the test drives replication itself.
		if args.ServerArgs.Knobs.Store == nil {
			args.ServerArgs.Knobs.Store = &storage.StoreTestingKnobs{}
		}
		storeKnobs := args.ServerArgs.Knobs.Store.(*storage.StoreTestingKnobs)
		storeKnobs.DisableSplitQueue = true
		storeKnobs.DisableReplicateQueue = true
	default:
		t.Fatal("unexpected replication mode")
	}

	tc := &TestCluster{}
	args.ServerArgs.PartOfCluster = true
	// Start the first server, then have every subsequent server join it.
	first, conn, _ := serverutils.StartServer(t, args.ServerArgs)
	tc.Servers = append(tc.Servers, first.(*server.TestServer))
	tc.Conns = append(tc.Conns, conn)
	args.ServerArgs.JoinAddr = first.ServingAddr()
	for i := 1; i < nodes; i++ {
		s, conn, _ := serverutils.StartServer(t, args.ServerArgs)
		tc.Servers = append(tc.Servers, s.(*server.TestServer))
		tc.Conns = append(tc.Conns, conn)
	}

	// Block until every store has bootstrapped before handing the cluster out.
	tc.waitForStores(t)
	return tc
}
// Tests a batch of bounded DelRange() requests. func TestMultiRangeBoundedBatchDelRange(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f", "g", "h") // These are the expected results if there is no bound. expResults := [][]string{ {"a1", "a2", "a3", "b1", "b2"}, {"c1", "c2", "d1"}, {"g1", "g2"}, } maxExpCount := 0 for _, res := range expResults { maxExpCount += len(res) } for bound := 1; bound <= 20; bound++ { // Initialize all keys. for _, key := range []string{"a1", "a2", "a3", "b1", "b2", "c1", "c2", "d1", "f1", "f2", "f3", "g1", "g2", "h1"} { if err := db.Put(key, "value"); err != nil { t.Fatal(err) } } b := &client.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) spans := [][]string{{"a", "c"}, {"c", "f"}, {"g", "h"}} for _, span := range spans { b.DelRange(span[0], span[1], true) } if err := db.Run(b); err != nil { t.Fatal(err) } if len(expResults) != len(b.Results) { t.Fatalf("bound: %d, only got %d results, wanted %d", bound, len(expResults), len(b.Results)) } expCount := maxExpCount if bound < maxExpCount { expCount = bound } rem := expCount for i, res := range b.Results { // Verify that the KeyValue slice contains the given keys. rem -= len(res.Keys) for j, key := range res.Keys { if expKey := expResults[i][j]; string(key) != expKey { t.Errorf("%s: expected scan key %d, %d to be %q; got %q", errInfo(), i, j, expKey, key) } } } if rem != 0 { t.Errorf("expected %d keys, got %d", bound, expCount-rem) } checkResumeSpanDelRangeResults(t, spans, b.Results, expResults, expCount) } }
// setup starts a fresh test server for a logic-test run, connects as the root
// user, creates the "test" database, and resets all per-run bookkeeping.
func (t *logicTest) setup() {
	// TODO(pmattis): Add a flag to make it easy to run the tests against a local
	// MySQL or Postgres instance.
	// TODO(andrei): if createTestServerParams() is used here, the command filter
	// it installs detects a transaction that doesn't have
	// modifiedSystemConfigSpan set even though it should, for
	// "testdata/rename_table". Figure out what's up with that.
	params := base.TestServerArgs{
		MaxOffset: logicMaxOffset,
		Knobs: base.TestingKnobs{
			SQLExecutor: &sql.ExecutorTestingKnobs{
				WaitForGossipUpdate:   true,
				CheckStmtStringChange: true,
			},
		},
	}
	t.srv, _, _ = serverutils.StartServer(t.T, params)

	// db may change over the lifetime of this function, with intermediate
	// values cached in t.clients and finally closed in t.close().
	t.cleanupRootUser = t.setUser(security.RootUser)

	if _, err := t.db.Exec(`
CREATE DATABASE test;
SET DATABASE = test;
`); err != nil {
		t.Fatal(err)
	}

	// Reset per-run bookkeeping.
	t.labelMap = make(map[string]string)
	t.progress = 0
	t.failures = 0
	t.unsupported = 0
}
// TestNonRetryableErrorFromCommit verifies that a non-retryable error from the
// execution of EndTransactionRequests is propagated to the client.
func TestNonRetryableErrorFromCommit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, cmdFilters := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	hitError := false
	// Inject a synthetic error into any EndTransaction whose key touches the
	// descriptor ID generator — presumably the commit of the CREATE DATABASE
	// statement below.
	cleanupFilter := cmdFilters.AppendFilter(
		func(args storagebase.FilterArgs) *roachpb.Error {
			if req, ok := args.Req.(*roachpb.EndTransactionRequest); ok {
				if bytes.Contains(req.Key, []byte(keys.DescIDGenerator)) {
					hitError = true
					return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn)
				}
			}
			return nil
		}, false)
	defer cleanupFilter()

	// The injected error must reach the SQL client verbatim (not be retried).
	if _, err := sqlDB.Exec("CREATE DATABASE t;"); !testutils.IsError(err, "pq: testError") {
		t.Errorf("unexpected error %v", err)
	}
	if !hitError {
		t.Errorf("expected to hit error, but it didn't happen")
	}
}
// TestTxnObeysLeaseExpiration tests that a transaction is aborted when it tries
// to use a table descriptor with an expired lease.
func TestTxnObeysLeaseExpiration(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("TODO(vivek): #7031")
	// Set the lease duration such that it expires quickly.
	savedLeaseDuration, savedMinLeaseDuration := csql.LeaseDuration, csql.MinLeaseDuration
	defer func() {
		// Restore the package-level durations so other tests are unaffected.
		csql.LeaseDuration, csql.MinLeaseDuration = savedLeaseDuration, savedMinLeaseDuration
	}()
	csql.MinLeaseDuration = 100 * time.Millisecond
	csql.LeaseDuration = 2 * csql.MinLeaseDuration

	params, _ := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.kv VALUES ('a', 'b');
`); err != nil {
		t.Fatal(err)
	}

	clock := s.Clock()

	// Increase the MaxOffset so that the clock can be updated to expire the
	// table leases.
	clock.SetMaxOffset(10 * csql.LeaseDuration)

	// Run a number of sql operations and expire the lease they acquire.
	runCommandAndExpireLease(t, clock, sqlDB, `INSERT INTO t.kv VALUES ('c', 'd')`)
	runCommandAndExpireLease(t, clock, sqlDB, `UPDATE t.kv SET v = 'd' WHERE k = 'a'`)
	runCommandAndExpireLease(t, clock, sqlDB, `DELETE FROM t.kv WHERE k = 'a'`)
	runCommandAndExpireLease(t, clock, sqlDB, `TRUNCATE TABLE t.kv`)
}
// TestMultiRangeBoundedBatchScanSortedOverlapping runs two overlapping // ordered (by start key) scan requests, and shows how the batch response can // contain two partial responses. func TestMultiRangeBoundedBatchScanSortedOverlapping(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f") for _, key := range []string{"a1", "a2", "a3", "b1", "b2", "c1", "c2", "d1", "f1", "f2", "f3"} { if err := db.Put(key, "value"); err != nil { t.Fatal(err) } } bound := 6 b := &client.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) // Two ordered overlapping requests. spans := [][]string{{"a", "d"}, {"b", "g"}} for _, span := range spans { b.Scan(span[0], span[1]) } if err := db.Run(b); err != nil { t.Fatal(err) } // See incomplete results for the two requests. expResults := [][]string{ {"a1", "a2", "a3", "b1", "b2"}, {"b1"}, } checkScanResults(t, spans, b.Results, expResults, bound) }
// TestTableNameNotCaseSensitive tests that table names are not treated as case
// sensitive by the name cache.
func TestTableNameNotCaseSensitive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := db.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Check that we can get the table by a different name.
	// The mixed-case "tEsT" must hit the cache entry created for "test".
	lease := leaseManager.tableNames.get(tableDesc.ParentID, "tEsT", s.Clock())
	if lease == nil {
		t.Fatalf("no name cache entry")
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}
}
// TestNoSequenceCachePutOnRangeMismatchError verifies that the
// sequence cache is not updated with RangeKeyMismatchError. This is a
// higher-level version of TestSequenceCacheShouldCache.
func TestNoSequenceCachePutOnRangeMismatchError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	db := setupMultipleRanges(t, s, "b", "c")

	// The requests in the transaction below will be chunked and
	// sent to replicas in the following way:
	// 1) A batch request containing a BeginTransaction and a
	//    put on "a" are sent to a replica owning range ["a","b").
	// 2) A next batch request containing a put on "b" and a put
	//    on "c" are sent to a replica owning range ["b","c").
	//    (The range cache has a stale range descriptor.)
	// 3) The put request on "c" causes a RangeKeyMismatchError.
	// 4) The dist sender re-sends a request to the same replica.
	//    This time the request contains only the put on "b" to the
	//    same replica.
	// 5) The command succeeds since the sequence cache has not yet been updated.
	epoch := 0
	if err := db.Txn(func(txn *client.Txn) error {
		// epoch counts attempts of the txn closure; the txn must succeed on
		// the first attempt for the test to pass.
		epoch++
		b := txn.NewBatch()
		b.Put("a", "val")
		b.Put("b", "val")
		b.Put("c", "val")
		return txn.CommitInBatch(b)
	}); err != nil {
		t.Errorf("unexpected error on transactional Puts: %s", err)
	}

	if epoch != 1 {
		t.Errorf("unexpected epoch; the txn must not be retried, but got %d retries", epoch)
	}
}
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())

	// Create an intent on the meta1 record by writing directly to the
	// engine, bypassing the KV layer entirely. The txn is never resolved,
	// so the intent remains pending for the duration of the test.
	key := testutils.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	now := s.Clock().Now()
	txn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, enginepb.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(
		context.Background(), s.(*server.TestServer).Ctx.Engines[0],
		nil, key, now, txn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}

	// Now, with an intent pending, attempt (asynchronously) to read
	// from an arbitrary key. This will cause the distributed sender to
	// do a range lookup, which will encounter the intent. We're
	// verifying here that the range lookup doesn't fail with a write
	// intent error. If it did, it would go into a deadloop attempting
	// to push the transaction, which in turn requires another range
	// lookup, etc, ad nauseam.
	if _, err := db.Get("a"); err != nil {
		t.Fatal(err)
	}
}
// TestReverseScanWithSplitAndMerge verifies that ReverseScan gets the right results // across multiple ranges while range splits and merges happen. func TestReverseScanWithSplitAndMerge(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := initReverseScanTestEnv(s, t) // Case 1: An encounter with a range split. // Split the range ["b", "e") at "c". if err := db.AdminSplit("c"); err != nil { t.Fatal(err) } // The ReverseScan will run into a stale descriptor. if rows, err := db.ReverseScan("a", "d", 0); err != nil { t.Fatalf("unexpected error on ReverseScan: %s", err) } else if l := len(rows); l != 3 { t.Errorf("expected 3 rows; got %d", l) } // Case 2: encounter with range merge . // Merge the range ["e", "g") and ["g", "\xff\xff") . if err := db.AdminMerge("e"); err != nil { t.Fatal(err) } if rows, err := db.ReverseScan("d", "g", 0); err != nil { t.Fatalf("unexpected error on ReverseScan: %s", err) } else if l := len(rows); l != 3 { t.Errorf("expected 3 rows; got %d", l) } }
func TestBadRequest(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := createTestClient(t, s.Stopper(), s.ServingAddr()) // Write key "a". if err := db.Put("a", "value"); err != nil { t.Fatal(err) } if _, err := db.Scan("a", "a", 0); !testutils.IsError(err, "truncation resulted in empty batch") { t.Fatalf("unexpected error on scan with startkey == endkey: %v", err) } if _, err := db.ReverseScan("a", "a", 0); !testutils.IsError(err, "truncation resulted in empty batch") { t.Fatalf("unexpected error on reverse scan with startkey == endkey: %v", err) } if err := db.DelRange("x", "a"); !testutils.IsError(err, "truncation resulted in empty batch") { t.Fatalf("unexpected error on deletion on [x, a): %v", err) } if err := db.DelRange("", "z"); !testutils.IsError(err, "must be greater than LocalMax") { t.Fatalf("unexpected error on deletion on [KeyMin, z): %v", err) } }
// TestMultiRangeBoundedWithCompletedUnboundedScan runs a batch request with a
// bounded and unbounded scan, such that the bounded scan is saturated after
// the unbounded scan has already completed. Additionally, the bound for the
// bounded scan is picked to end at the boundary of a range. It exercises an
// edge case in DistSender in which a saturated scan being masked out means
// that a multi-range request ends before the originally assumed key range is
// exhausted.
func TestMultiRangeBoundedWithCompletedUnboundedScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f")
	// Seed the keys the scans below will observe.
	for _, key := range []string{"a1", "a2", "a3", "b1", "b2", "c1", "c2", "d1", "f1", "f2", "f3"} {
		if err := db.Put(key, "value"); err != nil {
			t.Fatal(err)
		}
	}

	b := db.NewBatch()
	// An unbounded scan that is completed before the bounded scan below.
	b.Scan("a", "d", 0)
	// A bounded scan that ends at the boundary of range "d".
	b.Scan("c", "g", 3)
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}

	// These are the expected results.
	expResults := [][]string{
		{"a1", "a2", "a3", "b1", "b2", "c1", "c2"},
		{"c1", "c2", "d1"},
	}
	checkScanResults(t, b.Results, expResults)
}
// TestSingleRangeReverseScan verifies that ReverseScan gets the right results // on a single range. func TestSingleRangeReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := initReverseScanTestEnv(s, t) // Case 1: Request.EndKey is in the middle of the range. if rows, err := db.ReverseScan("b", "d", 0); err != nil { t.Fatalf("unexpected error on ReverseScan: %s", err) } else if l := len(rows); l != 2 { t.Errorf("expected 2 rows; got %d", l) } // Case 2: Request.EndKey is equal to the EndKey of the range. if rows, pErr := db.ReverseScan("e", "g", 0); pErr != nil { t.Fatalf("unexpected error on ReverseScan: %s", pErr) } else if l := len(rows); l != 2 { t.Errorf("expected 2 rows; got %d", l) } // Case 3: Test roachpb.TableDataMin. Expected to return "g" and "h". wanted := 2 if rows, pErr := db.ReverseScan("g", keys.TableDataMin, 0); pErr != nil { t.Fatalf("unexpected error on ReverseScan: %s", pErr) } else if l := len(rows); l != wanted { t.Errorf("expected %d rows; got %d", wanted, l) } // Case 4: Test keys.SystemMax // This span covers the system DB keys. Note sql.GetInitialSystemValues // returns one key before keys.SystemMax, but our scan is including one key // (\xffa) created for the test. if rows, pErr := db.ReverseScan(keys.SystemMax, "b", 0); pErr != nil { t.Fatalf("unexpected error on ReverseScan: %s", pErr) } else if l := len(rows); l != 1 { t.Errorf("expected 1 row; got %d", l) } }
// TestMultiRangeBatchBoundedScans runs a batch request with scans that are // all bounded. func TestMultiRangeBatchBoundedScans(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f") for _, key := range []string{"a", "aa", "aaa", "b", "bb", "cc", "d", "dd", "ff"} { if err := db.Put(key, "value"); err != nil { t.Fatal(err) } } b := db.NewBatch() b.Scan("aaa", "dd", 3) b.Scan("a", "z", 2) b.Scan("cc", "ff", 3) if err := db.Run(b); err != nil { t.Fatal(err) } checkScanResults(t, b.Results, [][]string{ {"aaa", "b", "bb"}, {"a", "aa"}, {"cc", "d", "dd"}, }) }
// TestDropTableInTxn checks that after a table is dropped inside an open
// transaction, attempting to ALTER it in the same transaction fails.
func TestDropTableInTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}

	if _, err := tx.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}

	// We might still be able to read/write in the table inside this transaction
	// until the schema changer runs, but we shouldn't be able to ALTER it.
	if _, err := tx.Exec(`ALTER TABLE t.kv ADD COLUMN w CHAR`); !testutils.IsError(err,
		`table "kv" has been deleted`) {
		t.Fatalf("different error than expected: %s", err)
	}

	// Can't commit after ALTER errored, so we ROLLBACK.
	if err := tx.Rollback(); err != nil {
		t.Fatal(err)
	}
}
// TestMetricsRecording verifies that Node statistics are periodically recorded
// as time series data.
func TestMetricsRecording(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Use a very short sample interval so a recording happens quickly.
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
		MetricsSampleInterval: 5 * time.Millisecond})
	defer s.Stopper().Stop()

	// checkTimeSeriesKey returns an error if no time series datum exists for
	// the given metric name at the given timestamp.
	checkTimeSeriesKey := func(now int64, keyName string) error {
		key := ts.MakeDataKey(keyName, "", ts.Resolution10s, now)
		data := roachpb.InternalTimeSeriesData{}
		return kvDB.GetProto(key, &data)
	}

	// Verify that metrics for the current timestamp are recorded. This should
	// be true very quickly.
	util.SucceedsSoon(t, func() error {
		now := s.Clock().PhysicalNow()
		if err := checkTimeSeriesKey(now, "cr.store.livebytes.1"); err != nil {
			return err
		}
		if err := checkTimeSeriesKey(now, "cr.node.sys.go.allocbytes.1"); err != nil {
			return err
		}
		return nil
	})
}
// TestAuthentication tests authentication for the KV endpoint. func TestAuthentication(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() var b1 client.Batch b1.Put("a", "b") // Create a node user client and call Run() on it which lets us build our own // request, specifying the user. db1 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser) if err := db1.Run(&b1); err != nil { t.Fatal(err) } var b2 client.Batch b2.Put("c", "d") // Try again, but this time with certs for a non-node user (even the root // user has no KV permissions). db2 := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.RootUser) if err := db2.Run(&b2); !testutils.IsError(err, "is not allowed") { t.Fatal(err) } }
// TestExplainTrace runs EXPLAIN (TRACE) on a simple INSERT and checks that the
// set of distinct operations in the trace output exactly matches the expected
// spans.
func TestExplainTrace(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`CREATE DATABASE test; CREATE TABLE test.foo (id INT PRIMARY KEY)`); err != nil {
		t.Fatal(err)
	}
	rows, err := sqlDB.Query(`EXPLAIN (TRACE) INSERT INTO test.foo VALUES (1)`)
	if err != nil {
		t.Fatal(err)
	}
	expParts := []string{"coordinator", "node.Batch"}
	var parts []string

	pretty := rowsToStrings(rows)
	// Collect the distinct operation names (column 3 of each row, skipping the
	// header row). `parts` is kept sorted so the binary search can detect
	// whether a name was already recorded.
	for _, row := range pretty[1:] {
		part := row[3] // Operation
		if ind := sort.SearchStrings(parts, part); ind == len(parts) || parts[ind] != part {
			parts = append(parts, part)
			sort.Strings(parts)
		}
	}
	sort.Strings(expParts)
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expParts, parts) {
		t.Fatalf("expected %v, got %v\n\nResults:\n%v", expParts, parts, prettyPrint(pretty))
	}
}
// TestMultiRangeScanWithMaxResults tests that commands which access multiple // ranges with MaxResults parameter are carried out properly. func TestMultiRangeScanWithMaxResults(t *testing.T) { defer leaktest.AfterTest(t)() testCases := []struct { splitKeys []roachpb.Key keys []roachpb.Key }{ {[]roachpb.Key{roachpb.Key("m")}, []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}}, {[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")}, []roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"), roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}}, } for i, tc := range testCases { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() ts := s.(*TestServer) retryOpts := base.DefaultRetryOptions() retryOpts.Closer = ts.stopper.ShouldDrain() ds := kv.NewDistSender(&kv.DistSenderContext{ Clock: s.Clock(), RPCContext: s.RPCContext(), RPCRetryOptions: &retryOpts, }, ts.Gossip()) tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable, tracing.NewTracer(), ts.stopper, kv.NewTxnMetrics(metric.NewRegistry())) for _, sk := range tc.splitKeys { if err := ts.node.ctx.DB.AdminSplit(sk); err != nil { t.Fatal(err) } } for _, k := range tc.keys { put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k)) if _, err := client.SendWrapped(tds, nil, put); err != nil { t.Fatal(err) } } // Try every possible ScanRequest startKey. for start := 0; start < len(tc.keys); start++ { // Try every possible maxResults, from 1 to beyond the size of key array. 
for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ { scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(), int64(maxResults)) reply, err := client.SendWrapped(tds, nil, scan) if err != nil { t.Fatal(err) } rows := reply.(*roachpb.ScanResponse).Rows if start+maxResults <= len(tc.keys) && len(rows) != maxResults { t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows)) } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 { t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows)) } } } } }
// TestRollbackToSavepointStatement tests that issuing a RESTART outside of a
// txn produces the proper error.
func TestRollbackToSavepointStatement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// ROLLBACK TO SAVEPOINT without a transaction
	_, err := sqlDB.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err, "the transaction is not in a retriable state") {
		t.Fatalf("expected to fail here. err: %v", err)
	}
	// ROLLBACK TO SAVEPOINT with a wrong name
	_, err = sqlDB.Exec("ROLLBACK TO SAVEPOINT foo")
	if !testutils.IsError(err, "SAVEPOINT not supported except for COCKROACH_RESTART") {
		t.Fatalf("expected to fail here. err: %v", err)
	}

	// ROLLBACK TO SAVEPOINT in a non-retriable transaction
	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := tx.Exec("SAVEPOINT cockroach_restart"); err != nil {
		t.Fatal(err)
	}
	// Poison the transaction with a statement that fails (a syntax error),
	// so it is no longer retriable.
	if _, err = tx.Exec("BOGUS SQL STATEMENT"); err == nil {
		t.Fatalf("expected to fail here. err: %v", err)
	}
	_, err = tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart")
	if !testutils.IsError(err,
		"SAVEPOINT COCKROACH_RESTART has not been used or a non-retriable error was encountered") {
		t.Fatalf("expected to fail here. err: %v", err)
	}
}
// newKVNative returns a kvInterface implementation backed by a raw KV client
// that talks to a freshly started test server over the network, for use in
// KV-vs-SQL benchmarks.
func newKVNative(b *testing.B) kvInterface {
	enableTracing := tracing.Disable()
	s, _, _ := serverutils.StartServer(b, base.TestServerArgs{})

	// TestServer.DB() returns the TxnCoordSender wrapped client. But that isn't
	// a fair comparison with SQL as we want these client requests to be sent
	// over the network.
	sender, err := client.NewSender(
		rpc.NewContext(&base.Context{
			// Connect with the node user's embedded test certificates.
			User:       security.NodeUser,
			SSLCA:      filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
			SSLCert:    filepath.Join(security.EmbeddedCertsDir, "node.crt"),
			SSLCertKey: filepath.Join(security.EmbeddedCertsDir, "node.key"),
		}, nil, s.Stopper()), s.ServingAddr())
	if err != nil {
		b.Fatal(err)
	}

	return &kvNative{
		db: client.NewDB(sender),
		// doneFn tears down the server and re-enables tracing.
		doneFn: func() {
			s.Stopper().Stop()
			enableTracing()
		},
	}
}
// TestCommitWaitState tests that random commands while in COMMIT_WAIT return a
// particular error.
func TestCommitWaitState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT);
`); err != nil {
		t.Fatal(err)
	}

	tx, err := sqlDB.Begin()
	if err != nil {
		t.Fatal(err)
	}
	// SAVEPOINT followed by RELEASE commits the transaction, leaving the
	// session in the COMMIT_WAIT state named in the test.
	if _, err := tx.Exec(
		"SAVEPOINT cockroach_restart; RELEASE cockroach_restart;"); err != nil {
		t.Fatal(err)
	}

	// Any further statement must be rejected as the txn is already committed.
	_, err = tx.Exec("INSERT INTO t.test (k, v) VALUES (0, 'sentinel');")
	if !testutils.IsError(err, "current transaction is committed") {
		t.Fatal(err)
	}
	// Rollback should respond with a COMMIT command tag.
	err = tx.Rollback()
	if !testutils.IsError(err, "unexpected command tag COMMIT") {
		t.Fatal(err)
	}
}
// TestClientGetAndPutProto verifies gets and puts of protobufs using the // client's convenience methods. func TestClientGetAndPutProto(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := createTestClient(t, s.Stopper(), s.ServingAddr()) zoneConfig := config.ZoneConfig{ ReplicaAttrs: []roachpb.Attributes{ {Attrs: []string{"dc1", "mem"}}, {Attrs: []string{"dc2", "mem"}}, }, RangeMinBytes: 1 << 10, // 1k RangeMaxBytes: 1 << 18, // 256k } key := roachpb.Key(testUser + "/zone-config") if err := db.Put(key, &zoneConfig); err != nil { t.Fatalf("unable to put proto: %s", err) } var readZoneConfig config.ZoneConfig if err := db.GetProto(key, &readZoneConfig); err != nil { t.Fatalf("unable to get proto: %s", err) } if !proto.Equal(&zoneConfig, &readZoneConfig) { t.Errorf("expected %+v, but found %+v", zoneConfig, readZoneConfig) } }
// TestClientEmptyValues verifies that empty values are preserved // for both empty []byte and integer=0. This used to fail when we // allowed the protobufs to be gob-encoded using the default go rpc // gob codec because gob treats pointer values and non-pointer values // as equivalent and elides zero-valued defaults on decode. func TestClientEmptyValues(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() db := createTestClient(t, s.Stopper(), s.ServingAddr()) if err := db.Put(testUser+"/a", []byte{}); err != nil { t.Error(err) } if gr, err := db.Get(testUser + "/a"); err != nil { t.Error(err) } else if bytes := gr.ValueBytes(); bytes == nil || len(bytes) != 0 { t.Errorf("expected non-nil empty byte slice; got %q", bytes) } if _, err := db.Inc(testUser+"/b", 0); err != nil { t.Error(err) } if gr, err := db.Get(testUser + "/b"); err != nil { t.Error(err) } else if gr.Value == nil { t.Errorf("expected non-nil integer") } else if gr.ValueInt() != 0 { t.Errorf("expected 0-valued integer, but got %d", gr.ValueInt()) } }
func TestPGWireDBName(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingAddr(), security.RootUser, "TestPGWireDBName") pgURL.Path = "foo" defer cleanupFn() { db, err := gosql.Open("postgres", pgURL.String()) if err != nil { t.Fatal(err) } defer db.Close() if _, err := db.Exec(`CREATE DATABASE foo`); err != nil { t.Fatal(err) } if _, err := db.Exec(`CREATE TABLE bar (i INT PRIMARY KEY)`); err != nil { t.Fatal(err) } } db, err := gosql.Open("postgres", pgURL.String()) if err != nil { t.Fatal(err) } defer db.Close() if _, err := db.Exec(`INSERT INTO bar VALUES ($1)`, 1); err != nil { t.Fatal(err) } }
// TestPGWireOverUnixSocket checks that the pgwire protocol is served over a
// unix domain socket when SocketFile is configured.
func TestPGWireOverUnixSocket(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// We need a temp directory in which we'll create the
	// unix socket ".s.PGSQL.<port>".
	// We hard-code "/tmp" as the directory as the osx default can cause
	// the socket filename length to exceed 104 characters, triggering an error.
	tempDir, err := ioutil.TempDir("/tmp", "cockroach-unix")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		// Best-effort cleanup; the error is deliberately ignored.
		_ = os.RemoveAll(tempDir)
	}()
	socketFile := filepath.Join(tempDir, ".s.PGSQL.123456")

	params, _ := createTestServerParams()
	params.Insecure = true
	params.SocketFile = socketFile
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// We can't pass socket paths as url.Host to libpq, use ?host=/... instead.
	options := url.Values{
		"host": []string{tempDir},
	}
	pgURL := url.URL{
		Scheme: "postgres",
		// The port must match the suffix of the socket file name.
		Host:     ":123456",
		RawQuery: options.Encode(),
	}
	t.Logf("PGURL: %s", pgURL.String())
	if err := trivialQuery(pgURL); err != nil {
		t.Fatal(err)
	}
}
// TestPlainHTTPServer verifies that we can serve plain http and talk to it.
// This is controlled by -cert=""
func TestPlainHTTPServer(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{
		// The default context uses embedded certs.
		Insecure: true,
	})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	httpClient, err := s.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	// A plain-http request to the health endpoint must succeed and the
	// response body must decode as a HealthResponse.
	httpURL := "http://" + ts.Ctx.HTTPAddr + healthPath
	if resp, err := httpClient.Get(httpURL); err != nil {
		t.Fatalf("error requesting health at %s: %s", httpURL, err)
	} else {
		defer resp.Body.Close()
		var data serverpb.HealthResponse
		if err := jsonpb.Unmarshal(resp.Body, &data); err != nil {
			t.Error(err)
		}
	}
	// The same endpoint over https must fail since the server is insecure.
	httpsURL := "https://" + ts.Ctx.HTTPAddr + healthPath
	if _, err := httpClient.Get(httpsURL); err == nil {
		t.Fatalf("unexpected success fetching %s", httpsURL)
	}
}
// TestClientRunTransaction verifies some simple transaction isolation
// semantics.
func TestClientRunTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	// Speed up txn retries so the aborting case completes quickly.
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.InitialBackoff = 1 * time.Millisecond
	db := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser, dbCtx)

	// Run the txn body twice: once committing, once deliberately aborting.
	for _, commit := range []bool{true, false} {
		value := []byte("value")
		key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit))

		// Use snapshot isolation so non-transactional read can always push.
		err := db.Txn(func(txn *client.Txn) error {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}

			// Put transactional value.
			if err := txn.Put(key, value); err != nil {
				return err
			}

			// Attempt to read outside of txn; the uncommitted write must not
			// be visible.
			if gr, err := db.Get(key); err != nil {
				return err
			} else if gr.Value != nil {
				return errors.Errorf("expected nil value; got %+v", gr.Value)
			}

			// Read within the transaction; the write must be visible.
			if gr, err := txn.Get(key); err != nil {
				return err
			} else if gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				return errors.Errorf("expected value %q; got %q", value, gr.ValueBytes())
			}
			if !commit {
				return errors.Errorf("purposefully failing transaction")
			}
			return nil
		})

		if commit != (err == nil) {
			t.Errorf("expected success? %t; got %s", commit, err)
		} else if !commit && !testutils.IsError(err, "purposefully failing transaction") {
			t.Errorf("unexpected failure with !commit: %s", err)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr, err := db.Get(key)
		if commit {
			if err != nil || gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				t.Errorf("expected success reading value: %+v, %s", gr.Value, err)
			}
		} else {
			if err != nil || gr.Value != nil {
				t.Errorf("expected success and nil value: %+v, %s", gr.Value, err)
			}
		}
	}
}
func TestAdminAPIDatabaseDoesNotExist(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop() if err := apiGet(s, "databases/I_DO_NOT_EXIST", nil); !testutils.IsError(err, "database.+does not exist") { t.Fatalf("unexpected error: %s", err) } }
// TestPGWireConnectionCloseReleasesLeases tests that abruptly closing a pgwire
// connection releases all leases held by that session.
func TestPGWireConnectionCloseReleasesLeases(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	url, cleanupConn := sqlutils.PGUrl(t, s.ServingAddr(), security.RootUser, "SetupServer")
	defer cleanupConn()
	// Use the low-level lib/pq driver directly so the connection can be closed
	// abruptly, bypassing any graceful teardown a database/sql pool would do.
	conn, err := pq.Open(url.String())
	if err != nil {
		t.Fatal(err)
	}
	ex := conn.(driver.Execer)
	if _, err := ex.Exec("CREATE DATABASE test", nil); err != nil {
		t.Fatal(err)
	}
	if _, err := ex.Exec("CREATE TABLE test.t (i INT PRIMARY KEY)", nil); err != nil {
		t.Fatal(err)
	}
	// Start a txn so leases are accumulated by queries.
	if _, err := ex.Exec("BEGIN", nil); err != nil {
		t.Fatal(err)
	}
	// Get a table lease.
	if _, err := ex.Exec("SELECT * FROM test.t", nil); err != nil {
		t.Fatal(err)
	}
	// Abruptly close the connection.
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}
	// Verify that there are no leases held.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	lm := s.LeaseManager().(*LeaseManager)
	// Looking for a table state validates that there used to be a lease on the
	// table.
	ts := lm.findTableState(tableDesc.ID, false /* create */)
	if ts == nil {
		t.Fatal("table state not found")
	}
	ts.mu.Lock()
	leases := ts.active.data
	ts.mu.Unlock()
	if len(leases) != 1 {
		t.Fatalf("expected one lease, found: %d", len(leases))
	}
	// Wait for the lease to be released.
	util.SucceedsSoon(t, func() error {
		ts.mu.Lock()
		refcount := ts.active.data[0].refcount
		ts.mu.Unlock()
		if refcount != 0 {
			return errors.Errorf(
				"expected lease to be unused, found refcount: %d", refcount)
		}
		return nil
	})
}