func benchmarkMultinodeCockroach(b *testing.B, f func(b *testing.B, db *gosql.DB)) {
	defer tracing.Disable()()
	tc := testcluster.StartTestCluster(b, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationAuto,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "bench",
			},
		})
	if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil {
		b.Fatal(err)
	}
	if err := tc.WaitForFullReplication(); err != nil {
		b.Fatal(err)
	}
	defer tc.Stopper().Stop()

	f(b, tc.Conns[0])
}
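// A minimal usage sketch (hypothetical, not part of the original file): a
// benchmark that drives the multi-node helper above with a trivial statement.
// The name BenchmarkSelectOneMultinode and the SELECT 1 workload are
// illustrative assumptions; any per-iteration work against db would do. Note
// the b.ResetTimer() call, which excludes cluster startup from the timing.
func BenchmarkSelectOneMultinode(b *testing.B) {
	benchmarkMultinodeCockroach(b, func(b *testing.B, db *gosql.DB) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			if _, err := db.Exec(`SELECT 1`); err != nil {
				b.Fatal(err)
			}
		}
	})
}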
func TestGossipFirstRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testcluster.StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
		})
	defer tc.Stopper().Stop()

	errors := make(chan error)
	descs := make(chan *roachpb.RangeDescriptor)
	tc.Servers[0].Gossip().RegisterCallback(gossip.KeyFirstRangeDescriptor,
		func(_ string, content roachpb.Value) {
			var desc roachpb.RangeDescriptor
			if err := content.GetProto(&desc); err != nil {
				errors <- err
			} else {
				descs <- &desc
			}
		})

	// Wait for the specified descriptor to be gossiped for the first range. We
	// loop because the timing of replica addition and lease transfer can cause
	// extra gossiping of the first range.
	waitForGossip := func(desc *roachpb.RangeDescriptor) {
		for {
			select {
			case err := <-errors:
				t.Fatal(err)
			case gossiped := <-descs:
				if reflect.DeepEqual(desc, gossiped) {
					return
				}
				log.Infof(context.TODO(), "expected\n%+v\nbut found\n%+v", desc, gossiped)
			}
		}
	}

	// Expect an initial callback of the first range descriptor.
	select {
	case err := <-errors:
		t.Fatal(err)
	case <-descs:
	}

	// Add two replicas. The first range descriptor should be gossiped after
	// each addition.
	var desc *roachpb.RangeDescriptor
	firstRangeKey := keys.MinKey
	for i := 1; i <= 2; i++ {
		var err error
		if desc, err = tc.AddReplicas(firstRangeKey, tc.Target(i)); err != nil {
			t.Fatal(err)
		}
		waitForGossip(desc)
	}

	// Transfer the lease to a new node. This should cause the first range to be
	// gossiped again.
	if err := tc.TransferRangeLease(desc, tc.Target(1)); err != nil {
		t.Fatal(err)
	}
	waitForGossip(desc)

	// Remove a non-lease holder replica.
	desc, err := tc.RemoveReplicas(firstRangeKey, tc.Target(0))
	if err != nil {
		t.Fatal(err)
	}
	waitForGossip(desc)

	// TODO(peter): Re-enable or remove when we've resolved the discussion
	// about removing the lease-holder replica. See #7872.

	// // Remove the lease holder replica.
	// leaseHolder, err := tc.FindRangeLeaseHolder(desc, nil)
	// desc, err = tc.RemoveReplicas(firstRangeKey, leaseHolder)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	// select {
	// case err := <-errors:
	// 	t.Fatal(err)
	// case gossiped := <-descs:
	// 	if !reflect.DeepEqual(desc, gossiped) {
	// 		t.Fatalf("expected\n%+v\nbut found\n%+v", desc, gossiped)
	// 	}
	// }
}
func TestAdminAPITableStats(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const nodeCount = 3
	tc := testcluster.StartTestCluster(t, nodeCount, base.TestClusterArgs{
		ReplicationMode: base.ReplicationAuto,
		ServerArgs: base.TestServerArgs{
			ScanInterval:    time.Millisecond,
			ScanMaxIdleTime: time.Millisecond,
		},
	})
	defer tc.Stopper().Stop()
	if err := tc.WaitForFullReplication(); err != nil {
		t.Fatal(err)
	}
	server0 := tc.Server(0)

	// Create clients (SQL, HTTP) connected to server 0.
	db := tc.ServerConn(0)

	client, err := server0.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	client.Timeout = base.NetworkTimeout * 3

	// Make a single table and insert some data. The database and table have
	// names which require escaping, in order to verify that database and
	// table names are being handled correctly.
	if _, err := db.Exec(`CREATE DATABASE "test test"`); err != nil {
		t.Fatal(err)
	}
	if _, err := db.Exec(`
		CREATE TABLE "test test"."foo foo" (
			id INT PRIMARY KEY,
			val STRING
		)`,
	); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 10; i++ {
		if _, err := db.Exec(`
			INSERT INTO "test test"."foo foo" VALUES(
				$1, $2
			)`, i, "test",
		); err != nil {
			t.Fatal(err)
		}
	}

	url := server0.AdminURL() + "/_admin/v1/databases/test test/tables/foo foo/stats"
	var tsResponse serverpb.TableStatsResponse

	// The new SQL table may not yet have split into its own range. Wait for
	// this to occur, and for full replication.
	util.SucceedsSoon(t, func() error {
		if err := util.GetJSON(client, url, &tsResponse); err != nil {
			return err
		}
		if tsResponse.RangeCount != 1 {
			return errors.Errorf("table range not yet separated")
		}
		if tsResponse.NodeCount != nodeCount {
			return errors.Errorf("table range not yet replicated to %d nodes", nodeCount)
		}
		if a, e := tsResponse.ReplicaCount, int64(nodeCount); a != e {
			return errors.Errorf("expected %d replicas, found %d", e, a)
		}
		return nil
	})

	// These two conditions *must* be true, given that the above
	// SucceedsSoon has succeeded.
	if a, e := tsResponse.Stats.KeyCount, int64(20); a < e {
		t.Fatalf("expected at least %d total keys, found %d", e, a)
	}
	if len(tsResponse.MissingNodes) > 0 {
		t.Fatalf("expected no missing nodes, found %v", tsResponse.MissingNodes)
	}

	// Kill a node, ensure it shows up in MissingNodes and that ReplicaCount is
	// lower.
	tc.StopServer(1)
	if err := util.GetJSON(client, url, &tsResponse); err != nil {
		t.Fatal(err)
	}
	if a, e := tsResponse.NodeCount, int64(nodeCount); a != e {
		t.Errorf("expected %d nodes, found %d", e, a)
	}
	if a, e := tsResponse.RangeCount, int64(1); a != e {
		t.Errorf("expected %d ranges, found %d", e, a)
	}
	if a, e := tsResponse.ReplicaCount, int64((nodeCount/2)+1); a != e {
		t.Errorf("expected %d replicas, found %d", e, a)
	}
	if a, e := tsResponse.Stats.KeyCount, int64(10); a < e {
		t.Errorf("expected at least %d total keys, found %d", e, a)
	}
	if len(tsResponse.MissingNodes) != 1 {
		t.Errorf("expected one missing node, found %v", tsResponse.MissingNodes)
	}

	// Call TableStats with a very low timeout. This tests that fan-out queries
	// do not leak goroutines if the calling context is abandoned.
	// Interestingly, the call can actually sometimes succeed, despite the small
	// timeout; however, in aggregate (or in stress tests) this will suffice for
	// detecting leaks.
	client.Timeout = 1 * time.Nanosecond
	_ = util.GetJSON(client, url, &tsResponse)
}