// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t, "")
	c = f
	if err := f.Resize(*flagNodes); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		if destroyErr := f.Destroy(t); destroyErr != nil {
			t.Fatalf("could not destroy cluster after error %v: %v", err, destroyErr)
		}
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if *flagRemote {
		f := farmer(t, "")
		c = f
		if err := f.Resize(*flagNodes); err != nil {
			t.Fatal(err)
		}
		if err := f.WaitReady(5 * time.Minute); err != nil {
			if destroyErr := f.Destroy(t); destroyErr != nil {
				t.Fatalf("could not destroy cluster after error %s: %s", err, destroyErr)
			}
			t.Fatalf("cluster not ready in time: %s", err)
		}
	} else {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
	}
	wantedReplicas := 3
	if numNodes := c.NumNodes(); numNodes < wantedReplicas {
		wantedReplicas = numNodes
	}
	// Looks silly, but we actually start zero-node clusters in the
	// reference tests.
	if wantedReplicas > 0 {
		ctx := context.TODO()
		log.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)
		util.SucceedsSoon(t, func() error {
			select {
			case <-stopper:
				t.Fatal("interrupted")
			case <-time.After(time.Second):
			}
			// Reconnect on every iteration; gRPC will eagerly tank the connection
			// on transport errors. Always talk to node 0 because it's guaranteed
			// to exist.
			client, dbStopper := c.NewClient(t, 0)
			defer dbStopper.Stop()
			ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			var desc roachpb.RangeDescriptor
			if err := client.GetProto(ctxWithTimeout, keys.RangeDescriptorKey(roachpb.RKeyMin), &desc); err != nil {
				return err
			}
			foundReplicas := len(desc.Replicas)
			if log.V(1) {
				log.Infof(ctxWithTimeout, "found %d replicas", foundReplicas)
			}
			if foundReplicas < wantedReplicas {
				return errors.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
			}
			return nil
		})
	}
	completed = true
	return c
}
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(ctx context.Context, t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(ctx, t)
		}
	}()
	if *flagRemote {
		f := MakeFarmer(t, "", stopper)
		c = f
		if err := f.Resize(*flagNodes); err != nil {
			t.Fatal(err)
		}
		if err := f.WaitReady(5 * time.Minute); err != nil {
			if destroyErr := f.Destroy(t); destroyErr != nil {
				t.Fatalf("could not destroy cluster after error %s: %s", err, destroyErr)
			}
			t.Fatalf("cluster not ready in time: %s", err)
		}
	} else {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(ctx, cfg, logDir, *flagPrivileged, stopper)
		l.Start(ctx)
		c = l
	}
	wantedReplicas := 3
	if numNodes := c.NumNodes(); numNodes < wantedReplicas {
		wantedReplicas = numNodes
	}
	// Looks silly, but we actually start zero-node clusters in the
	// reference tests.
	if wantedReplicas > 0 {
		log.Infof(ctx, "waiting for first range to have %d replicas", wantedReplicas)
		testutils.SucceedsSoon(t, func() error {
			select {
			case <-stopper.ShouldStop():
				t.Fatal("interrupted")
			case <-time.After(time.Second):
			}
			// Reconnect on every iteration; gRPC will eagerly tank the connection
			// on transport errors. Always talk to node 0 because it's guaranteed
			// to exist.
			client, err := c.NewClient(ctx, 0)
			if err != nil {
				t.Fatal(err)
			}
			var desc roachpb.RangeDescriptor
			if err := client.GetProto(ctx, keys.RangeDescriptorKey(roachpb.RKeyMin), &desc); err != nil {
				return err
			}
			foundReplicas := len(desc.Replicas)
			if log.V(1) {
				log.Infof(ctx, "found %d replicas", foundReplicas)
			}
			if foundReplicas < wantedReplicas {
				return errors.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
			}
			return nil
		})
	}
	// Ensure that all nodes are serving SQL by making sure a simple
	// read-only query succeeds.
	for i := 0; i < c.NumNodes(); i++ {
		testutils.SucceedsSoon(t, func() error {
			db, err := gosql.Open("postgres", c.PGUrl(ctx, i))
			if err != nil {
				return err
			}
			if _, err := db.Exec("SHOW DATABASES;"); err != nil {
				return err
			}
			return nil
		})
	}
	completed = true
	return c
}
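For orientation, here is a minimal usage sketch against the latest signature above. It is not from the source: the test name, the empty cluster.TestConfig, and the SHOW DATABASES probe are illustrative only, and it assumes the surrounding acceptance-test package.

// TestStartClusterSketch is a hypothetical example showing how a test might
// drive StartCluster and take ownership of the shutdown once the helper
// returns successfully.
func TestStartClusterSketch(t *testing.T) {
	ctx := context.Background()
	c := StartCluster(ctx, t, cluster.TestConfig{})
	// StartCluster's deferred AssertAndStop only fires if setup fails
	// part-way through; on success the caller must stop the cluster.
	defer c.AssertAndStop(ctx, t)

	// Exercise the cluster through the SQL endpoint of node 0.
	db, err := gosql.Open("postgres", c.PGUrl(ctx, 0))
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	if _, err := db.Exec("SHOW DATABASES"); err != nil {
		t.Fatal(err)
	}
}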