func TestGossipPeerings(t *testing.T) {
	c := StartCluster(t)
	defer c.AssertAndStop(t)

	num := c.NumNodes()

	checkGossip(t, c, 20*time.Second, hasPeers(num))

	// Restart the first node.
	log.Infof("restarting node 0")
	if err := c.Restart(0); err != nil {
		t.Fatal(err)
	}
	checkGossip(t, c, 20*time.Second, hasPeers(num))

	// Restart another node (if there is one).
	rand.Seed(randutil.NewPseudoSeed())
	var pickedNode int
	if num > 1 {
		pickedNode = rand.Intn(num-1) + 1
	}
	log.Infof("restarting node %d", pickedNode)
	if err := c.Restart(pickedNode); err != nil {
		t.Fatal(err)
	}
	checkGossip(t, c, 20*time.Second, hasPeers(num))
}
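// A hedged sketch of what a hasPeers-style checker passed to checkGossip
// could look like: a closure over the expected peer count that the gossip
// poller calls until it returns nil. The infos map shape and the "node:"
// key prefix are assumptions for illustration, not taken from the real
// test harness. Requires the fmt and strings imports.
func hasPeersSketch(expected int) func(infos map[string]interface{}) error {
	return func(infos map[string]interface{}) error {
		count := 0
		for key := range infos {
			if strings.HasPrefix(key, "node:") {
				count++
			}
		}
		if count != expected {
			return fmt.Errorf("expected %d peers, found %d", expected, count)
		}
		return nil
	}
}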
func main() {
	// Seed the math/rand RNG from crypto/rand.
	rand.Seed(randutil.NewPseudoSeed())

	if len(os.Args) == 1 {
		os.Args = append(os.Args, "help")
	}
	if err := cli.Run(os.Args[1:]); err != nil {
		fmt.Fprintf(os.Stderr, "Failed running command %q: %v\n", os.Args[1:], err)
		os.Exit(1)
	}
}
func main() {
	// Instruct Go to use all CPU cores.
	// TODO(spencer): this may be excessive and result in worse
	// performance. We should keep an eye on this as we move to
	// production workloads.
	numCPU := runtime.NumCPU()
	runtime.GOMAXPROCS(numCPU)

	rand.Seed(randutil.NewPseudoSeed())

	if log.V(1) {
		log.Infof("running using %d processor cores", numCPU)
	}
	if len(os.Args) == 1 {
		os.Args = append(os.Args, "help")
	}
	if err := cli.Run(os.Args[1:]); err != nil {
		fmt.Fprintf(os.Stderr, "Failed running command %q: %v\n", os.Args[1:], err)
		os.Exit(1)
	}
}
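// A minimal sketch of how a NewPseudoSeed-style helper can seed math/rand
// from crypto/rand, as the comment in the first main function above
// describes; the real randutil implementation may differ. Assumes the
// imports crypto/rand (aliased here as cryptorand), encoding/binary, and
// fmt.
func newPseudoSeed() int64 {
	var buf [8]byte
	if _, err := cryptorand.Read(buf[:]); err != nil {
		panic(fmt.Sprintf("could not read from crypto/rand: %v", err))
	}
	return int64(binary.LittleEndian.Uint64(buf[:]))
}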
func TestGossipPeerings(t *testing.T) {
	l := localcluster.Create(*numNodes, stopper)
	l.Start()
	defer l.AssertAndStop(t)

	checkGossip(t, l, 20*time.Second, hasPeers(len(l.Nodes)))

	// Restart the first node.
	log.Infof("restarting node 0")
	if err := l.Nodes[0].Restart(5); err != nil {
		t.Fatal(err)
	}
	checkGossip(t, l, 20*time.Second, hasPeers(len(l.Nodes)))

	// Restart another node.
	rand.Seed(randutil.NewPseudoSeed())
	pickedNode := rand.Intn(len(l.Nodes)-1) + 1
	log.Infof("restarting node %d", pickedNode)
	if err := l.Nodes[pickedNode].Restart(5); err != nil {
		t.Fatal(err)
	}
	checkGossip(t, l, 20*time.Second, hasPeers(len(l.Nodes)))
}
func TestFailedReplicaChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	defer func() { storage.TestingCommandFilter = nil }()

	seed := randutil.NewPseudoSeed()
	rand.Seed(seed)
	log.Infof("using seed %d", seed)

	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	var runFilter atomic.Value
	runFilter.Store(true)
	storage.TestingCommandFilter = func(args roachpb.Request, _ roachpb.Header) error {
		if runFilter.Load().(bool) {
			if et, ok := args.(*roachpb.EndTransactionRequest); ok && et.Commit {
				return util.Errorf("boom")
			}
			return nil
		}
		return nil
	}

	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	err = rng.ChangeReplicas(roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{
		NodeID:  mtc.stores[1].Ident.NodeID,
		StoreID: mtc.stores[1].Ident.StoreID,
	}, rng.Desc())
	if err == nil || !strings.Contains(err.Error(), "boom") {
		t.Fatalf("did not get expected error: %s", err)
	}

	// After the aborted transaction, r.Desc was not updated.
	// TODO(bdarnell): expose and inspect raft's internal state.
	if len(rng.Desc().Replicas) != 1 {
		t.Fatalf("expected 1 replica, found %d", len(rng.Desc().Replicas))
	}

	// The pending config change flag was cleared, so a subsequent attempt
	// can succeed.
	runFilter.Store(false)

	// The first failed replica change has laid down intents. Make sure those
	// are pushable by making the transaction abandoned.
	mtc.manualClock.Increment(10 * storage.DefaultHeartbeatInterval.Nanoseconds())

	err = rng.ChangeReplicas(roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{
		NodeID:  mtc.stores[1].Ident.NodeID,
		StoreID: mtc.stores[1].Ident.StoreID,
	}, rng.Desc())
	if err != nil {
		t.Fatal(err)
	}

	// Wait for the range to sync to both replicas (mainly so leaktest doesn't
	// complain about goroutines involved in the process).
	if err := util.IsTrueWithin(func() bool {
		for _, store := range mtc.stores {
			rang, err := store.GetReplica(1)
			if err != nil {
				return false
			}
			if len(rang.Desc().Replicas) == 1 {
				return false
			}
		}
		return true
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
}
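// A hedged sketch of the polling pattern behind util.IsTrueWithin as used
// at the end of TestFailedReplicaChange: retry a condition until it
// reports true or the deadline passes. The 10ms retry interval is an
// assumption for illustration, not taken from the util package. Assumes
// the fmt and time imports.
func isTrueWithinSketch(fn func() bool, within time.Duration) error {
	deadline := time.Now().Add(within)
	for time.Now().Before(deadline) {
		if fn() {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fmt.Errorf("condition not true within %s", within)
}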