Example #1
func cutNetwork(t *testing.T, c cluster.Cluster, closer <-chan struct{}, partitions ...[]int) {
	defer func() {
		if errs := restoreNetwork(t, c); len(errs) > 0 {
			t.Fatalf("errors restoring the network: %+v", errs)
		}
	}()
	addrs, addrsToNode := mustGetHosts(t, c)
	ipPartitions := make([][]iptables.IP, 0, len(partitions))
	for _, partition := range partitions {
		ipPartition := make([]iptables.IP, 0, len(partition))
		for _, nodeIndex := range partition {
			ipPartition = append(ipPartition, addrs[nodeIndex])
		}
		ipPartitions = append(ipPartitions, ipPartition)
	}
	log.Warningf(context.TODO(), "partitioning: %v (%v)", partitions, ipPartitions)
	for host, cmds := range iptables.Rules(iptables.Bidirectional(ipPartitions...)) {
		for _, cmd := range cmds {
			if err := c.ExecRoot(addrsToNode[host], cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	<-closer
	log.Warningf(context.TODO(), "resolved all partitions")
}
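A hedged usage sketch for cutNetwork above (not from the original tests): the function blocks until closer is closed, so a driver typically closes that channel from a timer goroutine. The five-second window and the concrete partition layout are illustrative assumptions; t and c are the *testing.T and cluster.Cluster from the example.

// Hypothetical driver: sever node 0 from nodes 1 and 2 for roughly five
// seconds; the deferred restoreNetwork inside cutNetwork then clears the rules.
closer := make(chan struct{})
go func() {
	time.Sleep(5 * time.Second)
	close(closer)
}()
cutNetwork(t, c, closer, []int{0}, []int{1, 2})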
Example #2
func postFreeze(c cluster.Cluster, freeze bool, timeout time.Duration) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp serverpb.ClusterFreezeResponse
	log.Infof("requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof("%+v", &resp)
		}
	}
	err := util.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
Example #3
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		default:
		}
		var r struct {
			BuildInfo map[string]string
		}
		if err := getJSON(c.URL(0), "/_status/details/local", &r); err != nil {
			return err
		}
		for _, key := range []string{"goVersion", "tag", "time", "dependencies"} {
			if val, ok := r.BuildInfo[key]; !ok {
				t.Errorf("build info missing for \"%s\"", key)
			} else if val == "" {
				t.Errorf("build info not set for \"%s\"", key)
			}
		}
		return nil
	})
}
Example #4
func testStatusServerInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	// Get the ids for each node.
	idMap := make(map[int]string)
	for i := 0; i < c.NumNodes(); i++ {
		var detail details
		if err := getJSON(c.URL(i), "/_status/details/local", &detail); err != nil {
			t.Fatal(err)
		}
		idMap[i] = detail.NodeID.String()
	}

	// Check the local response for every node.
	for i := 0; i < c.NumNodes(); i++ {
		checkNode(t, c, i, idMap[i], "local", idMap[i])
		get(t, c.URL(i), "/_status/nodes")
		get(t, c.URL(i), "/_status/stores")
	}

	// Proxy from the first node to the last node.
	firstNode := 0
	lastNode := c.NumNodes() - 1
	firstID := idMap[firstNode]
	lastID := idMap[lastNode]
	checkNode(t, c, firstNode, firstID, lastID, lastID)

	// And from the last node to the first node.
	checkNode(t, c, lastNode, lastID, firstID, firstID)

	// And from the last node to the last node.
	checkNode(t, c, lastNode, lastID, lastID, lastID)
}
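The details type decoded from /_status/details/local is not defined in these excerpts. A minimal sketch of just the field the tests read; the exact field set and JSON tag are assumptions.

// details mirrors the part of the /_status/details/local payload used above;
// only NodeID is needed by these tests. The JSON tag is an assumption.
type details struct {
	NodeID roachpb.NodeID `json:"nodeId"`
}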
Example #5
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration,
	f checkGossipFunc) {
	util.SucceedsWithin(t, d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		for i := 0; i < c.NumNodes(); i++ {
			var m map[string]interface{}
			if err := getJSON(c.URL(i), "/_status/gossip/local", &m); err != nil {
				return err
			}
			infos, ok := m["infos"].(map[string]interface{})
			if !ok {
				return errors.New("no infos yet")
			}
			if err := f(infos); err != nil {
				return util.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
}
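hasPeers is called above but never defined in these excerpts. A minimal sketch, assuming that checkGossipFunc in this version receives the decoded "infos" map and that per-node gossip entries use a "node:" key prefix (both assumptions; imports fmt and strings elided).

// checkGossipFunc is the signature checkGossip expects in this version.
type checkGossipFunc func(map[string]interface{}) error

// hasPeers returns a checkGossipFunc that succeeds once gossip holds an
// entry for each of the expected nodes.
func hasPeers(expected int) checkGossipFunc {
	return func(infos map[string]interface{}) error {
		count := 0
		for k := range infos {
			if strings.HasPrefix(k, "node:") {
				count++
			}
		}
		if count != expected {
			return fmt.Errorf("expected %d peers, found %d", expected, count)
		}
		return nil
	}
}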
Example #6
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	var details server.DetailsResponse
	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		default:
		}
		return util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/details/local", &details)
	})

	bi := details.BuildInfo
	testData := map[string]string{
		"go_version":   bi.GoVersion,
		"tag":          bi.Tag,
		"time":         bi.Time,
		"dependencies": bi.Dependencies,
	}
	for key, val := range testData {
		if val == "" {
			t.Errorf("build info not set for \"%s\"", key)
		}
	}
}
Example #7
func testStatusServerInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	// Get the ids for each node.
	idMap := make(map[int]roachpb.NodeID)
	for i := 0; i < c.NumNodes(); i++ {
		var details server.DetailsResponse
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		idMap[i] = details.NodeID
	}

	// Check the local response for every node.
	for i := 0; i < c.NumNodes(); i++ {
		id := idMap[i]
		checkNode(t, c, i, id, id, id)
		get(t, c.URL(i), "/_status/nodes")
	}

	// Proxy from the first node to the last node.
	firstNode := 0
	lastNode := c.NumNodes() - 1
	firstID := idMap[firstNode]
	lastID := idMap[lastNode]
	checkNode(t, c, firstNode, firstID, lastID, lastID)

	// And from the last node to the first node.
	checkNode(t, c, lastNode, lastID, firstID, firstID)

	// And from the last node to the last node.
	checkNode(t, c, lastNode, lastID, lastID, lastID)
}
Example #8
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
Example #9
func postFreeze(c cluster.Cluster, freeze bool) (server.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient()
	httpClient.Timeout = 10 * time.Second

	var resp server.ClusterFreezeResponse
	err := postJSON(httpClient, c.URL(0), "/_admin/v1/cluster/freeze",
		&server.ClusterFreezeRequest{Freeze: freeze}, &resp)
	return resp, err
}
Example #10
func restoreNetwork(t *testing.T, c cluster.Cluster) []error {
	var errs []error
	for i := 0; i < c.NumNodes(); i++ {
		for _, cmd := range iptables.Reset() {
			if err := c.ExecRoot(i, cmd); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errs
}
Example #11
func mustGetHosts(t *testing.T, c cluster.Cluster) (
	[]iptables.IP, map[iptables.IP]int,
) {
	var addrs []iptables.IP
	addrsToNode := make(map[iptables.IP]int)
	for i := 0; i < c.NumNodes(); i++ {
		addr := iptables.IP(c.InternalIP(i).String())
		addrsToNode[addr] = i
		addrs = append(addrs, addr)
	}
	return addrs, addrsToNode
}
Example #12
func postFreeze(c cluster.Cluster, freeze bool, timeout time.Duration) (server.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp server.ClusterFreezeResponse
	err := util.PostJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&server.ClusterFreezeRequest{Freeze: freeze},
		&resp,
	)
	return resp, err
}
Example #13
func testClusterRecoveryInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, num),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, num),
	}

	for i := 0; i < num; i++ {
		state.clients[i].Lock()
		state.initClient(t, c, i)
		state.clients[i].Unlock()
		go transferMoneyLoop(i, &state, *numAccounts, *maxTransfer)
	}

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return rnd.Perm(num)[:rnd.Intn(num)+1]
	}
	go chaosMonkey(&state, c, true, pickNodes)

	waitClientsStop(num, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, &state.clients[0])

	elapsed := time.Since(start)
	var count uint64
	counts := state.counts()
	for _, c := range counts {
		count += c
	}
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #14
func testPutInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, dbStopper := c.NewClient(t, 0)
	defer dbStopper.Stop()

	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(context.Background(), "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(t)
			cluster.Consistent(t, c)
		}
	}

	elapsed := timeutil.Since(start)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #15
// BidirectionalPartitionNemesis is a nemesis which randomly severs the network
// symmetrically between two random groups of nodes. Partitioned and connected
// modes take alternating turns, with random durations of up to 15s.
func BidirectionalPartitionNemesis(t *testing.T, stop <-chan struct{}, c cluster.Cluster) {
	randSec := func() time.Duration { return time.Duration(rand.Int63n(15 * int64(time.Second))) }
	for {
		ch := make(chan struct{})
		go func() {
			select {
			case <-time.After(randSec()):
			case <-stop:
			}
			close(ch)
		}()
		cutNetwork(t, c, ch, randomBidirectionalPartition(c.NumNodes())...)
		select {
		case <-stop:
			return
		case <-time.After(randSec()):
		}
	}
}
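randomBidirectionalPartition is not included in the excerpts. A plausible sketch, assuming it splits a random permutation of the node indices into two non-empty groups; the exact splitting rule is an assumption.

// randomBidirectionalPartition splits nodes 0..numNodes-1 into two random,
// non-empty groups suitable for cutNetwork. Requires numNodes >= 2.
func randomBidirectionalPartition(numNodes int) [][]int {
	perm := rand.Perm(numNodes)
	cut := 1 + rand.Intn(numNodes-1) // keep both sides non-empty
	return [][]int{perm[:cut], perm[cut:]}
}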
Example #16
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}

	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)

	waitClientsStop(1, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := time.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
	kvClient, kvStopper := c.NewClient(t, num-1)
	defer kvStopper.Stop()
	if pErr := kvClient.CheckConsistency(keys.TableDataMin, keys.TableDataMax); pErr != nil {
		// TODO(.*): change back to t.Fatal after #5051.
		log.Error(pErr)
	}
}
Example #17
func testRepairInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	testStopper := stop.NewStopper()
	dc := newDynamicClient(c, testStopper)
	testStopper.AddCloser(dc)
	defer testStopper.Stop()

	// Add some loads.
	for i := 0; i < c.NumNodes()*2; i++ {
		ID := i
		testStopper.RunWorker(func() {
			insertLoad(t, dc, ID)
		})
	}

	// TODO(bram): #5345 add repair mechanism.

	select {
	case <-stopper:
	case <-time.After(cfg.Duration):
	}
}
Example #18
func testGossipPeeringsInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart the first node.
		log.Infof(context.Background(), "restarting node 0")
		if err := c.Restart(0); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(context.Background(), "restarting node %d", pickedNode)
		if err := c.Restart(pickedNode); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))
	}
}
Example #19
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
	// Always talk to node 0.
	client, dbStopper := makeClient(t, c.ConnString(0))
	defer dbStopper.Stop()

	wantedReplicas := 3
	if c.NumNodes() < 3 {
		wantedReplicas = c.NumNodes()
	}

	log.Infof("waiting for first range to have %d replicas", wantedReplicas)

	util.SucceedsWithin(t, d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		foundReplicas, err := countRangeReplicas(client)
		if err != nil {
			return err
		}

		if log.V(1) {
			log.Infof("found %d replicas", foundReplicas)
		}
		if foundReplicas >= wantedReplicas {
			return nil
		}
		return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
	})
}
Example #20
func cutNetwork(t *testing.T, c cluster.Cluster, closer <-chan struct{}, partitions ...[]int) {
	addrs, addrsToNode := mustGetHosts(t, c)
	ipPartitions := make([][]iptables.IP, 0, len(partitions))
	for _, partition := range partitions {
		ipPartition := make([]iptables.IP, 0, len(partition))
		for _, nodeIndex := range partition {
			ipPartition = append(ipPartition, addrs[nodeIndex])
		}
		ipPartitions = append(ipPartitions, ipPartition)
	}
	log.Warningf("partitioning: %v (%v)", partitions, ipPartitions)
	for host, cmds := range iptables.Rules(iptables.Bidirectional(ipPartitions...)) {
		for _, cmd := range cmds {
			if err := c.ExecRoot(addrsToNode[host], cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	<-closer
	for i := 0; i < c.NumNodes(); i++ {
		for _, cmd := range iptables.Reset() {
			if err := c.ExecRoot(i, cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	log.Warningf("resolved all partitions")
}
Example #21
// BidirectionalPartitionNemesis is a nemesis which randomly severs the network
// symmetrically between two random groups of nodes. Partitioned and connected
// modes take alternating turns, with random durations of up to 15s.
func BidirectionalPartitionNemesis(t *testing.T, stop <-chan struct{}, c cluster.Cluster) {
	randSec := func() time.Duration { return time.Duration(rand.Int63n(15 * int64(time.Second))) }
	log.Infof(context.Background(), "cleaning up any previous rules")
	_ = restoreNetwork(t, c) // clean up any potential leftovers
	log.Infof(context.Background(), "starting partition nemesis")
	for {
		ch := make(chan struct{})
		go func() {
			select {
			case <-time.After(randSec()):
			case <-stop:
			}
			close(ch)
		}()
		cutNetwork(t, c, ch, randomBidirectionalPartition(c.NumNodes())...)
		select {
		case <-stop:
			return
		case <-time.After(randSec()):
		}
	}
}
Example #22
// checkNode checks all the endpoints of the status server hosted by the node
// and requests info for the node with otherNodeID. That ID can refer to
// another node, the node itself, or "local".
func checkNode(t *testing.T, c cluster.Cluster, i int, nodeID, otherNodeID, expectedNodeID string) {
	var detail details
	if err := getJSON(c.URL(i), "/_status/details/"+otherNodeID, &detail); err != nil {
		t.Fatal(util.ErrorfSkipFrames(1, "unable to parse details - %s", err))
	}
	if actualNodeID := detail.NodeID.String(); actualNodeID != expectedNodeID {
		t.Fatal(util.ErrorfSkipFrames(1, "%s calling %s: node ids don't match - expected %s, actual %s", nodeID, otherNodeID, expectedNodeID, actualNodeID))
	}

	get(t, c.URL(i), fmt.Sprintf("/_status/gossip/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/logfiles/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/logs/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/stacks/%s", otherNodeID))
	get(t, c.URL(i), fmt.Sprintf("/_status/nodes/%s", otherNodeID))
}
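The get helper used throughout these examples is also absent. A minimal sketch, assuming it fails the test on transport errors and non-200 responses and returns the body; the signature and the reuse of cluster.HTTPClient are assumptions (imports net/http and io/ioutil elided).

// get fetches base+path and fails the test on any error or non-200 status.
func get(t *testing.T, base, path string) []byte {
	resp, err := cluster.HTTPClient.Get(base + path)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("GET %s%s: status %d: %s", base, path, resp.StatusCode, body)
	}
	return body
}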
Example #23
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
	if c.NumNodes() < 1 {
		// Looks silly, but we actually start zero-node clusters in the
		// reference tests.
		t.Log("replication test is a no-op for empty cluster")
		return
	}

	wantedReplicas := 3
	if c.NumNodes() < 3 {
		wantedReplicas = c.NumNodes()
	}

	log.Infof(context.Background(), "waiting for first range to have %d replicas", wantedReplicas)

	util.SucceedsSoon(t, func() error {
		// Reconnect on every iteration; gRPC will eagerly tank the connection
		// on transport errors. Always talk to node 0 because it's guaranteed
		// to exist.
		client, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		foundReplicas, err := countRangeReplicas(client)
		if err != nil {
			return err
		}

		if log.V(1) {
			log.Infof(context.Background(), "found %d replicas", foundReplicas)
		}
		if foundReplicas >= wantedReplicas {
			return nil
		}
		return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
	})

	log.Infof(context.Background(), "found %d replicas", wantedReplicas)
}
Example #24
// checkNode checks all the endpoints of the status server hosted by the node
// and requests info for the node with otherNodeID. That ID can refer to
// another node, the node itself, or "local".
func checkNode(t *testing.T, c cluster.Cluster, i int, nodeID, otherNodeID, expectedNodeID roachpb.NodeID) {
	urlIDs := []string{otherNodeID.String()}
	if nodeID == otherNodeID {
		urlIDs = append(urlIDs, "local")
	}
	var details server.DetailsResponse
	for _, urlID := range urlIDs {
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/"+urlID, &details); err != nil {
			t.Fatal(util.ErrorfSkipFrames(1, "unable to parse details - %s", err))
		}
		if details.NodeID != expectedNodeID {
			t.Fatal(util.ErrorfSkipFrames(1, "%d calling %s: node ids don't match - expected %d, actual %d", nodeID, urlID, expectedNodeID, details.NodeID))
		}

		get(t, c.URL(i), fmt.Sprintf("/_status/gossip/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/nodes/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/logfiles/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/logs/%s", urlID))
		get(t, c.URL(i), fmt.Sprintf("/_status/stacks/%s", urlID))
	}
}
Example #25
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if minNum := 3; num < minNum {
		t.Skipf("need at least %d nodes, got %d", minNum, num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}

	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(context.Background(), "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)

	waitClientsStop(1, &state, stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := timeutil.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #26
func testGossipRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	// This already replicates the first range (in the local setup).
	// The replication of the first range is important: as long as the
	// first range only exists on one node, that node can trivially
	// acquire the range lease. Once the range is replicated, however,
	// nodes must be able to discover each other over gossip before the
	// lease can be acquired.
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		log.Infof(context.Background(), "waiting for initial gossip connections")
		checkGossip(t, c, waitTime, hasPeers(num))
		checkGossip(t, c, waitTime, hasClusterID)
		checkGossip(t, c, waitTime, hasSentinel)

		log.Infof(context.Background(), "killing all nodes")
		for i := 0; i < num; i++ {
			if err := c.Kill(i); err != nil {
				t.Fatal(err)
			}
		}

		log.Infof(context.Background(), "restarting all nodes")
		for i := 0; i < num; i++ {
			if err := c.Restart(i); err != nil {
				t.Fatal(err)
			}
		}

		log.Infof(context.Background(), "waiting for gossip to be connected")
		checkGossip(t, c, waitTime, hasPeers(num))
		checkGossip(t, c, waitTime, hasClusterID)
		checkGossip(t, c, waitTime, hasSentinel)

		for i := 0; i < num; i++ {
			db, dbStopper := c.NewClient(t, i)
			if i == 0 {
				if err := db.Del("count"); err != nil {
					t.Fatal(err)
				}
			}
			var kv client.KeyValue
			if err := db.Txn(func(txn *client.Txn) error {
				var err error
				kv, err = txn.Inc("count", 1)
				return err
			}); err != nil {
				t.Fatal(err)
			} else if v := kv.ValueInt(); v != int64(i+1) {
				t.Fatalf("unexpected value %d for write #%d (expected %d)", v, i, i+1)
			}
			dbStopper.Stop()
		}
	}
}
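hasClusterID and hasSentinel are referenced above without definitions. A sketch matching the gossip.InfoStatus-based checkGossipFunc of Example #8; the gossip.KeyClusterID and gossip.KeySentinel key names are assumptions.

// hasClusterID succeeds once the cluster ID has been gossiped.
func hasClusterID(infos map[string]gossip.Info) error {
	if _, ok := infos[gossip.KeyClusterID]; !ok {
		return errors.New("no cluster ID yet")
	}
	return nil
}

// hasSentinel succeeds once the gossip sentinel is present, i.e. the node
// has connected to the primary gossip network.
func hasSentinel(infos map[string]gossip.Info) error {
	if _, ok := infos[gossip.KeySentinel]; !ok {
		return errors.New("no sentinel yet")
	}
	return nil
}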
Example #27
func testSingleKeyInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()

	// Initialize the value for our test key to zero.
	const key = "test-key"
	initDB, initDBStopper := c.NewClient(t, 0)
	defer initDBStopper.Stop()
	if err := initDB.Put(key, 0); err != nil {
		t.Fatal(err)
	}

	type result struct {
		err        error
		maxLatency time.Duration
	}

	resultCh := make(chan result, num)
	deadline := timeutil.Now().Add(cfg.Duration)
	var expected int64

	// Start up num workers each reading and writing the same
	// key. Each worker is configured to talk to a different node in the
	// cluster.
	for i := 0; i < num; i++ {
		db, dbStopper := c.NewClient(t, i)
		defer dbStopper.Stop()
		go func() {
			var r result
			for timeutil.Now().Before(deadline) {
				start := timeutil.Now()
				err := db.Txn(func(txn *client.Txn) error {
					minExp := atomic.LoadInt64(&expected)
					r, err := txn.Get(key)
					if err != nil {
						return err
					}
					b := txn.NewBatch()
					v := r.ValueInt()
					b.Put(key, v+1)
					err = txn.CommitInBatch(b)
					// Atomic updates after the fact mean that we should read
					// minExp or larger (since concurrent writers might have
					// committed but not yet performed their atomic update).
					if err == nil && v < minExp {
						return util.Errorf("unexpected read: %d, expected >= %d", v, minExp)
					}
					return err
				})
				if err != nil {
					resultCh <- result{err: err}
					return
				}
				atomic.AddInt64(&expected, 1)
				latency := timeutil.Since(start)
				if r.maxLatency < latency {
					r.maxLatency = latency
				}
			}
			resultCh <- r
		}()
	}

	// Verify that none of the workers encountered an error.
	var results []result
	for len(results) < num {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case r := <-resultCh:
			if r.err != nil {
				t.Fatal(r.err)
			}
			results = append(results, r)
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			log.Infof("%d", atomic.LoadInt64(&expected))
		}
	}

	// Verify the resulting value stored at the key is what we expect.
	r, err := initDB.Get(key)
	if err != nil {
		t.Fatal(err)
	}
	v := r.ValueInt()
	if expected != v {
		t.Fatalf("expected %d, but found %d", expected, v)
	}
	var maxLatency []time.Duration
	for _, r := range results {
		maxLatency = append(maxLatency, r.maxLatency)
	}
	log.Infof("%d increments: %s", v, maxLatency)
}
Example #28
// initClient initializes the client talking to node "i".
// It requires that the caller hold the client's write lock.
func (state *testState) initClient(t *testing.T, c cluster.Cluster, i int) {
	state.clients[i].db = makePGClient(t, c.PGUrl(i))
}
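makePGClient appears in several examples without a definition. A minimal sketch, assuming the lib/pq driver and a plain *sql.DB handle (both assumptions).

import (
	gosql "database/sql"
	"testing"

	_ "github.com/lib/pq" // assumed Postgres driver
)

// makePGClient opens a SQL connection to the given Postgres URL and fails
// the test if the data source name is rejected.
func makePGClient(t *testing.T, dest string) *gosql.DB {
	db, err := gosql.Open("postgres", dest)
	if err != nil {
		t.Fatal(err)
	}
	return db
}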
Example #29
// chaosMonkey picks a set of nodes and restarts them. If stopClients is set,
// all the clients are locked before the nodes are restarted.
func chaosMonkey(state *testState, c cluster.Cluster, stopClients bool, pickNodes func() []int) {
	defer close(state.teardown)
	for curRound := uint64(1); !state.done(); curRound++ {
		atomic.StoreUint64(&state.monkeyIteration, curRound)
		select {
		case <-stopper:
			return
		default:
		}

		// Pick nodes to be restarted.
		nodes := pickNodes()

		if stopClients {
			// Prevent all clients from writing while nodes are being restarted.
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Lock()
			}
		}
		log.Infof("round %d: restarting nodes %v", curRound, nodes)
	restartLoop:
		for _, i := range nodes {
			// Two early exit conditions. Note that a labeled break is needed
			// here: a bare break would only exit the select, not the loop.
			select {
			case <-stopper:
				break restartLoop
			default:
			}
			if state.done() {
				break
			}
			log.Infof("round %d: restarting %d", curRound, i)
			if err := c.Kill(i); err != nil {
				state.t.Error(err)
			}
			if err := c.Restart(i); err != nil {
				state.t.Error(err)
			}
			if stopClients {
				// Reinitialize the client talking to the restarted node.
				state.initClient(state.t, c, i)
			}
		}
		if stopClients {
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Unlock()
			}
		}

		preCount := state.counts()

		madeProgress := func() bool {
			c.Assert(state.t)
			newCounts := state.counts()
			for i := range newCounts {
				if newCounts[i] > preCount[i] {
					return true
				}
			}
			return false
		}

		// Sleep until at least one client is writing successfully.
		log.Warningf("round %d: monkey sleeping while cluster recovers...", curRound)
		for !state.done() && !madeProgress() {
			time.Sleep(time.Second)
		}
		log.Warningf("round %d: cluster recovered", curRound)
	}
}
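chaosMonkey and the bank tests share a testState whose definition is not in the excerpts. A hedged sketch of the fields and helpers they rely on; the field set and the done semantics are inferred, not taken from the original (imports sync, sync/atomic, time, and the gosql alias from makePGClient elided).

// testClient pairs a SQL connection with a success counter; the embedded
// mutex lets chaosMonkey pause a client while its node restarts.
type testClient struct {
	sync.Mutex
	db    *gosql.DB
	count uint64
}

// testState is the shared harness state inferred from the examples above.
type testState struct {
	t               *testing.T
	errChan         chan error
	teardown        chan struct{}
	deadline        time.Time
	clients         []testClient
	monkeyIteration uint64
}

// done reports whether the test deadline has passed; the real harness
// presumably also consults errChan.
func (s *testState) done() bool {
	return !timeutil.Now().Before(s.deadline)
}

// counts snapshots each client's transfer counter.
func (s *testState) counts() []uint64 {
	out := make([]uint64, len(s.clients))
	for i := range s.clients {
		out[i] = atomic.LoadUint64(&s.clients[i].count)
	}
	return out
}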
Example #30
func testRaftUpdateInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	minAffected := int64(server.ExpectedInitialRangeCount())

	const long = time.Minute
	const short = 10 * time.Second

	mustPost := func(freeze bool) serverpb.ClusterFreezeResponse {
		reply, err := postFreeze(c, freeze, long)
		if err != nil {
			t.Fatal(errors.Errorf("%v", err))
		}
		return reply
	}

	if reply := mustPost(false); reply.RangesAffected != 0 {
		t.Fatalf("expected initial unfreeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d frozen ranges, got %d", minAffected, reply.RangesAffected)
	}

	if reply := mustPost(true); reply.RangesAffected != 0 {
		t.Fatalf("expected second freeze to affect no ranges, got %d", reply.RangesAffected)
	}

	if reply := mustPost(false); reply.RangesAffected < minAffected {
		t.Fatalf("expected >=%d thawed ranges, got %d", minAffected, reply.RangesAffected)
	}

	num := c.NumNodes()
	if num < 3 {
		t.Skip("skipping remainder of test; needs at least 3 nodes")
	}

	// Kill the last node.
	if err := c.Kill(num - 1); err != nil {
		t.Fatal(err)
	}

	// An attempt to freeze should get stuck (since it does not get confirmation
	// of the last node receiving the freeze command).
	// Note that this is the freeze trigger stalling on the Replica, not the
	// Store-polling mechanism.
	acceptErrs := strings.Join([]string{
		"timed out waiting for Range",
		"Timeout exceeded while",
		"connection is closing",
		"deadline",
		// error returned via JSON when the server-side gRPC stream times out (due to
		// lack of new input). Unmarshaling that JSON fails with a message referencing
		// unknown fields, unfortunately in map order.
		"unknown field .*",
	}, "|")
	if reply, err := postFreeze(c, true, short); !testutils.IsError(err, acceptErrs) {
		t.Fatalf("expected timeout, got %v: %v", err, reply)
	}

	// Shut down the remaining nodes and restart them.
	for i := 0; i < num-1; i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < num; i++ {
		if err := c.Restart(i); err != nil {
			t.Fatal(err)
		}
	}

	// The cluster should now be fully operational (at least after waiting
	// a little bit) since each node tries to unfreeze everything when it
	// starts.
	//
	// TODO(tschottdorf): we unfreeze again in the loop since Raft reproposals
	// can re-freeze Ranges unexpectedly. This should be re-evaluated after
	// #6287 removes that problem.
	if err := util.RetryForDuration(time.Minute, func() error {
		if _, err := postFreeze(c, false, short); err != nil {
			return err
		}

		// TODO(tschottdorf): moving the client creation outside of the retry
		// loop will break the test with the following message:
		//
		//   client/rpc_sender.go:61: roachpb.Batch RPC failed as client
		//   connection was closed
		//
		// Perhaps the cluster updates the address too late after restarting
		// the node.
		db, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		_, err := db.Scan(keys.LocalMax, roachpb.KeyMax, 0)
		if err != nil {
			log.Info(err)
		}
		return err
	}); err != nil {
		t.Fatal(err)
	}

	// Unfreezing again should be a no-op.
	if reply, err := postFreeze(c, false, long); err != nil {
		t.Fatal(err)
	} else if reply.RangesAffected > 0 {
		t.Fatalf("still %d frozen ranges", reply.RangesAffected)
	}
}