func TestBuildInfo(t *testing.T) {
	if *numLocal == 0 {
		t.Skip("skipping since not run against local cluster")
	}
	l := cluster.CreateLocal(1, 1, *logDir, stopper) // intentionally using a local cluster
	l.Start()
	defer l.AssertAndStop(t)

	checkGossip(t, l, 20*time.Second, hasPeers(l.NumNodes()))

	util.SucceedsWithin(t, 10*time.Second, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(200 * time.Millisecond):
		}
		var r struct {
			BuildInfo map[string]string
		}
		if err := l.Nodes[0].GetJSON("", "/_status/details/local", &r); err != nil {
			return err
		}
		for _, key := range []string{"goVersion", "tag", "time", "dependencies"} {
			if val, ok := r.BuildInfo[key]; !ok {
				t.Errorf("build info missing for \"%s\"", key)
			} else if val == "" {
				t.Errorf("build info not set for \"%s\"", key)
			}
		}
		return nil
	})
}
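
The loop above only requires that each listed key is present and non-empty; a body shaped roughly like the following would pass (struct-field matching in encoding/json is case-insensitive, and every value below is made up for this sketch):

// Illustrative /_status/details/local response accepted by the check above;
// the constant name and all values are hypothetical.
const exampleDetailsBody = `{
	"buildInfo": {
		"goVersion":    "go1.x",
		"tag":          "example-build",
		"time":         "2015/01/01 00:00:00",
		"dependencies": "..."
	}
}`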
Example #2
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T) cluster.Cluster {
	if *numLocal > 0 {
		if *numRemote > 0 {
			t.Fatal("cannot both specify -num-local and -num-remote")
		}
		logDir := *logDir
		if logDir != "" {
			if _, _, fun := caller.Lookup(1); fun != "" {
				logDir = filepath.Join(logDir, fun)
			}
		}
		l := cluster.CreateLocal(*numLocal, *numStores, logDir, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	if *numRemote == 0 {
		t.Fatal("need to either specify -num-local or -num-remote")
	}
	f := farmer(t)
	if err := f.Resize(*numRemote, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	return f
}
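
A minimal usage sketch (the test name and body are hypothetical, not from the source): since StartCluster hides the local/remote decision and already verifies range replication, a caller only needs to obtain the cluster and ensure it is stopped.

// Hypothetical caller of the StartCluster variant above; the test body is elided.
func TestExampleUsage(t *testing.T) {
	c := StartCluster(t)
	defer c.AssertAndStop(t)
	// ... exercise the cluster ...
}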
Example #3
func TestRangeReplication(t *testing.T) {
	l := cluster.CreateLocal(*numNodes, stopper) // intentionally using local cluster
	l.Start()
	defer l.AssertAndStop(t)

	checkRangeReplication(t, l, 20*time.Second)
}
Example #4
// MustStartLocal skips tests not running against a local cluster.
func MustStartLocal(t *testing.T) *cluster.LocalCluster {
	if *numLocal == 0 {
		t.Skip("skipping since not run against local cluster")
	}
	l := cluster.CreateLocal(*numLocal, *numStores, *logDir, stopper)
	l.Start()
	return l
}
func TestRangeReplication(t *testing.T) {
	if *numLocal == 0 {
		t.Skip("skipping since not run against local cluster")
	}
	l := cluster.CreateLocal(*numLocal, *numStores, *logDir, stopper) // intentionally using local cluster
	l.Start()
	defer l.AssertAndStop(t)

	checkRangeReplication(t, l, 20*time.Second)
}
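
With the MustStartLocal helper above in place, the same test can avoid repeating the skip and setup logic; a sketch assuming only the definitions shown in this example:

// Sketch: TestRangeReplication rewritten on top of MustStartLocal.
func TestRangeReplication(t *testing.T) {
	l := MustStartLocal(t) // skips unless -num-local is set, then creates and starts the cluster
	defer l.AssertAndStop(t)

	checkRangeReplication(t, l, 20*time.Second)
}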
Example #6
// StartCluster starts a cluster from the relevant flags.
func StartCluster(t *testing.T) cluster.Cluster {
	if *numNodes > 0 {
		if len(*peers) > 0 {
			t.Fatal("cannot both specify -num and -peers")
		}
		l := cluster.CreateLocal(*numNodes, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	if len(*peers) == 0 {
		t.Fatal("need to either specify -num or -peers")
	}
	return cluster.CreateRemote(strings.Split(*peers, ","))
}
// TestStatusServer starts up an N node cluster and tests the status server on
// each node.
func TestStatusServer(t *testing.T) {
	if *numLocal == 0 {
		t.Skip("skipping since not run against local cluster")
	}

	l := cluster.CreateLocal(*numLocal, *numStores, *logDir, stopper) // intentionally using local cluster
	l.ForceLogging = true
	l.Start()
	defer l.AssertAndStop(t)

	checkRangeReplication(t, l, 20*time.Second)

	// Get the ids for each node.
	idMap := make(map[string]string)
	for _, node := range l.Nodes {
		body := get(t, node, "/_status/details/local")
		var detail details
		if err := json.Unmarshal(body, &detail); err != nil {
			t.Fatalf("unable to parse details - %s", err)
		}
		idMap[node.ID] = detail.NodeID.String()
	}

	// Check the local response for every node.
	for _, node := range l.Nodes {
		checkNode(t, node, idMap[node.ID], "local", idMap[node.ID])
		get(t, node, "/_status/nodes")
		get(t, node, "/_status/stores")
	}

	// Proxy from the first node to the last node.
	firstNode := l.Nodes[0]
	lastNode := l.Nodes[len(l.Nodes)-1]
	firstID := idMap[firstNode.ID]
	lastID := idMap[lastNode.ID]
	checkNode(t, firstNode, firstID, lastID, lastID)

	// And from the last node to the first node.
	checkNode(t, lastNode, lastID, firstID, firstID)

	// And from the last node to the last node.
	checkNode(t, lastNode, lastID, lastID, lastID)
}
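
For reference, the details value decoded above only needs a NodeID field whose type provides a String method; hypothetical stand-in definitions (the real types live elsewhere in the test and status code) could look like:

// Hypothetical stand-ins for the types TestStatusServer decodes into; the
// field name follows the usage above, everything else is an assumption.
type nodeID int

func (n nodeID) String() string { return strconv.Itoa(int(n)) }

type details struct {
	NodeID nodeID
}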
Example #8
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			logDir = func(d string) string {
				for i := 1; i < 100; i++ {
					_, _, fun := caller.Lookup(i)
					if testFuncRE.MatchString(fun) {
						return filepath.Join(d, fun)
					}
				}
				panic("no caller matching Test(.*) in stack trace")
			}(logDir)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t, "")
	c = f
	if err := f.Resize(*flagNodes); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		if destroyErr := f.Destroy(t); destroyErr != nil {
			t.Fatalf("could not destroy cluster after error %v: %v", err, destroyErr)
		}
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
Example #9
// StartCluster starts a cluster from the relevant flags.
func StartCluster(t *testing.T) cluster.Cluster {
	if *numLocal > 0 {
		if *numRemote > 0 {
			t.Fatal("cannot both specify -num-local and -num-remote")
		}
		l := cluster.CreateLocal(*numLocal, *logDir, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	if *numRemote == 0 {
		t.Fatal("need to either specify -num-local or -num-remote")
	}
	f := farmer(t)
	if err := f.Destroy(); err != nil {
		t.Fatal(err)
	}
	if err := f.Add(*numRemote, 0); err != nil {
		t.Fatal(err)
	}
	return f
}
Example #10
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T, cfg cluster.TestConfig) (c cluster.Cluster) {
	var completed bool
	defer func() {
		if !completed && c != nil {
			c.AssertAndStop(t)
		}
	}()
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			_, _, fun := caller.Lookup(3)
			if !testFuncRE.MatchString(fun) {
				t.Fatalf("invalid caller %s; want TestX -> runTestOnConfigs -> func()", fun)
			}
			logDir = filepath.Join(logDir, fun)
		}
		l := cluster.CreateLocal(cfg, logDir, *flagPrivileged, stopper)
		l.Start()
		c = l
		checkRangeReplication(t, l, 20*time.Second)
		completed = true
		return l
	}
	f := farmer(t)
	c = f
	if err := f.Resize(*flagNodes, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	completed = true
	return f
}
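
The Lookup(3) check in this variant pins the exact call shape; a hedged sketch of a conforming caller follows, where runTestOnConfigs and its closure signature are inferred from the error message above rather than taken from the source:

// Sketch of the TestX -> runTestOnConfigs -> func() chain expected by the
// caller check above; runTestOnConfigs is assumed to invoke the closure once
// per configuration so that StartCluster sees the test name three frames up.
func TestExampleConfigs(t *testing.T) {
	runTestOnConfigs(t, func(t *testing.T, cfg cluster.TestConfig) {
		c := StartCluster(t, cfg)
		defer c.AssertAndStop(t)
		// ... exercise the cluster under cfg ...
	})
}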
Example #11
// StartCluster starts a cluster from the relevant flags. All test clusters
// should be created through this command since it sets up the logging in a
// unified way.
func StartCluster(t *testing.T) cluster.Cluster {
	if !*flagRemote {
		logDir := *flagLogDir
		if logDir != "" {
			if _, _, fun := caller.Lookup(1); fun != "" {
				logDir = filepath.Join(logDir, fun)
			}
		}
		l := cluster.CreateLocal(*flagNodes, *flagStores, logDir, stopper)
		l.Start()
		checkRangeReplication(t, l, 20*time.Second)
		return l
	}
	f := farmer(t)
	if err := f.Resize(*flagNodes, 0); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(5 * time.Minute); err != nil {
		_ = f.Destroy()
		t.Fatalf("cluster not ready in time: %v", err)
	}
	checkRangeReplication(t, f, 20*time.Second)
	return f
}
Example #12
// StartCluster starts a cluster from the relevant flags.
func StartCluster(t *testing.T) cluster.Cluster {
	l := cluster.CreateLocal(*numNodes, stopper)
	l.Start()
	checkRangeReplication(t, l, 20*time.Second)
	return l
}