// postFreeze posts a freeze (or thaw) request against the first node's admin
// endpoint and streams the intermediate responses, retaining the largest
// RangesAffected count seen across updates.
func postFreeze(
	c cluster.Cluster, freeze bool, timeout time.Duration,
) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp serverpb.ClusterFreezeResponse
	log.Infof(context.Background(), "requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof(context.Background(), "%+v", &resp)
		}
	}
	err := httputil.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
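// A hypothetical usage sketch of postFreeze: freeze the cluster, then thaw it
// again, failing the test on error. freezeThenThaw and the one-minute timeout
// are illustrative assumptions, not part of the original code.
func freezeThenThaw(t *testing.T, c cluster.Cluster) {
	resp, err := postFreeze(c, true, time.Minute)
	if err != nil {
		t.Fatalf("freeze failed: %s", err)
	}
	log.Infof(context.Background(), "froze %d ranges", resp.RangesAffected)
	if _, err := postFreeze(c, false, time.Minute); err != nil {
		t.Fatalf("thaw failed: %s", err)
	}
}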
// testBuildInfoInner verifies that the build info reported by the status
// endpoint has its expected fields set.
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	var details serverpb.DetailsResponse
	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		default:
		}
		return httputil.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/details/local", &details)
	})

	bi := details.BuildInfo
	testData := map[string]string{
		"go_version":   bi.GoVersion,
		"tag":          bi.Tag,
		"time":         bi.Time,
		"dependencies": bi.Dependencies,
	}
	for key, val := range testData {
		if val == "" {
			t.Errorf("build info not set for \"%s\"", key)
		}
	}
}
// testStatusServerInner verifies that the status endpoints of each node
// return correct results, both locally and when proxied through another node.
func testStatusServerInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	// Get the ids for each node.
	idMap := make(map[int]roachpb.NodeID)
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		idMap[i] = details.NodeID
	}

	// Check local response for every node.
	for i := 0; i < c.NumNodes(); i++ {
		id := idMap[i]
		checkNode(ctx, t, c, i, id, id, id)
		get(ctx, t, c.URL(ctx, i), "/_status/nodes")
	}

	// Proxy from the first node to the last node.
	firstNode := 0
	lastNode := c.NumNodes() - 1
	firstID := idMap[firstNode]
	lastID := idMap[lastNode]
	checkNode(ctx, t, c, firstNode, firstID, lastID, lastID)

	// And from the last node to the first node.
	checkNode(ctx, t, c, lastNode, lastID, firstID, firstID)

	// And from the last node to the last node.
	checkNode(ctx, t, c, lastNode, lastID, lastID, lastID)
}
// CheckGossip fetches the gossip infoStore from each node and invokes the
// given function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func CheckGossip(
	ctx context.Context, t testing.TB, c cluster.Cluster, d time.Duration, f CheckGossipFunc,
) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/gossip/local", &infoStatus); err != nil {
				return errors.Wrapf(err, "failed to get gossip status from node %d", i)
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
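// CheckGossipFunc is the callback type consumed by CheckGossip. This
// definition is a minimal sketch inferred from the call above, which passes
// infoStatus.Infos (a map from gossip key to gossip.Info) and expects an
// error result.
type CheckGossipFunc func(map[string]gossip.Info) error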
// checkGossip fetches the gossip infoStore from each node and invokes the
// given function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := httputil.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
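// checkGossipFunc mirrors CheckGossipFunc for the non-context variant above;
// again a minimal sketch inferred from the f(infoStatus.Infos) call.
type checkGossipFunc func(map[string]gossip.Info) error

// hasPeers is the predicate testBuildInfoInner passes to checkGossip. This is
// a hypothetical sketch, assuming node descriptors are gossiped under keys
// with a "node:" prefix (uses the standard strings package): it passes once
// the expected number of nodes is visible in the infoStore.
func hasPeers(expected int) checkGossipFunc {
	return func(infos map[string]gossip.Info) error {
		count := 0
		for k := range infos {
			if strings.HasPrefix(k, "node:") {
				count++
			}
		}
		if count != expected {
			return errors.Errorf("expected %d peers, found %d", expected, count)
		}
		return nil
	}
}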
// checkNode checks all the endpoints of the status server hosted by node and
// requests info for the node with otherNodeID. That node could be another
// node, the same node, or "local".
func checkNode(
	ctx context.Context,
	t *testing.T,
	c cluster.Cluster,
	i int,
	nodeID, otherNodeID, expectedNodeID roachpb.NodeID,
) {
	urlIDs := []string{otherNodeID.String()}
	if nodeID == otherNodeID {
		urlIDs = append(urlIDs, "local")
	}
	var details serverpb.DetailsResponse
	for _, urlID := range urlIDs {
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/details/"+urlID, &details); err != nil {
			t.Fatal(errors.Errorf("unable to parse details - %s", err))
		}
		if details.NodeID != expectedNodeID {
			t.Fatal(errors.Errorf("%d calling %s: node ids don't match - expected %d, actual %d",
				nodeID, urlID, expectedNodeID, details.NodeID))
		}

		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/gossip/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/nodes/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/logfiles/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/logs/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/stacks/%s", urlID))
	}
}
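// get is the helper assumed by checkNode and testStatusServerInner. This is a
// hypothetical minimal sketch (uses io/ioutil and net/http): it issues a GET
// against base+rel with the shared test HTTP client and fails the test on a
// transport error or a non-200 status. ctx is accepted only to match the call
// sites above.
func get(ctx context.Context, t *testing.T, base, rel string) {
	url := base + rel
	resp, err := cluster.HTTPClient.Get(url)
	if err != nil {
		t.Fatalf("GET %s: %s", url, err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("GET %s: reading body: %s", url, err)
	}
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("GET %s: status %d, body: %s", url, resp.StatusCode, body)
	}
}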
// testAdminLossOfQuorumInner verifies that the status and time-series
// endpoints remain available after all but the first node have been killed.
func testAdminLossOfQuorumInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	nodeIDs := make([]roachpb.NodeID, c.NumNodes())
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/details/local", &details); err != nil {
			t.Fatalf("failed to get local details from node %d: %s", i, err)
		}
		nodeIDs[i] = details.NodeID
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(ctx, i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodes serverpb.NodesResponse
	if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, 0)+"/_status/nodes", &nodes); err != nil {
		t.Fatal(err)
	}
	for _, nodeID := range nodeIDs {
		var nodeStatus status.NodeStatus
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, 0)+"/_status/nodes/"+strconv.Itoa(int(nodeID)), &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := tspb.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []tspb.Query{
			{Name: "doesn't_matter", Sources: []string{}},
		},
	}
	var queryResponse tspb.TimeSeriesQueryResponse
	if err := httputil.PostJSON(cluster.HTTPClient, c.URL(ctx, 0)+"/ts/query", &queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}