func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access the '/stats/' REST endpoint on the qinglet, which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve qinglet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
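// The block below is a usage sketch only, not part of the suite above: it shows
// how CheckCadvisorHealthOnAllNodes might be wired into a Ginkgo spec. The
// Describe/It names and the 5-minute timeout are illustrative assumptions, not
// values taken from this repository.
var _ = Describe("Cadvisor health (usage sketch)", func() {
	var c *client.Client

	BeforeEach(func() {
		var err error
		c, err = loadClient()
		expectNoError(err)
	})

	It("should report healthy stats via the qinglet proxy on every node", func() {
		CheckCadvisorHealthOnAllNodes(c, 5*time.Minute)
	})
})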
func pickNode(c *client.Client) (string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes exist, can't test node proxy")
	}
	return nodes.Items[0].Name, nil
}
func getAllNodesInCluster(c *client.Client) ([]string, error) {
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	result := []string{}
	for _, node := range nodeList.Items {
		result = append(result, node.Name)
	}
	return result, nil
}
func getMinionPublicIps(c *client.Client) ([]string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}

	ips := collectAddresses(nodes, api.NodeExternalIP)
	if len(ips) == 0 {
		ips = collectAddresses(nodes, api.NodeLegacyHostIP)
	}
	return ips, nil
}
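// collectAddresses is called above but not defined in this excerpt. The sketch
// below shows one plausible shape, assuming it simply gathers every address of
// the requested type across the node list; the real helper may differ.
func collectAddressesSketch(nodes *api.NodeList, addressType api.NodeAddressType) []string {
	ips := []string{}
	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == addressType {
				ips = append(ips, address.Address)
			}
		}
	}
	return ips
}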
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore cache.Store) {
	goodCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: util.Time{time.Now()},
	}
	badCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionUnknown,
		Reason:            "unschedulable condition",
		LastHeartbeatTime: util.Time{time.Now()},
	}
	// Create a new schedulable node, since we're first going to apply
	// the unschedulable condition and verify that pods aren't scheduled.
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-node"},
		Spec:       api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []api.NodeCondition{goodCondition},
		},
	}
	nodeKey, err := cache.MetaNamespaceKeyFunc(node)
	if err != nil {
		t.Fatalf("Couldn't retrieve key for node %v", node.Name)
	}

	// The test does the following for each nodeStateManager in this list:
	//	1. Create a new node
	//	2. Apply the makeUnSchedulable function
	//	3. Create a new pod
	//	4. Check that the pod doesn't get assigned to the node
	//	5. Apply the schedulable function
	//	6. Check that the pod *does* get assigned to the node
	//	7. Delete the pod and node.
	// (Sketches of the nodeStateManager type and the helpers used here follow
	// after this function.)
	nodeModifications := []nodeStateManager{
		// Test node.Spec.Unschedulable=true/false
		{
			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Spec.Unschedulable = true
				if _, err := c.Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=true: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					// An unschedulable node should get deleted from the store
					return node == nil
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Spec.Unschedulable = false
				if _, err := c.Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=false: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Spec.Unschedulable == false
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
				}
			},
		},
		// Test node.Status.Conditions=ConditionTrue/Unknown
		{
			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Status = api.NodeStatus{
					Capacity: api.ResourceList{
						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []api.NodeCondition{badCondition},
				}
				if _, err = c.Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with bad status condition: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionUnknown
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Status = api.NodeStatus{
					Capacity: api.ResourceList{
						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []api.NodeCondition{goodCondition},
				}
				if _, err = c.Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with healthy status condition: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionTrue
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
		},
	}

	for i, mod := range nodeModifications {
		unSchedNode, err := restClient.Nodes().Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// Apply the unschedulable modification to the node, and wait for the reflection
		mod.makeUnSchedulable(t, unSchedNode, nodeStore, restClient)

		// Create the new pod. Note that this needs to happen after the unschedulable
		// modification, or we have a race in the test.
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
			Spec: api.PodSpec{
				Containers: []api.Container{{Name: "container", Image: "qingyuan/pause:go"}},
			},
		}
		myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}

		// There are no schedulable nodes - the pod shouldn't be scheduled.
		err = wait.Poll(time.Second, time.Second*10, podScheduled(restClient, myPod.Namespace, myPod.Name))
		if err == nil {
			t.Errorf("Pod scheduled successfully on unschedulable nodes")
		}
		if err != wait.ErrWaitTimeout {
			t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
		} else {
			t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
		}

		// Apply the schedulable modification to the node, and wait for the reflection
		schedNode, err := restClient.Nodes().Get(unSchedNode.Name)
		if err != nil {
			t.Fatalf("Failed to get node: %v", err)
		}
		mod.makeSchedulable(t, schedNode, nodeStore, restClient)

		// Wait until the pod is scheduled.
		err = wait.Poll(time.Second, time.Second*10, podScheduled(restClient, myPod.Namespace, myPod.Name))
		if err != nil {
			t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
		} else {
			t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
		}

		err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
		if err != nil {
			t.Errorf("Failed to delete pod: %v", err)
		}
		err = restClient.Nodes().Delete(schedNode.Name)
		if err != nil {
			t.Errorf("Failed to delete node: %v", err)
		}
	}
}
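// The test above depends on the nodeStateManager type and on the podScheduled
// and waitForReflection helpers, none of which appear in this excerpt. The
// sketches below only illustrate shapes consistent with how they are called
// (podScheduled is a wait.ConditionFunc builder that reports whether the pod
// has been bound to a node); the real definitions may differ in detail.
type nodeStateManagerSketch struct {
	makeUnSchedulable func(*testing.T, *api.Node, cache.Store, *client.Client)
	makeSchedulable   func(*testing.T, *api.Node, cache.Store, *client.Client)
}

// waitForReflectionSketch polls the scheduler's node store until the object at
// key satisfies passFunc, or gives up after an assumed 30-second deadline.
func waitForReflectionSketch(s cache.Store, key string, passFunc func(n interface{}) bool) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		n, _, err := s.GetByKey(key)
		if err != nil {
			// Treat lookup errors as "not yet reflected" and keep polling.
			return false, nil
		}
		return passFunc(n), nil
	})
}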
// This test suite can take a long time to run, so by default it is added to
// the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
var _ = Describe("Load capacity", func() {
	var c *client.Client
	var nodeCount int
	var ns string
	var configs []*RCConfig

	BeforeEach(func() {
		var err error
		c, err = loadClient()
		expectNoError(err)

		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		// Terminating a namespace (deleting the remaining objects from it - which
		// generally means events) can affect the current run. Thus we wait for all
		// terminating namespaces to be finally deleted before starting this test.
		err = deleteTestingNS(c)
		expectNoError(err)

		nsForTesting, err := createTestingNS("load", c)
		ns = nsForTesting.Name
		expectNoError(err)

		expectNoError(resetMetrics(c))
// rebootNode takes the node with the given name on the given provider through
// the following steps, using client c:
//  - ensures the node is ready
//  - ensures all pods on the node are running and ready
//  - reboots the node (by executing rebootCmd over ssh)
//  - ensures the node reaches some non-ready state
//  - ensures the node becomes ready again
//  - ensures all pods on the node become running and ready again
//
// It returns true through result only if all of the steps pass; at the first
// failed step, it returns false through result and does not run the rest.
// (A sketch of the node-readiness wait these steps rely on follows after this
// function.)
func rebootNode(c *client.Client, provider, name, rebootCmd string, result chan bool) {
	// Setup
	ns := api.NamespaceDefault
	ps := newPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(client.PodHost, name))
	defer ps.Stop()

	// Get the node initially.
	Logf("Getting %s", name)
	node, err := c.Nodes().Get(name)
	if err != nil {
		Logf("Couldn't get node %s", name)
		result <- false
		return
	}

	// Node sanity check: ensure it is "ready".
	if !waitForNodeToBeReady(c, name, nodeReadyInitialTimeout) {
		result <- false
		return
	}

	// Get all the pods on the node.
	pods := ps.List()
	podNames := make([]string, len(pods))
	for i, p := range pods {
		podNames[i] = p.ObjectMeta.Name
	}
	Logf("Node %s has %d pods: %v", name, len(podNames), podNames)

	// For each pod, we do a sanity check to ensure it's running / healthy
	// now, as that's what we'll be checking later.
	if !checkPodsRunningReady(c, ns, podNames, podReadyBeforeTimeout) {
		result <- false
		return
	}

	// Reboot the node.
	if err = issueSSHCommand(node, provider, rebootCmd); err != nil {
		Logf("Error while issuing ssh command: %v", err)
		result <- false
		return
	}

	// Wait for some kind of "not ready" status.
	if !waitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
		result <- false
		return
	}

	// Wait for some kind of "ready" status.
	if !waitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
		result <- false
		return
	}

	// Ensure all of the pods that we found on this node before the reboot are
	// running / healthy.
	if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
		result <- false
		return
	}

	Logf("Reboot successful on node %s", name)
	result <- true
}
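// waitForNodeToBeReady and waitForNodeToBeNotReady, used above, are not defined
// in this excerpt. A plausible shared implementation is sketched below, assuming
// both reduce to polling the node's NodeReady condition until it matches the
// wanted value or the timeout expires; the 5-second poll interval is an
// illustrative choice, not a value taken from this repository.
func waitForNodeReadinessSketch(c *client.Client, name string, wantReady bool, timeout time.Duration) bool {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		node, err := c.Nodes().Get(name)
		if err != nil {
			Logf("Couldn't get node %s: %v", name, err)
			continue
		}
		// The node counts as ready only if its NodeReady condition is ConditionTrue.
		isReady := false
		for _, cond := range node.Status.Conditions {
			if cond.Type == api.NodeReady {
				isReady = cond.Status == api.ConditionTrue
				break
			}
		}
		if isReady == wantReady {
			return true
		}
	}
	Logf("Node %s did not reach readiness=%t within %v", name, wantReady, timeout)
	return false
}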