Example #1
// recoverDiskSpace recovers disk space on a given node by removing the large
// file previously created to fill it.
func recoverDiskSpace(c *client.Client, node *api.Node) {
	By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
	cmd := "rm -f test.img"
	framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))

	ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut)
	Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut)
}
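A helper like this is usually paired with test cleanup so a node is restored even when an expectation fails mid-test. A minimal sketch of that wiring, assuming a Ginkgo suite built on the e2e framework (the filledNodes bookkeeping is hypothetical, not shown on this page):

var _ = framework.KubeDescribe("NodeOutOfDisk", func() {
	f := framework.NewDefaultFramework("node-outofdisk")
	var c *client.Client
	// filledNodes is hypothetical bookkeeping for the nodes this suite filled.
	var filledNodes []*api.Node

	BeforeEach(func() {
		c = f.Client
	})

	AfterEach(func() {
		// Recover unconditionally so a failed expectation in a test body
		// cannot leave test.img behind on a node.
		for _, node := range filledNodes {
			recoverDiskSpace(c, node)
		}
	})
})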
Example #2
// fillDiskSpace fills the available disk space on a given node by creating a large file. The disk
// space on the node is filled in such a way that the available space after filling the disk is just
// below the lowDiskSpaceThreshold mark.
func fillDiskSpace(c *client.Client, node *api.Node) {
	avail, err := availSize(c, node)
	framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)

	fillSize := (avail - lowDiskSpaceThreshold + (100 * mb))

	framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
	By(fmt.Sprintf("Node %s: creating a file of size %d bytes to fill the available disk space", node.Name, fillSize))

	cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize)
	framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))

	ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut)
	Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut)

	avail, err = availSize(c, node)
	framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
	Expect(avail < lowDiskSpaceThreshold).To(BeTrue())
}
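fillDiskSpace depends on an availSize helper that is not shown on this page. A plausible sketch, assuming the filesystem to fill is the one backing /var/lib/docker and that framework.SSH and framework.GetNodeExternalIP are available; the real helper's df invocation may differ:

// availSize returns the available disk space on the given node, in bytes.
// Sketch only: it runs df over SSH and parses the available-bytes column.
func availSize(c *client.Client, node *api.Node) (uint64, error) {
	cmd := "df --output=avail --block-size=1 /var/lib/docker | tail -n 1"
	result, err := framework.SSH(cmd, framework.GetNodeExternalIP(node), framework.TestContext.Provider)
	if err != nil {
		return 0, fmt.Errorf("SSH to node %s failed: %v", node.Name, err)
	}
	avail, err := strconv.ParseUint(strings.TrimSpace(result.Stdout), 10, 64)
	if err != nil {
		return 0, fmt.Errorf("couldn't parse df output %q: %v", result.Stdout, err)
	}
	return avail, nil
}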
Example #3
// rebootNode takes the named node on the given provider through the following
// steps, using client c:
//  - ensures the node is ready
//  - ensures all pods on the node are running and ready
//  - reboots the node (by executing rebootCmd over ssh)
//  - ensures the node reaches some non-ready state
//  - ensures the node becomes ready again
//  - ensures all pods on the node become running and ready again
//
// It returns true only if every step passes; at the first failed step it
// returns false and skips the remaining steps.
func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
	// Setup
	ns := api.NamespaceSystem
	ps := framework.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
	defer ps.Stop()

	// Get the node initially.
	framework.Logf("Getting %s", name)
	node, err := c.Nodes().Get(name)
	if err != nil {
		framework.Logf("Couldn't get node %s", name)
		return false
	}

	// Node sanity check: ensure it is "ready".
	if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
		return false
	}

	// Get all the pods on the node that don't have liveness probe set.
	// Liveness probe may cause restart of a pod during node reboot, and the pod may not be running.
	pods := ps.List()
	podNames := []string{}
	for _, p := range pods {
		probe := false
		for _, c := range p.Spec.Containers {
			if c.LivenessProbe != nil {
				probe = true
				break
			}
		}
		if !probe {
			podNames = append(podNames, p.ObjectMeta.Name)
		}
	}
	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)

	// For each pod, we do a sanity check to ensure it's running / healthy
	// or succeeded now, as that's what we'll be checking later.
	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
		printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
		return false
	}

	// Reboot the node.
	if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
		framework.Logf("Error while issuing ssh command: %v", err)
		return false
	}

	// Wait for some kind of "not ready" status.
	if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
		return false
	}

	// Wait for some kind of "ready" status.
	if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
		return false
	}

	// Ensure all of the pods that we found on this node before the reboot are
	// running / healthy, or succeeded.
	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
		newPods := ps.List()
		printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
		return false
	}

	framework.Logf("Reboot successful on node %s", name)
	return true
}
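A caller typically fans rebootNode out over every schedulable node in parallel and fails the test if any node misses a step. A minimal sketch of such a driver; the name testReboot and the exact fan-out follow common e2e patterns rather than code shown on this page:

func testReboot(c *client.Client, rebootCmd string) {
	nodelist := framework.GetReadySchedulableNodesOrDie(c)
	result := make([]bool, len(nodelist.Items))
	wg := sync.WaitGroup{}
	wg.Add(len(nodelist.Items))

	for ix := range nodelist.Items {
		// Reboot the nodes in parallel; each goroutine records its outcome.
		go func(ix int) {
			defer wg.Done()
			name := nodelist.Items[ix].ObjectMeta.Name
			result[ix] = rebootNode(c, framework.TestContext.Provider, name, rebootCmd)
		}(ix)
	}
	wg.Wait()

	for ix, ok := range result {
		if !ok {
			framework.Failf("Node %s failed reboot test", nodelist.Items[ix].ObjectMeta.Name)
		}
	}
}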
Example #4
			// Excerpt begins mid-setup: a node has already been picked from the
			// cluster node list (the selection loop is not part of this excerpt).
			Expect(node).NotTo(BeNil())
			By("Generate event list options")
			selector := fields.Set{
				"involvedObject.kind":      "Node",
				"involvedObject.name":      node.Name,
				"involvedObject.namespace": api.NamespaceAll,
				"source":                   source,
			}.AsSelector()
			eventListOptions = api.ListOptions{FieldSelector: selector}
			By("Create the test log file")
			tmpDir = "/tmp/" + name
			cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
			Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
			By("Create config map for the node problem detector")
			_, err = c.ConfigMaps(ns).Create(&api.ConfigMap{
				ObjectMeta: api.ObjectMeta{
					Name: configName,
				},
				Data: map[string]string{configFile: config},
			})
			Expect(err).NotTo(HaveOccurred())
			By("Create the node problem detector")
			_, err = c.Pods(ns).Create(&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: name,
				},
				Spec: api.PodSpec{
					NodeName:        node.Name,
					// ... (remainder of the pod spec is truncated in this excerpt)
Example #5
			// Excerpt begins mid-setup: a node has already been picked from the
			// cluster node list (the selection loop is not part of this excerpt).
			Expect(node).NotTo(BeNil())
			By("Generate event list options")
			selector := fields.Set{
				"involvedObject.kind":      "Node",
				"involvedObject.name":      node.Name,
				"involvedObject.namespace": v1.NamespaceAll,
				"source":                   source,
			}.AsSelector().String()
			eventListOptions = v1.ListOptions{FieldSelector: selector}
			By("Create the test log file")
			tmpDir = "/tmp/" + name
			cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
			Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
			By("Create config map for the node problem detector")
			_, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: configName,
				},
				Data: map[string]string{configFile: config},
			})
			Expect(err).NotTo(HaveOccurred())
			By("Create the node problem detector")
			_, err = c.Core().Pods(ns).Create(&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
				},
				Spec: v1.PodSpec{
					NodeName:        node.Name,
					// ... (remainder of the pod spec is truncated in this excerpt)
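eventListOptions is built above but never consumed inside this excerpt; in the full test it drives polling for events reported against the node. A sketch of such a poll with a hypothetical verifyEvents helper (the namespace, timeouts, and matching logic are assumptions):

// verifyEvents polls until exactly num events matching reason and message are
// visible through options. Hypothetical helper; the real polling logic is not
// part of this excerpt.
func verifyEvents(c clientset.Interface, options v1.ListOptions, num int, reason, message string) error {
	return wait.Poll(10*time.Second, 2*time.Minute, func() (bool, error) {
		events, err := c.Core().Events(v1.NamespaceDefault).List(options)
		if err != nil {
			return false, err
		}
		count := 0
		for _, e := range events.Items {
			if e.Reason == reason && e.Message == message {
				count += int(e.Count)
			}
		}
		return count == num, nil
	})
}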