Example #1
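// masterExec runs cmd on the master host over SSH and fails the test if the
// SSH call errors or the command exits with a non-zero code.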
func masterExec(cmd string) {
	result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
	Expect(err).NotTo(HaveOccurred())
	if result.Code != 0 {
		framework.LogSSHResult(result)
		framework.Failf("master exec command returned non-zero")
	}
}
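A minimal usage sketch of masterExec follows; the systemctl invocation and unit name are assumptions for illustration, not taken from this example.
func restartSchedulerExample() {
	// Hypothetical disruptive-test step: restart a control-plane component on the
	// master and check that it reports as active again (unit name is illustrative).
	masterExec("sudo systemctl restart kube-scheduler")
	masterExec("sudo systemctl is-active kube-scheduler")
}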
// testVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
	nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
	Expect(err).NotTo(HaveOccurred())
	nodeIP = nodeIP + ":22"

	By("Expecting the volume mount to be found.")
	result, err := framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider)
	Expect(err).NotTo(HaveOccurred())
	Expect(result.Code).To(BeZero())

	By("Restarting the kubelet.")
	kubeletCommand(kStop, c, clientPod)
	deletePod(f, c, clientPod.Namespace, clientPod)
	kubeletCommand(kStart, c, clientPod)

	By("Expecting the volume mount not to be found.")
	result, err = framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider)
	Expect(err).NotTo(HaveOccurred())
	Expect(result.Code).NotTo(BeZero())

	framework.Logf("Volume mount detected on pod and written file is readable post-restart.")
}
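A hypothetical call site for the helper above, assuming the surrounding test has already created the PV, PVC, and client pod (the Ginkgo wiring and fixture variables are illustrative):
It("should unmount a volume if the pod is deleted while the kubelet is down [Disruptive]", func() {
	testVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
})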
// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod.
// Allowed kubeletOps are `kStart`, `kStop`, and `kRestart`.
func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) {
	nodeIP, err := framework.GetHostExternalAddress(c, pod)
	Expect(err).NotTo(HaveOccurred())
	nodeIP = nodeIP + ":22"
	sshResult, err := framework.SSH("sudo /etc/init.d/kubelet "+string(kOp), nodeIP, framework.TestContext.Provider)
	Expect(err).NotTo(HaveOccurred())
	framework.LogSSHResult(sshResult)

	// On restart, waiting for the node to become NotReady first prevents a race condition in which the node
	// takes a few moments to leave the Ready state, which would otherwise let WaitForNodeToBeReady() return immediately.
	if kOp == kStop || kOp == kRestart {
		if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
		}
	}
	if kOp == kStart || kOp == kRestart {
		if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
			framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
		}
	}
}
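The kubeletOpt type and the kStart/kStop/kRestart constants are referenced above but not defined in this example; a plausible definition consistent with the `/etc/init.d/kubelet` invocation would be:
type kubeletOpt string

const (
	kStart   kubeletOpt = "start"
	kStop    kubeletOpt = "stop"
	kRestart kubeletOpt = "restart"
)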
// nodeExec runs the given cmd on the node via SSH. Note that nodeName must be an SSH-able
// address, e.g. the name returned by framework.GetMasterHost(). This is not guaranteed to work
// across cloud providers since it relies on SSH.
func nodeExec(nodeName, cmd string) (framework.SSHResult, error) {
	result, err := framework.SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), framework.TestContext.Provider)
	Expect(err).NotTo(HaveOccurred())
	return result, err
}
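sshPort is likewise not defined in this excerpt; a plausible definition plus a usage sketch (the target node and command are illustrative):
const sshPort = "22"

func nodeExecExample() {
	// Hypothetical: inspect kubelet status on the master host.
	result, _ := nodeExec(framework.GetMasterHost(), "sudo systemctl status kubelet")
	framework.LogSSHResult(result)
}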
Example #5
		for i, testCase := range testCases {
			// Only run the first testcase against max 100 nodes. Run
			// the rest against the first node we find only, since
			// they're basically testing SSH semantics (and we don't
			// need to do that against each host in the cluster).
			nodes := len(hosts)
			if i > 0 {
				nodes = 1
			} else if nodes > maxNodes {
				nodes = maxNodes
			}
			testhosts := hosts[:nodes]
			By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))

			for _, host := range testhosts {
				result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider)
				stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
				if err != testCase.expectedError {
					framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
				}
				if testCase.checkStdout && stdout != testCase.expectedStdout {
					framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
				}
				if stderr != testCase.expectedStderr {
					framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
				}
				if result.Code != testCase.expectedCode {
					framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
				}
				// Show stdout, stderr for logging purposes.
				if len(stdout) > 0 {
					framework.Logf("Got stdout from %s: %s", host, stdout)
				}
				if len(stderr) > 0 {
					framework.Logf("Got stderr from %s: %s", host, stderr)
				}
			}
		}
			By("Wait for node problem detector running")
			Expect(f.WaitForPodRunning(name)).To(Succeed())
			// Get the node time
			nodeIP := framework.GetNodeExternalIP(node)
			result, err := framework.SSH("date '+%FT%T.%N%:z'", nodeIP, framework.TestContext.Provider)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(result.Code).Should(BeZero())
			nodeTime, err = time.Parse(time.RFC3339, strings.TrimSpace(result.Stdout))
			Expect(err).ShouldNot(HaveOccurred())
		})
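As a note on the date invocation above: `date '+%FT%T.%N%:z'` emits an RFC 3339-style timestamp with nanoseconds, which time.Parse accepts with the time.RFC3339 layout even though that layout has no fractional-seconds field. A small standalone sketch (the sample value is illustrative):
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative sample of the node's `date '+%FT%T.%N%:z'` output.
	s := "2017-08-07T13:45:12.123456789+02:00"
	t, err := time.Parse(time.RFC3339, s)
	fmt.Println(t, err) // fractional seconds are accepted when parsing, even if absent from the layout
}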

		It("should generate node condition and events for corresponding errors", func() {
			for _, test := range []struct {
				description      string
				timestamp        time.Time
				message          string
				messageNum       int
				events           int
				conditionReason  string
				conditionMessage string