Example #1
func testAppArmorNode() {
	BeforeEach(func() {
		By("Loading AppArmor profiles for testing")
		framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
	})
	Context("when running with AppArmor", func() {
		f := framework.NewDefaultFramework("apparmor-test")

		It("should reject an unloaded profile", func() {
			status := runAppArmorTest(f, "localhost/"+"non-existent-profile")
			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
		})
		It("should enforce a profile blocking writes", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"deny-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)

		})
		It("should enforce a permissive profile", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"audit-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
		})
	})
}
func updateIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string) (newIng *v1beta1.Ingress) {
	var err error
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	ingress := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name: FederatedIngressName,
		},
		Spec: v1beta1.IngressSpec{
			Backend: &v1beta1.IngressBackend{
				ServiceName: "updated-testingress-service",
				ServicePort: intstr.FromInt(80),
			},
		},
	}

	for retryCount := 0; retryCount < 3; retryCount++ {
		_, err = clientset.Extensions().Ingresses(namespace).Get(FederatedIngressName)
		if err != nil {
			framework.Failf("failed to get ingress %q: %v", FederatedIngressName, err)
		}
		newIng, err = clientset.Extensions().Ingresses(namespace).Update(ingress)
		if err == nil {
			describeIng(namespace)
			return
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update ingress %q: %v", FederatedIngressName, err)
		}
	}
	framework.Failf("too many retries updating ingress %q", FederatedIngressName)
	return newIng
}
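The get-then-update retry loop above reappears in the secret and daemonset helpers further down. A minimal generic sketch of that shared pattern follows; the name retryOnConflict and the updateFn callback are hypothetical and not part of the test framework, and the errors helpers are assumed to be the same ones the surrounding examples use.
// retryOnConflict is an illustrative helper, not part of the framework: it retries
// updateFn only while the error is a conflict or a server timeout, mirroring the
// retry loops in the *OrFail helpers in these examples.
func retryOnConflict(maxRetries int, updateFn func() error) error {
	var lastErr error
	for retryCount := 0; retryCount < maxRetries; retryCount++ {
		lastErr = updateFn()
		if lastErr == nil {
			return nil
		}
		// Only conflicts and server timeouts are worth retrying; anything else is fatal.
		if !errors.IsConflict(lastErr) && !errors.IsServerTimeout(lastErr) {
			return lastErr
		}
	}
	return fmt.Errorf("too many retries: %v", lastErr)
}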
Example #3
func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
	kubeconfig := framework.KubeConfig{}
	configBytes, err := ioutil.ReadFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err)
	err = yaml.Unmarshal(configBytes, &kubeconfig)
	framework.ExpectNoError(err)

	e2eContexts := []E2EContext{}
	for _, context := range kubeconfig.Contexts {
		if strings.HasPrefix(context.Name, "federation") && context.Name != framework.TestContext.FederatedKubeContext {
			user := kubeconfig.FindUser(context.Context.User)
			if user == nil {
				framework.Failf("Could not find user for context %+v", context)
			}

			cluster := kubeconfig.FindCluster(context.Context.Cluster)
			if cluster == nil {
				framework.Failf("Could not find cluster for context %+v", context)
			}

			dnsSubdomainName, err := GetValidDNSSubdomainName(context.Name)
			if err != nil {
				framework.Failf("Could not convert context name %s to a valid dns subdomain name, error: %s", context.Name, err)
			}
			e2eContexts = append(e2eContexts, E2EContext{
				RawName: context.Name,
				Name:    dnsSubdomainName,
				Cluster: cluster,
				User:    user,
			})
		}
	}

	return e2eContexts
}
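A hedged usage sketch of the helper above; the logging is illustrative and assumes an f of type *Framework is in scope.
// Illustrative only: iterate over the federated clusters discovered from the kubeconfig.
for _, e2eCtx := range f.GetUnderlyingFederatedContexts() {
	framework.Logf("Federated cluster %q (raw context name %q)", e2eCtx.Name, e2eCtx.RawName)
}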
Example #4
func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {

	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")
	assertFilesExist(fileNames, "results", pod, f.Client)

	// TODO: probe from the host, too.

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example #5
func updateSecretOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var err error
	var newSecret *v1.Secret
	secret := &v1.Secret{
		ObjectMeta: v1.ObjectMeta{
			Name: UpdatedFederatedSecretName,
		},
	}

	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
		_, err = clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example #6
func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.DaemonSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateDaemonSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var newDaemonSet *v1beta1.DaemonSet
	for retryCount := 0; retryCount < FederatedDaemonSetMaxRetries; retryCount++ {
		daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName)
		if err != nil {
			framework.Failf("failed to get daemonset %q: %v", FederatedDaemonSetName, err)
		}

		// Update the daemonset's annotations.
		daemonset.Annotations = map[string]string{"ccc": "ddd"}
		newDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Update(daemonset)
		if err == nil {
			return newDaemonSet
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update daemonset %q: %v", FederatedDaemonSetName, err)
		}
	}
	framework.Failf("too many retries updating daemonset %q", FederatedDaemonSetName)
	return newDaemonSet
}
Example #7
func (f *Framework) deleteFederationNs() {
	ns := f.FederationNamespace
	By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}

	clientset := f.FederationClientset
	// First delete the namespace from federation apiserver.
	// Also delete the corresponding namespaces from underlying clusters.
	orphanDependents := false
	if err := clientset.Core().Namespaces().Delete(ns.Name, &v1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
		framework.Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
	}
	// Verify that it got deleted.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := clientset.Core().Namespaces().Get(ns.Name, metav1.GetOptions{}); err != nil {
			if apierrors.IsNotFound(err) {
				return true, nil
			}
			framework.Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Couldn't delete ns %q: %s", ns.Name, err)
		} else {
			framework.Logf("Namespace %v was already deleted", ns.Name)
		}
	}
}
func updateSecretOrFail(clientset *fedclientset.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var newSecret *v1.Secret
	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
		secret, err := clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}

		// Update the secret's data.
		secret.Data = map[string][]byte{
			"key": []byte("value"),
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example #9
// Blocks outgoing network traffic on 'node'. Then runs testFunc and returns its status.
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *api.Node, testFunc func()) {
	host := framework.GetNodeExternalIP(node)
	master := framework.GetMasterAddress(c)
	By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
	defer func() {
		// This code will execute even if setting the iptables rule failed.
		// It is on purpose because we may have an error even if the new rule
		// had been inserted. (yes, we could look at the error code and ssh error
		// separately, but I prefer to stay on the safe side).
		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
		framework.UnblockNetwork(host, master)
	}()

	framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
	}
	framework.BlockNetwork(host, master)

	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
	}

	testFunc()
	// network traffic is unblocked in a deferred function
}
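A hedged sketch of a call site for the helper above; the node selection and the closure body are illustrative (not taken from a real test), and c and ns are assumed to be in scope.
// Illustrative call site: run a check while the chosen node is partitioned from the master.
nodes := framework.GetReadySchedulableNodesOrDie(c)
node := &nodes.Items[0]
testUnderTemporaryNetworkFailure(c, ns, node, func() {
	// Everything in this closure runs while traffic from the node to the master is blocked;
	// the deferred UnblockNetwork call restores connectivity once testFunc returns.
	framework.Logf("Node %s is currently partitioned from the master", node.Name)
})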
Example #10
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
	podClient.CreateSync(pod)
	time.Sleep(sleepAmount)
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	podName := pod.Name
	containerName := pod.Spec.Containers[0].Name

	By("getting restart delay-0")
	_, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}

	By("getting restart delay-1")
	delay1, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}

	By("getting restart delay-2")
	delay2, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}
	return delay1, delay2
}
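getRestartDelay is not shown in this example. A rough sketch of what such a helper might do, assuming it polls the container status and reports the gap between the previous termination and the next start; the polling interval, timeout, and field choices below are guesses, not the framework's actual implementation.
// getRestartDelaySketch is a hypothetical stand-in for getRestartDelay: poll the container
// status until a restart is observed and return the back-off gap between the previous
// termination and the next start.
func getRestartDelaySketch(podClient *framework.PodClient, podName, containerName string) (time.Duration, error) {
	begin := time.Now()
	for time.Since(begin) < 10*time.Minute {
		time.Sleep(time.Second)
		pod, err := podClient.Get(podName, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
		status := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName)
		// A restart has been observed once the container is running again after a termination,
		// with the new start happening while we were watching.
		if status.State.Running != nil && status.LastTerminationState.Terminated != nil &&
			status.State.Running.StartedAt.Time.After(begin) {
			return status.State.Running.StartedAt.Sub(status.LastTerminationState.Terminated.FinishedAt.Time), nil
		}
	}
	return 0, fmt.Errorf("timed out waiting for container %s in pod %s to restart", containerName, podName)
}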
Example #11
func (rc *ResourceConsumer) GetReplicas() int {
	switch rc.kind {
	case kindRC:
		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if replicationController == nil {
			framework.Failf(rcIsNil)
		}
		return int(replicationController.Status.Replicas)
	case kindDeployment:
		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if deployment == nil {
			framework.Failf(deploymentIsNil)
		}
		return int(deployment.Status.Replicas)
	case kindReplicaSet:
		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if rs == nil {
			framework.Failf(rsIsNil)
		}
		return int(rs.Status.Replicas)
	default:
		framework.Failf(invalidKind)
	}
	return 0
}
Example #12
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
// that belongs to replication controller 'rcName', really disappeared.
// Finally, it checks that the replication controller recreates the
// pods on another node and that the number of replicas is now equal to 'replicas'.
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) {
	host := getNodeExternalIP(node)
	master := getMaster(c)
	By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
	defer func() {
		// This code will execute even if setting the iptables rule failed.
		// It is on purpose because we may have an error even if the new rule
		// had been inserted. (yes, we could look at the error code and ssh error
		// separately, but I prefer to stay on the safe side).
		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
		framework.UnblockNetwork(host, master)
	}()

	framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
	}
	framework.BlockNetwork(host, master)

	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
	}

	framework.Logf("Waiting for pod %s to be removed", podNameToDisappear)
	err := framework.WaitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)
	Expect(err).NotTo(HaveOccurred())

	By("verifying whether the pod from the unreachable node is recreated")
	err = framework.VerifyPods(c, ns, rcName, true, replicas)
	Expect(err).NotTo(HaveOccurred())

	// network traffic is unblocked in a deferred function
}
// getTestNodeInfo fetches the capacity of a node from the API server and returns a map of labels.
func getTestNodeInfo(f *framework.Framework, testName string) map[string]string {
	nodeName := framework.TestContext.NodeName
	node, err := f.ClientSet.Core().Nodes().Get(nodeName)
	Expect(err).NotTo(HaveOccurred())

	cpu, ok := node.Status.Capacity["cpu"]
	if !ok {
		framework.Failf("Fail to fetch CPU capacity value of test node.")
	}

	memory, ok := node.Status.Capacity["memory"]
	if !ok {
		framework.Failf("Fail to fetch Memory capacity value of test node.")
	}

	cpuValue, ok := cpu.AsInt64()
	if !ok {
		framework.Failf("Fail to fetch CPU capacity value as Int64.")
	}

	memoryValue, ok := memory.AsInt64()
	if !ok {
		framework.Failf("Fail to fetch Memory capacity value as Int64.")
	}

	return map[string]string{
		"node":    nodeName,
		"test":    testName,
		"image":   node.Status.NodeInfo.OSImage,
		"machine": fmt.Sprintf("cpu:%dcore,memory:%.1fGB", cpuValue, float32(memoryValue)/(1024*1024*1024)),
	}
}
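The cpu and memory capacities above are resource.Quantity values, and AsInt64 only succeeds when the quantity fits losslessly into an int64. A small illustration of that conversion, with made-up capacities:
// Made-up capacities purely to illustrate resource.Quantity conversion.
cpuQty := resource.MustParse("8")    // 8 CPU cores
memQty := resource.MustParse("16Gi") // 16 GiB of memory
if cores, ok := cpuQty.AsInt64(); ok {
	fmt.Printf("cpu: %d cores\n", cores)
}
if memBytes, ok := memQty.AsInt64(); ok {
	fmt.Printf("memory: %.1f GB\n", float32(memBytes)/(1024*1024*1024))
}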
Example #14
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
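The hand-rolled polling loop above can also be expressed with the wait helpers already used elsewhere in these examples (see deleteFederationNs). A hedged equivalent as a hypothetical helper; it returns an error instead of calling Failf directly.
// Hypothetical helper, not part of the framework: wait for a pod's hostIP using wait.PollImmediate.
func waitForHostIP(c *client.Client, ns, podName string, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		p, err := c.Pods(ns).Get(podName)
		if err != nil {
			return false, err
		}
		if p.Status.HostIP != "" {
			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			return true, nil
		}
		return false, nil
	})
}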
Example #15
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {

	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}
	// Try to find the expected value for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example #16
func runPortForward(ns, podName string, port int) *portForwardCommand {
	cmd := framework.KubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), podName, fmt.Sprintf(":%d", port))
	// This is somewhat ugly but is the only way to retrieve the port that was picked
	// by the port-forward command. We don't want to hard code the port as we have no
	// way of guaranteeing we can pick one that isn't in use, particularly on Jenkins.
	framework.Logf("starting port-forward command and streaming output")
	_, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		framework.Failf("Failed to start port-forward command: %v", err)
	}

	buf := make([]byte, 128)
	var n int
	framework.Logf("reading from `kubectl port-forward` command's stderr")
	if n, err = stderr.Read(buf); err != nil {
		framework.Failf("Failed to read from kubectl port-forward stderr: %v", err)
	}
	portForwardOutput := string(buf[:n])
	match := portForwardRegexp.FindStringSubmatch(portForwardOutput)
	if len(match) != 2 {
		framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput)
	}

	listenPort, err := strconv.Atoi(match[1])
	if err != nil {
		framework.Failf("Error converting %s to an int: %v", match[1], err)
	}

	return &portForwardCommand{
		cmd:  cmd,
		port: listenPort,
	}
}
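portForwardRegexp is defined outside this snippet. kubectl port-forward announces the chosen local port with a line such as "Forwarding from 127.0.0.1:<port> -> <port>", so the pattern presumably captures that first port; an assumed example of what it could look like:
// Assumed shape of the pattern; the actual portForwardRegexp may differ.
// A single capture group keeps len(match) == 2, matching the check above.
var portForwardRegexp = regexp.MustCompile(`Forwarding from 127\.0\.0\.1:(\d+) -> \d+`)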
Example #17
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)

	// Wait until the pod is not pending. (Here we need to check for something other than
	// 'Pending', rather than waiting for 'Running', since when failures occur we go to
	// 'Terminated', which can cause indefinite blocking.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop if we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if observed any restart.
	// If we expected n restarts (n > 0), fail if we observed < n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
Example #18
func observeCreation(w watch.Interface) {
	select {
	case event, _ := <-w.ResultChan():
		if event.Type != watch.Added {
			framework.Failf("Failed to observe the creation: %v", event)
		}
	case <-time.After(30 * time.Second):
		framework.Failf("Timeout while waiting for observing the creation")
	}
}
Example #19
func observePodCreation(w watch.Interface) {
	select {
	case event, _ := <-w.ResultChan():
		if event.Type != watch.Added {
			framework.Failf("Failed to observe pod creation: %v", event)
		}
	case <-time.After(framework.PodStartTimeout):
		framework.Failf("Timeout while waiting for pod creation")
	}
}
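Both helpers expect a watch.Interface that was opened before the object is created. A hedged usage sketch, assuming c, ns, and a label selector are in scope; the client calls shown are illustrative.
// Illustrative only: open the watch first, create the pod, then assert on the first event.
options := api.ListOptions{LabelSelector: selector}
w, err := c.Pods(ns).Watch(options)
if err != nil {
	framework.Failf("Failed to set up watch: %v", err)
}
// ... create the pod being watched here ...
observePodCreation(w)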
Example #20
func (config *KubeletManagedHostConfig) createPod(podSpec *api.Pod) *api.Pod {
	createdPod, err := config.getPodClient().Create(podSpec)
	if err != nil {
		framework.Failf("Failed to create %s pod: %v", podSpec.Name, err)
	}
	framework.ExpectNoError(config.f.WaitForPodRunning(podSpec.Name))
	createdPod, err = config.getPodClient().Get(podSpec.Name)
	if err != nil {
		framework.Failf("Failed to retrieve %s pod: %v", podSpec.Name, err)
	}
	return createdPod
}
Example #21
func (config *PrivilegedPodTestConfig) createPod(pod *api.Pod) *api.Pod {
	createdPod, err := config.getPodClient().Create(pod)
	if err != nil {
		framework.Failf("Failed to create %q pod: %v", pod.Name, err)
	}
	framework.ExpectNoError(config.f.WaitForPodRunning(pod.Name))
	createdPod, err = config.getPodClient().Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to retrieve %q pod: %v", pod.Name, err)
	}
	return createdPod
}
Example #22
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once.  The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	totalPods := len(nodes.Items)
	Expect(totalPods).NotTo(Equal(0))

	kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

	appName := "logging-soak" + podname
	podLabels := f.CreatePodsPerNodeForSimpleApp(
		appName,
		func(n v1.Node) v1.PodSpec {
			return v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "logging-soak",
					Image: "gcr.io/google_containers/busybox:1.24",
					Args: []string{
						"/bin/sh",
						"-c",
						fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
					},
				}},
				NodeName:      n.Name,
				RestartPolicy: v1.RestartPolicyAlways,
			}
		},
		totalPods,
	)

	logSoakVerification := f.NewClusterVerification(
		f.Namespace,
		framework.PodStateVerification{
			Selectors:   podLabels,
			ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
			// we don't validate total log data, since there is no guarantee all logs will be stored forever.
			// instead, we just validate that some logs are being created in stdout.
			Verify: func(p v1.Pod) (bool, error) {
				s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
				return s != "", err
			},
		},
	)

	largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)

	if err != nil {
		framework.Failf("Error in wait... %v", err)
	} else if len(pods) < totalPods {
		framework.Failf("Only got %v out of %v", len(pods), totalPods)
	}
}
Example #23
func verifyDNSPodIsRunning(f *framework.Framework) {
	systemClient := f.Client.Pods(api.NamespaceSystem)
	By("Waiting for DNS Service to be Running")
	options := api.ListOptions{LabelSelector: dnsServiceLabelSelector}
	dnsPods, err := systemClient.List(options)
	if err != nil {
		framework.Failf("Failed to list all dns service pods")
	}
	if len(dnsPods.Items) != 1 {
		framework.Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLabelSelector.String())
	}
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
}
Example #24
func assertManagedStatus(
	config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
	// See https://github.com/kubernetes/kubernetes/issues/27023
	//
	// Retry until timeout for the right contents of /etc/hosts to show
	// up. There may be a low probability race here. We still fail the
	// test if retry was necessary, but at least we will know whether or
	// not it resolves or seems to be a permanent condition.
	//
	// If /etc/hosts is properly mounted, then this will succeed
	// immediately.
	const retryTimeout = 30 * time.Second

	retryCount := 0
	etcHostsContent := ""
	matched := false

	for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
		etcHostsContent = config.getEtcHostsContent(podName, name)
		isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)

		if expectedIsManaged == isManaged {
			matched = true
			break
		}

		glog.Errorf(
			"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
			podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)

		retryCount++
		time.Sleep(100 * time.Millisecond)
	}

	if retryCount > 0 {
		if matched {
			conditionText := "should"
			if !expectedIsManaged {
				conditionText = "should not"
			}

			framework.Failf(
				"/etc/hosts file %s be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
				conditionText, name, retryCount, etcHostsContent)
		} else {
			framework.Failf(
				"had to retry %d times to get matching content in /etc/hosts (name: %s)",
				retryCount, name)
		}
	}
}
Example #25
func verifyDNSPodIsRunning(f *framework.Framework) {
	systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
	By("Waiting for DNS Service to be Running")
	options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
	dnsPods, err := systemClient.List(options)
	if err != nil {
		framework.Failf("Failed to list all dns service pods")
	}
	if len(dnsPods.Items) < 1 {
		framework.Failf("No pods match the label selector %v", dnsServiceLabelSelector.String())
	}
	pod := dnsPods.Items[0]
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod))
}
Example #26
func deleteAllTestNamespaces(clientset *federation_release_1_4.Clientset) {
	list, err := clientset.Core().Namespaces().List(api.ListOptions{})
	if err != nil {
		framework.Failf("Failed to get all namespaes: %v", err)
		return
	}
	for _, namespace := range list.Items {
		if strings.HasPrefix(namespace.Name, namespacePrefix) {
			err := clientset.Core().Namespaces().Delete(namespace.Name, &api.DeleteOptions{})
			if err != nil {
				framework.Failf("Failed to delete %s: %v", namespace.Name, err)
			}
		}
	}
}
Example #27
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	Expect(f.WaitForPodRunning(pod.Name)).To(BeNil())
	Expect(wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := podClient.Get(pod.Name)
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case api.PodSucceeded:
			return true, nil
		case api.PodFailed:
			return false, fmt.Errorf("pod failed")
		default:
			return false, nil
		}
	})).To(BeNil())

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")

	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example #28
func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
	if expected == nil {
		return
	}
	var errList []string
	for nodeName, perNodeSummary := range actual {
		var nodeErrs []string
		for cName, expectedResult := range expected {
			perContainerSummary, ok := perNodeSummary[cName]
			if !ok {
				nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
				continue
			}
			for p, expectedValue := range expectedResult {
				actualValue, ok := perContainerSummary[p]
				if !ok {
					nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing percentile %v", cName, p))
					continue
				}
				if actualValue > expectedValue {
					nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected %.0fth%% usage < %.3f; got %.3f",
						cName, p*100, expectedValue, actualValue))
				}
			}
		}
		if len(nodeErrs) > 0 {
			errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
		}
	}
	if len(errList) > 0 {
		framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
	}
}
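The expected argument maps a container name to per-percentile CPU limits, as the nested loops above imply (percentile -> allowed usage in cores). A hedged illustration of that shape; the container names and numbers are made up, and actualSummary is assumed to come from the framework's resource gatherer.
// Made-up limits purely to show the expected shape: container name -> percentile -> cores.
expectedCPU := framework.ContainersCPUSummary{
	"kubelet": {0.50: 0.35, 0.95: 0.50},
	"runtime": {0.50: 0.30, 0.95: 0.40},
}
verifyCPULimits(expectedCPU, actualSummary)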
Example #29
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
	if expected == nil {
		return
	}
	var errList []string
	for nodeName, nodeSummary := range actual {
		var nodeErrs []string
		for cName, expectedResult := range expected {
			container, ok := nodeSummary[cName]
			if !ok {
				nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: missing", cName))
				continue
			}

			expectedValue := expectedResult.MemoryRSSInBytes
			actualValue := container.MemoryRSSInBytes
			if expectedValue != 0 && actualValue > expectedValue {
				nodeErrs = append(nodeErrs, fmt.Sprintf("container %q: expected RSS memory (MB) < %d; got %d",
					cName, expectedValue, actualValue))
			}
		}
		if len(nodeErrs) > 0 {
			errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
			heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
			if err != nil {
				framework.Logf("Unable to get heap stats from %q", nodeName)
			} else {
				framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
			}
		}
	}
	if len(errList) > 0 {
		framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
	}
}
Example #30
func testReboot(c *client.Client, rebootCmd string) {
	// Get all nodes, and kick off the test on each.
	nodelist := framework.GetReadySchedulableNodesOrDie(c)
	result := make([]bool, len(nodelist.Items))
	wg := sync.WaitGroup{}
	wg.Add(len(nodelist.Items))

	failed := false
	for ix := range nodelist.Items {
		go func(ix int) {
			defer wg.Done()
			n := nodelist.Items[ix]
			result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
			if !result[ix] {
				failed = true
			}
		}(ix)
	}

	// Wait for all to finish and check the final result.
	wg.Wait()

	if failed {
		for ix := range nodelist.Items {
			n := nodelist.Items[ix]
			if !result[ix] {
				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
			}
		}
		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
	}
}
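Note that failed is written concurrently by the goroutines above, which the Go race detector will flag even though the value is only read after wg.Wait(). Since each goroutine already records its outcome in its own result slot, a race-free sketch can simply derive the flag after waiting:
// Sketch of a race-free variant: goroutines only write result[ix]; the aggregate
// flag is computed once all of them have finished.
wg.Wait()
failed := false
for ix := range result {
	if !result[ix] {
		failed = true
	}
}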