Example 1
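Creates a pod with the given AppArmor profile and returns its status. If the profile is expected to be allowed, the helper waits for the pod to finish running; otherwise it watches the pod until its status Reason is set to "AppArmor", treating deletion of the pod as a failure.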
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *v1.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return p.Status
}
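For reference, the call sites in these examples suggest a helper signature roughly like the one sketched below. This is inferred from the calls above, not copied from the framework source, and the annotated call is a sketch of Example 1's usage with assumed parameter meanings:

// Inferred from the call sites in these examples (not the actual framework source):
//   WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface,
//       podName, namespace, resourceVersion string, timeout time.Duration) error
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(
	f.ClientSet,               // cluster client from the e2e framework
	pod.Name,                  // name of the pod to wait on
	f.Namespace.Name,          // namespace the pod was created in
	"",                        // resourceVersion; passed as "" in every example here
	framework.PodStartTimeout, // the pod must start before it can stop, hence the start timeout
)
framework.ExpectNoError(err)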
Example 2
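An older variant of the same test helper, written against the legacy client API: it uses f.Client instead of f.ClientSet, api.PodStatus instead of v1.PodStatus, and a Get call without GetOptions.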
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	// The pod needs to start before it stops, so wait for the longer start timeout.
	framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
		f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 3
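A fragment from a node test that runs a short-lived busybox pod (RestartPolicy: Never), waits for it to finish, and then checks the ImageID reported in its container status.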
		podDesc := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-with-repodigest",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:    "test",
					Image:   busyBoxImage,
					Command: []string{"sh"},
				}},
				RestartPolicy: v1.RestartPolicyNever,
			},
		}

		pod := f.PodClient().Create(podDesc)

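		// Wait for the pod to finish running, then re-fetch it to inspect its final status.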
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
		runningPod, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		status := runningPod.Status

		if len(status.ContainerStatuses) == 0 {
			framework.Failf("Unexpected pod status; %s", spew.Sdump(status))
			return
		}

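		// The container status should report the image by its pullable repo digest.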
		Expect(status.ContainerStatuses[0].ImageID).To(Equal(dockertools.DockerPullablePrefix + busyBoxImage))
	})
})
Example 4
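A fragment from a StatefulSet network-partition test. Here the helper is called with a 10-minute timeout and is expected to time out: the assertion that err equals wait.ErrWaitTimeout proves the pod was not force-deleted while the node's network was down.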
			_, err := c.Apps().StatefulSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := statefulSetTester{c: c}
			pst.waitForRunningAndReady(ps.Spec.Replicas, ps)

			pod := pst.getPodList(ps).Items[0]
			node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
			framework.ExpectNoError(err)

			// Blocks outgoing network traffic on 'node', then verifies that the pod,
			// which belongs to the StatefulSet 'ps', **does not** disappear due to forced deletion from the apiserver.
			// The grace period on the StatefulSet's pods is set to a value > 0.
			testUnderTemporaryNetworkFailure(c, ns, node, func() {
				framework.Logf("Checking that the NodeController does not force delete pet %v", pod.Name)
				err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, pod.ResourceVersion, 10*time.Minute)
				Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
			})

			framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
			if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
				framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
			}

			By("waiting for pods to be running again")
			pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
		})
	})

	framework.KubeDescribe("[Job]", func() {
		It("should create new pods when node is partitioned", func() {