// TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())

	return &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "qingyuan/mounttest:0.2",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes:       mount(source),
		},
	}
}
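// Illustrative usage sketch (not from the source): mount the node's /tmp into
// the test pod. The host path, mount path, and the client/namespace arguments
// are assumptions chosen for the example; in the real tests they come from the
// surrounding e2e setup.
func exampleHostVolUsage(c *client.Client, ns string) {
	source := &api.HostPathVolumeSource{Path: "/tmp"}
	pod := testPodWithHostVol("/mount-tmp", source)
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err, "creating host-path test pod")
}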
// Create a LimitRange object
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	limitRange, ok := obj.(*api.LimitRange)
	if !ok {
		return nil, fmt.Errorf("invalid object type")
	}
	if !api.ValidNamespace(ctx, &limitRange.ObjectMeta) {
		return nil, errors.NewConflict("limitRange", limitRange.Namespace, fmt.Errorf("LimitRange.Namespace does not match the provided context"))
	}
	if len(limitRange.Name) == 0 {
		limitRange.Name = string(util.NewUUID())
	}
	if errs := validation.ValidateLimitRange(limitRange); len(errs) > 0 {
		return nil, errors.NewInvalid("limitRange", limitRange.Name, errs)
	}
	api.FillObjectMetaSystemFields(ctx, &limitRange.ObjectMeta)
	err := rs.registry.CreateWithName(ctx, limitRange.Name, limitRange)
	if err != nil {
		return nil, err
	}
	return rs.registry.Get(ctx, limitRange.Name)
}
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, pod *api.Pod) {
	ns := "e2e-test-" + string(util.NewUUID())
	_, err := createNamespaceIfDoesNotExist(c, ns)
	expectNoError(err, fmt.Sprintf("creating namespace %s", ns))

	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, nil)
	_, err = podClient.Create(pod)
	if err != nil {
		Fail(fmt.Sprintf("Failed to create pod: %v", err))
	}

	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err = waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())

	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds", p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
func testPDPod(diskName, targetHost string, readOnly bool) *api.Pod {
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: "pd-test-" + string(util.NewUUID()),
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "testpd",
					Image: "qingyuan/pause",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "testpd",
							MountPath: "/testpd",
						},
					},
				},
			},
			NodeName: targetHost,
		},
	}

	if testContext.Provider == "gce" {
		pod.Spec.Volumes = []api.Volume{
			{
				Name: "testpd",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName:   diskName,
						FSType:   "ext4",
						ReadOnly: readOnly,
					},
				},
			},
		}
	} else if testContext.Provider == "aws" {
		pod.Spec.Volumes = []api.Volume{
			{
				Name: "testpd",
				VolumeSource: api.VolumeSource{
					AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
						VolumeID: diskName,
						FSType:   "ext4",
						ReadOnly: readOnly,
					},
				},
			},
		}
	} else {
		panic("Unknown provider: " + testContext.Provider)
	}

	return pod
}
func createDNSPod(namespace, probeCmd string) *api.Pod {
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name:      "dns-test-" + string(util.NewUUID()),
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			Volumes: []api.Volume{
				{
					Name: "results",
					VolumeSource: api.VolumeSource{
						EmptyDir: &api.EmptyDirVolumeSource{},
					},
				},
			},
			Containers: []api.Container{
				// TODO: Consider scraping logs instead of running a webserver.
				{
					Name:  "webserver",
					Image: "qingyuan/test-webserver",
					Ports: []api.ContainerPort{
						{
							Name:          "http",
							ContainerPort: 80,
						},
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
				{
					Name:    "querier",
					Image:   "qingyuan/dnsutils",
					Command: []string{"sh", "-c", probeCmd},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
			},
		},
	}
	return pod
}
func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) {
	ns := "e2e-test-" + string(util.NewUUID())
	_, err := createNamespaceIfDoesNotExist(c, ns)
	expectNoError(err, fmt.Sprintf("creating namespace %s", ns))

	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
	_, err = c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		c.Pods(ns).Delete(podDescr.Name, nil)
	}()

	// Wait until the pod is not pending. (We check for a state other than
	// 'Pending' rather than waiting for 'Running': on failure the pod goes to
	// 'Terminated', and waiting for 'Running' could block indefinitely.)
	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
	initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
	By(fmt.Sprintf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount))

	// Wait for at most 48 * 5s = 240s = 4 minutes until restartCount is incremented.
	restarts := false
	for i := 0; i < 48; i++ {
		// Wait until restartCount is incremented.
		time.Sleep(5 * time.Second)
		pod, err = c.Pods(ns).Get(podDescr.Name)
		expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
		restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
		By(fmt.Sprintf("Restart count of pod %s in namespace %s is now %d", podDescr.Name, ns, restartCount))
		if restartCount > initialRestartCount {
			By(fmt.Sprintf("Restart count of pod %s in namespace %s increased from %d to %d during the test",
				podDescr.Name, ns, initialRestartCount, restartCount))
			restarts = true
			break
		}
	}

	if restarts != expectRestart {
		Fail(fmt.Sprintf("pod %s in namespace %s - expected restarts: %v, found restarts: %v",
			podDescr.Name, ns, expectRestart, restarts))
	}
}
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "nginx-" + string(util.NewUUID())},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:           "nginx",
					Image:          "nginx",
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
		},
	}
	return pod
}
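// Illustrative sketch (not from the source): building an HTTP GET readiness
// probe and passing it to makePodSpec. The path, port, and timing values are
// assumptions chosen for the example.
func exampleProbedPod() *api.Pod {
	readiness := &api.Probe{
		Handler: api.Handler{
			HTTPGet: &api.HTTPGetAction{
				Path: "/index.html",
				Port: util.NewIntOrStringFromInt(80),
			},
		},
		InitialDelaySeconds: 5,
		TimeoutSeconds:      1,
	}
	// A nil liveness probe means the container is never restarted by a probe.
	return makePodSpec(readiness, nil)
}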
func NewWebserverTest(client *client.Client, namespace string, serviceName string) *WebserverTest {
	t := &WebserverTest{}
	t.Client = client
	t.Namespace = namespace
	t.ServiceName = serviceName
	t.TestId = t.ServiceName + "-" + string(util.NewUUID())
	t.Labels = map[string]string{
		"testid": t.TestId,
	}

	t.rcs = make(map[string]bool)
	t.services = make(map[string]bool)

	t.name = "webserver"
	t.image = "qingyuan/test-webserver"

	return t
}
// Return a prototypical entrypoint test pod
func entrypointTestPod() *api.Pod {
	podName := "client-containers-" + string(util.NewUUID())

	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  testContainerName,
					Image: "qingyuan/eptest:0.1",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}
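// Illustrative sketch (not from the source): entrypoint tests exercise the
// prototype by overriding Command or Args on the returned pod before creating
// it; the argument values here are placeholders.
func exampleEntrypointOverride() *api.Pod {
	pod := entrypointTestPod()
	pod.Spec.Containers[0].Args = []string{"override", "arguments"}
	return pod
}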
func createPD() (string, error) {
	if testContext.Provider == "gce" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

		zone := testContext.CloudConfig.Zone
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		err := exec.Command("gcloud", "compute", "--project="+testContext.CloudConfig.ProjectID,
			"disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		// Any non-GCE provider is assumed to be AWS here.
		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &aws_cloud.VolumeOptions{}
		volumeOptions.CapacityMB = 10 * 1024
		return volumes.CreateVolume(volumeOptions)
	}
}
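// Illustrative lifecycle sketch (not from the source): create a disk, build a
// pod that mounts it on targetHost, and tear the disk down afterwards.
// deletePD is a hypothetical cleanup counterpart; only createPD is defined above.
func examplePDLifecycle(targetHost string) {
	diskName, err := createPD()
	expectNoError(err, "creating PD")
	defer func() {
		expectNoError(deletePD(diskName), "deleting PD") // hypothetical helper
	}()
	pod := testPDPod(diskName, targetHost, false /* readOnly */)
	_ = pod // in a real test, create the pod and verify the mount
}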
func newReplicationController(replicas int) *api.ReplicationController {
	rc := &api.ReplicationController{
		TypeMeta: api.TypeMeta{APIVersion: testapi.Version()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSDefault,
					NodeSelector: map[string]string{
						"baz": "blah",
					},
				},
			},
		},
	}
	return rc
}
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname,
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container qingyuan/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := qingctl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name, 0, nil); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
		time.Sleep(5 * time.Second)
	}
}

var _ = Describe("Pods", func() {
	var c *client.Client

	// TODO: convert this to use the NewFramework(...)
	BeforeEach(func() {
		var err error
		c, err = loadClient()
		expectNoError(err)
	})

	PIt("should get a host IP", func() {
		name := "pod-hostip-" + string(util.NewUUID())
		testHostIP(c, &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "test",
						Image: "qingyuan/pause",
					},
				},
			},
		})
	})

	It("should be scheduled with cpu and memory limits", func() {
var _ = Describe("Events", func() { var c *client.Client BeforeEach(func() { var err error c, err = loadClient() Expect(err).NotTo(HaveOccurred()) }) It("should be sent by qinglets and the scheduler about pods scheduling and running", func() { podClient := c.Pods(api.NamespaceDefault) By("creating the pod") name := "send-events-" + string(util.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, Labels: map[string]string{ "name": "foo", "time": value, }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "p", Image: "qingyuan/serve_hostname:1.1", Ports: []api.ContainerPort{{ContainerPort: 80}},
package e2e

import (
	"fmt"

	"github.com/qingyuancloud/QingYuan/pkg/api"
	"github.com/qingyuancloud/QingYuan/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Secrets", func() {
	f := NewFramework("secrets")

	It("should be consumable from pods", func() {
		name := "secret-test-" + string(util.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &api.Secret{
			ObjectMeta: api.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
				"data-2": []byte("value-2\n"),
				"data-3": []byte("value-3\n"),
			},
		}
// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
	meta.CreationTimestamp = util.Now()
	meta.UID = util.NewUUID()
	meta.SelfLink = ""
}
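// Illustrative sketch (not from the source): registries call this after
// validation and just before persisting, as the LimitRange Create above does.
// Afterwards the object carries a fresh UID and CreationTimestamp, and any
// client-supplied SelfLink has been cleared.
func exampleFillSystemFields(ctx Context) ObjectMeta {
	meta := ObjectMeta{Name: "example", SelfLink: "/stale/self-link"}
	FillObjectMetaSystemFields(ctx, &meta)
	return meta // meta.UID != "", meta.SelfLink == ""
}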
		ns_, err := createTestingNS("downward-api", c)
		ns = ns_.Name
		Expect(err).NotTo(HaveOccurred())
	})

	AfterEach(func() {
		// Clean up the namespace if a non-default one was used.
		if ns != api.NamespaceDefault {
			By("Cleaning up the namespace")
			err := c.Namespaces().Delete(ns)
			expectNoError(err)
		}
	})

	It("should provide pod name and namespace as env vars", func() {
		podName := "downward-api-" + string(util.NewUUID())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": podName},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "dapi-container",
						Image:   "qingyuan/busybox",
						Command: []string{"sh", "-c", "env"},
						Env: []api.EnvVar{
							{
								Name: "POD_NAME",
								ValueFrom: &api.EnvVarSource{
			return false, err
		}
		if len(secrets.Items) == 0 {
			return false, nil
		}
		if len(secrets.Items) > 1 {
			return false, fmt.Errorf("Expected 1 token secret, got %d", len(secrets.Items))
		}
		tokenName = secrets.Items[0].Name
		tokenContent = string(secrets.Items[0].Data[api.ServiceAccountTokenKey])
		return true, nil
	}))

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "pod-service-account-" + string(util.NewUUID()),
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "service-account-test",
					Image: "qingyuan/mounttest:0.2",
					Args: []string{
						fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey),
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
		expectNoError(err)
		minions, err := c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		minionCount = len(minions.Items)
		Expect(minionCount).NotTo(BeZero())
		// Terminating a namespace (deleting the remaining objects from it - which
		// generally means events) can affect the current run. Thus we wait for all
		// terminating namespaces to be finally deleted before starting this test.
		err = deleteTestingNS(c)
		expectNoError(err)
		nsForTesting, err := createTestingNS("density", c)
		ns = nsForTesting.Name
		expectNoError(err)
		uuid = string(util.NewUUID())
		expectNoError(resetMetrics(c))
		expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777))
		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before"))
	})

	AfterEach(func() {
		// Remove any remaining pods from this test if the replication controller
		// still exists and the replica count isn't 0. This means the controller
		// wasn't cleaned up during the test, so clean it up here.
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)