Example #1
func TestDeleter(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by spec: %v", err)
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Deleter expected to return nil but got %v", err)
	}
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
Example #2
func TestOverlappingRCs(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create a set of rcs with distinct creation timestamps, shuffle them randomly, and insert them into the rc manager's store
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
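The shuffle helper used above is not part of this excerpt. A minimal sketch of one possible implementation, assuming math/rand is imported, could look like this:

// shuffle returns a randomly reordered copy of the given controllers
// (hedged sketch; the real test defines its own helper).
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
	shuffled := make([]*api.ReplicationController, len(controllers))
	for i, j := range rand.Perm(len(controllers)) {
		shuffled[j] = controllers[i]
	}
	return shuffled
}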
Example #3
func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())
	return &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.GroupOrDie("").Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: image,
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes: []api.Volume{
				{
					Name: volumeName,
					VolumeSource: api.VolumeSource{
						EmptyDir: source,
					},
				},
			},
		},
	}
}
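A short usage sketch for the helper above; the image name and mount path are illustrative, and StorageMediumMemory requests a tmpfs-backed emptyDir:

pod := testPodWithVolume("gcr.io/google_containers/mounttest:0.2", "/test-volume",
	&api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory})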
Example #4
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
	err := os.MkdirAll(fullpath, 0750)
	if err != nil {
		return nil, err
	}

	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-hostpath-",
			Labels: map[string]string{
				"createdby": "hostpath dynamic provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}, nil
}
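A hedged sketch of how a caller might persist the result; creater and c (a client.Interface) are assumed to be set up elsewhere:

pv, err := creater.Create()
if err != nil {
	return err
}
// Submit the freshly provisioned volume to the API server.
if _, err := c.PersistentVolumes().Create(pv); err != nil {
	return err
}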
Example #5
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting ssh-able hosts")
	hosts, err := NodeSSHHosts(config.f.Client)
	Expect(err).NotTo(HaveOccurred())
	config.nodes = make([]string, 0, len(hosts))
	for _, h := range hosts {
		config.nodes = append(config.nodes, strings.TrimSuffix(h, ":22"))
	}

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPod()
}
Example #6
func getService(servicePorts []api.ServicePort) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: string(util.NewUUID()), Namespace: ns},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
		},
	}
}
Example #7
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
	ds, err := reaper.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return "", err
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(util.NewUUID()): string(util.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil {
		return "", err
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
		updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name)
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		return "", err
	}

	if err := reaper.Extensions().DaemonSets(namespace).Delete(name); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s stopped", name), nil
}
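A minimal usage sketch; the namespace, name, and timeout are illustrative, and the reaper is assumed to have been constructed elsewhere:

msg, err := reaper.Stop("kube-system", "example-daemon", 2*time.Minute, nil)
if err != nil {
	return err
}
fmt.Println(msg) // e.g. "example-daemon stopped"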
Example #8
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "test-webserver-" + string(util.NewUUID())},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:           "test-webserver",
					Image:          "gcr.io/google_containers/test-webserver",
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
		},
	}
	return pod
}
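A hedged usage sketch building an HTTP readiness probe to pass into makePodSpec; the path, port, and delay are illustrative:

readiness := &api.Probe{
	Handler: api.Handler{
		HTTPGet: &api.HTTPGetAction{Path: "/", Port: util.NewIntOrStringFromInt(80)},
	},
	InitialDelaySeconds: 30,
}
pod := makePodSpec(readiness, nil)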
Example #9
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(RunRC(RCConfig{
		Client:    framework.Client,
		Name:      rcName,
		Namespace: framework.Namespace.Name,
		Image:     "gcr.io/google_containers/pause:go",
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	resourceMonitor.LogLatest()
	resourceMonitor.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling resourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(framework.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(framework.Client, nodeNames.List())
	resourceMonitor.LogCPUSummary()
	resourceMonitor.LogLatest()

	By("Deleting the RC")
	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
}
Example #10
func NewWebserverTest(client *client.Client, namespace string, serviceName string) *WebserverTest {
	t := &WebserverTest{}
	t.Client = client
	t.Namespace = namespace
	t.ServiceName = serviceName
	t.TestId = t.ServiceName + "-" + string(util.NewUUID())
	t.Labels = map[string]string{
		"testid": t.TestId,
	}

	t.rcs = make(map[string]bool)
	t.services = make(map[string]bool)

	t.name = "webserver"
	t.image = "gcr.io/google_containers/test-webserver"

	return t
}
Example #11
// Return a prototypical entrypoint test pod
func entrypointTestPod() *api.Pod {
	podName := "client-containers-" + string(util.NewUUID())

	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  testContainerName,
					Image: "gcr.io/google_containers/eptest:0.1",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}
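A usage sketch: the entrypoint tests take this prototype and override the container arguments (the values here are illustrative) before creating the pod:

pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}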
Example #12
func createTerminatingPod(f *Framework) (*api.Pod, error) {
	uuid := util.NewUUID()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: string(uuid),
		},
		Spec: api.PodSpec{
			NodeName: "nonexistant-node",
			Containers: []api.Container{
				{
					Name:  string(uuid),
					Image: "beta.gcr.io/google_containers/busybox",
				},
			},
		},
	}
	return f.Client.Pods(f.Namespace.Name).Create(pod)
}
Example #13
func createPD() (string, error) {
	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

		zone := testContext.CloudConfig.Zone
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &aws_cloud.VolumeOptions{}
		volumeOptions.CapacityMB = 10 * 1024
		return volumes.CreateVolume(volumeOptions)
	}
}
Example #14
//	buildTestLoadBalancer builds a common loadBalancerController to be used
//	in the tests to verify the generated HAProxy configuration file
func buildTestLoadBalancer(lbDefAlgorithm string) *loadBalancerController {
	endpointAddresses := []api.EndpointAddress{
		{IP: "1.2.3.4"},
		{IP: "5.6.7.8"},
	}
	ports := []int{80, 443}
	endpointPorts := []api.EndpointPort{
		{Port: ports[0], Protocol: "TCP"},
		{Port: ports[1], Protocol: "HTTP"},
	}
	servicePorts := []api.ServicePort{
		{Port: ports[0], TargetPort: util.NewIntOrStringFromInt(ports[0])},
		{Port: ports[1], TargetPort: util.NewIntOrStringFromInt(ports[1])},
	}

	svc1 := getService(servicePorts)
	svc1.ObjectMeta.Name = "svc-1"
	svc2 := getService(servicePorts)
	svc2.ObjectMeta.Name = "svc-2"
	endpoints := []*api.Endpoints{
		getEndpoints(svc1, endpointAddresses, endpointPorts),
		getEndpoints(svc2, endpointAddresses, endpointPorts),
	}
	flb := newFakeLoadBalancerController(endpoints, []*api.Service{svc1, svc2})
	cfg, _ := filepath.Abs("./test-samples/loadbalancer_test.json")
	// If no algorithm was supplied by the caller, fall back to a default.
	if lbDefAlgorithm == "" {
		lbDefAlgorithm = "roundrobin"
	}

	flb.cfg = parseCfg(cfg, lbDefAlgorithm)
	cfgFile, _ := filepath.Abs("test-" + string(util.NewUUID()))
	flb.cfg.Config = cfgFile
	flb.tcpServices = map[string]int{
		svc1.Name: 20,
	}

	return flb
}
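The getEndpoints helper is not shown in this excerpt. A plausible sketch, assuming it simply pairs a service with the given addresses and ports, is:

// getEndpoints returns an Endpoints object for the given service
// (hedged sketch; the real helper lives alongside these tests).
func getEndpoints(svc *api.Service, addresses []api.EndpointAddress, ports []api.EndpointPort) *api.Endpoints {
	return &api.Endpoints{
		ObjectMeta: api.ObjectMeta{Name: svc.Name, Namespace: svc.Namespace},
		Subsets: []api.EndpointSubset{
			{Addresses: addresses, Ports: ports},
		},
	}
}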
Example #15
func newReplicationController(replicas int) *api.ReplicationController {
	rc := &api.ReplicationController{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.Version()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSDefault,
					NodeSelector: map[string]string{
						"baz": "blah",
					},
				},
			},
		},
	}
	return rc
}
Example #16
// newIngress returns a new Ingress with the given path map.
func newIngress(hostRules map[string]fakeIngressRuleValueMap) *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%v", util.NewUUID()),
			Namespace: api.NamespaceNone,
		},
		Spec: extensions.IngressSpec{
			Backend: &extensions.IngressBackend{
				ServiceName: defaultBackendName(testClusterName),
				ServicePort: testBackendPort,
			},
			Rules: toIngressRules(hostRules),
		},
		Status: extensions.IngressStatus{
			LoadBalancer: api.LoadBalancerStatus{
				Ingress: []api.LoadBalancerIngress{
					{IP: testIPManager.ip()},
				},
			},
		},
	}
}
Example #17
func (limitrangeStrategy) PrepareForCreate(obj runtime.Object) {
	limitRange := obj.(*api.LimitRange)
	if len(limitRange.Name) == 0 {
		limitRange.Name = string(util.NewUUID())
	}
}
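An illustration of the effect; the package-level Strategy value is an assumption about how the strategy is exposed:

// A LimitRange created without a name receives a generated UUID name
// before it is stored (hedged sketch).
lr := &api.LimitRange{}
Strategy.PrepareForCreate(lr)
// lr.Name now holds a fresh UUID string.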
Example #18
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string) *api.Pod {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.GroupOrDie("").Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name:      "dns-test-" + string(util.NewUUID()),
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			Volumes: []api.Volume{
				{
					Name: "results",
					VolumeSource: api.VolumeSource{
						EmptyDir: &api.EmptyDirVolumeSource{},
					},
				},
			},
			Containers: []api.Container{
				// TODO: Consider scraping logs instead of running a webserver.
				{
					Name:  "webserver",
					Image: "gcr.io/google_containers/test-webserver",
					Ports: []api.ContainerPort{
						{
							Name:          "http",
							ContainerPort: 80,
						},
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
				{
					Name:    "querier",
					Image:   "gcr.io/google_containers/dnsutils",
					Command: []string{"sh", "-c", wheezyProbeCmd},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
				{
					Name:    "jessie-querier",
					Image:   "gcr.io/google_containers/jessie-dnsutils",
					Command: []string{"sh", "-c", jessieProbeCmd},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
			},
		},
	}
	return pod
}
Example #19
		c = framework.Client
		ns = framework.Namespace.Name
		var err error

		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		// Terminating a namespace (deleting the remaining objects from it - which
		// generally means events) can affect the current run. Thus we wait for all
		// terminating namespaces to be fully deleted before starting this test.
		err = checkTestingNSDeletedExcept(c, ns)
		expectNoError(err)

		uuid = string(util.NewUUID())

		expectNoError(resetMetrics(c))
		expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777))
		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before"))

		Logf("Listing nodes for easy debugging:\n")
		for _, node := range nodes.Items {
			for _, address := range node.Status.Addresses {
				if address.Type == api.NodeInternalIP {
					Logf("Name: %v IP: %v", node.ObjectMeta.Name, address.Address)
				}
			}
		}
	})
Example #20
func testPDPod(diskNames []string, targetHost string, readOnly bool, numContainers int) *api.Pod {
	containers := make([]api.Container, numContainers)
	for i := range containers {
		containers[i].Name = "mycontainer"
		if numContainers > 1 {
			containers[i].Name = fmt.Sprintf("mycontainer%v", i+1)
		}

		containers[i].Image = "gcr.io/google_containers/busybox"

		containers[i].Command = []string{"sleep", "6000"}

		containers[i].VolumeMounts = make([]api.VolumeMount, len(diskNames))
		for k := range diskNames {
			containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1)
			containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1)
		}

		containers[i].Resources.Limits = api.ResourceList{}
		containers[i].Resources.Limits[api.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)

	}

	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.GroupOrDie("").Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: "pd-test-" + string(util.NewUUID()),
		},
		Spec: api.PodSpec{
			Containers: containers,
			NodeName:   targetHost,
		},
	}

	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pod.Spec.Volumes = make([]api.Volume, len(diskNames))
		for k, diskName := range diskNames {
			pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
			pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
					PDName:   diskName,
					FSType:   "ext4",
					ReadOnly: readOnly,
				},
			}
		}
	} else if testContext.Provider == "aws" {
		pod.Spec.Volumes = make([]api.Volume, len(diskNames))
		for k, diskName := range diskNames {
			pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
			pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{
				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
					VolumeID: diskName,
					FSType:   "ext4",
					ReadOnly: readOnly,
				},
			}
		}
	} else {
		panic("Unknown provider: " + testContext.Provider)
	}

	return pod
}
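A hedged usage sketch combining this with createPD from earlier; nodes and podClient are assumed to come from the surrounding test setup:

diskName, err := createPD()
expectNoError(err)
// Schedule a single-container, read-write pod onto the first node.
host0Pod := testPDPod([]string{diskName}, nodes.Items[0].Name, false /* readOnly */, 1 /* numContainers */)
_, err = podClient.Create(host0Pod)
expectNoError(err)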
Example #21
package e2e

import (
	"fmt"

	"vulcan/kubernetes/pkg/api"
	"vulcan/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Downward API volume", func() {
	f := NewFramework("downward-api")

	It("should provide labels and annotations files [Conformance]", func() {
		podName := "metadata-volume-" + string(util.NewUUID())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:        podName,
				Labels:      map[string]string{"cluster": "rack10"},
				Annotations: map[string]string{"builder": "john-doe"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "client-container",
						Image:   "gcr.io/google_containers/busybox",
						Command: []string{"sh", "-c", "cat /etc/labels /etc/annotations /etc/podname"},
						VolumeMounts: []api.VolumeMount{
							{
								Name:      "podinfo",
Example #22
package e2e

import (
	"fmt"

	"vulcan/kubernetes/pkg/api"
	"vulcan/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Secrets", func() {
	f := NewFramework("secrets")

	It("should be consumable from pods [Conformance]", func() {
		name := "secret-test-" + string(util.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &api.Secret{
			ObjectMeta: api.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
				"data-2": []byte("value-2\n"),
				"data-3": []byte("value-3\n"),
			},
		}
Example #23
	"vulcan/kubernetes/pkg/util"
	"vulcan/kubernetes/pkg/util/wait"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Events", func() {
	framework := NewFramework("events")

	It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() {

		podClient := framework.Client.Pods(framework.Namespace.Name)

		By("creating the pod")
		name := "send-events-" + string(util.NewUUID())
		value := strconv.Itoa(time.Now().Nanosecond())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
				Labels: map[string]string{
					"name": "foo",
					"time": value,
				},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "p",
						Image: "gcr.io/google_containers/serve_hostname:1.1",
						Ports: []api.ContainerPort{{ContainerPort: 80}},
Example #24
	Describe("Clean up pods on node", func() {
		type DeleteTest struct {
			podsPerNode int
			timeout     time.Duration
		}
		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}
		for _, itArg := range deleteTests {
			name := fmt.Sprintf(
				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
			It(name, func() {
				totalPods := itArg.podsPerNode * numNodes
				By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID()))

				Expect(RunRC(RCConfig{
					Client:    framework.Client,
					Name:      rcName,
					Namespace: framework.Namespace.Name,
					Image:     "gcr.io/google_containers/pause:go",
					Replicas:  totalPods,
				})).NotTo(HaveOccurred())
				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to kubelet. The timeout is set to
				// only 30 seconds here because RunRC already waited for all pods to
				// transition to the running status.
				Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, totalPods,
					time.Second*30)).NotTo(HaveOccurred())
				resourceMonitor.LogLatest()
Example #25
	BeforeEach(func() {
		var err error
		c, err = loadClient()
		expectNoError(err)
		nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		nodeCount = len(nodeList.Items)
		Expect(nodeCount).NotTo(BeZero())

		err = checkTestingNSDeletedExcept(c, "")
		expectNoError(err)

		nsForTesting, err := createTestingNS("sched-pred", c)
		ns = nsForTesting.Name
		expectNoError(err)
		uuid = string(util.NewUUID())
	})

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)
			expectNoError(err)
		}

		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
		if err := deleteNS(c, ns, 10*time.Minute /* namespace deletion timeout */); err != nil {
			Failf("Couldn't delete ns %s: %v", ns, err)
		}
	})
Example #26
package e2e

import (
	"fmt"

	"vulcan/kubernetes/pkg/api"
	"vulcan/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Downward API", func() {
	framework := NewFramework("downward-api")

	It("should provide pod name and namespace as env vars [Conformance]", func() {
		podName := "downward-api-" + string(util.NewUUID())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": podName},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "dapi-container",
						Image:   "gcr.io/google_containers/busybox",
						Command: []string{"sh", "-c", "env"},
						Env: []api.EnvVar{
							{
								Name: "POD_NAME",
								ValueFrom: &api.EnvVarSource{
Example #27
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

	pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. The wait loops until the
	// pods are running, so a non-running pod will cause this test to time out.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
Example #28

package e2e

import (
	"vulcan/kubernetes/pkg/api"
	"vulcan/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Variable Expansion", func() {
	framework := NewFramework("var-expansion")

	It("should allow composing env vars into new env vars [Conformance]", func() {
		podName := "var-expansion-" + string(util.NewUUID())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": podName},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "dapi-container",
						Image:   "gcr.io/google_containers/busybox",
						Command: []string{"sh", "-c", "env"},
						Env: []api.EnvVar{
							{
								Name:  "FOO",
								Value: "foo-value",
Example #29
	expectNoError(err)
	failedContainers := 0
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range FailedContainers(&p) {
			failedContainers = failedContainers + v.restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}

var _ = Describe("DaemonRestart", func() {

	framework := Framework{BaseName: "daemonrestart"}
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config RCConfig
	var controller *controllerFramework.Controller
	var newPods cache.Store
	var stopCh chan struct{}

	BeforeEach(func() {

		// These tests require SSH
		// TODO(11834): Enable this test in GKE once experimental API there is switched on
		SkipUnlessProviderIs("gce")
		framework.beforeEach()
		ns = framework.Namespace.Name
Example #30
// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
	meta.CreationTimestamp = util.Now()
	meta.UID = util.NewUUID()
	meta.SelfLink = ""
}
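A short usage sketch, assuming a context obtained via api.NewDefaultContext():

ctx := api.NewDefaultContext()
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "example"}}
api.FillObjectMetaSystemFields(ctx, &pod.ObjectMeta)
// pod.UID is now a fresh UUID and CreationTimestamp holds the current time.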