Example #1
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	ds, err := reaper.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return err
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node, so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(util.NewUUID()): string(util.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil {
		return err
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
		updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name)
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		return err
	}

	return reaper.Extensions().DaemonSets(namespace).Delete(name)
}
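The essence of this example: a freshly generated UUID is effectively guaranteed not to exist as a label on any node, so the updated selector matches nothing and the DaemonSet controller removes all daemon pods before the DaemonSet itself is deleted. A minimal sketch of that selector trick in isolation (the import path for util is an assumption about the package layout these examples come from):

// Sketch, not part of the original source: build a selector no node can match.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util" // assumed location of util.NewUUID
)

func main() {
	unmatchable := map[string]string{
		string(util.NewUUID()): string(util.NewUUID()),
	}
	fmt.Println(unmatchable) // a one-entry label map with a random key and value
}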
Example #2
// Create a LimitRange object
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	limitRange, ok := obj.(*api.LimitRange)
	if !ok {
		return nil, fmt.Errorf("invalid object type")
	}

	if !api.ValidNamespace(ctx, &limitRange.ObjectMeta) {
		return nil, errors.NewConflict("limitRange", limitRange.Namespace, fmt.Errorf("LimitRange.Namespace does not match the provided context"))
	}

	if len(limitRange.Name) == 0 {
		limitRange.Name = string(util.NewUUID())
	}

	if errs := validation.ValidateLimitRange(limitRange); len(errs) > 0 {
		return nil, errors.NewInvalid("limitRange", limitRange.Name, errs)
	}
	api.FillObjectMetaSystemFields(ctx, &limitRange.ObjectMeta)

	err := rs.registry.CreateWithName(ctx, limitRange.Name, limitRange)
	if err != nil {
		return nil, err
	}
	return rs.registry.Get(ctx, limitRange.Name)
}
Example #3
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): r.options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
	podName := "security-context-" + string(util.NewUUID())
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:        podName,
			Labels:      map[string]string{"name": podName},
			Annotations: map[string]string{},
		},
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{
				HostIPC: hostIPC,
				HostPID: hostPID,
			},
			Containers: []api.Container{
				{
					Name:  "test-container",
					Image: "gcr.io/google_containers/busybox:1.24",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	return pod
}
Example #5
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting two nodes")
	nodeList, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
	config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
Example #6
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
	requestBytes := c.options.Capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// The disk will be created in the zone in which this code is currently running
	// TODO: We should support auto-provisioning volumes in multiple/specified zones
	zone, err := cloud.GetZone()
	if err != nil {
		glog.V(2).Infof("error getting zone information from GCE: %v", err)
		return "", 0, err
	}

	err = cloud.CreateDisk(name, zone.FailureDomain, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)
	return name, int(requestGB), nil
}
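The only arithmetic in this example is rounding the requested capacity up to whole GiB before the disk is created. A self-contained sketch of that rounding, with the behaviour of volume.RoundUpSize assumed from how it is used above:

// Sketch: round a byte count up to whole allocation units, as volume.RoundUpSize is used above.
package main

import "fmt"

func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const GiB = 1024 * 1024 * 1024
	fmt.Println(roundUpSize(1*GiB, GiB))          // 1: exact multiples stay as-is
	fmt.Println(roundUpSize(1*GiB+1, GiB))        // 2: any partial GiB rounds up
	fmt.Println(roundUpSize(1536*1024*1024, GiB)) // 2: a 1.5 GiB request becomes a 2 GiB disk
}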
// generateTestingImageList generates a random image list and the corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
	// imageList is a randomly generated image list
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(util.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}

	// expectedImageList is generated from imageList according to size and maxImagesInNodeStatus
	// 1. sort the imageList by size
	sort.Sort(byImageSize(imageList))
	// 2. convert sorted imageList to api.ContainerImage list
	var expectedImageList []api.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := api.ContainerImage{
			Names:     kubeImage.RepoTags,
			SizeBytes: kubeImage.Size,
		}

		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. return only the top maxImagesInNodeStatus images from expectedImageList
	return imageList, expectedImageList[0:maxImagesInNodeStatus]
}
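A short usage sketch for the helper above (same test package assumed): the final slice expression means count should be at least maxImagesInNodeStatus, otherwise expectedImageList[0:maxImagesInNodeStatus] would go out of range.

// Sketch, same test package assumed.
imageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 5)
// imageList holds every generated image; expectedImageList keeps only the first
// maxImagesInNodeStatus entries after the byImageSize sort.
_, _ = imageList, expectedImageList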
func TestDeleter(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Recycler expected to return nil but got %s", err)
	}
	if exists, _ := util.FileExists("foo"); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create 10 ReplicaSets, shuffle them randomly, and insert them into the ReplicaSet controller's store
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(util.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest ReplicaSet is synced
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0])
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
Example #10
func makeContainerLogMount(opts *kubecontainer.RunContainerOptions, container *api.Container) (*kubecontainer.Mount, error) {
	if opts.PodContainerDir == "" || container.TerminationMessagePath == "" {
		return nil, nil
	}

	// In docker runtime, the container log path contains the container ID.
	// However, for the rkt runtime, we cannot get the container ID before
	// the container is launched, so here we generate a random UUID to enable
	// us to map a container's termination message path to a unique log file
	// on the disk.
	randomUID := util.NewUUID()
	containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
	fs, err := os.Create(containerLogPath)
	if err != nil {
		return nil, err
	}

	if err := fs.Close(); err != nil {
		return nil, err
	}

	mnt := &kubecontainer.Mount{
		// Use a random name for the termination message mount, so that
		// when a container restarts, it will not overwrite the old termination
		// message.
		Name:          fmt.Sprintf("termination-message-%s", randomUID),
		ContainerPath: container.TerminationMessagePath,
		HostPath:      containerLogPath,
		ReadOnly:      false,
	}
	opts.Mounts = append(opts.Mounts, *mnt)

	return mnt, nil
}
Example #11
func (cc *ConformanceContainer) Create() error {
	cc.podName = cc.Container.Name + string(util.NewUUID())
	imagePullSecrets := []api.LocalObjectReference{}
	for _, s := range cc.ImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s})
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      cc.podName,
			Namespace: cc.Namespace,
		},
		Spec: api.PodSpec{
			NodeName:      cc.NodeName,
			RestartPolicy: cc.RestartPolicy,
			Containers: []api.Container{
				cc.Container,
			},
			Volumes:          cc.Volumes,
			ImagePullSecrets: imagePullSecrets,
		},
	}

	_, err := cc.Client.Pods(cc.Namespace).Create(pod)
	return err
}
Example #12
func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: fc.Options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "fakeplugin-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
			AccessModes:                   fc.Options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): fc.Options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, nil
}
Example #13
func createPD() (string, error) {
	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

		gceCloud, err := getGCECloud()
		if err != nil {
			return "", err
		}

		tags := map[string]string{}
		err = gceCloud.CreateDisk(pdName, testContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &awscloud.VolumeOptions{}
		volumeOptions.CapacityGB = 10
		return volumes.CreateDisk(volumeOptions)
	}
}
Example #14
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())

	return &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "gcr.io/google_containers/mounttest:0.2",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes:       mount(source),
		},
	}
}
Example #15
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting ssh-able hosts")
	hosts, err := NodeSSHHosts(config.f.Client)
	Expect(err).NotTo(HaveOccurred())
	if len(hosts) == 0 {
		Failf("No ssh-able nodes")
	}
	config.nodes = hosts

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPod()
}
Example #16
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting node addresses")
	nodeList := ListSchedulableNodesOrDie(config.f.Client)
	config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
func newDeployment(replicas int) *exp.Deployment {
	d := exp.Deployment{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: exp.DeploymentSpec{
			Strategy: exp.DeploymentStrategy{
				Type:          exp.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &exp.RollingUpdateDeployment{},
			},
			Replicas: replicas,
			Selector: map[string]string{"foo": "bar"},
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
						},
					},
				},
			},
		},
	}
	return &d
}
Example #18
func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())
	return &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: image,
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes: []api.Volume{
				{
					Name: volumeName,
					VolumeSource: api.VolumeSource{
						EmptyDir: source,
					},
				},
			},
		},
	}
}
Example #19
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
	err := os.MkdirAll(fullpath, 0750)
	if err != nil {
		return nil, err
	}

	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-hostpath-",
			Labels: map[string]string{
				"createdby": "hostpath dynamic provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}, nil
}
func TestOverlappingRCs(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create 10 rcs, shuffle them randomly, and insert them into the rc manager's store
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
func (c *mockControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
	if pv.GenerateName != "" && pv.Name == "" {
		pv.Name = fmt.Sprintf(pv.GenerateName, util.NewUUID())
	}
	c.volume = pv
	return c.volume, nil
}
Example #22
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting ssh-able hosts")
	hosts, err := NodeSSHHosts(config.f.Client)
	Expect(err).NotTo(HaveOccurred())
	config.nodes = make([]string, 0, len(hosts))
	for _, h := range hosts {
		config.nodes = append(config.nodes, strings.TrimSuffix(h, ":22"))
	}

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPod()
}
func getService(servicePorts []api.ServicePort) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: string(util.NewUUID()), Namespace: ns},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
		},
	}
}
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(framework.RCConfig{
		Client:    f.Client,
		Name:      rcName,
		Namespace: f.Namespace.Name,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.Client, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.Client, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRC(f.Client, f.Namespace.Name, rcName)
}
Example #25
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, rm *resourceMonitor, expected map[string]map[float64]float64) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(RunRC(RCConfig{
		Client:    framework.Client,
		Name:      rcName,
		Namespace: framework.Namespace.Name,
		Image:     "gcr.io/google_containers/pause:2.0",
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling resourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(framework.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(framework.Client, nodeNames.List())
	rm.LogLatest()
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	Logf("%s", rm.FormatResourceUsage(usageSummary))
	// TODO(yujuhong): Set realistic values after gathering enough data.
	verifyMemoryLimits(resourceUsagePerContainer{
		"/kubelet":       &containerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
		"/docker-daemon": &containerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
	}, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	Logf("%s", rm.FormatCPUSummary(cpuSummary))
	verifyCPULimits(expected, cpuSummary)

	By("Deleting the RC")
	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
}
Example #26
func (zones Zones) Add(zone dnsprovider.Zone) (dnsprovider.Zone, error) {
	dnsName := zone.Name()
	callerReference := string(util.NewUUID())
	input := route53.CreateHostedZoneInput{Name: &dnsName, CallerReference: &callerReference}
	output, err := zones.interface_.service.CreateHostedZone(&input)
	if err != nil {
		return nil, err
	}
	return &Zone{output.HostedZone, &zones}, nil
}
Example #27
// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
	meta.CreationTimestamp = unversioned.Now()
	// allows admission controllers to assign a UID earlier in the request processing
	// to support tracking resources pending creation.
	uid, found := UIDFrom(ctx)
	if !found {
		uid = util.NewUUID()
	}
	meta.UID = uid
	meta.SelfLink = ""
}
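Because UIDFrom is consulted first, a UID already attached to the request context (for example by an admission controller) takes precedence over a freshly generated one. A minimal sketch of that precedence; WithUID and NewDefaultContext are assumed to exist alongside UIDFrom in this package version:

// Sketch, assumptions noted above; only the precedence behaviour is illustrated.
ctx := NewDefaultContext()
ctx = WithUID(ctx, types.UID("pre-assigned-uid"))

meta := &ObjectMeta{}
FillObjectMetaSystemFields(ctx, meta)
// meta.UID is now "pre-assigned-uid"; without WithUID it would be a fresh util.NewUUID() value.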
Example #28
// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
	name := "ubelite-spread-rc-" + string(util.NewUUID())
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicaCount,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()
	// List the pods, making sure we observe all the replicas.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount)
	Expect(err).NotTo(HaveOccurred())

	// Wait for all of them to be scheduled
	By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
	pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
Example #29
func getUniqueName(basename string, existingNames *sets.String) string {
	if !existingNames.Has(basename) {
		return basename
	}

	for i := 0; i < 100; i++ {
		trialName := fmt.Sprintf("%v-%d", basename, i)
		if !existingNames.Has(trialName) {
			return trialName
		}
	}

	return string(util.NewUUID())
}
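A usage sketch for getUniqueName; sets.NewString from the util/sets package is assumed, and the UUID branch only fires once a hundred numeric suffixes are already taken:

// Sketch, same package assumed.
existing := sets.NewString("pv", "pv-0")
name := getUniqueName("pv", &existing) // "pv" and "pv-0" are taken, so this returns "pv-1"
existing.Insert(name)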
Example #30
func (pwxutil *PWXDiskUtil) CreateVolume(c *pwxDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
	//requestBytes := c.options.Capacity.Value()
	// PWX works with megabytes, convert to MiB with rounding up
	//requestMB := volume.RoundUpSize(requestBytes, 1024*1024)

	//TODO: Create disk on PX.
	/*
		err = cloud.CreateDisk(name, zone.FailureDomain, int64(requestGB))
		if err != nil {
			return "", 0, err
		}
	*/
	return name, 0, nil //int(requestGB), nil
}