Example No. 1
func TestIncrementUsageReplicationControllers(t *testing.T) {
	namespace := "default"
	client := testclient.NewSimpleFake(&api.ReplicationControllerList{
		Items: []api.ReplicationController{
			{
				ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
			},
		},
	})
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourceReplicationControllers
	status.Hard[r] = resource.MustParse("2")
	status.Used[r] = resource.MustParse("1")
	dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, "ReplicationController", namespace, "name", "replicationcontrollers", "", admission.Create, nil), status, client)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if !dirty {
		t.Errorf("Expected the status to get incremented, therefore should have been dirty")
	}
	quantity := status.Used[r]
	if quantity.Value() != int64(2) {
		t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
	}
}
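Every snippet on this page goes through resource.MustParse, so a minimal, self-contained sketch of its semantics is useful up front (assuming the current k8s.io/apimachinery/pkg/api/resource import path; the older snippets on this page used k8s.io/kubernetes/pkg/api/resource):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// MustParse panics on malformed input, which is why it is idiomatic
	// in test fixtures but avoided in request-handling paths.
	cpu := resource.MustParse("1500m") // 1.5 cores
	fmt.Println(cpu.MilliValue())      // 1500
	fmt.Println(cpu.Value())           // 2 (Value rounds fractions up)

	mem := resource.MustParse("1Gi") // binary-SI suffix
	fmt.Println(mem.Value())         // 1073741824
}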
Example No. 2
func TestCalculateTimeoutForVolume(t *testing.T) {
	pv := &api.PersistentVolume{
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("500M"),
			},
		},
	}

	timeout := CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 50 {
		t.Errorf("Expected 50 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("2Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 60 {
		t.Errorf("Expected 60 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("150Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 4500 {
		t.Errorf("Expected 4500 for timeout but got %v", timeout)
	}
}
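The three assertions pin the behavior down to max(minimum timeout, increment × whole GiB of capacity); the following is a sketch under that assumption, with a hypothetical calculateTimeout helper rather than the real CalculateTimeoutForVolume:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// calculateTimeout sketches the rule the assertions above imply:
// timeout = max(minimumTimeout, timeoutIncrement * capacity-in-GiB).
func calculateTimeout(minimumTimeout, timeoutIncrement int64, capacity resource.Quantity) int64 {
	giB := capacity.Value() / (1024 * 1024 * 1024) // whole GiB, rounded down
	if t := timeoutIncrement * giB; t > minimumTimeout {
		return t
	}
	return minimumTimeout
}

func main() {
	fmt.Println(calculateTimeout(50, 30, resource.MustParse("500M")))  // 50
	fmt.Println(calculateTimeout(50, 30, resource.MustParse("2Gi")))   // 60
	fmt.Println(calculateTimeout(50, 30, resource.MustParse("150Gi"))) // 4500
}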
Example No. 3
// DaemonSets should place pods onto nodes with sufficient free resources
func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
	podSpec := api.PodSpec{
		NodeName: "not-too-much-mem",
		Containers: []api.Container{{
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceMemory: resource.MustParse("75M"),
					api.ResourceCPU:    resource.MustParse("75m"),
				},
			},
		}},
	}
	manager, podControl := newTestController()
	node := newNode("not-too-much-mem", nil)
	node.Status.Allocatable = api.ResourceList{
		api.ResourceMemory: resource.MustParse("200M"),
		api.ResourceCPU:    resource.MustParse("200m"),
	}
	manager.nodeStore.Add(node)
	manager.podStore.Add(&api.Pod{
		Spec: podSpec,
	})
	ds := newDaemonSet("foo")
	ds.Spec.Template.Spec = podSpec
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
}
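The predicate behind this test is a per-resource fit check: the pod fits when, for each named resource, request ≤ allocatable minus what is already in use. A minimal sketch with hypothetical names and plain maps standing in for api.ResourceList:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// fits reports whether the requested resources fit into what remains of the
// node's allocatable capacity after existing usage is subtracted.
func fits(request, used, allocatable map[string]resource.Quantity) bool {
	for name, want := range request {
		free := allocatable[name].DeepCopy()
		if u, ok := used[name]; ok {
			free.Sub(u)
		}
		if want.Cmp(free) > 0 {
			return false
		}
	}
	return true
}

func main() {
	alloc := map[string]resource.Quantity{"memory": resource.MustParse("200M"), "cpu": resource.MustParse("200m")}
	used := map[string]resource.Quantity{"memory": resource.MustParse("75M"), "cpu": resource.MustParse("75m")}
	req := map[string]resource.Quantity{"memory": resource.MustParse("75M"), "cpu": resource.MustParse("75m")}
	fmt.Println(fits(req, used, alloc)) // true: 75M+75M <= 200M, 75m+75m <= 200m
}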
Example No. 4
func TestAdd(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		b        api.ResourceList
		expected api.ResourceList
	}{
		"noKeys": {
			a:        api.ResourceList{},
			b:        api.ResourceList{},
			expected: api.ResourceList{},
		},
		"toEmpty": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			b:        api.ResourceList{},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		},
		"matching": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			b:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
		},
	}
	for testName, testCase := range testCases {
		sum := Add(testCase.a, testCase.b)
		if result := Equals(testCase.expected, sum); !result {
			t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum)
		}
	}
}
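Add and Equals come from the quota helper package; the following is a hypothetical re-implementation of the Add semantics the table exercises, with a plain map standing in for api.ResourceList:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// list stands in for api.ResourceList: resource name -> quantity.
type list map[string]resource.Quantity

// add sums matching keys and copies keys present in only one operand,
// which is exactly what the "toEmpty" and "matching" cases assert.
func add(a, b list) list {
	out := list{}
	for k, v := range a {
		sum := v.DeepCopy() // avoid mutating the operand's backing value
		if other, ok := b[k]; ok {
			sum.Add(other)
		}
		out[k] = sum
	}
	for k, v := range b {
		if _, ok := out[k]; !ok {
			out[k] = v.DeepCopy()
		}
	}
	return out
}

func main() {
	sum := add(list{"cpu": resource.MustParse("100m")}, list{"cpu": resource.MustParse("100m")})
	q := sum["cpu"]
	fmt.Println(q.String()) // 200m
}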
Example No. 5
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())

	if err != nil {
		return nil, nil, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, err
	}
	return consumption, request, nil
}
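The request side above is a mean over per-container requests, truncated to milli-units; here is a sketch of that step in isolation (the containerRequest.Amount nil check is the older Quantity API; newer code would test presence with a comma-ok map lookup instead):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// averageRequestMilli sketches the averaging step above: sum per-container
// request quantities, then divide by the pod count in milli-units (integer
// division truncates, matching the fmt.Sprintf("%dm", ...) construction).
func averageRequestMilli(requests []resource.Quantity, podCount int64) resource.Quantity {
	total := resource.MustParse("0")
	for _, q := range requests {
		total.Add(q)
	}
	return resource.MustParse(fmt.Sprintf("%dm", total.MilliValue()/podCount))
}

func main() {
	avg := averageRequestMilli([]resource.Quantity{
		resource.MustParse("100m"), resource.MustParse("300m"),
	}, 2)
	fmt.Println(avg.String()) // 200m
}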
Example No. 6
func TestInvalidLimitRangeUpdate(t *testing.T) {
	ns := api.NamespaceDefault
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name: "abc",
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
	c := &testClient{
		Request:  testRequest{Method: "PUT", Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(ns, nil)},
		Response: Response{StatusCode: 200, Body: limitRange},
	}
	_, err := c.Setup().LimitRanges(ns).Update(limitRange)
	if err == nil {
		t.Errorf("Expected an error due to missing ResourceVersion")
	}
}
Example No. 7
func TestResourceNames(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		expected []api.ResourceName
	}{
		"empty": {
			a:        api.ResourceList{},
			expected: []api.ResourceName{},
		},
		"values": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU},
		},
	}
	for testName, testCase := range testCases {
		actualSet := ToSet(ResourceNames(testCase.a))
		expectedSet := ToSet(testCase.expected)
		if !actualSet.Equal(expectedSet) {
			t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet)
		}
	}
}
Example No. 8
func TestLimitRangeUpdate(t *testing.T) {
	ns := api.NamespaceDefault
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name:            "abc",
			ResourceVersion: "1",
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
	c := &simple.Client{
		Request:  simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: simple.BuildQueryValues(nil)},
		Response: simple.Response{StatusCode: 200, Body: limitRange},
	}
	response, err := c.Setup(t).LimitRanges(ns).Update(limitRange)
	defer c.Close()
	c.Validate(t, response, err)
}
Example No. 9
func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container {
	return []api.Container{
		{
			Name:    name,
			Image:   "gcr.io/google_containers/mounttest:0.7",
			Command: []string{"/mt", "--file_content=" + filePath},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("250m"),
					api.ResourceMemory: resource.MustParse("32Mi"),
				},
				Limits: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("1250m"),
					api.ResourceMemory: resource.MustParse("64Mi"),
				},
			},
			VolumeMounts: []api.VolumeMount{
				{
					Name:      "podinfo",
					MountPath: "/etc",
					ReadOnly:  false,
				},
			},
		},
	}
}
Example No. 10
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "dapi-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "env"},
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("32Mi"),
						},
						Limits: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("1250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
					Env: env,
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	// TODO(random-liu): Change TestContainerOutputRegexp to use PodClient and avoid MungeSpec explicitly
	f.PodClient().MungeSpec(pod)

	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
Example No. 11
func TestCreateMinion(t *testing.T) {
	requestMinion := &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "minion-1",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1000m"),
				api.ResourceMemory: resource.MustParse("1Mi"),
			},
		},
		Spec: api.NodeSpec{
			Unschedulable: false,
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "POST",
			Path:   testapi.ResourcePath(getNodesResourceName(), "", ""),
			Body:   requestMinion},
		Response: Response{
			StatusCode: 200,
			Body:       requestMinion,
		},
	}
	receivedMinion, err := c.Setup().Nodes().Create(requestMinion)
	c.Validate(t, receivedMinion, err)
}
Example No. 12
func TestResourceHelpers(t *testing.T) {
	cpuLimit := resource.MustParse("10")
	memoryLimit := resource.MustParse("10G")
	resourceSpec := ResourceRequirements{
		Limits: ResourceList{
			"cpu":             cpuLimit,
			"memory":          memoryLimit,
			"kube.io/storage": memoryLimit,
		},
	}
	if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 {
		t.Errorf("expected cpulimit %v, got %v", cpuLimit, res)
	}
	if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
		t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
	}
	resourceSpec = ResourceRequirements{
		Limits: ResourceList{
			"memory":          memoryLimit,
			"kube.io/storage": memoryLimit,
		},
	}
	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
		t.Errorf("expected cpulimit %v, got %v", 0, res)
	}
	if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 {
		t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
	}
}
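The second half of the test relies on Limits.Cpu() returning a usable zero Quantity when the key is absent; a sketch of that accessor pattern with a hypothetical cpuOf helper:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// cpuOf mirrors the Limits.Cpu() contract the test depends on: a missing
// key yields a zero-valued Quantity rather than an error or a nil pointer.
func cpuOf(limits map[string]resource.Quantity) *resource.Quantity {
	if q, ok := limits["cpu"]; ok {
		return &q
	}
	return &resource.Quantity{} // zero value: Value() == 0
}

func main() {
	q := cpuOf(map[string]resource.Quantity{})
	fmt.Println(q.Value()) // 0
}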
Example No. 13
func TestIncrementUsagePods(t *testing.T) {
	namespace := "default"
	client := testclient.NewSimpleFake(&api.PodList{
		Items: []api.Pod{
			{
				ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
				Spec: api.PodSpec{
					Volumes:    []api.Volume{{Name: "vol"}},
					Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
				},
			},
		},
	})
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourcePods
	status.Hard[r] = resource.MustParse("2")
	status.Used[r] = resource.MustParse("1")
	dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if !dirty {
		t.Errorf("Expected the status to get incremented, therefore should have been dirty")
	}
	quantity := status.Used[r]
	if quantity.Value() != int64(2) {
		t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
	}
}
Example No. 14
func TestAdmissionIgnoresSubresources(t *testing.T) {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := createResourceQuota(&testclient.Fake{}, indexer)

	quota := &api.ResourceQuota{}
	quota.Name = "quota"
	quota.Namespace = "test"
	quota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")

	indexer.Add(quota)

	newPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: quota.Namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "2Gi")}},
		}}

	err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}

	err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example No. 15
func allocatableResources(memory, cpu string) api.ResourceList {
	return api.ResourceList{
		api.ResourceMemory: resource.MustParse(memory),
		api.ResourceCPU:    resource.MustParse(cpu),
		api.ResourcePods:   resource.MustParse("100"),
	}
}
Example No. 16
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creating a pod that would exceed quota fails properly,
// and that the same create succeeds when directed at a subresource.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example No. 17
func TestNodeBuilder(t *testing.T) {
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "node1", Namespace: "should-not-have", ResourceVersion: "10"},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1000m"),
				api.ResourceMemory: resource.MustParse("1Mi"),
			},
		},
	}
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), node)))
	}()

	b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()).
		NamespaceParam("test").Stream(r, "STDIN")

	test := &testVisitor{}

	err := b.Do().Visit(test.Handle)
	if err != nil || len(test.Infos) != 1 {
		t.Fatalf("unexpected response: %v %#v", err, test.Infos)
	}
	info := test.Infos[0]
	if info.Name != "node1" || info.Namespace != "" || info.Object == nil {
		t.Errorf("unexpected info: %#v", info)
	}
}
Example No. 18
func TestUpdate(t *testing.T) {
	storage, fakeClient := newStorage(t)
	test := resttest.New(t, storage, fakeClient.SetError).AllowCreateOnUpdate()
	test.TestUpdate(
		// valid
		validNewLimitRange(),
		func(ctx api.Context, obj runtime.Object) error {
			return registrytest.SetObject(fakeClient, storage.KeyFunc, ctx, obj)
		},
		func(resourceVersion uint64) {
			registrytest.SetResourceVersion(fakeClient, resourceVersion)
		},
		func(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
			return registrytest.GetObject(fakeClient, storage.KeyFunc, storage.NewFunc, ctx, obj)
		},
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.LimitRange)
			object.Spec.Limits = []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("1000"),
						api.ResourceMemory: resource.MustParse("100000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("10"),
						api.ResourceMemory: resource.MustParse("1000"),
					},
				},
			}
			return object
		},
	)
}
Example No. 19
func TestLimitRangeGet(t *testing.T) {
	ns := api.NamespaceDefault
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name: "abc",
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.ResourcePath(getLimitRangesResourceName(), ns, "abc"),
			Query:  buildQueryValues(ns, nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: limitRange},
	}

	response, err := c.Setup().LimitRanges(ns).Get("abc")
	c.Validate(t, response, err)
}
Example No. 20
func TestSyncResourceQuotaNoChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&api.PodList{}, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	actions := kubeClient.Actions()
	if len(actions) != 1 || !actions[0].Matches("list", "pods") {
		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions())
	}
}
Example No. 21
func TestIsZero(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		expected bool
	}{
		"empty": {
			a:        api.ResourceList{},
			expected: true,
		},
		"zero": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("0"),
				api.ResourceMemory: resource.MustParse("0"),
			},
			expected: true,
		},
		"non-zero": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("200m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: false,
		},
	}
	for testName, testCase := range testCases {
		if result := IsZero(testCase.a); result != testCase.expected {
			t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected)
		}
	}
}
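A hypothetical re-implementation of the IsZero predicate these cases pin down (an empty list is vacuously zero), with a plain map standing in for api.ResourceList:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// isZero reports whether every quantity in the list equals zero;
// ranging over an empty map never runs the body, so "empty" is true.
func isZero(rl map[string]resource.Quantity) bool {
	zero := resource.MustParse("0")
	for _, v := range rl {
		if v.Cmp(zero) != 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isZero(map[string]resource.Quantity{}))                                  // true
	fmt.Println(isZero(map[string]resource.Quantity{"cpu": resource.MustParse("200m")})) // false
}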
Example No. 22
// TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a best-effort scoped quota
// does not match a burstable pod (one that sets resource requests).
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
		Spec: api.ResourceQuotaSpec{
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourcePods: resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourcePods: resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := &quotaAdmission{
		Handler:  admission.NewHandler(admission.Create, admission.Update),
		client:   kubeClient,
		indexer:  indexer,
		registry: install.NewRegistry(kubeClient),
	}
	handler.indexer.Add(resourceQuota)
	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if len(kubeClient.Actions()) != 0 {
		t.Errorf("Expected no client actions because the incoming pod did not match best effort quota")
	}
}
Example No. 23
// Usage knows how to measure usage associated with item.
func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) {
	result := api.ResourceList{}
	pvc, err := toInternalPersistentVolumeClaimOrError(item)
	if err != nil {
		return result, err
	}
	storageClassRef := util.GetClaimStorageClass(pvc)

	// charge for claim
	result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
	if len(storageClassRef) > 0 {
		storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims))
		result[storageClassClaim] = resource.MustParse("1")
	}

	// charge for storage
	if request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found {
		result[api.ResourceRequestsStorage] = request
		// charge usage to the storage class (if present)
		if len(storageClassRef) > 0 {
			storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage))
			result[storageClassStorage] = request
		}
	}
	return result, nil
}
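Concretely, and assuming storageClassSuffix resolves to the ".storageclass.storage.k8s.io/" separator used by the core evaluator, a 10Gi claim in storage class "gold" is charged under four keys:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical illustration of the usage charged for a 10Gi claim in
	// storage class "gold"; the key strings assume the suffix noted above.
	usage := map[string]resource.Quantity{
		"persistentvolumeclaims": resource.MustParse("1"),
		"gold.storageclass.storage.k8s.io/persistentvolumeclaims": resource.MustParse("1"),
		"requests.storage": resource.MustParse("10Gi"),
		"gold.storageclass.storage.k8s.io/requests.storage": resource.MustParse("10Gi"),
	}
	for k, v := range usage {
		fmt.Println(k, v.String())
	}
}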
Example No. 24
func mockResources() kapi.ResourceRequirements {
	res := kapi.ResourceRequirements{}
	res.Limits = kapi.ResourceList{}
	res.Limits[kapi.ResourceCPU] = resource.MustParse("100m")
	res.Limits[kapi.ResourceMemory] = resource.MustParse("100Mi")
	return res
}
Example No. 25
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "dapi-container",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"sh", "-c", "env"},
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("250m"),
							api.ResourceMemory: resource.MustParse("32Mi"),
						},
						Limits: api.ResourceList{
							api.ResourceCPU:    resource.MustParse("1250m"),
							api.ResourceMemory: resource.MustParse("64Mi"),
						},
					},
					Env: env,
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	testDownwardAPIUsingPod(f, pod, env, expectations)
}
Example No. 26
func TestUpdate(t *testing.T) {
	storage, fakeClient := newStorage(t)
	test := registrytest.New(t, fakeClient, storage.Etcd).AllowCreateOnUpdate()
	test.TestUpdate(
		// valid
		validNewLimitRange(),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.LimitRange)
			object.Spec.Limits = []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("1000"),
						api.ResourceMemory: resource.MustParse("100000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("10"),
						api.ResourceMemory: resource.MustParse("1000"),
					},
				},
			}
			return object
		},
	)
}
Example No. 27
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creating a pod that would exceed quota fails properly,
// and that the same create succeeds when directed at a subresource.
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	stopCh := make(chan struct{})
	defer close(stopCh)

	quotaAccessor, _ := newQuotaAccessor(kubeClient)
	quotaAccessor.indexer = indexer
	go quotaAccessor.Run(stopCh)
	evaluator := NewQuotaEvaluator(quotaAccessor, install.NewRegistry(nil, nil), nil, 5, stopCh)

	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Example No. 28
func TestClusterResourceOverridePluginWithNoLimits(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	config := &overrideapi.ClusterResourceOverrideConfig{
		LimitCPUToMemoryPercent:     100,
		CPURequestToLimitPercent:    50,
		MemoryRequestToLimitPercent: 50,
	}
	kubeClientset := setupClusterResourceOverrideTest(t, config)
	podHandler := kubeClientset.Core().Pods(testutil.Namespace())

	// test with no limits object present

	podCreated, err := podHandler.Create(testClusterResourceOverridePod("limitless", "2Gi", "1"))
	if err != nil {
		t.Fatal(err)
	}
	if memory := podCreated.Spec.Containers[0].Resources.Requests.Memory(); memory.Cmp(resource.MustParse("1Gi")) != 0 {
		t.Errorf("limitlesspod: Memory did not match expected 1Gi: %#v", memory)
	}
	if cpu := podCreated.Spec.Containers[0].Resources.Limits.Cpu(); cpu.Cmp(resource.MustParse("2")) != 0 {
		t.Errorf("limitlesspod: CPU limit did not match expected 2 core: %#v", cpu)
	}
	if cpu := podCreated.Spec.Containers[0].Resources.Requests.Cpu(); cpu.Cmp(resource.MustParse("1")) != 0 {
		t.Errorf("limitlesspod: CPU req did not match expected 1 core: %#v", cpu)
	}
}
Example No. 29
func makeNNodes(c client.Interface, N int) {
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < N; i++ {
		if _, err := c.Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}
Example No. 30
func TestExceedUsagePods(t *testing.T) {
	namespace := "default"
	client := testclient.NewSimpleFake(&api.PodList{
		Items: []api.Pod{
			{
				ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
				Spec: api.PodSpec{
					Volumes:    []api.Volume{{Name: "vol"}},
					Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
				},
			},
		},
	})
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourcePods
	status.Hard[r] = resource.MustParse("1")
	status.Used[r] = resource.MustParse("1")
	_, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client)
	if err == nil {
		t.Errorf("Expected error because this would exceed your quota")
	}
}
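Together with TestIncrementUsagePods above, this pins down IncrementUsage's contract: bump the used count, or error when the bump would pass the hard limit. A compact sketch of that bookkeeping with a hypothetical incrementChecked helper (the real function also inspects the admission attributes):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// incrementChecked sketches the quota bookkeeping these tests exercise:
// add one to used[name] and reject the change if it would exceed hard[name].
func incrementChecked(hard, used map[string]resource.Quantity, name string) error {
	u := used[name]
	u.Add(resource.MustParse("1"))
	h := hard[name]
	if u.Cmp(h) > 0 {
		return fmt.Errorf("quota exceeded for %s: used %s, hard %s", name, u.String(), h.String())
	}
	used[name] = u
	return nil
}

func main() {
	hard := map[string]resource.Quantity{"pods": resource.MustParse("1")}
	used := map[string]resource.Quantity{"pods": resource.MustParse("1")}
	fmt.Println(incrementChecked(hard, used, "pods")) // quota exceeded for pods: used 2, hard 1
}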