func TestInvalidLimitRangeUpdate(t *testing.T) {
	ns := api.NamespaceDefault
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name: "abc",
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
	c := &testClient{
		Request:  testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil)},
		Response: Response{StatusCode: 200, Body: limitRange},
	}
	_, err := c.Setup(t).LimitRanges(ns).Update(limitRange)
	if err == nil {
		t.Errorf("Expected an error due to missing ResourceVersion")
	}
}
func TestSyncResourceQuotaNoChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := testclient.NewSimpleFake(&api.PodList{}, &quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient)
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	// since hard and used already agree, the only action should be the pod list
	actions := kubeClient.Actions()
	if len(actions) != 1 || !actions[0].Matches("list", "pods") {
		t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", actions)
	}
}
func TestLimitRangeGet(t *testing.T) {
	ns := api.NamespaceDefault
	limitRange := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name: "abc",
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"),
			Query:  buildQueryValues(nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: limitRange},
	}
	response, err := c.Setup(t).LimitRanges(ns).Get("abc")
	c.Validate(t, response, err)
}
func TestNodeBuilder(t *testing.T) {
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "node1", Namespace: "should-not-have", ResourceVersion: "10"},
		Spec:       api.NodeSpec{},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1000m"),
				api.ResourceMemory: resource.MustParse("1Mi"),
			},
		},
	}
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), node)))
	}()

	b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient()).
		NamespaceParam("test").Stream(r, "STDIN")

	test := &testVisitor{}

	err := b.Do().Visit(test.Handle)
	if err != nil || len(test.Infos) != 1 {
		t.Fatalf("unexpected response: %v %#v", err, test.Infos)
	}
	info := test.Infos[0]
	if info.Name != "node1" || info.Namespace != "" || info.Object == nil {
		t.Errorf("unexpected info: %#v", info)
	}
}
func TestCalculateTimeoutForVolume(t *testing.T) {
	pv := &api.PersistentVolume{
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("500M"),
			},
		},
	}

	timeout := CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 50 {
		t.Errorf("Expected 50 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("2Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 60 {
		t.Errorf("Expected 60 for timeout but got %v", timeout)
	}

	pv.Spec.Capacity[api.ResourceStorage] = resource.MustParse("150Gi")
	timeout = CalculateTimeoutForVolume(50, 30, pv)
	if timeout != 4500 {
		t.Errorf("Expected 4500 for timeout but got %v", timeout)
	}
}
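// The expectations in the test above suggest the recycler timeout scales with
// volume size: roughly max(minimumTimeout, timeoutIncrement * sizeInGiB), so a
// 500M volume falls back to the 50s floor, 2Gi yields 2*30 = 60s, and 150Gi
// yields 150*30 = 4500s. The sketch below only illustrates that relationship;
// the name and integer rounding are assumptions, not the actual
// CalculateTimeoutForVolume implementation.
func calculateTimeoutSketch(minimumTimeout, timeoutIncrement, sizeGiB int64) int64 {
	timeout := timeoutIncrement * sizeGiB
	if timeout < minimumTimeout {
		return minimumTimeout
	}
	return timeout
}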
func TestIncrementUsageReplicationControllers(t *testing.T) {
	namespace := "default"
	client := testclient.NewSimpleFake(&api.ReplicationControllerList{
		Items: []api.ReplicationController{
			{
				ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
			},
		},
	})
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourceReplicationControllers
	status.Hard[r] = resource.MustParse("2")
	status.Used[r] = resource.MustParse("1")
	dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, "ReplicationController", namespace, "name", "replicationcontrollers", "", admission.Create, nil), status, client)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if !dirty {
		t.Errorf("Expected the status to get incremented, therefore should have been dirty")
	}
	quantity := status.Used[r]
	if quantity.Value() != int64(2) {
		t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
	}
}
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, timestamp time.Time, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())
	if err != nil {
		return nil, nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, time.Time{}, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, timestamp, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, time.Time{}, err
	}
	return consumption, request, timestamp, nil
}
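// A hypothetical caller of the method above: compare the observed per-pod
// consumption against the averaged per-pod request to derive a utilization
// ratio, roughly what an autoscaler needs. The function name is illustrative,
// and it assumes ResourceConsumption exposes a Quantity field as in this API
// version; it is a sketch, not part of the metrics client.
func exampleCPUUsageRatio(h *HeapsterMetricsClient, namespace string, selector map[string]string) (float64, error) {
	consumption, request, _, err := h.GetResourceConsumptionAndRequest(api.ResourceCPU, namespace, selector)
	if err != nil {
		return 0, err
	}
	// Both values are averaged per pod, so their ratio approximates current
	// utilization relative to the requested amount.
	return float64(consumption.Quantity.MilliValue()) / float64(request.MilliValue()), nil
}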
func TestAdmissionIgnoresSubresources(t *testing.T) {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	handler := createResourceQuota(&testclient.Fake{}, indexer)

	quota := &api.ResourceQuota{}
	quota.Name = "quota"
	quota.Namespace = "test"
	quota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	indexer.Add(quota)

	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}

	err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
func TestResourceHelpers(t *testing.T) {
	cpuLimit := resource.MustParse("10")
	memoryLimit := resource.MustParse("10G")
	resourceSpec := ResourceRequirements{
		Limits: ResourceList{
			"cpu":             cpuLimit,
			"memory":          memoryLimit,
			"kube.io/storage": memoryLimit,
		},
	}
	if res := resourceSpec.Limits.Cpu(); *res != cpuLimit {
		t.Errorf("expected cpulimit %v, got %v", cpuLimit, res)
	}
	if res := resourceSpec.Limits.Memory(); *res != memoryLimit {
		t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
	}
	resourceSpec = ResourceRequirements{
		Limits: ResourceList{
			"memory":          memoryLimit,
			"kube.io/storage": memoryLimit,
		},
	}
	if res := resourceSpec.Limits.Cpu(); res.Value() != 0 {
		t.Errorf("expected cpulimit %v, got %v", 0, res)
	}
	if res := resourceSpec.Limits.Memory(); *res != memoryLimit {
		t.Errorf("expected memorylimit %v, got %v", memoryLimit, res)
	}
}
func TestCreateNode(t *testing.T) {
	requestNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "node-1",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1000m"),
				api.ResourceMemory: resource.MustParse("1Mi"),
			},
		},
		Spec: api.NodeSpec{
			Unschedulable: false,
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "POST",
			Path:   testapi.Default.ResourcePath(getNodesResourceName(), "", ""),
			Body:   requestNode,
		},
		Response: Response{
			StatusCode: 200,
			Body:       requestNode,
		},
	}
	receivedNode, err := c.Setup(t).Nodes().Create(requestNode)
	c.Validate(t, receivedNode, err)
}
func TestUpdate(t *testing.T) {
	storage, fakeClient := newStorage(t)
	test := registrytest.New(t, fakeClient, storage.Etcd).AllowCreateOnUpdate()
	test.TestUpdate(
		// valid
		validNewLimitRange(),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.LimitRange)
			object.Spec.Limits = []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("1000"),
						api.ResourceMemory: resource.MustParse("100000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("10"),
						api.ResourceMemory: resource.MustParse("1000"),
					},
				},
			}
			return object
		},
	)
}
func TestSyncResourceQuotaSpecChange(t *testing.T) {
	quota := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("3"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}
	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := testclient.NewSimpleFake(&quota)

	ResourceQuotaController := NewResourceQuotaController(kubeClient)
	err := ResourceQuotaController.syncResourceQuota(quota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	usage := kubeClient.Actions()[1].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)

	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
}
// getResourceList builds an api.ResourceList from the given cpu and memory
// strings; empty strings are omitted from the result.
func getResourceList(cpu, memory string) api.ResourceList {
	res := api.ResourceList{}
	if cpu != "" {
		res[api.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[api.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}
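// Illustrative use of the helper above: empty strings are simply skipped,
// which keeps test fixtures terse. The function name below is hypothetical
// and not part of the original suite.
func exampleCPUOnlyList() api.ResourceList {
	// Yields a list containing only api.ResourceCPU = 250m; memory is absent.
	return getResourceList("250m", "")
}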
func TestTolerance(t *testing.T) {
	tc := testCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     3,
		desiredReplicas:     3,
		CPUTarget:           100,
		reportedLevels:      []uint64{1010, 1030, 1020},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
	}
	tc.runTest(t)
}
func TestEmptyMetrics(t *testing.T) {
	tc := testCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     4,
		desiredReplicas:     4,
		CPUTarget:           100,
		reportedLevels:      []uint64{},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}
func TestMaxReplicas(t *testing.T) {
	tc := testCase{
		minReplicas:         2,
		maxReplicas:         5,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           90,
		reportedLevels:      []uint64{8000, 9500, 1000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
	}
	tc.runTest(t)
}
func TestScaleDown(t *testing.T) {
	tc := testCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     5,
		desiredReplicas:     3,
		CPUTarget:           50,
		reportedLevels:      []uint64{100, 300, 500, 250, 250},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}
// parseReq builds an api.ResourceList for a container request; it returns nil
// when both cpu and mem are empty so that no request is set at all.
func parseReq(cpu, mem string) api.ResourceList {
	if cpu == "" && mem == "" {
		return nil
	}
	req := api.ResourceList{}
	if cpu != "" {
		req[api.ResourceCPU] = resource.MustParse(cpu)
	}
	if mem != "" {
		req[api.ResourceMemory] = resource.MustParse(mem)
	}
	return req
}
func TestEventNotCreated(t *testing.T) {
	tc := testCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     2,
		desiredReplicas:     2,
		CPUTarget:           50,
		reportedLevels:      []uint64{200, 200},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
		verifyEvents:        true,
	}
	tc.runTest(t)
}
func newNode(name string) *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.NodeSpec{
			ExternalID: name,
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
				api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
func TestPersistentVolumeGet(t *testing.T) {
	persistentVolume := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:      "abc",
			Namespace: "foo",
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"),
			Query:  buildQueryValues(nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: persistentVolume},
	}
	response, err := c.Setup(t).PersistentVolumes().Get("abc")
	c.Validate(t, response, err)
}
func TestPersistentVolumeStatusUpdate(t *testing.T) {
	persistentVolume := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:            "abc",
			ResourceVersion: "1",
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
		},
		Status: api.PersistentVolumeStatus{
			Phase:   api.VolumeBound,
			Message: "foo",
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "PUT",
			Path:   testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc") + "/status",
			Query:  buildQueryValues(nil),
		},
		Response: Response{StatusCode: 200, Body: persistentVolume},
	}
	response, err := c.Setup(t).PersistentVolumes().UpdateStatus(persistentVolume)
	c.Validate(t, response, err)
}
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
	err := os.MkdirAll(fullpath, 0750)
	if err != nil {
		return nil, err
	}
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-hostpath-",
			Labels: map[string]string{
				"createdby": "hostpath dynamic provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}, nil
}
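// A hypothetical caller sketch for the Creater above: Create only returns an
// in-memory PersistentVolume, so a provisioner would still persist it through
// the API. The function name is illustrative, not the actual provisioning
// controller; GenerateName lets the server pick a unique "pv-hostpath-..." name.
func provisionHostPathPV(c client.Interface, r *hostPathCreater) (*api.PersistentVolume, error) {
	pv, err := r.Create()
	if err != nil {
		return nil, err
	}
	return c.PersistentVolumes().Create(pv)
}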
func TestDefaultRequestIsNotSetForReplicationController(t *testing.T) {
	s := versioned.PodSpec{}
	s.Containers = []versioned.Container{
		{
			Resources: versioned.ResourceRequirements{
				Limits: versioned.ResourceList{
					versioned.ResourceCPU: resource.MustParse("100m"),
				},
			},
		},
	}
	rc := &versioned.ReplicationController{
		Spec: versioned.ReplicationControllerSpec{
			Replicas: newInt(3),
			Template: &versioned.PodTemplateSpec{
				ObjectMeta: versioned.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: s,
			},
		},
	}
	output := roundTrip(t, runtime.Object(rc))
	rc2 := output.(*versioned.ReplicationController)
	defaultRequest := rc2.Spec.Template.Spec.Containers[0].Resources.Requests
	requestValue := defaultRequest[versioned.ResourceCPU]
	if requestValue.String() != "0" {
		t.Errorf("Expected 0 request value, got: %s", requestValue.String())
	}
}
func TestMergePodResourceRequirements(t *testing.T) {
	limitRange := validLimitRange()

	// pod with no resources enumerated should get each resource from default request
	expected := getResourceRequirements(getResourceList("", ""), getResourceList("", ""))
	pod := validPod("empty-resources", 1, expected)
	defaultRequirements := defaultContainerResourceRequirements(&limitRange)
	mergePodResourceRequirements(&pod, &defaultRequirements)
	for i := range pod.Spec.Containers {
		actual := pod.Spec.Containers[i].Resources
		if !api.Semantic.DeepEqual(expected, actual) {
			t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual)
		}
	}

	// pod with some resources enumerated should only merge empty
	input := getResourceRequirements(getResourceList("", "512Mi"), getResourceList("", ""))
	pod = validPod("limit-memory", 1, input)
	expected = api.ResourceRequirements{
		Requests: api.ResourceList{
			api.ResourceCPU:    defaultRequirements.Requests[api.ResourceCPU],
			api.ResourceMemory: resource.MustParse("512Mi"),
		},
		Limits: defaultRequirements.Limits,
	}
	mergePodResourceRequirements(&pod, &defaultRequirements)
	for i := range pod.Spec.Containers {
		actual := pod.Spec.Containers[i].Resources
		if !api.Semantic.DeepEqual(expected, actual) {
			t.Errorf("pod %v, expected != actual; %v != %v", pod.Name, expected, actual)
		}
	}
}
func TestExceedUsagePods(t *testing.T) {
	pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
	podList := &api.PodList{Items: []api.Pod{*pod}}
	client := testclient.NewSimpleFake(podList)
	status := &api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	r := api.ResourcePods
	status.Hard[r] = resource.MustParse("1")
	status.Used[r] = resource.MustParse("1")
	_, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", pod.Namespace, "name", "pods", "", admission.Create, nil), status, client)
	if err == nil {
		t.Errorf("Expected error because this would exceed your quota")
	}
}
func makePersistentVolume(serverIP string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "nfs-",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				NFS: &api.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/",
					ReadOnly: false,
				},
			},
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
				api.ReadWriteMany,
			},
		},
	}
}
func TestPersistentVolumeClaimGet(t *testing.T) {
	ns := api.NamespaceDefault
	persistentVolumeClaim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "abc",
			Namespace: "foo",
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
			},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
				},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"),
			Query:  buildQueryValues(nil),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: persistentVolumeClaim},
	}
	response, err := c.Setup(t).PersistentVolumeClaims(ns).Get("abc")
	c.Validate(t, response, err)
}
// PodsRequests returns the sum of each pod's request for the named resource.
// If a pod in the list has no request for that resource, the omission is logged
// and the pod is skipped, so the result is the most representative count we can compute.
func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity {
	var sum *resource.Quantity
	for i := range pods {
		pod := pods[i]
		podQuantity, err := PodRequests(pod, resourceName)
		if err != nil {
			// log the omission, but keep the most accurate count possible;
			// rationale here is that you may have had pods in a namespace that did not have
			// explicit requests prior to adding the quota
			glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName)
		} else {
			if sum == nil {
				sum = podQuantity
			} else {
				sum.Add(*podQuantity)
			}
		}
	}
	// if list is empty
	if sum == nil {
		q := resource.MustParse("0")
		sum = &q
	}
	return sum
}
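// Illustrative use of PodsRequests: total the CPU requested by a set of pods,
// e.g. when charging usage against a quota. The function name below is made up
// for the example and is not part of the original helpers.
func totalCPURequested(pods []*api.Pod) *resource.Quantity {
	// Pods without an explicit CPU request are logged and skipped by
	// PodsRequests, so the result is a best-effort lower bound.
	return PodsRequests(pods, api.ResourceCPU)
}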
func validNewNode() *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "foo",
			Labels: map[string]string{
				"name": "foo",
			},
		},
		Spec: api.NodeSpec{
			ExternalID: "external",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
				api.ResourceName(api.ResourceMemory): resource.MustParse("0"),
			},
		},
	}
}