// PodConstraintsFunc verifies that all required resources are present on the pod
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
	pod, ok := object.(*api.Pod)
	if !ok {
		return fmt.Errorf("unexpected input object %v", object)
	}
	// TODO: fix this when we have pod level cgroups
	// since we do not yet have pod-level requests/limits, we need to ensure each
	// container makes an explicit request or limit for a quota tracked resource
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	for i := range pod.Spec.Containers {
		requests := pod.Spec.Containers[i].Resources.Requests
		limits := pod.Spec.Containers[i].Resources.Limits
		containerUsage := podUsageHelper(requests, limits)
		containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
		if !containerSet.Equal(requiredSet) {
			difference := requiredSet.Difference(containerSet)
			missingSet.Insert(difference.List()...)
		}
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
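A minimal usage sketch of the function above, assuming the surrounding quota evaluator package and the usual api/resource imports for this tree; the function and variable names in the sketch are illustrative, not part of the original source. A pod whose only container requests cpu but not memory should fail a constraint requiring memory.

// examplePodConstraints is a hypothetical sketch (not from the original
// source): the single container requests cpu only, so requiring memory
// should yield a "must specify memory" error.
func examplePodConstraints() error {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name: "app",
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						api.ResourceCPU: resource.MustParse("100m"),
					},
				},
			}},
		},
	}
	// Expected to return a non-nil error: "must specify memory".
	return PodConstraintsFunc([]api.ResourceName{api.ResourceMemory}, pod)
}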
func TestGetStarvedResources(t *testing.T) {
	testCases := map[string]struct {
		inputs []Threshold
		result []api.ResourceName
	}{
		"memory.available": {
			inputs: []Threshold{
				{Signal: SignalMemoryAvailable},
			},
			result: []api.ResourceName{api.ResourceMemory},
		},
		"imagefs.available": {
			inputs: []Threshold{
				{Signal: SignalImageFsAvailable},
			},
			result: []api.ResourceName{resourceImageFs},
		},
		"nodefs.available": {
			inputs: []Threshold{
				{Signal: SignalNodeFsAvailable},
			},
			result: []api.ResourceName{resourceNodeFs},
		},
	}
	for testName, testCase := range testCases {
		actual := getStarvedResources(testCase.inputs)
		actualSet := quota.ToSet(actual)
		expectedSet := quota.ToSet(testCase.result)
		if !actualSet.Equal(expectedSet) {
			t.Errorf("Test case: %s, expected: %v, actual: %v", testName, expectedSet, actualSet)
		}
	}
}
func TestServiceEvaluatorMatchesResources(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	evaluator := NewServiceEvaluator(kubeClient)
	expected := quota.ToSet([]api.ResourceName{
		api.ResourceServices,
		api.ResourceServicesNodePorts,
		api.ResourceServicesLoadBalancers,
	})
	actual := quota.ToSet(evaluator.MatchesResources())
	if !expected.Equal(actual) {
		t.Errorf("expected: %v, actual: %v", expected, actual)
	}
}
// NewSharedContextEvaluator creates an evaluator object that allows sharing context while computing usage of
// a single namespace. The context is represented by an object returned by usageComputerFactory and is destroyed
// when the namespace is processed.
func NewSharedContextEvaluator(
	name string,
	groupKind unversioned.GroupKind,
	operationResources map[admission.Operation][]kapi.ResourceName,
	matchedResourceNames []kapi.ResourceName,
	matchesScopeFunc generic.MatchesScopeFunc,
	getFuncByNamespace generic.GetFuncByNamespace,
	listFuncByNamespace generic.ListFuncByNamespace,
	constraintsFunc generic.ConstraintsFunc,
	usageComputerFactory UsageComputerFactory,
) quota.Evaluator {
	rnSet := sets.String{}
	for _, resourceNames := range operationResources {
		rnSet.Insert(quota.ToSet(resourceNames).List()...)
	}
	return &SharedContextEvaluator{
		GenericEvaluator: &generic.GenericEvaluator{
			Name:                       name,
			InternalGroupKind:          groupKind,
			InternalOperationResources: operationResources,
			MatchedResourceNames:       matchedResourceNames,
			MatchesScopeFunc:           matchesScopeFunc,
			GetFuncByNamespace:         getFuncByNamespace,
			ListFuncByNamespace:        listFuncByNamespace,
			ConstraintsFunc:            constraintsFunc,
			UsageFunc: func(object runtime.Object) kapi.ResourceList {
				comp := usageComputerFactory()
				return comp.Usage(object)
			},
		},
		UsageComputerFactory: usageComputerFactory,
	}
}
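For illustration, a minimal sketch of a computer that could be supplied as the usageComputerFactory argument. This is hypothetical and not from the original source; it assumes only what the closure above implies, namely that a UsageComputer exposes a Usage method returning a kapi.ResourceList, and it uses a made-up extended resource name. Because the factory is invoked fresh per namespace, the computer can safely carry per-namespace state.

// countingComputer is a hypothetical UsageComputer: one instance is created
// per namespace, so per-namespace state like this counter is safe to keep.
type countingComputer struct {
	objectsSeen int // shared across all objects evaluated in one namespace
}

// Usage charges one unit of an illustrative resource per object seen.
func (c *countingComputer) Usage(object runtime.Object) kapi.ResourceList {
	c.objectsSeen++
	return kapi.ResourceList{
		kapi.ResourceName("example.com/widgets"): resource.MustParse("1"),
	}
}

// newCountingComputer is the factory that would be passed to
// NewSharedContextEvaluator as its usageComputerFactory argument.
func newCountingComputer() UsageComputer {
	return &countingComputer{}
}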
// PersistentVolumeClaimConstraintsFunc verifies that all required resources are present on the claim
func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
	pvc, ok := object.(*api.PersistentVolumeClaim)
	if !ok {
		return fmt.Errorf("unexpected input object %v", object)
	}
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	pvcUsage := PersistentVolumeClaimUsageFunc(pvc)
	pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
	if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
		missingSet.Insert(diff.List()...)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
// enforcePodContainerConstraints checks for required resources that are not set on this container and
// adds them to missingSet.
func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) {
	requests := container.Resources.Requests
	limits := container.Resources.Limits
	containerUsage := podUsageHelper(requests, limits)
	containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
	if !containerSet.Equal(requiredSet) {
		difference := requiredSet.Difference(containerSet)
		missingSet.Insert(difference.List()...)
	}
}
// ServiceConstraintsFunc verifies that all required resources are captured in service usage.
func ServiceConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
	service, ok := object.(*api.Service)
	if !ok {
		return fmt.Errorf("unexpected input object %v", object)
	}
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	serviceUsage := ServiceUsageFunc(service)
	serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
	if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
		missingSet.Insert(diff.List()...)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
func TestGetStarvedResources(t *testing.T) {
	testCases := map[string]struct {
		inputs []Threshold
		result []v1.ResourceName
	}{
		"memory.available": {
			inputs: []Threshold{
				{Signal: SignalMemoryAvailable},
			},
			result: []v1.ResourceName{v1.ResourceMemory},
		},
		"imagefs.available": {
			inputs: []Threshold{
				{Signal: SignalImageFsAvailable},
			},
			result: []v1.ResourceName{resourceImageFs},
		},
		"nodefs.available": {
			inputs: []Threshold{
				{Signal: SignalNodeFsAvailable},
			},
			result: []v1.ResourceName{resourceNodeFs},
		},
	}
	var internalResourceNames = func(in []v1.ResourceName) []api.ResourceName {
		var out []api.ResourceName
		for _, name := range in {
			out = append(out, api.ResourceName(name))
		}
		return out
	}
	for testName, testCase := range testCases {
		actual := getStarvedResources(testCase.inputs)
		actualSet := quota.ToSet(internalResourceNames(actual))
		expectedSet := quota.ToSet(internalResourceNames(testCase.result))
		if !actualSet.Equal(expectedSet) {
			t.Errorf("Test case: %s, expected: %v, actual: %v", testName, expectedSet, actualSet)
		}
	}
}
// PodConstraintsFunc verifies that all required resources are present on the pod
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
	pod, ok := object.(*api.Pod)
	if !ok {
		return fmt.Errorf("unexpected input object %v", object)
	}

	// Pod level resources are often set during admission control
	// As a consequence, we want to verify that resources are valid prior
	// to ever charging quota prematurely in case they are not.
	allErrs := field.ErrorList{}
	fldPath := field.NewPath("spec").Child("containers")
	for i, ctr := range pod.Spec.Containers {
		idxPath := fldPath.Index(i)
		allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
	}
	if len(allErrs) > 0 {
		return allErrs.ToAggregate()
	}

	// TODO: fix this when we have pod level cgroups
	// since we do not yet have pod-level requests/limits, we need to ensure each
	// container makes an explicit request or limit for a quota tracked resource
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	for i := range pod.Spec.Containers {
		requests := pod.Spec.Containers[i].Resources.Requests
		limits := pod.Spec.Containers[i].Resources.Limits
		containerUsage := podUsageHelper(requests, limits)
		containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
		if !containerSet.Equal(requiredSet) {
			difference := requiredSet.Difference(containerSet)
			missingSet.Insert(difference.List()...)
		}
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
// Constraints verifies that all required resources are present on the item
func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
	service, ok := item.(*api.Service)
	if !ok {
		return fmt.Errorf("unexpected input object %v", item)
	}
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	serviceUsage, err := p.Usage(service)
	if err != nil {
		return err
	}
	serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
	if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
		missingSet.Insert(diff.List()...)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
func TestServiceEvaluatorMatchesResources(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	evaluator := NewServiceEvaluator(kubeClient)
	// we give a lot of resources
	input := []api.ResourceName{
		api.ResourceConfigMaps,
		api.ResourceCPU,
		api.ResourceServices,
		api.ResourceServicesNodePorts,
		api.ResourceServicesLoadBalancers,
	}
	// but we only match these...
	expected := quota.ToSet([]api.ResourceName{
		api.ResourceServices,
		api.ResourceServicesNodePorts,
		api.ResourceServicesLoadBalancers,
	})
	actual := quota.ToSet(evaluator.MatchingResources(input))
	if !expected.Equal(actual) {
		t.Errorf("expected: %v, actual: %v", expected, actual)
	}
}
// Constraints verifies that all required resources are present on the item.
func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
	pvc, ok := item.(*api.PersistentVolumeClaim)
	if !ok {
		return fmt.Errorf("unexpected input object %v", item)
	}

	// these are the items that we will be handling based on the object's actual storage class
	pvcRequiredSet := append([]api.ResourceName{}, pvcResources...)
	if storageClassRef := util.GetClaimStorageClass(pvc); len(storageClassRef) > 0 {
		pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourcePersistentVolumeClaims))
		pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourceRequestsStorage))
	}

	// in effect, this will remove things from the required set that are not tied to this pvc's storage class.
	// for example, if a quota has bronze and gold storage class items defined, we should not error a bronze pvc
	// for not being gold, but we should error a bronze pvc if it doesn't make a storage request size...
	requiredResources := quota.Intersection(required, pvcRequiredSet)
	requiredSet := quota.ToSet(requiredResources)

	// usage for this pvc will only include global pvc items + this storage class specific items
	pvcUsage, err := p.Usage(item)
	if err != nil {
		return err
	}

	// determine what required resources were not tracked by usage.
	missingSet := sets.NewString()
	pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
	if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
		missingSet.Insert(diff.List()...)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
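To make the bronze/gold comment above concrete, a hypothetical walk-through of the intersection step using only the helpers shown in this section; the function name and storage class names are illustrative, not part of the original source.

// exampleBronzeIntersection sketches (hypothetically) how the required set is
// narrowed for a claim whose storage class is "bronze": the gold-scoped
// resource drops out, so the claim is never faulted for not being gold.
func exampleBronzeIntersection() []api.ResourceName {
	// resources the quota tracks
	required := []api.ResourceName{
		api.ResourcePersistentVolumeClaims,
		ResourceByStorageClass("bronze", api.ResourceRequestsStorage),
		ResourceByStorageClass("gold", api.ResourceRequestsStorage),
	}
	// resources a bronze claim can be expected to account for
	pvcRequiredSet := []api.ResourceName{
		api.ResourcePersistentVolumeClaims,
		api.ResourceRequestsStorage,
		ResourceByStorageClass("bronze", api.ResourcePersistentVolumeClaims),
		ResourceByStorageClass("bronze", api.ResourceRequestsStorage),
	}
	// result: persistentvolumeclaims and the bronze requests.storage only
	return quota.Intersection(required, pvcRequiredSet)
}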
// Constraints verifies that all required resources are present on the pod
// In addition, it validates that the resources are valid (i.e. requests < limits)
func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
	pod, ok := item.(*api.Pod)
	if !ok {
		return fmt.Errorf("unexpected input object %v", item)
	}

	// Pod level resources are often set during admission control
	// As a consequence, we want to verify that resources are valid prior
	// to ever charging quota prematurely in case they are not.
	allErrs := field.ErrorList{}
	fldPath := field.NewPath("spec").Child("containers")
	for i, ctr := range pod.Spec.Containers {
		allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, fldPath.Index(i).Child("resources"))...)
	}
	fldPath = field.NewPath("spec").Child("initContainers")
	for i, ctr := range pod.Spec.InitContainers {
		allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, fldPath.Index(i).Child("resources"))...)
	}
	if len(allErrs) > 0 {
		return allErrs.ToAggregate()
	}

	// TODO: fix this when we have pod level resource requirements
	// since we do not yet have pod-level requests/limits, we need to ensure each
	// container makes an explicit request or limit for a quota tracked resource
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	for i := range pod.Spec.Containers {
		enforcePodContainerConstraints(&pod.Spec.Containers[i], requiredSet, missingSet)
	}
	for i := range pod.Spec.InitContainers {
		enforcePodContainerConstraints(&pod.Spec.InitContainers[i], requiredSet, missingSet)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
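A hedged sketch of the validation path above, assuming this tree's internal api types and an already-constructed podEvaluator; the function and variable names are illustrative, not part of the original source. A container that requests more CPU than its limit is rejected with an aggregated field error before the missing-resource check, and before any quota is charged.

// exampleInvalidPodResources is a hypothetical check (not from the original
// source): requests > limits fails ValidateResourceRequirements, so the
// quota-tracking constraints below it are never evaluated.
func exampleInvalidPodResources(p *podEvaluator) error {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name: "app",
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("2")},
					Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("1")},
				},
			}},
		},
	}
	// Expected: the aggregated field error, not a "must specify" error.
	return p.Constraints([]api.ResourceName{api.ResourceCPU}, pod)
}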